Compare commits


No commits in common. "c7f0412effab1a2794b37aa15e776a4b7f358d84" and "9a0e02567b5a899b1f55437a5430ce7ff45dd8ab" have entirely different histories.

25 changed files with 3132 additions and 2166 deletions

View file

@ -1,123 +0,0 @@
use comet_resources::Vertex;
use wgpu::util::DeviceExt;
use wgpu::{BufferUsages, Device};
pub struct Batch {
label: String,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
}
impl Batch {
pub fn new(
label: String,
device: &Device,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>,
) -> Self {
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", &label).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
let num_indices = index_data.len() as u32;
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", &label).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
});
Self {
label,
vertex_data,
index_data,
vertex_buffer,
index_buffer,
num_indices,
}
}
pub fn vertex_buffer(&self) -> &wgpu::Buffer {
&self.vertex_buffer
}
pub fn vertex_data(&self) -> &Vec<Vertex> {
&self.vertex_data
}
pub fn index_buffer(&self) -> &wgpu::Buffer {
&self.index_buffer
}
pub fn index_data(&self) -> &Vec<u16> {
&self.index_data
}
pub fn num_indices(&self) -> u32 {
self.num_indices
}
pub fn update_vertex_buffer(
&mut self,
device: &Device,
queue: &wgpu::Queue,
vertex_data: Vec<Vertex>,
) {
let new_vertex_size = vertex_data.len() as u64 * size_of::<Vertex>() as u64;
match vertex_data == self.vertex_data {
true => {}
false => {
match new_vertex_size > self.vertex_buffer.size() {
false => queue.write_buffer(
&self.vertex_buffer,
0,
bytemuck::cast_slice(&vertex_data),
),
true => {
self.vertex_buffer =
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", self.label).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
}
}
self.vertex_data = vertex_data;
}
}
}
pub fn update_index_buffer(
&mut self,
device: &Device,
queue: &wgpu::Queue,
index_data: Vec<u16>,
) {
let new_index_size = index_data.len() as u64 * size_of::<u16>() as u64;
match index_data == self.index_data {
true => {}
false => {
match new_index_size > self.index_buffer.size() {
false => {
queue.write_buffer(&self.index_buffer, 0, bytemuck::cast_slice(&index_data))
}
true => {
self.index_buffer =
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", self.label).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
});
}
}
self.num_indices = index_data.len() as u32;
self.index_data = index_data;
}
}
}
}
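For illustration only (not part of this compare): a minimal sketch of how the Batch above is meant to be driven each frame, assuming an already-initialized wgpu Device and Queue. Unchanged data is skipped, shrinking data is written in place with write_buffer, and growing data triggers buffer recreation, all inside the methods shown above.

```rust
// Hypothetical call site, not from the repository.
use comet_resources::Vertex;
use wgpu::{Device, Queue};

fn upload_frame_geometry(
    batch: &mut Batch, // the Batch type defined above
    device: &Device,
    queue: &Queue,
    vertices: Vec<Vertex>,
    indices: Vec<u16>,
) {
    // Batch decides internally whether to reuse the existing buffers or reallocate.
    batch.update_vertex_buffer(device, queue, vertices);
    batch.update_index_buffer(device, queue, indices);
}
```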

View file

@ -1,116 +1,318 @@
use comet_ecs::{Camera2D, Transform2D};
use comet_log::fatal;
use comet_math::{m4, v2, v3};
pub struct CameraManager {
cameras: Vec<RenderCamera>,
active_camera: usize,
}
impl CameraManager {
pub fn new() -> Self {
Self {
cameras: Vec::new(),
active_camera: 0,
}
}
pub fn set_cameras(&mut self, cameras: Vec<RenderCamera>) {
self.cameras = cameras
}
pub fn set_active(&mut self, active: usize) {
if active >= self.cameras.len() {
fatal!("Active camera index is out of range of the RenderCamera array!")
}
}
pub fn get_camera(&self) -> &RenderCamera {
self.cameras.get(self.active_camera).unwrap()
}
pub fn update_from_scene(&mut self, scene: &comet_ecs::Scene, camera_entities: Vec<usize>) {
self.cameras.clear();
let mut cameras_with_priority: Vec<(RenderCamera, u8)> = Vec::new();
for entity in camera_entities {
let camera_component = scene.get_component::<Camera2D>(entity).unwrap();
let transform_component = scene.get_component::<Transform2D>(entity).unwrap();
let render_cam = RenderCamera::new(
camera_component.zoom(),
camera_component.dimensions(),
v3::new(
transform_component.position().as_vec().x(),
transform_component.position().as_vec().y(),
0.0,
),
);
cameras_with_priority.push((render_cam, camera_component.priority()));
}
if cameras_with_priority.is_empty() {
return;
}
cameras_with_priority.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
self.cameras = cameras_with_priority.into_iter().map(|(c, _)| c).collect();
self.active_camera = 0;
}
pub fn has_active_camera(&self) -> bool {
!self.cameras.is_empty()
}
}
pub struct RenderCamera {
zoom: f32,
dimension: v2,
position: v3,
}
impl RenderCamera {
pub fn new(zoom: f32, dimension: v2, position: v3) -> Self {
Self {
zoom,
dimension,
position,
}
}
pub fn build_view_projection_matrix(&self) -> m4 {
let zoomed_width = self.dimension.x() / self.zoom;
let zoomed_height = self.dimension.y() / self.zoom;
m4::OPENGL_CONV
* m4::orthographic_projection(
self.position.x() - zoomed_width / 2.0,
self.position.x() + zoomed_width / 2.0,
self.position.y() - zoomed_height / 2.0,
self.position.y() + zoomed_height / 2.0,
1.0,
0.0,
)
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct CameraUniform {
view_proj: [[f32; 4]; 4],
}
impl CameraUniform {
pub fn new() -> Self {
use cgmath::SquareMatrix;
Self {
view_proj: cgmath::Matrix4::identity().into(),
}
}
pub fn update_view_proj(&mut self, camera: &RenderCamera) {
self.view_proj = camera.build_view_projection_matrix().into();
}
}

use comet_math::{m4, p3, v2, v3};
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.5,
0.0, 0.0, 0.0, 1.0,
);
const SAFE_FRAC_PI_2: f32 = std::f32::consts::FRAC_PI_2 - 0.0001;
pub struct RenderCamera {
zoom: f32,
dimension: v2,
position: v3
}
impl RenderCamera {
pub fn new(
zoom: f32,
dimension: v2,
position: v3
) -> Self {
Self {
zoom,
dimension,
position
}
}
pub fn build_view_projection_matrix(&self) -> m4 {
let zoomed_width = self.dimension.x() / self.zoom;
let zoomed_height = self.dimension.y() / self.zoom;
m4::OPENGL_CONV * m4::orthographic_projection(self.position.x() - zoomed_width / 2.0,
self.position.x() + zoomed_width / 2.0,
self.position.y() - zoomed_height / 2.0,
self.position.y() + zoomed_height / 2.0,
1.0,
0.0)
/*OPENGL_TO_WGPU_MATRIX * cgmath::ortho(self.position.x() - zoomed_width / 2.0,
self.position.x() + zoomed_width / 2.0,
self.position.y() - zoomed_height / 2.0,
self.position.y() + zoomed_height / 2.0,
1.0,
0.0)*/
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct CameraUniform {
view_proj: [[f32; 4]; 4],
}
impl CameraUniform {
pub fn new() -> Self {
use cgmath::SquareMatrix;
Self {
view_proj: cgmath::Matrix4::identity().into(),
}
}
pub fn update_view_proj(&mut self, camera: &RenderCamera) {
self.view_proj = camera.build_view_projection_matrix().into();
}
}
/*use comet_math::{Mat4, Point3, Vec3};
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: Mat4 = Mat4::new(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
pub struct Camera {
eye: Point3,
target: Point3,
up: Vec3,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
pub fn new(eye: Point3, target: Point3, up: Vec3, aspect: f32, fovy: f32, znear: f32, zfar: f32) -> Self {
Self {
eye,
target,
up,
aspect,
fovy,
znear,
zfar,
}
}
pub fn build_view_projection_matrix(&self) -> Mat4 {
let view = Mat4::look_at_rh(self.eye, self.target, self.up);
let proj = Mat4::perspective_matrix(self.fovy, self.aspect, self.znear, self.zfar);
(OPENGL_TO_WGPU_MATRIX * proj * view).transpose()
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct CameraUniform {
view_proj: [[f32; 4]; 4],
}
impl CameraUniform {
pub fn new() -> Self {
Self {
view_proj: Mat4::IDENTITY.into(),
}
}
pub fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = camera.build_view_projection_matrix().into();
}
}*/
/*use std::f32::consts::FRAC_PI_2;
use std::time::Duration;
use winit::dpi::PhysicalPosition;
use winit::event::*;
use winit::keyboard::KeyCode;
const SAFE_FRAC_PI_2: f32 = FRAC_PI_2 - 0.0001;
#[derive(Debug)]
pub struct Camera3D {
pub position: Point3,
yaw: f32,
pitch: f32,
}
impl Camera3D {
pub fn new(
position: Point3,
yaw: f32,
pitch: f32,
) -> Self {
Self {
position: position.into(),
yaw: yaw.into(),
pitch: pitch.into(),
}
}
pub fn calc_matrix(&self) -> Mat4 {
let (sin_pitch, cos_pitch) = self.pitch.0.sin_cos();
let (sin_yaw, cos_yaw) = self.yaw.0.sin_cos();
Mat4::look_to_rh(
self.position,
Vec3::new(cos_pitch * cos_yaw, sin_pitch, cos_pitch * sin_yaw).normalize(),
Vec3::unit_y(),
)
}
}
pub struct Projection {
aspect: f32,
fovy: Rad<f32>,
znear: f32,
zfar: f32,
}
impl Projection {
pub fn new<F: Into<Rad<f32>>>(width: u32, height: u32, fovy: F, znear: f32, zfar: f32) -> Self {
Self {
aspect: width as f32 / height as f32,
fovy: fovy.into(),
znear,
zfar,
}
}
pub fn resize(&mut self, width: u32, height: u32) {
self.aspect = width as f32 / height as f32;
}
pub fn calc_matrix(&self) -> Matrix4<f32> {
// UDPATE
perspective(self.fovy, self.aspect, self.znear, self.zfar)
}
}
#[derive(Debug)]
pub struct CameraController {
amount_left: f32,
amount_right: f32,
amount_forward: f32,
amount_backward: f32,
amount_up: f32,
amount_down: f32,
rotate_horizontal: f32,
rotate_vertical: f32,
scroll: f32,
speed: f32,
sensitivity: f32,
}
impl CameraController {
pub fn new(speed: f32, sensitivity: f32) -> Self {
Self {
amount_left: 0.0,
amount_right: 0.0,
amount_forward: 0.0,
amount_backward: 0.0,
amount_up: 0.0,
amount_down: 0.0,
rotate_horizontal: 0.0,
rotate_vertical: 0.0,
scroll: 0.0,
speed,
sensitivity,
}
}
pub fn process_keyboard(&mut self, key: KeyCode, state: ElementState) -> bool {
let amount = if state == ElementState::Pressed {
1.0
} else {
0.0
};
match key {
KeyCode::KeyW | KeyCode::ArrowUp => {
self.amount_forward = amount;
true
}
KeyCode::KeyS | KeyCode::ArrowDown => {
self.amount_backward = amount;
true
}
KeyCode::KeyA | KeyCode::ArrowLeft => {
self.amount_left = amount;
true
}
KeyCode::KeyD | KeyCode::ArrowRight => {
self.amount_right = amount;
true
}
KeyCode::Space => {
self.amount_up = amount;
true
}
KeyCode::ShiftLeft => {
self.amount_down = amount;
true
}
_ => false,
}
}
pub fn process_mouse(&mut self, mouse_dx: f64, mouse_dy: f64) {
self.rotate_horizontal = mouse_dx as f32;
self.rotate_vertical = mouse_dy as f32;
}
pub fn process_scroll(&mut self, delta: &MouseScrollDelta) {
self.scroll = match delta {
// I'm assuming a line is about 100 pixels
MouseScrollDelta::LineDelta(_, scroll) => -scroll * 0.5,
MouseScrollDelta::PixelDelta(PhysicalPosition { y: scroll, .. }) => -*scroll as f32,
};
}
pub fn update_camera(&mut self, camera: &mut Camera, dt: Duration) {
let dt = dt.as_secs_f32();
// Move forward/backward and left/right
let (yaw_sin, yaw_cos) = camera.yaw.0.sin_cos();
let forward = Vector3::new(yaw_cos, 0.0, yaw_sin).normalize();
let right = Vector3::new(-yaw_sin, 0.0, yaw_cos).normalize();
camera.position += forward * (self.amount_forward - self.amount_backward) * self.speed * dt;
camera.position += right * (self.amount_right - self.amount_left) * self.speed * dt;
// Move in/out (aka. "zoom")
// Note: this isn't an actual zoom. The camera's position
// changes when zooming. I've added this to make it easier
// to get closer to an object you want to focus on.
let (pitch_sin, pitch_cos) = camera.pitch.0.sin_cos();
let scrollward =
Vector3::new(pitch_cos * yaw_cos, pitch_sin, pitch_cos * yaw_sin).normalize();
camera.position += scrollward * self.scroll * self.speed * self.sensitivity * dt;
self.scroll = 0.0;
// Move up/down. Since we don't use roll, we can just
// modify the y coordinate directly.
camera.position.y += (self.amount_up - self.amount_down) * self.speed * dt;
// Rotate
camera.yaw += Rad(self.rotate_horizontal) * self.sensitivity * dt;
camera.pitch += Rad(-self.rotate_vertical) * self.sensitivity * dt;
// If process_mouse isn't called every frame, these values
// will not get set to zero, and the camera will rotate
// when moving in a non cardinal direction.
self.rotate_horizontal = 0.0;
self.rotate_vertical = 0.0;
// Keep the camera's angle from going too high/low.
if camera.pitch < -Rad(SAFE_FRAC_PI_2) {
camera.pitch = -Rad(SAFE_FRAC_PI_2);
} else if camera.pitch > Rad(SAFE_FRAC_PI_2) {
camera.pitch = Rad(SAFE_FRAC_PI_2);
}
}
}*/
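A hedged sketch, not taken from this compare, of how a CameraUniform like the one above is typically re-uploaded each frame. It assumes the uniform buffer was created with BufferUsages::UNIFORM | BufferUsages::COPY_DST, as the renderer further down in this diff does.

```rust
// Hypothetical per-frame update; `camera_buffer` is assumed to back the camera bind group.
fn update_camera_uniform(
    queue: &wgpu::Queue,
    camera: &RenderCamera,
    camera_uniform: &mut CameraUniform,
    camera_buffer: &wgpu::Buffer,
) {
    // Rebuild the view-projection matrix and copy the plain-old-data struct to the GPU.
    camera_uniform.update_view_proj(camera);
    queue.write_buffer(camera_buffer, 0, bytemuck::cast_slice(&[*camera_uniform]));
}
```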

View file

@ -0,0 +1,151 @@
use wgpu::{BindGroupLayout, BufferUsages, Device};
use wgpu::util::DeviceExt;
use comet_resources::{Texture, Vertex};
use comet_log::*;
pub struct DrawInfo {
name: String,
texture: wgpu::BindGroup,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
}
impl DrawInfo {
pub fn new(
name: String,
device: &Device,
texture: &Texture,
texture_bind_group_layout: &BindGroupLayout,
texture_sampler: &wgpu::Sampler,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>
) -> Self {
let texture_bind = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture_sampler),
},
],
label: Some(format!("{} Texture", name).as_str()),
});
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", &name).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
let num_indices = index_data.len() as u32;
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", &name).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
});
Self {
name,
texture: texture_bind,
vertex_data,
index_data,
vertex_buffer,
index_buffer,
num_indices
}
}
pub fn name(&self) -> &String {
&self.name
}
pub fn texture(&self) -> &wgpu::BindGroup {
&self.texture
}
pub fn vertex_buffer(&self) -> &wgpu::Buffer {
&self.vertex_buffer
}
pub fn vertex_data(&self) -> &Vec<Vertex> {
&self.vertex_data
}
pub fn index_buffer(&self) -> &wgpu::Buffer {
&self.index_buffer
}
pub fn index_data(&self) -> &Vec<u16> {
&self.index_data
}
pub fn num_indices(&self) -> u32 {
self.num_indices
}
pub fn update_vertex_buffer(&mut self, device: &Device, queue: &wgpu::Queue, vertex_data: Vec<Vertex>) {
let new_vertex_size = vertex_data.len() as u64 * size_of::<Vertex>() as u64;
match vertex_data == self.vertex_data {
true => {},
false => {
match new_vertex_size > self.vertex_buffer.size() {
false => queue.write_buffer(&self.vertex_buffer, 0, bytemuck::cast_slice(&vertex_data)),
true => {
self.vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", self.name).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
}
}
self.vertex_data = vertex_data;
}
}
}
pub fn update_index_buffer(&mut self, device: &Device, queue: &wgpu::Queue, index_data: Vec<u16>) {
let new_index_size = index_data.len() as u64 * size_of::<u16>() as u64;
match index_data == self.index_data {
true => {},
false => {
match new_index_size > self.index_buffer.size() {
false => queue.write_buffer(&self.index_buffer, 0, bytemuck::cast_slice(&index_data)),
true => {
self.index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", self.name).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
});
}
}
self.num_indices = index_data.len() as u32;
self.index_data = index_data;
}
}
}
pub fn set_texture(&mut self, device: &Device, layout: &BindGroupLayout, texture: &Texture) {
self.texture = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
},
],
label: Some(format!("{} Texture Bind Group", self.name).as_str()),
});
}
}

View file

@ -1,7 +1,7 @@
mod batch;
mod camera;
pub mod render_context;
mod render_pass;
pub mod render_resources;
pub mod renderer;
pub mod renderer2d;

mod camera;
mod draw_info;
mod render_group;
mod render_pass;
pub mod renderer;
pub mod renderer2d;
pub mod renderer2d_;

View file

@ -0,0 +1,134 @@
use std::ops::Range;
use crate::texture;
pub trait Vertex {
fn desc() -> wgpu::VertexBufferLayout<'static>;
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct ModelVertex {
pub position: [f32; 3],
pub tex_coords: [f32; 2],
pub normal: [f32; 3],
}
impl Vertex for ModelVertex {
fn desc() -> wgpu::VertexBufferLayout<'static> {
use std::mem;
wgpu::VertexBufferLayout {
array_stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x2,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float32x3,
},
],
}
}
}
pub struct Material {
#[allow(unused)]
pub name: String,
#[allow(unused)]
pub diffuse_texture: texture::Texture,
pub bind_group: wgpu::BindGroup,
}
pub struct Mesh {
#[allow(unused)]
pub name: String,
pub vertex_buffer: wgpu::Buffer,
pub index_buffer: wgpu::Buffer,
pub num_elements: u32,
pub material: usize,
}
pub struct Model {
pub meshes: Vec<Mesh>,
pub materials: Vec<Material>,
}
pub trait DrawModel<'a> {
#[allow(unused)]
fn draw_mesh(
&mut self,
mesh: &'a Mesh,
material: &'a Material,
camera_bind_group: &'a wgpu::BindGroup,
);
fn draw_mesh_instanced(
&mut self,
mesh: &'a Mesh,
material: &'a Material,
instances: Range<u32>,
camera_bind_group: &'a wgpu::BindGroup,
);
#[allow(unused)]
fn draw_model(&mut self, model: &'a Model, camera_bind_group: &'a wgpu::BindGroup);
fn draw_model_instanced(
&mut self,
model: &'a Model,
instances: Range<u32>,
camera_bind_group: &'a wgpu::BindGroup,
);
}
impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
where
'b: 'a,
{
fn draw_mesh(
&mut self,
mesh: &'b Mesh,
material: &'b Material,
camera_bind_group: &'b wgpu::BindGroup,
) {
self.draw_mesh_instanced(mesh, material, 0..1, camera_bind_group);
}
fn draw_mesh_instanced(
&mut self,
mesh: &'b Mesh,
material: &'b Material,
instances: Range<u32>,
camera_bind_group: &'b wgpu::BindGroup,
) {
self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
self.set_bind_group(0, &material.bind_group, &[]);
self.set_bind_group(1, camera_bind_group, &[]);
self.draw_indexed(0..mesh.num_elements, 0, instances);
}
fn draw_model(&mut self, model: &'b Model, camera_bind_group: &'b wgpu::BindGroup) {
self.draw_model_instanced(model, 0..1, camera_bind_group);
}
fn draw_model_instanced(
&mut self,
model: &'b Model,
instances: Range<u32>,
camera_bind_group: &'b wgpu::BindGroup,
) {
for mesh in &model.meshes {
let material = &model.materials[mesh.material];
self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group);
}
}
}
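A short usage sketch (assumed, not in the diff) for the DrawModel trait above. It presumes the trait is in scope and that the bound pipeline matches the trait's own convention: material bind group at slot 0, camera bind group at slot 1.

```rust
// Hypothetical call site; `DrawModel` as defined above must be imported.
fn draw_scene<'a>(
    rpass: &mut wgpu::RenderPass<'a>,
    model: &'a Model,
    camera_bind_group: &'a wgpu::BindGroup,
    instance_count: u32,
) {
    // Draws every mesh of the model with its own material, instanced.
    rpass.draw_model_instanced(model, 0..instance_count, camera_bind_group);
}
```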

View file

@ -0,0 +1,313 @@
use crate::camera::{CameraUniform, RenderCamera};
use crate::draw_info::DrawInfo;
use crate::render_pass::{RenderPassInfo, RenderPassType};
use crate::renderer::Renderer;
use comet_colors::Color;
use comet_ecs::{Camera2D, Component, Position2D, Render, Render2D, Scene, Text, Transform2D};
use comet_log::*;
use comet_math::{p2, p3, v2, v3};
use comet_resources::texture_atlas::TextureRegion;
use comet_resources::{graphic_resource_manager::GraphicResourceManager, Texture, Vertex};
use comet_structs::ComponentSet;
use std::iter;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use wgpu::core::command::DrawKind::Draw;
use wgpu::naga::ShaderStage;
use wgpu::util::DeviceExt;
use wgpu::BufferUsages;
use winit::dpi::PhysicalSize;
use winit::window::Window;
pub struct Renderer2D<'a> {
surface: wgpu::Surface<'a>,
device: wgpu::Device,
queue: wgpu::Queue,
config: wgpu::SurfaceConfiguration,
size: PhysicalSize<u32>,
render_pipeline_layout: wgpu::PipelineLayout,
universal_render_pipeline: wgpu::RenderPipeline,
texture_bind_group_layout: wgpu::BindGroupLayout,
dummy_texture_bind_group: wgpu::BindGroup,
texture_sampler: wgpu::Sampler,
camera: RenderCamera,
camera_uniform: CameraUniform,
camera_buffer: wgpu::Buffer,
camera_bind_group: wgpu::BindGroup,
render_pass: Vec<RenderPassInfo>,
draw_info: Vec<DrawInfo>,
graphic_resource_manager: GraphicResourceManager,
delta_time: f32,
last_frame_time: Instant,
clear_color: wgpu::Color,
}
impl<'a> Renderer2D<'a> {
pub fn new(window: Arc<Window>, clear_color: Option<impl Color>) -> Renderer2D<'a> {
let size = PhysicalSize::<u32>::new(1920, 1080);
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::Backends::PRIMARY,
..Default::default()
});
let surface = instance.create_surface(window).unwrap();
let adapter = pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
force_fallback_adapter: false,
}))
.unwrap();
let (device, queue) = pollster::block_on(adapter.request_device(
&wgpu::DeviceDescriptor {
label: None,
required_features: wgpu::Features::empty(),
required_limits: wgpu::Limits::default(),
memory_hints: Default::default(),
},
None, // Trace path
))
.unwrap();
let surface_caps = surface.get_capabilities(&adapter);
let surface_format = surface_caps
.formats
.iter()
.copied()
.find(|f| f.is_srgb())
.unwrap_or(surface_caps.formats[0]);
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface_format,
width: size.width,
height: size.height,
present_mode: surface_caps.present_modes[0],
alpha_mode: surface_caps.alpha_modes[0],
view_formats: vec![],
desired_maximum_frame_latency: 2,
};
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
source: wgpu::ShaderSource::Wgsl(include_str!("base2d.wgsl").into()),
});
let graphic_resource_manager = GraphicResourceManager::new();
let diffuse_bytes = include_bytes!(r"../../../res/textures/comet_icon.png");
let diffuse_texture =
Texture::from_bytes(&device, &queue, diffuse_bytes, "comet_icon.png", false).unwrap();
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
label: Some("texture_bind_group_layout"),
});
let camera = RenderCamera::new(1.0, v2::new(2.0, 2.0), v3::new(0.0, 0.0, 0.0));
let mut camera_uniform = CameraUniform::new();
camera_uniform.update_view_proj(&camera);
let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Camera Buffer"),
contents: bytemuck::cast_slice(&[camera_uniform]),
usage: BufferUsages::UNIFORM | BufferUsages::COPY_DST,
});
let camera_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
label: Some("camera_bind_group_layout"),
});
let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &camera_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: camera_buffer.as_entire_binding(),
}],
label: Some("camera_bind_group"),
});
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&texture_bind_group_layout, &camera_bind_group_layout],
push_constant_ranges: &[],
});
let universal_render_pipeline =
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[Vertex::desc()],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: config.format,
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
}),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
multiview: None,
cache: None,
});
let mut render_pass: Vec<RenderPassInfo> = Vec::new();
/*render_pass.push(RenderPassInfo::new_engine_pass(
&device,
"Standard Render Pass".to_string(),
&texture_bind_group_layout,
&diffuse_texture,
vec![],
vec![],
));*/
let clear_color = match clear_color {
Some(color) => color.to_wgpu(),
None => wgpu::Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 1.0,
},
};
let texture_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Linear,
lod_min_clamp: 0.0,
lod_max_clamp: 100.0,
compare: None,
anisotropy_clamp: 16,
border_color: None,
..Default::default()
});
let empty_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Empty Texture"),
size: wgpu::Extent3d {
width: config.width,
height: config.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::COPY_SRC
| wgpu::TextureUsages::COPY_DST
| wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[wgpu::TextureFormat::Bgra8UnormSrgb],
});
let dummy_texture_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(
&empty_texture.create_view(&wgpu::TextureViewDescriptor::default()),
),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture_sampler),
},
],
label: Some("dummy_texture_bind_group"),
});
let mut draw_info: Vec<DrawInfo> = Vec::new();
Self {
surface,
device,
queue,
config,
size,
render_pipeline_layout,
universal_render_pipeline,
texture_bind_group_layout,
dummy_texture_bind_group,
texture_sampler,
camera,
camera_uniform,
camera_buffer,
camera_bind_group,
render_pass,
draw_info,
graphic_resource_manager,
delta_time: 0.0,
last_frame_time: Instant::now(),
clear_color,
}
}
}

View file

@ -1,180 +0,0 @@
use crate::{batch::Batch, render_resources::RenderResources};
use comet_colors::Color;
use comet_resources::Vertex;
use std::{collections::HashMap, sync::Arc};
use winit::{dpi::PhysicalSize, window::Window};
pub struct RenderContext<'a> {
device: wgpu::Device,
queue: wgpu::Queue,
surface: wgpu::Surface<'a>,
config: wgpu::SurfaceConfiguration,
size: PhysicalSize<u32>,
scale_factor: f64,
clear_color: wgpu::Color,
render_pipelines: HashMap<String, wgpu::RenderPipeline>,
batches: HashMap<String, Batch>,
resources: RenderResources,
}
impl<'a> RenderContext<'a> {
pub fn new(window: Arc<Window>, clear_color: Option<impl Color>) -> Self {
let size = window.inner_size();
let scale_factor = window.scale_factor();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::Backends::PRIMARY,
..Default::default()
});
let surface = instance.create_surface(window).unwrap();
let adapter = pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
force_fallback_adapter: false,
}))
.unwrap();
let (device, queue) = pollster::block_on(adapter.request_device(
&wgpu::DeviceDescriptor {
label: None,
required_features: wgpu::Features::empty(),
required_limits: wgpu::Limits::default(),
memory_hints: Default::default(),
},
None,
))
.unwrap();
let surface_caps = surface.get_capabilities(&adapter);
let surface_format = surface_caps
.formats
.iter()
.copied()
.find(|f| f.is_srgb())
.unwrap_or(surface_caps.formats[0]);
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface_format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
alpha_mode: surface_caps.alpha_modes[0],
view_formats: vec![],
desired_maximum_frame_latency: 2,
};
let clear_color = match clear_color {
Some(color) => color.to_wgpu(),
None => wgpu::Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 1.0,
},
};
Self {
device,
queue,
surface,
config,
size,
scale_factor,
clear_color,
render_pipelines: HashMap::new(),
batches: HashMap::new(),
resources: RenderResources::new(),
}
}
pub fn device(&self) -> &wgpu::Device {
&self.device
}
pub fn queue(&self) -> &wgpu::Queue {
&self.queue
}
pub fn surface(&self) -> &wgpu::Surface {
&self.surface
}
pub fn configure_surface(&mut self) {
self.surface.configure(&self.device, &self.config);
}
pub fn config(&self) -> &wgpu::SurfaceConfiguration {
&self.config
}
pub fn config_mut(&mut self) -> &mut wgpu::SurfaceConfiguration {
&mut self.config
}
pub fn size(&self) -> PhysicalSize<u32> {
self.size
}
pub fn set_size(&mut self, new_size: PhysicalSize<u32>) {
self.size = new_size
}
pub fn scale_factor(&self) -> f64 {
self.scale_factor
}
pub fn set_scale_factor(&mut self, scale_factor: f64) {
self.scale_factor = scale_factor
}
pub fn clear_color(&self) -> wgpu::Color {
self.clear_color
}
pub fn insert_pipeline(&mut self, label: String, pipeline: wgpu::RenderPipeline) {
self.render_pipelines.insert(label, pipeline);
}
pub fn get_pipeline(&self, label: String) -> Option<&wgpu::RenderPipeline> {
self.render_pipelines.get(&label)
}
pub fn get_batch(&self, label: String) -> Option<&Batch> {
self.batches.get(&label)
}
pub fn get_batch_mut(&mut self, label: String) -> Option<&mut Batch> {
self.batches.get_mut(&label)
}
pub fn new_batch(&mut self, label: String, vertex_data: Vec<Vertex>, index_data: Vec<u16>) {
self.batches.insert(
label.clone(),
Batch::new(label, &self.device, vertex_data, index_data),
);
}
pub fn update_batch_buffers(
&mut self,
label: String,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>,
) {
if let Some(batch) = self.batches.get_mut(&label) {
batch.update_vertex_buffer(&self.device, &self.queue, vertex_data);
batch.update_index_buffer(&self.device, &self.queue, index_data);
} else {
let batch = Batch::new(label.clone(), &self.device, vertex_data, index_data);
self.batches.insert(label, batch);
}
}
pub fn resources(&self) -> &RenderResources {
&self.resources
}
pub fn resources_mut(&mut self) -> &mut RenderResources {
&mut self.resources
}
}

View file

@ -0,0 +1,4 @@
pub struct RenderGroup {
pipeline: wgpu::RenderPipeline,
entities: Vec<u32>
}

View file

@ -1,125 +1,338 @@
use crate::render_context::RenderContext;
pub struct RenderPass {
pub label: String,
pub execute: Box<
dyn Fn(String, &mut RenderContext, &mut wgpu::CommandEncoder, &wgpu::TextureView)
+ Send
+ Sync,
>,
}
impl RenderPass {
pub fn new(
label: String,
execute: Box<
dyn Fn(String, &mut RenderContext, &mut wgpu::CommandEncoder, &wgpu::TextureView)
+ Send
+ Sync,
>,
) -> Self {
Self { label, execute }
}
}
pub fn universal_clear_execute(
label: String,
ctx: &mut RenderContext,
encoder: &mut wgpu::CommandEncoder,
view: &wgpu::TextureView,
) {
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some(format!("{} Render Pass", label.clone()).as_str()),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(ctx.clear_color()),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
render_pass.set_pipeline(&ctx.get_pipeline(label.clone()).unwrap());
let groups = ctx.resources().get_bind_groups(&label).unwrap();
for i in 0..groups.len() {
render_pass.set_bind_group(i as u32, groups.get(i).unwrap(), &[]);
}
render_pass.set_vertex_buffer(
0,
ctx.get_batch(label.clone())
.unwrap()
.vertex_buffer()
.slice(..),
);
render_pass.set_index_buffer(
ctx.get_batch(label.clone())
.unwrap()
.index_buffer()
.slice(..),
wgpu::IndexFormat::Uint16,
);
render_pass.draw_indexed(
0..ctx.get_batch(label.clone()).unwrap().num_indices(),
0,
0..1,
);
}
pub fn universal_load_execute(
label: String,
ctx: &mut RenderContext,
encoder: &mut wgpu::CommandEncoder,
view: &wgpu::TextureView,
) {
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some(format!("{} Render Pass", label.clone()).as_str()),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Load,
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
render_pass.set_pipeline(&ctx.get_pipeline(label.clone()).unwrap());
let groups = ctx.resources().get_bind_groups(&label).unwrap();
for i in 0..groups.len() {
render_pass.set_bind_group(i as u32, groups.get(i).unwrap(), &[]);
}
render_pass.set_vertex_buffer(
0,
ctx.get_batch(label.clone())
.unwrap()
.vertex_buffer()
.slice(..),
);
render_pass.set_index_buffer(
ctx.get_batch(label.clone())
.unwrap()
.index_buffer()
.slice(..),
wgpu::IndexFormat::Uint16,
);
render_pass.draw_indexed(
0..ctx.get_batch(label.clone()).unwrap().num_indices(),
0,
0..1,
);
}

use wgpu::{ShaderModule, BindGroup, BindGroupLayout, BufferUsages, Device, Queue, RenderPipeline, PipelineLayout, SurfaceConfiguration, TextureFormat};
use wgpu::util::DeviceExt;
use comet_resources::{Vertex, Texture};
#[derive(Debug, Clone)]
pub enum RenderPassType {
Engine,
User
}
pub struct RenderPassInfo {
pass_name: String,
pass_type: RenderPassType,
texture_bind_group: BindGroup,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>,
num_indices: u32,
pipeline: Option<RenderPipeline>
}
impl RenderPassInfo {
pub fn new_user_pass(
device: &Device,
pass_name: String,
texture_group_layout: &BindGroupLayout,
texture: &Texture,
shader: &ShaderModule,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>,
pipeline_layout: &PipelineLayout,
config: &SurfaceConfiguration
) -> Self {
let num_indices = index_data.len() as u32;
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", pass_name).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", pass_name).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
});
let texture_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
},
],
label: Some(format!("{} Texture Bind Group", pass_name).as_str()),
});
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[Vertex::desc()],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: config.format,
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
}),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
multiview: None,
cache: None,
});
Self {
pass_name,
pass_type: RenderPassType::User,
texture_bind_group,
vertex_buffer,
index_buffer,
vertex_data,
index_data,
num_indices,
pipeline: Some(pipeline)
}
}
pub fn new_engine_pass(
device: &Device,
pass_name: String,
texture_group_layout: &BindGroupLayout,
texture: &Texture,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>,
) -> Self {
let num_indices = index_data.len() as u32;
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", pass_name).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", pass_name).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
});
let texture_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
},
],
label: Some(format!("{} Texture Bind Group", pass_name).as_str()),
});
Self {
pass_name,
pass_type: RenderPassType::Engine,
texture_bind_group,
vertex_buffer,
index_buffer,
vertex_data,
index_data,
num_indices,
pipeline: None
}
}
pub fn pass_name(&self) -> &str {
&self.pass_name
}
pub fn pass_type(&self) -> RenderPassType {
self.pass_type.clone()
}
pub fn set_shader(&mut self, device: &Device, config: &SurfaceConfiguration, pipeline_layout: &PipelineLayout, shader: &ShaderModule) {
self.pipeline = Some(device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[Vertex::desc()],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: config.format,
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
}),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
multiview: None,
cache: None,
}));
}
pub fn texture_bind_group(&self) -> &BindGroup {
&self.texture_bind_group
}
pub fn set_texture(&mut self, device: &Device, layout: &BindGroupLayout, texture: &Texture) {
self.texture_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
},
],
label: Some(format!("{} Texture Bind Group", self.pass_name).as_str()),
});
}
pub fn vertex_buffer(&self) -> &wgpu::Buffer {
&self.vertex_buffer
}
pub fn vertex_data(&self) -> &Vec<Vertex> {
&self.vertex_data
}
pub fn set_vertex_buffer(&mut self, device: &Device, queue: &Queue, vertex_data: Vec<Vertex>) {
let new_vertex_size = vertex_data.len() as u64 * size_of::<Vertex>() as u64;
match vertex_data == self.vertex_data {
true => {},
false => {
match new_vertex_size > self.vertex_buffer.size() {
false => queue.write_buffer(&self.vertex_buffer, 0, bytemuck::cast_slice(&vertex_data)),
true => {
self.vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", self.pass_name).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
}
}
self.vertex_data = vertex_data;
}
}
}
pub fn push_to_vertex_buffer(&mut self, device: &Device, vertex_data: &mut Vec<Vertex>) {
self.vertex_data.append(vertex_data);
self.vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", self.pass_name).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
}
pub fn index_buffer(&self) -> &wgpu::Buffer {
&self.index_buffer
}
pub fn index_data(&self) -> &Vec<u16> {
&self.index_data
}
pub fn num_indices(&self) -> u32 {
self.num_indices
}
pub fn set_index_buffer(&mut self, device: &Device, queue: &Queue, index_data: Vec<u16>) {
let new_index_size = index_data.len() as u64 * size_of::<u16>() as u64;
match index_data == self.index_data {
true => {},
false => {
match new_index_size > self.index_buffer.size() {
false => queue.write_buffer(&self.index_buffer, 0, bytemuck::cast_slice(&index_data)),
true => {
self.index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", self.pass_name).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
});
}
}
self.num_indices = index_data.len() as u32;
self.index_data = index_data
}
}
}
pub fn push_to_index_buffer(&mut self, device: &Device, index_data: &mut Vec<u16>) {
self.index_data.append(index_data);
self.index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", self.pass_name).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
});
self.num_indices = self.index_data.len() as u32;
}
pub fn pipeline(&self) -> Option<&RenderPipeline> {
self.pipeline.as_ref()
}
}
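One plausible way (an assumption, not code from this compare) to consume a RenderPassInfo inside an already-begun wgpu::RenderPass, using only the getters defined above. The fallback pipeline for engine passes and the camera bind group at slot 1 are assumptions borrowed from the renderer elsewhere in this diff.

```rust
// Hypothetical recording helper.
fn record_pass<'a>(
    rpass: &mut wgpu::RenderPass<'a>,
    info: &'a RenderPassInfo,
    fallback_pipeline: &'a wgpu::RenderPipeline, // used when the pass carries no pipeline of its own
    camera_bind_group: &'a wgpu::BindGroup,
) {
    rpass.set_pipeline(info.pipeline().unwrap_or(fallback_pipeline));
    rpass.set_bind_group(0, info.texture_bind_group(), &[]);
    rpass.set_bind_group(1, camera_bind_group, &[]);
    rpass.set_vertex_buffer(0, info.vertex_buffer().slice(..));
    rpass.set_index_buffer(info.index_buffer().slice(..), wgpu::IndexFormat::Uint16);
    rpass.draw_indexed(0..info.num_indices(), 0, 0..1);
}
```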

View file

@ -1,133 +0,0 @@
use comet_log::error;
use std::{collections::HashMap, sync::Arc};
pub struct RenderResources {
bind_groups: HashMap<String, Vec<Arc<wgpu::BindGroup>>>,
bind_group_layouts: HashMap<String, Vec<Arc<wgpu::BindGroupLayout>>>,
buffers: HashMap<String, Vec<Arc<wgpu::Buffer>>>,
samplers: HashMap<String, wgpu::Sampler>,
}
impl RenderResources {
pub fn new() -> Self {
Self {
bind_groups: HashMap::new(),
bind_group_layouts: HashMap::new(),
buffers: HashMap::new(),
samplers: HashMap::new(),
}
}
pub fn get_bind_groups(&self, label: &str) -> Option<&Vec<Arc<wgpu::BindGroup>>> {
self.bind_groups.get(label)
}
pub fn get_bind_group_layout(&self, label: &str) -> Option<&Vec<Arc<wgpu::BindGroupLayout>>> {
self.bind_group_layouts.get(label)
}
pub fn replace_bind_group_layout(
&mut self,
label: String,
pos: usize,
bind_group_layout: Arc<wgpu::BindGroupLayout>,
) {
match self.bind_group_layouts.get_mut(&label) {
None => {
error!("Render pass {} does not exist", label);
return;
}
Some(v) => {
if v.len() <= pos {
error!(
"Position {} is out of bounds for the bind group layouts of render pass {}",
pos, label
);
return;
}
v[pos] = bind_group_layout;
}
}
}
pub fn get_buffer(&self, label: &str) -> Option<&Vec<Arc<wgpu::Buffer>>> {
self.buffers.get(label)
}
pub fn get_sampler(&self, label: &str) -> Option<&wgpu::Sampler> {
self.samplers.get(label)
}
pub fn insert_bind_group(&mut self, label: String, bind_group: Arc<wgpu::BindGroup>) {
match self.bind_groups.get_mut(&label) {
None => {
self.bind_groups.insert(label, vec![bind_group]);
}
Some(v) => v.push(bind_group),
};
}
pub fn replace_bind_group(
&mut self,
label: String,
pos: usize,
bind_group: Arc<wgpu::BindGroup>,
) {
match self.bind_groups.get_mut(&label) {
None => {
error!("Render pass {} does not exist", label);
return;
}
Some(v) => {
if v.len() <= pos {
error!(
"Position {} is out of bounds for the bind groups of render pass {}",
pos, label
);
return;
}
v[pos] = bind_group;
}
}
}
pub fn insert_bind_group_layout(&mut self, label: String, layout: Arc<wgpu::BindGroupLayout>) {
match self.bind_group_layouts.get_mut(&label) {
None => {
self.bind_group_layouts.insert(label, vec![layout]);
}
Some(v) => v.push(layout),
}
}
pub fn insert_buffer(&mut self, label: String, buffer: Arc<wgpu::Buffer>) {
match self.buffers.get_mut(&label) {
None => {
self.buffers.insert(label, vec![buffer]);
}
Some(v) => v.push(buffer),
}
}
pub fn replace_buffer(&mut self, label: String, pos: usize, buffer: Arc<wgpu::Buffer>) {
match self.buffers.get_mut(&label) {
None => {
error!("Render pass {} does not exist", label);
return;
}
Some(v) => {
if v.len() <= pos {
error!(
"Position {} is out of bounds for the buffers of render pass {}",
pos, label
);
return;
}
v[pos] = buffer;
}
}
}
pub fn insert_sampler(&mut self, label: String, sampler: wgpu::Sampler) {
self.samplers.insert(label, sampler);
}
}

crates/comet_renderer/src/renderer2d.rs (1360) — Normal file → Executable file

File diff suppressed because it is too large.

View file

@ -0,0 +1,25 @@
struct VertexInput {
@location(0) position: vec3<f32>,
@location(1) tex_coords: vec2<f32>,
@location(2) color: vec4<f32>,
}
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) tex_coords: vec2<f32>,
@location(1) color: vec4<f32>,
}
@vertex
fn vs_main(input: VertexInput) -> VertexOutput {
var out: VertexOutput;
out.clip_position = vec4(input.position, 1.0);
out.tex_coords = input.tex_coords;
out.color = input.color;
return out;
}
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
return in.color;
}
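For reference, an assumed Rust-side vertex layout matching the three @location inputs of this shader (position, tex_coords, color). The crate's actual Vertex::desc() may differ; this only makes the location-to-format mapping explicit.

```rust
// Hypothetical layout: interleaved, tightly packed position (vec3) + tex_coords (vec2) + color (vec4).
fn assumed_vertex_layout() -> wgpu::VertexBufferLayout<'static> {
    const ATTRIBUTES: [wgpu::VertexAttribute; 3] =
        wgpu::vertex_attr_array![0 => Float32x3, 1 => Float32x2, 2 => Float32x4];
    wgpu::VertexBufferLayout {
        array_stride: std::mem::size_of::<[f32; 9]>() as wgpu::BufferAddress, // 9 floats = 36 bytes
        step_mode: wgpu::VertexStepMode::Vertex,
        attributes: &ATTRIBUTES,
    }
}
```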

View file

@ -0,0 +1,188 @@
mod render_context;
use render_context::*;
use crate::renderer::Renderer;
use comet_colors::Color;
use comet_resources::{graphic_resource_manager::GraphicResourceManager, Vertex};
use std::iter;
use std::sync::Arc;
use wgpu::util::DeviceExt;
use winit::dpi::PhysicalSize;
use winit::window::Window;
pub struct Renderer2D_<'a> {
render_context: RenderContext<'a>,
universal_render_pipeline: wgpu::RenderPipeline,
graphic_resource_manager: GraphicResourceManager,
vertex_vec: Vec<Vertex>,
vertex_buffer: wgpu::Buffer,
index_vec: Vec<u32>,
index_buffer: wgpu::Buffer,
num_indices: u32,
clear_color: wgpu::Color,
}
impl<'a> Renderer2D_<'a> {
pub fn new(window: Arc<Window>, clear_color: Option<impl Color>) -> Renderer2D_<'a> {
let render_context = RenderContext::new(window.clone(), clear_color);
let graphic_resource_manager = GraphicResourceManager::new();
let clear_color = match clear_color {
Some(color) => color.to_wgpu(),
None => wgpu::Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 1.0,
},
};
let universal_renderpipeline_module =
render_context
.device
.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Universal Render Pipeline Shader Module"),
source: wgpu::ShaderSource::Wgsl(include_str!("base.wgsl").into()),
});
let universal_renderpipeline_layout =
render_context
.device
.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Universal Render Pipeline Layout"),
bind_group_layouts: &[],
push_constant_ranges: &[],
});
let universal_render_pipeline =
render_context
.device
.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Universal Render Pipeline"),
layout: Some(&universal_renderpipeline_layout),
vertex: wgpu::VertexState {
module: &universal_renderpipeline_module,
entry_point: "vs_main",
buffers: &[Vertex::desc()],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &universal_renderpipeline_module,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: render_context.config.format,
blend: Some(wgpu::BlendState::ALPHA_BLENDING),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
multiview: None,
cache: None,
});
let vertex_buffer =
render_context
.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: &[],
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
});
let index_buffer =
render_context
.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: &[],
usage: wgpu::BufferUsages::INDEX | wgpu::BufferUsages::COPY_DST,
});
Self {
render_context,
universal_render_pipeline,
graphic_resource_manager,
vertex_buffer,
vertex_vec: vec![],
index_buffer,
index_vec: vec![],
num_indices: 0,
clear_color,
}
}
}
impl<'a> Renderer for Renderer2D_<'a> {
fn new(window: Arc<Window>, clear_color: Option<impl Color>) -> Renderer2D_<'a> {
Self::new(window, clear_color)
}
fn size(&self) -> PhysicalSize<u32> {
self.render_context.size()
}
fn resize(&mut self, new_size: PhysicalSize<u32>) {
self.render_context.resize(new_size)
}
fn update(&mut self) -> f32 {
self.render_context.update()
}
fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
let output = self.render_context.surface.get_current_texture()?;
let output_view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder =
self.render_context
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Universal Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &output_view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(self.clear_color),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
render_pass.set_pipeline(&self.universal_render_pipeline);
render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
render_pass.draw_indexed(0..self.num_indices, 0, 0..1);
}
self.render_context
.queue
.submit(iter::once(encoder.finish()));
output.present();
Ok(())
}
}

View file

@ -0,0 +1,96 @@
use comet_colors::Color;
use std::sync::Arc;
use std::time::Instant;
use winit::dpi::PhysicalSize;
use winit::window::Window;
pub struct RenderContext<'a> {
pub surface: wgpu::Surface<'a>,
pub device: wgpu::Device,
pub queue: wgpu::Queue,
pub config: wgpu::SurfaceConfiguration,
pub size: PhysicalSize<u32>,
pub last_frame_time: Instant,
pub delta_time: f32,
}
impl<'a> RenderContext<'a> {
pub fn new(window: Arc<Window>, clear_color: Option<impl Color>) -> RenderContext<'a> {
let size = window.inner_size();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::Backends::PRIMARY,
..Default::default()
});
let surface = instance.create_surface(window).unwrap();
let adapter = pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
force_fallback_adapter: false,
}))
.unwrap();
let (device, queue) = pollster::block_on(adapter.request_device(
&wgpu::DeviceDescriptor {
label: None,
required_features: wgpu::Features::empty(),
required_limits: wgpu::Limits::default(),
memory_hints: Default::default(),
},
None,
))
.unwrap();
let surface_caps = surface.get_capabilities(&adapter);
let surface_format = surface_caps
.formats
.iter()
.copied()
.find(|f| f.is_srgb())
.unwrap_or(surface_caps.formats[0]);
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface_format,
width: size.width,
height: size.height,
present_mode: surface_caps.present_modes[0],
alpha_mode: surface_caps.alpha_modes[0],
view_formats: vec![],
desired_maximum_frame_latency: 2,
};
Self {
surface,
device,
queue,
config,
size,
last_frame_time: Instant::now(),
delta_time: 0.0,
}
}
pub fn size(&self) -> PhysicalSize<u32> {
self.size
}
pub fn resize(&mut self, new_size: PhysicalSize<u32>) {
if new_size.width > 0 && new_size.height > 0 {
self.size = new_size;
self.config.width = new_size.width;
self.config.height = new_size.height;
self.surface.configure(&self.device, &self.config);
}
}
pub fn update(&mut self) -> f32 {
let now = Instant::now();
let delta_time = now.duration_since(self.last_frame_time).as_millis() as f32 / 1000.0;
self.last_frame_time = now;
self.delta_time = delta_time;
delta_time
}
}

View file

@ -1,171 +1,248 @@
use std::{collections::HashMap, path::Path};
use crate::{
font::Font,
texture_atlas::{TextureAtlas, TextureRegion},
Texture,
};
use comet_log::info;
use wgpu::{
naga::{self, ShaderStage},
Device, Queue, ShaderModule,
};
pub struct GraphicResourceManager {
texture_atlas: TextureAtlas,
font_atlas: TextureAtlas,
fonts: Vec<Font>,
data_files: HashMap<String, String>,
shaders: HashMap<String, ShaderModule>,
}
impl GraphicResourceManager {
pub fn new() -> Self {
Self {
texture_atlas: TextureAtlas::empty(),
font_atlas: TextureAtlas::empty(),
fonts: Vec::new(),
data_files: HashMap::new(),
shaders: HashMap::new(),
}
}
pub fn texture_atlas(&self) -> &TextureAtlas {
&self.texture_atlas
}
pub fn font_atlas(&self) -> &TextureAtlas {
&self.font_atlas
}
pub fn set_font_atlas(&mut self, font_atlas: TextureAtlas) {
self.font_atlas = font_atlas
}
pub fn texture_locations(&self) -> &HashMap<String, TextureRegion> {
&self.texture_atlas.textures()
}
pub fn data_files(&self) -> &HashMap<String, String> {
&self.data_files
}
pub fn fonts(&self) -> &Vec<Font> {
&self.fonts
}
pub fn fonts_mut(&mut self) -> &mut Vec<Font> {
&mut self.fonts
}
pub fn get_glyph(&self, font: &str, ch: char) -> Option<&TextureRegion> {
self.fonts
.iter()
.find(|f| f.name() == font)
.and_then(|f| f.get_glyph(ch))
}
pub fn set_texture_atlas(&mut self, texture_atlas: TextureAtlas) {
self.texture_atlas = texture_atlas;
}
pub fn create_texture_atlas(&mut self, paths: Vec<String>) {
self.texture_atlas = TextureAtlas::from_texture_paths(paths)
}
pub fn load_string(&self, file_name: &str) -> anyhow::Result<String> {
let base_path = std::env::var("OUT_DIR")
.map(|p| Path::new(&p).to_path_buf())
.unwrap_or_else(|_| Path::new(".").to_path_buf());
let path = base_path.join(file_name);
let txt = std::fs::read_to_string(&path)
.map_err(|e| anyhow::anyhow!("Failed to load {}: {}", path.display(), e))?;
Ok(txt)
}
pub fn load_binary(&self, file_name: &str) -> anyhow::Result<Vec<u8>> {
let path = Path::new(std::env::var("OUT_DIR")?.as_str())
.join("res")
.join(file_name);
let data = std::fs::read(path)?;
Ok(data)
}
pub fn load_texture(
&self,
file_name: &str,
is_normal_map: bool,
device: &Device,
queue: &Queue,
) -> anyhow::Result<Texture> {
let data = self.load_binary(file_name)?;
Texture::from_bytes(device, queue, &data, file_name, is_normal_map)
}
/// `file_name` is the full name, so with the extension
/// `shader_stage` is only needed if it is a GLSL shader, so default to None if it isn't GLSL
pub fn load_shader(
&mut self,
device: &Device,
shader_stage: Option<ShaderStage>,
file_name: &str,
) -> anyhow::Result<()> {
let shader_source = self.load_string(file_name)?;
let module = match file_name.split('.').last() {

use std::{
collections::HashMap, path::Path
};
use wgpu::{naga, Device, FilterMode, Queue, ShaderModule, TextureFormat, TextureUsages};
use wgpu::naga::ShaderStage;
use comet_log::info;
use crate::{font, texture, Texture};
use crate::font::Font;
use crate::texture_atlas::{TextureAtlas, TextureRegion};
pub struct GraphicResourceManager {
texture_atlas: TextureAtlas,
fonts: Vec<Font>,
data_files: HashMap<String, String>,
shaders: HashMap<String, ShaderModule>
}
impl GraphicResourceManager {
pub fn new() -> Self {
Self {
texture_atlas: TextureAtlas::empty(),
fonts: Vec::new(),
data_files: HashMap::new(),
shaders: HashMap::new()
}
}
pub fn texture_atlas(&self) -> &TextureAtlas {
&self.texture_atlas
}
pub fn texture_locations(&self) -> &HashMap<String, TextureRegion> {
&self.texture_atlas.textures()
}
pub fn data_files(&self) -> &HashMap<String, String> {
&self.data_files
}
pub fn fonts(&self) -> &Vec<Font> {
&self.fonts
}
pub fn get_glyph(&self, font: &str, ch: char) -> Option<&TextureRegion> {
self.fonts.iter().find(|f| f.name() == font).and_then(|f| f.get_glyph(ch))
}
pub fn set_texture_atlas(&mut self, texture_atlas: TextureAtlas) {
self.texture_atlas = texture_atlas;
// This is just for testing purposes
//self.texture_locations.insert("normal_comet.png".to_string(), ([0,0], [15,15]));
//self.texture_locations.insert("green_comet.png".to_string(), ([0,15], [15,31]));
}
pub fn create_texture_atlas(&mut self, paths: Vec<String>) {
self.texture_atlas = TextureAtlas::from_texture_paths(paths)
}
pub fn load_string(&self, file_name: &str) -> anyhow::Result<String> {
let path = Path::new(std::env::var("OUT_DIR")?.as_str())
.join("res")
.join(file_name);
let txt = std::fs::read_to_string(path)?;
Ok(txt)
}
pub fn load_binary(&self, file_name: &str) -> anyhow::Result<Vec<u8>> {
let path = Path::new(std::env::var("OUT_DIR")?.as_str())
.join("res")
.join(file_name);
let data = std::fs::read(path)?;
Ok(data)
}
pub fn load_texture(
&self,
file_name: &str,
is_normal_map: bool,
device: &Device,
queue: &Queue,
) -> anyhow::Result<Texture> {
let data = self.load_binary(file_name)?;
Texture::from_bytes(device, queue, &data, file_name, is_normal_map)
}
/// `file_name` is the full name, so with the extension
/// `shader_stage` is only needed if it is a GLSL shader, so default to None if it isn't GLSL
pub fn load_shader(
&mut self,
shader_stage: Option<ShaderStage>,
file_name: &str,
device: &Device
) -> anyhow::Result<()> {
let shader_source = self.load_string(file_name)?;
let module = match file_name.split('.').last() {
Some ("wgsl") => {
device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some(file_name.clone()),
source: wgpu::ShaderSource::Wgsl(shader_source.into())
})
},
Some("glsl") => {
if let Some(stage) = shader_stage {
device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some(file_name.clone()),
source: wgpu::ShaderSource::Glsl {
shader: shader_source.into(),
stage,
defines: naga::FastHashMap::default()
}
})
}
else {
return Err(anyhow::anyhow!("GLSL shader needs a stage"))
}
}
_ => return Err(anyhow::anyhow!("Unsupported shader type")),
};
self.shaders.insert(file_name.to_string(), module);
Ok(())
}
pub fn get_shader(&self, shader: &str) -> Option<&ShaderModule> {
Some("wgsl") => device.create_shader_module(wgpu::ShaderModuleDescriptor { self.shaders.get(shader)
label: Some(file_name), }
source: wgpu::ShaderSource::Wgsl(shader_source.into()),
}),
Some("glsl") => {
if let Some(stage) = shader_stage {
device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some(file_name),
source: wgpu::ShaderSource::Glsl {
shader: shader_source.into(),
stage,
defines: naga::FastHashMap::default(),
},
})
} else {
return Err(anyhow::anyhow!("GLSL shader needs a stage"));
}
}
_ => return Err(anyhow::anyhow!("Unsupported shader type")),
};
self.shaders.insert(file_name.to_string(), module); pub fn load_font(&mut self, path: &str, size: f32) {
Ok(()) info!("Loading font: {}", path);
} let font = Font::new(path, size);
info!("Font {} loaded!", font.name());
self.fonts.push(font);
}
/// Loads the shader from a source code string /*pub async fn load_model(
/// Right now only works with wgsl &self,
pub fn load_shader_from_string( file_name: &str,
&mut self, device: &wgpu::Device,
device: &Device, queue: &wgpu::Queue,
shader_name: &str, layout: &wgpu::BindGroupLayout,
shader_src: &str, ) -> anyhow::Result<model::Model> {
) -> anyhow::Result<()> { let obj_text = self.load_string(file_name).await?;
let module = device.create_shader_module(wgpu::ShaderModuleDescriptor { let obj_cursor = Cursor::new(obj_text);
label: Some(shader_name), let mut obj_reader = BufReader::new(obj_cursor);
source: wgpu::ShaderSource::Wgsl(shader_src.into()),
});
self.shaders.insert(shader_name.to_string(), module); let (models, obj_materials) = tobj::load_obj_buf_async(
Ok(()) &mut obj_reader,
} &tobj::LoadOptions {
triangulate: true,
single_index: true,
..Default::default()
},
|p| async move {
let mat_text = self.load_string(&p).await.unwrap();
tobj::load_mtl_buf(&mut BufReader::new(Cursor::new(mat_text)))
},
)
.await?;
pub fn get_shader(&self, shader: &str) -> Option<&ShaderModule> { let mut materials = Vec::new();
self.shaders.get(shader) for m in obj_materials? {
} let diffuse_texture = self.load_texture(&m.diffuse_texture, false, device, queue).await?;
let normal_texture = self.load_texture(&m.normal_texture, true, device, queue).await?;
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: None,
});
pub fn load_font(&mut self, path: &str, size: f32) { materials.push(model::Material {
info!("Loading font: {}", path); name: m.name,
let font = Font::new(path, size); diffuse_texture,
info!("Font {} loaded!", font.name()); bind_group,
self.fonts.push(font); });
} }
let meshes = models
.into_iter()
.map(|m| {
let vertices = (0..m.mesh.positions.len() / 3)
.map(|i| {
if m.mesh.normals.is_empty() {
model::ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [m.mesh.texcoords[i * 2], 1.0 - m.mesh.texcoords[i * 2 + 1]],
normal: [0.0, 0.0, 0.0],
}
} else {
model::ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [m.mesh.texcoords[i * 2], 1.0 - m.mesh.texcoords[i * 2 + 1]],
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
],
}
}
})
.collect::<Vec<_>>();
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", file_name)),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsages::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", file_name)),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsages::INDEX,
});
model::Mesh {
name: file_name.to_string(),
vertex_buffer,
index_buffer,
num_elements: m.mesh.indices.len() as u32,
material: m.mesh.material_id.unwrap_or(0),
}
})
.collect::<Vec<_>>();
Ok(model::Model { meshes, materials })
}*/
} }
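
`load_shader` dispatches on the file extension: a `.wgsl` source is compiled directly, a `.glsl` source is rejected unless a `ShaderStage` is supplied, and anything else is an error. A dependency-free sketch of that dispatch rule, using the illustrative names `ShaderKind` and `classify_shader` (not part of this crate):

#[derive(Debug, PartialEq)]
enum ShaderKind {
    Wgsl,
    Glsl { stage: &'static str },
}

/// Mirrors the extension check in `load_shader`: WGSL needs no extra
/// information, GLSL additionally needs a shader stage.
fn classify_shader(
    file_name: &str,
    glsl_stage: Option<&'static str>,
) -> Result<ShaderKind, String> {
    match file_name.rsplit('.').next() {
        Some("wgsl") => Ok(ShaderKind::Wgsl),
        Some("glsl") => glsl_stage
            .map(|stage| ShaderKind::Glsl { stage })
            .ok_or_else(|| "GLSL shader needs a stage".to_string()),
        _ => Err("Unsupported shader type".to_string()),
    }
}

fn main() {
    assert_eq!(classify_shader("sprite.wgsl", None), Ok(ShaderKind::Wgsl));
    assert!(classify_shader("sprite.glsl", None).is_err());
    assert_eq!(
        classify_shader("sprite.glsl", Some("fragment")),
        Ok(ShaderKind::Glsl { stage: "fragment" })
    );
}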

View file

@ -2,9 +2,148 @@ pub use resources::*;
pub use texture::*;
pub use vertex::*;
-pub mod font;
-pub mod graphic_resource_manager;
pub mod resources;
pub mod texture;
-pub mod texture_atlas;
pub mod vertex;
+pub mod texture_atlas;
+pub mod graphic_resource_manager;
+mod material;
+pub mod font;
/*use std::io::{BufReader, Cursor};
use wgpu::util::DeviceExt;
use crate::{model, texture};
pub async fn load_string(file_name: &str) -> anyhow::Result<String> {
let path = std::path::Path::new(env!("OUT_DIR"))
.join("res")
.join(file_name);
let txt = std::fs::read_to_string(path)?;
Ok(txt)
}
pub async fn load_binary(file_name: &str) -> anyhow::Result<Vec<u8>> {
let path = std::path::Path::new(env!("OUT_DIR"))
.join("res")
.join(file_name);
let data = std::fs::read(path)?;
Ok(data)
}
pub async fn load_texture(
file_name: &str,
device: &wgpu::Device,
queue: &wgpu::Queue,
) -> anyhow::Result<texture::Texture> {
let data = load_binary(file_name).await?;
texture::Texture::from_bytes(device, queue, &data, file_name)
}
pub async fn load_model(
file_name: &str,
device: &wgpu::Device,
queue: &wgpu::Queue,
layout: &wgpu::BindGroupLayout,
) -> anyhow::Result<model::Model> {
let obj_text = load_string(file_name).await?;
let obj_cursor = Cursor::new(obj_text);
let mut obj_reader = BufReader::new(obj_cursor);
let (models, obj_materials) = tobj::load_obj_buf_async(
&mut obj_reader,
&tobj::LoadOptions {
triangulate: true,
single_index: true,
..Default::default()
},
|p| async move {
let mat_text = load_string(&p).await.unwrap();
tobj::load_mtl_buf(&mut BufReader::new(Cursor::new(mat_text)))
},
)
.await?;
let mut materials = Vec::new();
for m in obj_materials? {
let diffuse_texture = load_texture(&m.diffuse_texture, device, queue).await?;
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: None,
});
materials.push(model::Material {
name: m.name,
diffuse_texture,
bind_group,
})
}
let meshes = models
.into_iter()
.map(|m| {
let vertices = (0..m.mesh.positions.len() / 3)
.map(|i| {
if m.mesh.normals.is_empty(){
model::ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [m.mesh.texcoords[i * 2], 1.0 - m.mesh.texcoords[i * 2 + 1]],
normal: [0.0, 0.0, 0.0],
}
}else{
model::ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [m.mesh.texcoords[i * 2], 1.0 - m.mesh.texcoords[i * 2 + 1]],
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
],
}
}
})
.collect::<Vec<_>>();
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", file_name)),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsages::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", file_name)),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsages::INDEX,
});
log::info!("Mesh: {}", m.name);
model::Mesh {
name: file_name.to_string(),
vertex_buffer,
index_buffer,
num_elements: m.mesh.indices.len() as u32,
material: m.mesh.material_id.unwrap_or(0),
}
})
.collect::<Vec<_>>();
Ok(model::Model { meshes, materials })
}*/

View file

@ -0,0 +1,8 @@
use crate::texture;
pub struct Material {
pub name: String,
pub diffuse_texture: texture::Texture,
pub normal_texture: texture::Texture,
pub bind_group: wgpu::BindGroup,
}
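
The new `Material` groups a diffuse texture, a normal texture, and the `wgpu::BindGroup` they are bound through. For orientation, a hedged sketch of the kind of bind group layout such a material is typically created against (two filterable 2D textures plus their samplers); the binding indices and the helper name are assumptions, not taken from this repository.

/// Illustrative layout for a diffuse + normal material; bindings 0..3 are assumed.
fn material_bind_group_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
    let texture_entry = |binding| wgpu::BindGroupLayoutEntry {
        binding,
        visibility: wgpu::ShaderStages::FRAGMENT,
        ty: wgpu::BindingType::Texture {
            sample_type: wgpu::TextureSampleType::Float { filterable: true },
            view_dimension: wgpu::TextureViewDimension::D2,
            multisampled: false,
        },
        count: None,
    };
    let sampler_entry = |binding| wgpu::BindGroupLayoutEntry {
        binding,
        visibility: wgpu::ShaderStages::FRAGMENT,
        ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
        count: None,
    };
    device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
        label: Some("material bind group layout"),
        entries: &[
            texture_entry(0),
            sampler_entry(1),
            texture_entry(2),
            sampler_entry(3),
        ],
    })
}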

View file

@ -1,310 +1,325 @@
use anyhow::*; use anyhow::*;
use image::{DynamicImage, GenericImageView, RgbaImage}; use image::{DynamicImage, GenericImageView, RgbaImage};
use wgpu::{Device, Queue};
#[derive(Debug)] #[derive(Debug)]
pub struct Texture { pub struct Texture {
#[allow(unused)] #[allow(unused)]
pub texture: wgpu::Texture, pub texture: wgpu::Texture,
pub view: wgpu::TextureView, pub view: wgpu::TextureView,
pub sampler: wgpu::Sampler, pub sampler: wgpu::Sampler,
pub size: wgpu::Extent3d, pub size: wgpu::Extent3d,
} }
impl Texture { impl Texture {
pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float; pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
pub fn create_depth_texture( pub fn create_depth_texture(
device: &wgpu::Device, device: &wgpu::Device,
config: &wgpu::SurfaceConfiguration, config: &wgpu::SurfaceConfiguration,
label: &str, label: &str,
) -> Self { ) -> Self {
let size = wgpu::Extent3d { let size = wgpu::Extent3d {
width: config.width.max(1), width: config.width.max(1),
height: config.height.max(1), height: config.height.max(1),
depth_or_array_layers: 1, depth_or_array_layers: 1,
}; };
let desc = wgpu::TextureDescriptor { let desc = wgpu::TextureDescriptor {
label: Some(label), label: Some(label),
size, size,
mip_level_count: 1, mip_level_count: 1,
sample_count: 1, sample_count: 1,
dimension: wgpu::TextureDimension::D2, dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT, format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING, usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[Self::DEPTH_FORMAT], view_formats: &[Self::DEPTH_FORMAT],
}; };
let texture = device.create_texture(&desc); let texture = device.create_texture(&desc);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default()); let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor { let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge, address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge, address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge, address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear, mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear, min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest, mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual), compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: 0.0, lod_min_clamp: 0.0,
lod_max_clamp: 100.0, lod_max_clamp: 100.0,
..Default::default() ..Default::default()
}); });
Self { Self {
texture, texture,
view, view,
sampler, sampler,
size, size, // NEW!
} }
} }
#[allow(dead_code)] #[allow(dead_code)]
pub fn from_bytes( pub fn from_bytes(
device: &wgpu::Device, device: &wgpu::Device,
queue: &wgpu::Queue, queue: &wgpu::Queue,
bytes: &[u8], bytes: &[u8],
label: &str, label: &str,
is_normal_map: bool, is_normal_map: bool,
) -> Result<Self> { ) -> Result<Self> {
let img = image::load_from_memory(bytes)?; let img = image::load_from_memory(bytes)?;
Self::from_image(device, queue, &img, Some(label), is_normal_map) Self::from_image(device, queue, &img, Some(label), is_normal_map)
} }
pub fn from_image( pub fn from_image(
device: &wgpu::Device, device: &wgpu::Device,
queue: &wgpu::Queue, queue: &wgpu::Queue,
img: &image::DynamicImage, img: &image::DynamicImage,
label: Option<&str>, label: Option<&str>,
is_normal_map: bool, is_normal_map: bool,
) -> Result<Self> { ) -> Result<Self> {
let dimensions = img.dimensions(); let dimensions = img.dimensions();
let rgba = img.to_rgba8(); let rgba = img.to_rgba8();
let format = if is_normal_map { let format = if is_normal_map {
wgpu::TextureFormat::Rgba8Unorm wgpu::TextureFormat::Rgba8Unorm
} else { } else {
wgpu::TextureFormat::Rgba8UnormSrgb wgpu::TextureFormat::Rgba8UnormSrgb
}; };
let usage = wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST; let usage = wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST;
let size = wgpu::Extent3d { let size = wgpu::Extent3d {
width: img.width(), width: img.width(),
height: img.height(), height: img.height(),
depth_or_array_layers: 1, depth_or_array_layers: 1,
}; };
let texture = Self::create_2d_texture( let texture = Self::create_2d_texture(
device, device,
size.width, size.width,
size.height, size.height,
format, format,
usage, usage,
wgpu::FilterMode::Nearest, wgpu::FilterMode::Nearest,
label, label,
); );
queue.write_texture( queue.write_texture(
wgpu::ImageCopyTexture { wgpu::ImageCopyTexture {
aspect: wgpu::TextureAspect::All, aspect: wgpu::TextureAspect::All,
texture: &texture.texture, texture: &texture.texture,
mip_level: 0, mip_level: 0,
origin: wgpu::Origin3d::ZERO, origin: wgpu::Origin3d::ZERO,
}, },
&rgba, &rgba,
wgpu::ImageDataLayout { wgpu::ImageDataLayout {
offset: 0, offset: 0,
bytes_per_row: Some(4 * dimensions.0), bytes_per_row: Some(4 * dimensions.0),
rows_per_image: Some(dimensions.1), rows_per_image: Some(dimensions.1),
}, },
size, size,
); );
Ok(texture) Ok(texture)
} }
pub(crate) fn create_2d_texture( pub(crate) fn create_2d_texture(
device: &wgpu::Device, device: &wgpu::Device,
width: u32, width: u32,
height: u32, height: u32,
format: wgpu::TextureFormat, format: wgpu::TextureFormat,
usage: wgpu::TextureUsages, usage: wgpu::TextureUsages,
mag_filter: wgpu::FilterMode, mag_filter: wgpu::FilterMode,
label: Option<&str>, label: Option<&str>,
) -> Self { ) -> Self {
let size = wgpu::Extent3d { let size = wgpu::Extent3d {
width, width,
height, height,
depth_or_array_layers: 1, depth_or_array_layers: 1,
}; };
Self::create_texture( Self::create_texture(
device, device,
label, label,
size, size,
format, format,
usage, usage,
wgpu::TextureDimension::D2, wgpu::TextureDimension::D2,
mag_filter, mag_filter,
) )
} }
pub fn create_texture( pub fn create_texture(
device: &wgpu::Device, device: &wgpu::Device,
label: Option<&str>, label: Option<&str>,
size: wgpu::Extent3d, size: wgpu::Extent3d,
format: wgpu::TextureFormat, format: wgpu::TextureFormat,
usage: wgpu::TextureUsages, usage: wgpu::TextureUsages,
dimension: wgpu::TextureDimension, dimension: wgpu::TextureDimension,
mag_filter: wgpu::FilterMode, mag_filter: wgpu::FilterMode,
) -> Self { ) -> Self {
let texture = device.create_texture(&wgpu::TextureDescriptor { let texture = device.create_texture(&wgpu::TextureDescriptor {
label, label,
size, size,
mip_level_count: 1, mip_level_count: 1,
sample_count: 1, sample_count: 1,
dimension, dimension,
format, format,
usage, usage,
view_formats: &[], view_formats: &[],
}); });
let view = texture.create_view(&wgpu::TextureViewDescriptor::default()); let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor { let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge, address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge, address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge, address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter, mag_filter,
min_filter: wgpu::FilterMode::Nearest, min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest, mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default() ..Default::default()
}); });
Self { Self {
texture, texture,
view, view,
sampler, sampler,
size, // NEW! size, // NEW!
} }
} }
pub fn to_image(&self, device: &wgpu::Device, queue: &wgpu::Queue) -> Result<DynamicImage> { pub fn to_image(
let width = self.size.width; &self,
let height = self.size.height; device: &wgpu::Device,
queue: &wgpu::Queue,
) -> Result<DynamicImage> {
// Size of the texture
let width = self.size.width;
let height = self.size.height;
let texture_size_bytes = (4 * width * height) as wgpu::BufferAddress; // Calculate the size of the texture in bytes
let texture_size_bytes = (4 * width * height) as wgpu::BufferAddress;
let buffer = device.create_buffer(&wgpu::BufferDescriptor { // Create a buffer for reading the texture data back from the GPU
label: Some("Texture Readback Buffer"), let buffer = device.create_buffer(&wgpu::BufferDescriptor {
size: texture_size_bytes, label: Some("Texture Readback Buffer"),
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ, size: texture_size_bytes,
mapped_at_creation: false, usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
}); mapped_at_creation: false,
});
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { // Create a command encoder to copy the texture data to the buffer
label: Some("Texture to Buffer Encoder"), let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
}); label: Some("Texture to Buffer Encoder"),
});
encoder.copy_texture_to_buffer( // Define the copy operation from the texture to the buffer
wgpu::ImageCopyTexture { encoder.copy_texture_to_buffer(
texture: &self.texture, wgpu::ImageCopyTexture {
mip_level: 0, texture: &self.texture,
origin: wgpu::Origin3d::ZERO, mip_level: 0,
aspect: wgpu::TextureAspect::All, origin: wgpu::Origin3d::ZERO,
}, aspect: wgpu::TextureAspect::All,
wgpu::ImageCopyBuffer { },
buffer: &buffer, wgpu::ImageCopyBuffer {
layout: wgpu::ImageDataLayout { buffer: &buffer,
offset: 0, layout: wgpu::ImageDataLayout {
bytes_per_row: Some(4 * width), offset: 0,
rows_per_image: Some(height), bytes_per_row: Some(4 * width),
}, rows_per_image: Some(height),
}, },
self.size, },
); self.size,
);
queue.submit(Some(encoder.finish())); // Submit the command to the queue
queue.submit(Some(encoder.finish()));
let buffer_slice = buffer.slice(..); // Wait for the GPU to finish the operation
buffer_slice.map_async(wgpu::MapMode::Read, |result| { let buffer_slice = buffer.slice(..);
if let Err(e) = result { buffer_slice.map_async(wgpu::MapMode::Read, |result| {
eprintln!("Failed to map buffer: {:?}", e); if let Err(e) = result {
} eprintln!("Failed to map buffer: {:?}", e);
}); }
});
let data = buffer_slice.get_mapped_range(); // Get the buffer data
let data = buffer_slice.get_mapped_range();
let image = RgbaImage::from_raw(width, height, data.to_vec()) // Convert the raw data into an image::RgbaImage
.ok_or_else(|| anyhow!("Failed to create image from raw texture data"))?; let image = RgbaImage::from_raw(width, height, data.to_vec())
.ok_or_else(|| anyhow!("Failed to create image from raw texture data"))?;
buffer.unmap(); // Unmap the buffer now that we're done with it
buffer.unmap();
Ok(DynamicImage::ImageRgba8(image)) // Convert the RgbaImage into a DynamicImage
} Ok(DynamicImage::ImageRgba8(image))
}
} }
pub struct CubeTexture { pub struct CubeTexture {
texture: wgpu::Texture, texture: wgpu::Texture,
sampler: wgpu::Sampler, sampler: wgpu::Sampler,
view: wgpu::TextureView, view: wgpu::TextureView,
} }
impl CubeTexture { impl CubeTexture {
pub fn create_2d( pub fn create_2d(
device: &wgpu::Device, device: &wgpu::Device,
width: u32, width: u32,
height: u32, height: u32,
format: wgpu::TextureFormat, format: wgpu::TextureFormat,
mip_level_count: u32, mip_level_count: u32,
usage: wgpu::TextureUsages, usage: wgpu::TextureUsages,
mag_filter: wgpu::FilterMode, mag_filter: wgpu::FilterMode,
label: Option<&str>, label: Option<&str>,
) -> Self { ) -> Self {
let texture = device.create_texture(&wgpu::TextureDescriptor { let texture = device.create_texture(&wgpu::TextureDescriptor {
label, label,
size: wgpu::Extent3d { size: wgpu::Extent3d {
width, width,
height, height,
// A cube has 6 sides, so we need 6 layers // A cube has 6 sides, so we need 6 layers
depth_or_array_layers: 6, depth_or_array_layers: 6,
}, },
mip_level_count, mip_level_count,
sample_count: 1, sample_count: 1,
dimension: wgpu::TextureDimension::D2, dimension: wgpu::TextureDimension::D2,
format, format,
usage, usage,
view_formats: &[], view_formats: &[],
}); });
let view = texture.create_view(&wgpu::TextureViewDescriptor { let view = texture.create_view(&wgpu::TextureViewDescriptor {
label, label,
dimension: Some(wgpu::TextureViewDimension::Cube), dimension: Some(wgpu::TextureViewDimension::Cube),
array_layer_count: Some(6), array_layer_count: Some(6),
..Default::default() ..Default::default()
}); });
let sampler = device.create_sampler(&wgpu::SamplerDescriptor { let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label, label,
address_mode_u: wgpu::AddressMode::ClampToEdge, address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge, address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge, address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter, mag_filter,
min_filter: wgpu::FilterMode::Nearest, min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest, mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default() ..Default::default()
}); });
Self { Self {
texture, texture,
sampler, sampler,
view, view,
} }
} }
pub fn texture(&self) -> &wgpu::Texture { pub fn texture(&self) -> &wgpu::Texture {
&self.texture &self.texture
} }
pub fn view(&self) -> &wgpu::TextureView { pub fn view(&self) -> &wgpu::TextureView {
&self.view &self.view
} }
pub fn sampler(&self) -> &wgpu::Sampler {
&self.sampler
}
}
pub fn sampler(&self) -> &wgpu::Sampler {
&self.sampler
}
}
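
One detail worth flagging in `to_image`: the buffer is read with `get_mapped_range` immediately after `map_async`, but in wgpu the mapping only becomes valid once the device has been polled and the callback has fired, so the readback as written may observe a buffer that is not yet mapped. Below is a sketch of the usual synchronization, assuming a wgpu version of the era used here where `Device::poll(wgpu::Maintain::Wait)` blocks until outstanding work completes; `read_back_rgba` is an illustrative helper, not part of the crate.

/// Maps `buffer` for reading, waits for the mapping to complete, and returns a copy of its bytes.
fn read_back_rgba(device: &wgpu::Device, buffer: &wgpu::Buffer) -> Vec<u8> {
    let slice = buffer.slice(..);
    let (tx, rx) = std::sync::mpsc::channel();
    slice.map_async(wgpu::MapMode::Read, move |result| {
        // Forward the map result to the calling thread.
        let _ = tx.send(result);
    });
    // Block until the queue is idle so the callback above has run.
    let _ = device.poll(wgpu::Maintain::Wait);
    rx.recv()
        .expect("map_async callback was dropped")
        .expect("failed to map readback buffer");
    let data = slice.get_mapped_range().to_vec();
    buffer.unmap();
    data
}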

View file

@ -1,424 +1,304 @@
use crate::font::*;
use comet_log::*;
use image::{DynamicImage, GenericImage, GenericImageView};
use std::collections::HashMap; use std::collections::HashMap;
use std::path::Path; use std::path::Path;
use std::time::Instant;
use image::{DynamicImage, GenericImage, GenericImageView, ImageFormat};
use comet_log::*;
use wgpu::{Device, FilterMode, TextureFormat, TextureUsages};
use crate::font::GlyphData;
use crate::Texture;
#[derive(Debug, Clone)] #[derive(Debug)]
pub struct TextureRegion { pub struct TextureRegion {
u0: f32, u0: f32,
v0: f32, v0: f32,
u1: f32, u1: f32,
v1: f32, v1: f32,
advance: f32, advance: f32,
offset_x: f32, offset_x: f32,
offset_y: f32, offset_y: f32,
dimensions: (u32, u32), dimensions: (u32, u32),
} }
impl TextureRegion { impl TextureRegion {
pub fn new( pub fn new(u0: f32, v0: f32, u1: f32, v1: f32, dimensions: (u32, u32), advance: f32, offset_x: f32, offset_y: f32) -> Self {
u0: f32, Self {
v0: f32, u0,
u1: f32, v0,
v1: f32, u1,
dimensions: (u32, u32), v1,
advance: f32, advance,
offset_x: f32, offset_x,
offset_y: f32, offset_y,
) -> Self { dimensions
Self { }
u0, }
v0,
u1,
v1,
advance,
offset_x,
offset_y,
dimensions,
}
}
pub fn u0(&self) -> f32 { pub fn u0(&self) -> f32 {
self.u0 self.u0
} }
pub fn u1(&self) -> f32 { pub fn u1(&self) -> f32 {
self.u1 self.u1
} }
pub fn v0(&self) -> f32 { pub fn v0(&self) -> f32 {
self.v0 self.v0
} }
pub fn v1(&self) -> f32 { pub fn v1(&self) -> f32 {
self.v1 self.v1
} }
pub fn dimensions(&self) -> (u32, u32) { pub fn dimensions(&self) -> (u32, u32) {
self.dimensions self.dimensions
} }
pub fn advance(&self) -> f32 { pub fn advance(&self) -> f32 {
self.advance self.advance
} }
pub fn offset_x(&self) -> f32 { pub fn offset_x(&self) -> f32 {
self.offset_x self.offset_x
} }
pub fn offset_y(&self) -> f32 { pub fn offset_y(&self) -> f32 {
self.offset_y self.offset_y
} }
} }
#[derive(Debug, Clone)] #[derive(Debug)]
pub struct TextureAtlas { pub struct TextureAtlas {
atlas: DynamicImage, atlas: DynamicImage,
textures: HashMap<String, TextureRegion>, textures: HashMap<String, TextureRegion>,
} }
impl TextureAtlas { impl TextureAtlas {
pub fn empty() -> Self { pub fn empty() -> Self {
Self { Self {
atlas: DynamicImage::new(1, 1, image::ColorType::Rgb8), atlas: DynamicImage::new(1,1, image::ColorType::Rgb8),
textures: HashMap::new(), textures: HashMap::new()
} }
} }
pub fn texture_paths(&self) -> Vec<String> { pub fn texture_paths(&self) -> Vec<String> {
self.textures.keys().map(|k| k.to_string()).collect() self.textures.keys().map(|k| k.to_string()).collect()
} }
fn calculate_atlas_width(textures: &Vec<DynamicImage>) -> u32 { fn calculate_atlas_width(textures: &Vec<DynamicImage>) -> u32 {
let mut last_height: u32 = textures.get(0).unwrap().height(); let mut last_height: u32 = textures.get(0).unwrap().height();
let mut widths: Vec<u32> = Vec::new(); let mut widths: Vec<u32> = Vec::new();
let mut current_width: u32 = 0; let mut current_width: u32 = 0;
for texture in textures { for texture in textures {
if last_height != texture.height() { if last_height != texture.height() {
widths.push(current_width); widths.push(current_width);
current_width = 0; current_width = 0;
last_height = texture.height(); last_height = texture.height();
} }
current_width += texture.width(); current_width += texture.width();
} }
widths.push(current_width); widths.push(current_width);
*widths.iter().max().unwrap() *widths.iter().max().unwrap()
} }
fn calculate_atlas_height(textures: &Vec<DynamicImage>) -> u32 { fn calculate_atlas_height(textures: &Vec<DynamicImage>) -> u32 {
let last_height: u32 = textures.get(0).unwrap().height(); let last_height: u32 = textures.get(0).unwrap().height();
let mut height: u32 = 0; let mut height: u32 = 0;
height += last_height; height += last_height;
for texture in textures { for texture in textures {
if last_height == texture.height() { if last_height == texture.height() {
continue; continue;
} }
height += texture.height(); height += texture.height();
} }
height height
} }
fn insert_texture_at(base: &mut DynamicImage, texture: &DynamicImage, x_pos: u32, y_pos: u32) { fn insert_texture_at(base: &mut DynamicImage, texture: &DynamicImage, x_pos: u32, y_pos: u32) {
for y in 0..texture.height() { for y in 0..texture.height() {
for x in 0..texture.width() { for x in 0..texture.width() {
let pixel = texture.get_pixel(x, y); let pixel = texture.get_pixel(x,y);
base.put_pixel(x + x_pos, y + y_pos, pixel); base.put_pixel(x + x_pos, y + y_pos, pixel);
} }
} }
} }
pub fn from_texture_paths(paths: Vec<String>) -> Self { pub fn from_texture_paths(
let mut textures: Vec<DynamicImage> = Vec::new(); paths: Vec<String>,
let mut regions: HashMap<String, TextureRegion> = HashMap::new(); ) -> Self {
let mut textures: Vec<DynamicImage> = Vec::new();
info!("Loading textures..."); let mut regions: HashMap<String, TextureRegion> = HashMap::new();
for path in &paths { info!("Loading textures...");
textures.push(image::open(&Path::new(path.as_str())).expect("Failed to load texture"));
} for path in &paths {
textures.push(image::open(&Path::new(path.as_str())).expect("Failed to load texture"));
info!("Textures loaded!"); }
info!("Sorting textures by height...");
info!("Textures loaded!");
let mut texture_path_pairs: Vec<(&DynamicImage, &String)> = info!("Sorting textures by height...");
textures.iter().zip(paths.iter()).collect();
texture_path_pairs.sort_by(|a, b| b.0.height().cmp(&a.0.height())); let mut texture_path_pairs: Vec<(&DynamicImage, &String)> = textures.iter().zip(paths.iter()).collect();
let (sorted_textures, sorted_paths): (Vec<&DynamicImage>, Vec<&String>) = texture_path_pairs.sort_by(|a, b| b.0.height().cmp(&a.0.height()));
texture_path_pairs.into_iter().unzip(); let (sorted_textures, sorted_paths): (Vec<&DynamicImage>, Vec<&String>) = texture_path_pairs.into_iter().unzip();
let sorted_textures: Vec<DynamicImage> = let sorted_textures: Vec<DynamicImage> = sorted_textures.into_iter().map(|t| t.clone()).collect();
sorted_textures.into_iter().map(|t| t.clone()).collect(); let sorted_paths: Vec<String> = sorted_paths.into_iter().map(|s| s.to_string()).collect();
let sorted_paths: Vec<String> = sorted_paths.into_iter().map(|s| s.to_string()).collect();
let (height, width) = (Self::calculate_atlas_height(&sorted_textures), Self::calculate_atlas_width(&sorted_textures));
let (height, width) = ( let mut base = DynamicImage::new_rgba8(width,height);
Self::calculate_atlas_height(&sorted_textures),
Self::calculate_atlas_width(&sorted_textures), let mut previous = sorted_textures.get(0).unwrap().height();
); let mut x_offset: u32 = 0;
let mut base = DynamicImage::new_rgba8(width, height); let mut y_offset: u32 = 0;
let mut previous = sorted_textures.get(0).unwrap().height(); info!("Creating texture atlas...");
let mut x_offset: u32 = 0;
let mut y_offset: u32 = 0; for (texture, path) in sorted_textures.iter().zip(sorted_paths.iter()) {
if texture.height() != previous {
info!("Creating texture atlas..."); y_offset += previous;
x_offset = 0;
for (texture, path) in sorted_textures.iter().zip(sorted_paths.iter()) { previous = texture.height();
if texture.height() != previous { }
y_offset += previous;
x_offset = 0; Self::insert_texture_at(&mut base, &texture, x_offset, y_offset);
previous = texture.height(); regions.insert(path.to_string(), TextureRegion::new(
} x_offset as f32 / width as f32,
y_offset as f32 / height as f32,
Self::insert_texture_at(&mut base, &texture, x_offset, y_offset); (x_offset + texture.width()) as f32 / width as f32,
let texel_w = 0.5 / width as f32; (y_offset + texture.height()) as f32 / height as f32,
let texel_h = 0.5 / height as f32; texture.dimensions(),
0.0,
let u0 = (x_offset as f32 + texel_w) / width as f32; 0.0,
let v0 = (y_offset as f32 + texel_h) / height as f32; 0.0
let u1 = ((x_offset + texture.width()) as f32 - texel_w) / width as f32; ));
let v1 = ((y_offset + texture.height()) as f32 - texel_h) / height as f32; x_offset += texture.width();
}
regions.insert(
path.to_string(), info!("Texture atlas created!");
TextureRegion::new(u0, v0, u1, v1, texture.dimensions(), 0.0, 0.0, 0.0),
); TextureAtlas {
x_offset += texture.width(); atlas: base,
} textures: regions
}
info!("Texture atlas created!"); }
TextureAtlas { pub fn from_textures(
atlas: base, names: Vec<String>,
textures: regions, textures: Vec<DynamicImage>,
} ) -> Self {
} let mut regions: HashMap<String, TextureRegion> = HashMap::new();
pub fn from_textures(names: Vec<String>, textures: Vec<DynamicImage>) -> Self { info!("Sorting textures by height...");
let mut regions: HashMap<String, TextureRegion> = HashMap::new();
let mut texture_path_pairs: Vec<(&DynamicImage, &String)> = textures.iter().zip(names.iter()).collect();
info!("Sorting textures by height..."); texture_path_pairs.sort_by(|a, b| b.0.height().cmp(&a.0.height()));
let (sorted_textures, sorted_paths): (Vec<&DynamicImage>, Vec<&String>) = texture_path_pairs.into_iter().unzip();
let mut texture_path_pairs: Vec<(&DynamicImage, &String)> = let sorted_textures: Vec<DynamicImage> = sorted_textures.into_iter().map(|t| t.clone()).collect();
textures.iter().zip(names.iter()).collect(); let sorted_paths: Vec<String> = sorted_paths.into_iter().map(|s| s.to_string()).collect();
texture_path_pairs.sort_by(|a, b| b.0.height().cmp(&a.0.height()));
let (sorted_textures, sorted_paths): (Vec<&DynamicImage>, Vec<&String>) = let (height, width) = (Self::calculate_atlas_height(&sorted_textures), Self::calculate_atlas_width(&sorted_textures));
texture_path_pairs.into_iter().unzip(); let mut base = DynamicImage::new_rgba8(width,height);
let sorted_textures: Vec<DynamicImage> =
sorted_textures.into_iter().map(|t| t.clone()).collect(); let mut previous = sorted_textures.get(0).unwrap().height();
let sorted_paths: Vec<String> = sorted_paths.into_iter().map(|s| s.to_string()).collect(); let mut x_offset: u32 = 0;
let mut y_offset: u32 = 0;
let (height, width) = (
Self::calculate_atlas_height(&sorted_textures), info!("Creating texture atlas...");
Self::calculate_atlas_width(&sorted_textures),
); for (texture, name) in sorted_textures.iter().zip(sorted_paths.iter()) {
let mut base = DynamicImage::new_rgba8(width, height); if texture.height() != previous {
y_offset += previous;
let mut previous = sorted_textures.get(0).unwrap().height(); x_offset = 0;
let mut x_offset: u32 = 0; previous = texture.height();
let mut y_offset: u32 = 0; }
info!("Creating texture atlas..."); Self::insert_texture_at(&mut base, &texture, x_offset, y_offset);
regions.insert(name.to_string(), TextureRegion::new(
for (texture, name) in sorted_textures.iter().zip(sorted_paths.iter()) { x_offset as f32 / width as f32,
if texture.height() != previous { y_offset as f32 / height as f32,
y_offset += previous; (x_offset + texture.width()) as f32 / width as f32,
x_offset = 0; (y_offset + texture.height()) as f32 / height as f32,
previous = texture.height(); texture.dimensions(),
} 0.0,
0.0,
Self::insert_texture_at(&mut base, &texture, x_offset, y_offset); 0.0
regions.insert( ));
name.to_string(), x_offset += texture.width();
TextureRegion::new( }
x_offset as f32 / width as f32,
y_offset as f32 / height as f32, info!("Texture atlas created!");
(x_offset + texture.width()) as f32 / width as f32,
(y_offset + texture.height()) as f32 / height as f32, TextureAtlas {
texture.dimensions(), atlas: base,
0.0, textures: regions
0.0, }
0.0, }
),
); pub fn from_glyphs(mut glyphs: Vec<GlyphData>) -> Self {
x_offset += texture.width(); glyphs.sort_by(|a, b| b.render.height().cmp(&a.render.height()));
}
let height = Self::calculate_atlas_height(
info!("Texture atlas created!"); &glyphs.iter().map(|g| g.render.clone()).collect::<Vec<_>>()
);
TextureAtlas { let width = Self::calculate_atlas_width(
atlas: base, &glyphs.iter().map(|g| g.render.clone()).collect::<Vec<_>>()
textures: regions, );
}
} let padding = (glyphs.len() * 3) as u32;
pub fn from_glyphs(mut glyphs: Vec<GlyphData>) -> Self { let mut base = DynamicImage::new_rgba8(width + padding, height);
glyphs.sort_by(|a, b| b.render.height().cmp(&a.render.height())); let mut regions = HashMap::new();
let mut current_row_height = glyphs[0].render.height();
let height = Self::calculate_atlas_height( let mut x_offset: u32 = 0;
&glyphs.iter().map(|g| g.render.clone()).collect::<Vec<_>>(), let mut y_offset: u32 = 0;
);
let width = Self::calculate_atlas_width(
&glyphs.iter().map(|g| g.render.clone()).collect::<Vec<_>>(), for g in glyphs.iter() {
); let glyph_w = g.render.width();
let glyph_h = g.render.height();
let padding = (glyphs.len() * 3) as u32;
if glyph_h != current_row_height {
let mut base = DynamicImage::new_rgba8(width + padding, height); y_offset += current_row_height + 3;
let mut regions = HashMap::new(); x_offset = 0;
let mut current_row_height = glyphs[0].render.height(); current_row_height = glyph_h;
let mut x_offset: u32 = 0; }
let mut y_offset: u32 = 0;
Self::insert_texture_at(&mut base, &g.render, x_offset, y_offset);
for g in glyphs.iter() {
let glyph_w = g.render.width(); let u0 = x_offset as f32 / (width + padding) as f32;
let glyph_h = g.render.height(); let v0 = y_offset as f32 / height as f32;
let u1 = (x_offset + glyph_w) as f32 / (width + padding) as f32;
if glyph_h != current_row_height { let v1 = (y_offset + glyph_h) as f32 / height as f32;
y_offset += current_row_height + 3;
x_offset = 0; let region = TextureRegion::new(
current_row_height = glyph_h; u0, v0, u1, v1,
} (glyph_w, glyph_h),
g.advance,
Self::insert_texture_at(&mut base, &g.render, x_offset, y_offset); g.offset_x,
g.offset_y,
let u0 = x_offset as f32 / (width + padding) as f32; );
let v0 = y_offset as f32 / height as f32;
let u1 = (x_offset + glyph_w) as f32 / (width + padding) as f32; regions.insert(g.name.clone(), region);
let v1 = (y_offset + glyph_h) as f32 / height as f32;
x_offset += glyph_w + 3;
let region = TextureRegion::new( }
u0,
v0, TextureAtlas {
u1, atlas: base,
v1, textures: regions,
(glyph_w, glyph_h), }
g.advance, }
g.offset_x,
g.offset_y, pub fn atlas(&self) -> &DynamicImage {
); &self.atlas
}
regions.insert(g.name.clone(), region);
pub fn textures(&self) -> &HashMap<String, TextureRegion> {
x_offset += glyph_w + 3; &self.textures
} }
}
TextureAtlas {
atlas: base,
textures: regions,
}
}
pub fn from_fonts(fonts: &Vec<Font>) -> Self {
if fonts.is_empty() {
return Self::empty();
}
let mut all_glyphs: Vec<(String, DynamicImage, TextureRegion)> = Vec::new();
let mut font_indices: Vec<usize> = (0..fonts.len()).collect();
font_indices.sort_by(|&a, &b| fonts[a].name().cmp(&fonts[b].name()));
for fi in font_indices {
let font = &fonts[fi];
let font_name = font.name();
let mut glyph_names: Vec<String> = font.glyphs().textures().keys().cloned().collect();
glyph_names.sort();
for glyph_name in glyph_names {
let region = font.glyphs().textures().get(&glyph_name).unwrap();
let (u0, v0, u1, v1) = (region.u0(), region.v0(), region.u1(), region.v1());
let (width, height) = region.dimensions();
let src_x = (u0 * font.glyphs().atlas().width() as f32) as u32;
let src_y = (v0 * font.glyphs().atlas().height() as f32) as u32;
let glyph_img = DynamicImage::ImageRgba8(
font.glyphs()
.atlas()
.view(src_x, src_y, width, height)
.to_image(),
);
let key = format!("{}::{}", font_name, glyph_name);
all_glyphs.push((key, glyph_img, region.clone()));
}
}
all_glyphs.sort_by(|a, b| {
let ha = a.1.height();
let hb = b.1.height();
match hb.cmp(&ha) {
std::cmp::Ordering::Equal => a.0.cmp(&b.0),
other => other,
}
});
let textures: Vec<DynamicImage> =
all_glyphs.iter().map(|(_, img, _)| img.clone()).collect();
let atlas_height = Self::calculate_atlas_height(&textures);
let atlas_width = Self::calculate_atlas_width(&textures);
let padding = (all_glyphs.len() * 3) as u32;
let mut base = DynamicImage::new_rgba8(atlas_width + padding, atlas_height);
let mut regions = HashMap::new();
let mut current_row_height = textures[0].height();
let mut x_offset: u32 = 0;
let mut y_offset: u32 = 0;
for (key, img, original_region) in all_glyphs {
let w = img.width();
let h = img.height();
if h != current_row_height {
y_offset += current_row_height + 3;
x_offset = 0;
current_row_height = h;
}
Self::insert_texture_at(&mut base, &img, x_offset, y_offset);
let u0 = x_offset as f32 / (atlas_width + padding) as f32;
let v0 = y_offset as f32 / atlas_height as f32;
let u1 = (x_offset + w) as f32 / (atlas_width + padding) as f32;
let v1 = (y_offset + h) as f32 / atlas_height as f32;
let region = TextureRegion::new(
u0,
v0,
u1,
v1,
(w, h),
original_region.advance(),
original_region.offset_x(),
original_region.offset_y(),
);
regions.insert(key, region);
x_offset += w + 3;
}
TextureAtlas {
atlas: base,
textures: regions,
}
}
pub fn atlas(&self) -> &DynamicImage {
&self.atlas
}
pub fn textures(&self) -> &HashMap<String, TextureRegion> {
&self.textures
}
}
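
Both atlas builders follow the same placement rule: textures are packed left to right in rows of equal height, and each region's UVs are derived from its pixel offset divided by the atlas size. The UV math in isolation, as a self-contained sketch; `region_uvs` is an illustrative name, and `inset_texels` models the optional half-texel inset (applied here in pixel space, before the divide) that one variant uses to reduce bleeding under linear filtering.

/// Computes (u0, v0, u1, v1) for a `tex_w` x `tex_h` texture placed at
/// (`x_offset`, `y_offset`) inside an `atlas_w` x `atlas_h` atlas.
fn region_uvs(
    x_offset: u32,
    y_offset: u32,
    tex_w: u32,
    tex_h: u32,
    atlas_w: u32,
    atlas_h: u32,
    inset_texels: f32,
) -> (f32, f32, f32, f32) {
    let (aw, ah) = (atlas_w as f32, atlas_h as f32);
    let u0 = (x_offset as f32 + inset_texels) / aw;
    let v0 = (y_offset as f32 + inset_texels) / ah;
    let u1 = ((x_offset + tex_w) as f32 - inset_texels) / aw;
    let v1 = ((y_offset + tex_h) as f32 - inset_texels) / ah;
    (u0, v0, u1, v1)
}

fn main() {
    // A 16x16 sprite at (32, 0) in a 64x64 atlas, with no inset.
    let (u0, v0, u1, v1) = region_uvs(32, 0, 16, 16, 64, 64, 0.0);
    assert_eq!((u0, v0, u1, v1), (0.5, 0.0, 0.75, 0.25));
}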

View file

@ -2,7 +2,7 @@ use comet::prelude::*;
fn setup(app: &mut App, renderer: &mut Renderer2D) {
    // Initialize the texture atlas
-    renderer.init_atlas();
+    renderer.initialize_atlas();
    // Register components
    app.register_component::<Position2D>();

View file

@ -4,7 +4,7 @@ use winit_input_helper::WinitInputHelper;
fn setup(app: &mut App, renderer: &mut Renderer2D) {
    // Takes all the textures from res/textures and puts them into a texture atlas
-    renderer.init_atlas();
+    renderer.initialize_atlas();
    let camera = app.new_entity();
    app.add_component(camera, Transform2D::new());
@ -14,7 +14,7 @@ fn setup(app: &mut App, renderer: &mut Renderer2D) {
    app.add_component(e1, Transform2D::new());
-    let renderer2d = Render2D::with_texture("res/textures/comet_icon.png");
+    let mut renderer2d = Render2D::with_texture("res/textures/comet_icon.png");
    app.add_component(e1, renderer2d);
}

View file

@ -1,7 +1,6 @@
use comet::prelude::*;
fn setup(app: &mut App, renderer: &mut Renderer2D) {
-    renderer.init_atlas();
    // Loading the font from the res/fonts directory with a rendered size of 77px
    renderer.load_font("./res/fonts/PressStart2P-Regular.ttf", 77.0);

View file

@ -2,7 +2,7 @@ use comet::prelude::*;
fn setup(app: &mut App, renderer: &mut Renderer2D) {
    // Creating a texture atlas from the provided textures in the vector
-    renderer.init_atlas_by_paths(vec!["./res/textures/comet_icon.png".to_string()]);
+    renderer.set_texture_atlas_by_paths(vec!["./res/textures/comet_icon.png".to_string()]);
    // Creating a camera entity
    let cam = app.new_entity();

View file

@ -69,6 +69,6 @@ pub mod prelude {
    pub use comet_input::keyboard::Key;
    pub use comet_log::*;
    pub use comet_math::*;
-    pub use comet_renderer::{renderer::Renderer, renderer2d::Renderer2D};
+    pub use comet_renderer::renderer2d::Renderer2D;
    pub use winit_input_helper::WinitInputHelper as InputManager;
}