Compare commits

...

15 commits

Author SHA1 Message Date
lisk77 c7f0412eff Merge pull request #2 from lisk77/feat/renderer2d-rework 2025-11-02 02:19:51 +01:00
027cd79b34 feat(comet): prelude explicitly exports Renderer2D now 2025-11-02 02:16:09 +01:00
32d06c5164 fix(examples): changed functions for atlas initialization 2025-11-02 02:15:03 +01:00
5a9b771967 feat(renderer2d): added font rendering and fixed some texture rendering issues 2025-11-02 02:14:01 +01:00
09ed792338 fix(comet_resources): removed deleted references in lib.rs 2025-11-02 02:12:31 +01:00
86392d4c05 feat(graphics_resource_manager): added font atlas and a way to load a shader directly from a source string (wgsl only right now) 2025-11-02 02:11:17 +01:00
8831c46b4c fix(texture_atlas): added texel offset to ensure correct interpolation on the GPU side 2025-11-02 02:09:58 +01:00
025d2b3a5f feat(render_pass)!: renamed the universal_execute to universal_clear_execute and made a universal_load_execute 2025-11-02 02:08:09 +01:00
98200cf6b5 refactor(camera): cleanup 2025-11-02 02:06:36 +01:00
609ba45813 feat(render_resources): added replace_bind_group_layout 2025-11-01 00:09:01 +01:00
40d60771a3 fix(examples): changed function calls from the new Renderer2D implementation 2025-10-31 01:17:06 +01:00
1f983fb2ad refactor(renderer): completely overhauled the comet_renderer crate 2025-10-31 01:13:25 +01:00
fafc7d22a4 fix(resources): load_string is no longer build.rs dependent 2025-10-31 01:10:31 +01:00
c2776e1bc4 feat(renderer): added RenderResources and Batches and filled out the Renderer trait for Renderer2D 2025-10-27 17:34:03 +01:00
66c444371a refactor(renderer2d): modularized and abstracted away the render context 2025-10-26 15:21:26 +01:00
25 changed files with 2104 additions and 3070 deletions

View file

@ -0,0 +1,123 @@
use comet_resources::Vertex;
use wgpu::util::DeviceExt;
use wgpu::{BufferUsages, Device};
pub struct Batch {
label: String,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
}
impl Batch {
pub fn new(
label: String,
device: &Device,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>,
) -> Self {
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", &label).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
let num_indices = index_data.len() as u32;
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", &label).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
});
Self {
label,
vertex_data,
index_data,
vertex_buffer,
index_buffer,
num_indices,
}
}
pub fn vertex_buffer(&self) -> &wgpu::Buffer {
&self.vertex_buffer
}
pub fn vertex_data(&self) -> &Vec<Vertex> {
&self.vertex_data
}
pub fn index_buffer(&self) -> &wgpu::Buffer {
&self.index_buffer
}
pub fn index_data(&self) -> &Vec<u16> {
&self.index_data
}
pub fn num_indices(&self) -> u32 {
self.num_indices
}
pub fn update_vertex_buffer(
&mut self,
device: &Device,
queue: &wgpu::Queue,
vertex_data: Vec<Vertex>,
) {
let new_vertex_size = vertex_data.len() as u64 * size_of::<Vertex>() as u64;
match vertex_data == self.vertex_data {
true => {}
false => {
match new_vertex_size > self.vertex_buffer.size() {
false => queue.write_buffer(
&self.vertex_buffer,
0,
bytemuck::cast_slice(&vertex_data),
),
true => {
self.vertex_buffer =
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", self.label).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
}
}
self.vertex_data = vertex_data;
}
}
}
pub fn update_index_buffer(
&mut self,
device: &Device,
queue: &wgpu::Queue,
index_data: Vec<u16>,
) {
let new_index_size = index_data.len() as u64 * size_of::<u16>() as u64;
match index_data == self.index_data {
true => {}
false => {
match new_index_size > self.index_buffer.size() {
false => {
queue.write_buffer(&self.index_buffer, 0, bytemuck::cast_slice(&index_data))
}
true => {
self.index_buffer =
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", self.label).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
});
}
}
self.num_indices = index_data.len() as u32;
self.index_data = index_data;
}
}
}
}

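The new Batch type keeps the CPU-side vertex/index data next to its GPU buffers and only touches the queue when the data actually changes: it skips identical data, writes in place when the new data still fits, and reallocates the buffer otherwise. A minimal usage sketch from inside comet_renderer (batch is a private module per the lib.rs diff below); the "Sprites" label and the reuse-one-batch-per-frame pattern are illustrative assumptions, not part of the diff:

```rust
use crate::batch::Batch;
use comet_resources::Vertex;
use wgpu::{Device, Queue};

/// Creates a batch on first use, then pushes new geometry into it each frame.
/// `update_*_buffer` is a no-op for unchanged data, writes in place when the new
/// data fits, and reallocates the buffer when it grew past its current size.
fn upload_sprites(
    device: &Device,
    queue: &Queue,
    batch: &mut Option<Batch>,
    vertices: Vec<Vertex>,
    indices: Vec<u16>,
) {
    match batch {
        None => *batch = Some(Batch::new("Sprites".to_string(), device, vertices, indices)),
        Some(b) => {
            b.update_vertex_buffer(device, queue, vertices);
            b.update_index_buffer(device, queue, indices);
        }
    }
}
```
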
View file

@ -1,31 +1,82 @@
use comet_math::{m4, p3, v2, v3};
use comet_ecs::{Camera2D, Transform2D};
use comet_log::fatal;
use comet_math::{m4, v2, v3};
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.5,
0.0, 0.0, 0.0, 1.0,
);
pub struct CameraManager {
cameras: Vec<RenderCamera>,
active_camera: usize,
}
impl CameraManager {
pub fn new() -> Self {
Self {
cameras: Vec::new(),
active_camera: 0,
}
}
pub fn set_cameras(&mut self, cameras: Vec<RenderCamera>) {
self.cameras = cameras
}
pub fn set_active(&mut self, active: usize) {
if active >= self.cameras.len() {
fatal!("Active camera index is out of range of the RenderCamera array!")
}
}
pub fn get_camera(&self) -> &RenderCamera {
self.cameras.get(self.active_camera).unwrap()
}
pub fn update_from_scene(&mut self, scene: &comet_ecs::Scene, camera_entities: Vec<usize>) {
self.cameras.clear();
let mut cameras_with_priority: Vec<(RenderCamera, u8)> = Vec::new();
for entity in camera_entities {
let camera_component = scene.get_component::<Camera2D>(entity).unwrap();
let transform_component = scene.get_component::<Transform2D>(entity).unwrap();
let render_cam = RenderCamera::new(
camera_component.zoom(),
camera_component.dimensions(),
v3::new(
transform_component.position().as_vec().x(),
transform_component.position().as_vec().y(),
0.0,
),
);
const SAFE_FRAC_PI_2: f32 = std::f32::consts::FRAC_PI_2 - 0.0001;
cameras_with_priority.push((render_cam, camera_component.priority()));
}
if cameras_with_priority.is_empty() {
return;
}
cameras_with_priority.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
self.cameras = cameras_with_priority.into_iter().map(|(c, _)| c).collect();
self.active_camera = 0;
}
pub fn has_active_camera(&self) -> bool {
!self.cameras.is_empty()
}
}
pub struct RenderCamera {
zoom: f32,
dimension: v2,
position: v3
position: v3,
}
impl RenderCamera {
pub fn new(
zoom: f32,
dimension: v2,
position: v3
) -> Self {
pub fn new(zoom: f32, dimension: v2, position: v3) -> Self {
Self {
zoom,
dimension,
position
position,
}
}
@ -33,23 +84,18 @@ impl RenderCamera {
let zoomed_width = self.dimension.x() / self.zoom;
let zoomed_height = self.dimension.y() / self.zoom;
m4::OPENGL_CONV * m4::orthographic_projection(self.position.x() - zoomed_width / 2.0,
m4::OPENGL_CONV
* m4::orthographic_projection(
self.position.x() - zoomed_width / 2.0,
self.position.x() + zoomed_width / 2.0,
self.position.y() - zoomed_height / 2.0,
self.position.y() + zoomed_height / 2.0,
1.0,
0.0)
/*OPENGL_TO_WGPU_MATRIX * cgmath::ortho(self.position.x() - zoomed_width / 2.0,
self.position.x() + zoomed_width / 2.0,
self.position.y() - zoomed_height / 2.0,
self.position.y() + zoomed_height / 2.0,
1.0,
0.0)*/
0.0,
)
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct CameraUniform {
@ -68,251 +114,3 @@ impl CameraUniform {
self.view_proj = camera.build_view_projection_matrix().into();
}
}
/*use comet_math::{Mat4, Point3, Vec3};
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: Mat4 = Mat4::new(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
pub struct Camera {
eye: Point3,
target: Point3,
up: Vec3,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
pub fn new(eye: Point3, target: Point3, up: Vec3, aspect: f32, fovy: f32, znear: f32, zfar: f32) -> Self {
Self {
eye,
target,
up,
aspect,
fovy,
znear,
zfar,
}
}
pub fn build_view_projection_matrix(&self) -> Mat4 {
let view = Mat4::look_at_rh(self.eye, self.target, self.up);
let proj = Mat4::perspective_matrix(self.fovy, self.aspect, self.znear, self.zfar);
(OPENGL_TO_WGPU_MATRIX * proj * view).transpose()
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct CameraUniform {
view_proj: [[f32; 4]; 4],
}
impl CameraUniform {
pub fn new() -> Self {
Self {
view_proj: Mat4::IDENTITY.into(),
}
}
pub fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = camera.build_view_projection_matrix().into();
}
}*/
/*use std::f32::consts::FRAC_PI_2;
use std::time::Duration;
use winit::dpi::PhysicalPosition;
use winit::event::*;
use winit::keyboard::KeyCode;
const SAFE_FRAC_PI_2: f32 = FRAC_PI_2 - 0.0001;
#[derive(Debug)]
pub struct Camera3D {
pub position: Point3,
yaw: f32,
pitch: f32,
}
impl Camera3D {
pub fn new(
position: Point3,
yaw: f32,
pitch: f32,
) -> Self {
Self {
position: position.into(),
yaw: yaw.into(),
pitch: pitch.into(),
}
}
pub fn calc_matrix(&self) -> Mat4 {
let (sin_pitch, cos_pitch) = self.pitch.0.sin_cos();
let (sin_yaw, cos_yaw) = self.yaw.0.sin_cos();
Mat4::look_to_rh(
self.position,
Vec3::new(cos_pitch * cos_yaw, sin_pitch, cos_pitch * sin_yaw).normalize(),
Vec3::unit_y(),
)
}
}
pub struct Projection {
aspect: f32,
fovy: Rad<f32>,
znear: f32,
zfar: f32,
}
impl Projection {
pub fn new<F: Into<Rad<f32>>>(width: u32, height: u32, fovy: F, znear: f32, zfar: f32) -> Self {
Self {
aspect: width as f32 / height as f32,
fovy: fovy.into(),
znear,
zfar,
}
}
pub fn resize(&mut self, width: u32, height: u32) {
self.aspect = width as f32 / height as f32;
}
pub fn calc_matrix(&self) -> Matrix4<f32> {
// UPDATE
perspective(self.fovy, self.aspect, self.znear, self.zfar)
}
}
#[derive(Debug)]
pub struct CameraController {
amount_left: f32,
amount_right: f32,
amount_forward: f32,
amount_backward: f32,
amount_up: f32,
amount_down: f32,
rotate_horizontal: f32,
rotate_vertical: f32,
scroll: f32,
speed: f32,
sensitivity: f32,
}
impl CameraController {
pub fn new(speed: f32, sensitivity: f32) -> Self {
Self {
amount_left: 0.0,
amount_right: 0.0,
amount_forward: 0.0,
amount_backward: 0.0,
amount_up: 0.0,
amount_down: 0.0,
rotate_horizontal: 0.0,
rotate_vertical: 0.0,
scroll: 0.0,
speed,
sensitivity,
}
}
pub fn process_keyboard(&mut self, key: KeyCode, state: ElementState) -> bool {
let amount = if state == ElementState::Pressed {
1.0
} else {
0.0
};
match key {
KeyCode::KeyW | KeyCode::ArrowUp => {
self.amount_forward = amount;
true
}
KeyCode::KeyS | KeyCode::ArrowDown => {
self.amount_backward = amount;
true
}
KeyCode::KeyA | KeyCode::ArrowLeft => {
self.amount_left = amount;
true
}
KeyCode::KeyD | KeyCode::ArrowRight => {
self.amount_right = amount;
true
}
KeyCode::Space => {
self.amount_up = amount;
true
}
KeyCode::ShiftLeft => {
self.amount_down = amount;
true
}
_ => false,
}
}
pub fn process_mouse(&mut self, mouse_dx: f64, mouse_dy: f64) {
self.rotate_horizontal = mouse_dx as f32;
self.rotate_vertical = mouse_dy as f32;
}
pub fn process_scroll(&mut self, delta: &MouseScrollDelta) {
self.scroll = match delta {
// I'm assuming a line is about 100 pixels
MouseScrollDelta::LineDelta(_, scroll) => -scroll * 0.5,
MouseScrollDelta::PixelDelta(PhysicalPosition { y: scroll, .. }) => -*scroll as f32,
};
}
pub fn update_camera(&mut self, camera: &mut Camera, dt: Duration) {
let dt = dt.as_secs_f32();
// Move forward/backward and left/right
let (yaw_sin, yaw_cos) = camera.yaw.0.sin_cos();
let forward = Vector3::new(yaw_cos, 0.0, yaw_sin).normalize();
let right = Vector3::new(-yaw_sin, 0.0, yaw_cos).normalize();
camera.position += forward * (self.amount_forward - self.amount_backward) * self.speed * dt;
camera.position += right * (self.amount_right - self.amount_left) * self.speed * dt;
// Move in/out (aka. "zoom")
// Note: this isn't an actual zoom. The camera's position
// changes when zooming. I've added this to make it easier
// to get closer to an object you want to focus on.
let (pitch_sin, pitch_cos) = camera.pitch.0.sin_cos();
let scrollward =
Vector3::new(pitch_cos * yaw_cos, pitch_sin, pitch_cos * yaw_sin).normalize();
camera.position += scrollward * self.scroll * self.speed * self.sensitivity * dt;
self.scroll = 0.0;
// Move up/down. Since we don't use roll, we can just
// modify the y coordinate directly.
camera.position.y += (self.amount_up - self.amount_down) * self.speed * dt;
// Rotate
camera.yaw += Rad(self.rotate_horizontal) * self.sensitivity * dt;
camera.pitch += Rad(-self.rotate_vertical) * self.sensitivity * dt;
// If process_mouse isn't called every frame, these values
// will not get set to zero, and the camera will rotate
// when moving in a non cardinal direction.
self.rotate_horizontal = 0.0;
self.rotate_vertical = 0.0;
// Keep the camera's angle from going too high/low.
if camera.pitch < -Rad(SAFE_FRAC_PI_2) {
camera.pitch = -Rad(SAFE_FRAC_PI_2);
} else if camera.pitch > Rad(SAFE_FRAC_PI_2) {
camera.pitch = Rad(SAFE_FRAC_PI_2);
}
}
}*/

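CameraManager rebuilds its RenderCamera list from the ECS each time update_from_scene is called, sorting by the Camera2D priority and treating the first entry as active. A sketch of the per-frame flow that feeds the result into the CameraUniform kept above; the exact signature of update_view_proj (taking a &RenderCamera) is inferred from the surrounding diff, and how the caller collects camera_entities is not shown here:

```rust
use crate::camera::{CameraManager, CameraUniform};
use comet_ecs::Scene;

/// Refreshes the camera list from the scene and, if any camera exists,
/// writes the active camera's view-projection matrix into the uniform.
fn refresh_camera_uniform(
    manager: &mut CameraManager,
    uniform: &mut CameraUniform,
    scene: &Scene,
    camera_entities: Vec<usize>,
) {
    manager.update_from_scene(scene, camera_entities);

    // `get_camera` unwraps internally, so guard on `has_active_camera` first.
    if manager.has_active_camera() {
        uniform.update_view_proj(manager.get_camera());
    }
}
```
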
View file

@ -1,151 +0,0 @@
use wgpu::{BindGroupLayout, BufferUsages, Device};
use wgpu::util::DeviceExt;
use comet_resources::{Texture, Vertex};
use comet_log::*;
pub struct DrawInfo {
name: String,
texture: wgpu::BindGroup,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
num_indices: u32,
}
impl DrawInfo {
pub fn new(
name: String,
device: &Device,
texture: &Texture,
texture_bind_group_layout: &BindGroupLayout,
texture_sampler: &wgpu::Sampler,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>
) -> Self {
let texture_bind = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture_sampler),
},
],
label: Some(format!("{} Texture", name).as_str()),
});
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", &name).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
let num_indices = index_data.len() as u32;
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", &name).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
});
Self {
name,
texture: texture_bind,
vertex_data,
index_data,
vertex_buffer,
index_buffer,
num_indices
}
}
pub fn name(&self) -> &String {
&self.name
}
pub fn texture(&self) -> &wgpu::BindGroup {
&self.texture
}
pub fn vertex_buffer(&self) -> &wgpu::Buffer {
&self.vertex_buffer
}
pub fn vertex_data(&self) -> &Vec<Vertex> {
&self.vertex_data
}
pub fn index_buffer(&self) -> &wgpu::Buffer {
&self.index_buffer
}
pub fn index_data(&self) -> &Vec<u16> {
&self.index_data
}
pub fn num_indices(&self) -> u32 {
self.num_indices
}
pub fn update_vertex_buffer(&mut self, device: &Device, queue: &wgpu::Queue, vertex_data: Vec<Vertex>) {
let new_vertex_size = vertex_data.len() as u64 * size_of::<Vertex>() as u64;
match vertex_data == self.vertex_data {
true => {},
false => {
match new_vertex_size > self.vertex_buffer.size() {
false => queue.write_buffer(&self.vertex_buffer, 0, bytemuck::cast_slice(&vertex_data)),
true => {
self.vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", self.name).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
}
}
self.vertex_data = vertex_data;
}
}
}
pub fn update_index_buffer(&mut self, device: &Device, queue: &wgpu::Queue, index_data: Vec<u16>) {
let new_index_size = index_data.len() as u64 * size_of::<u16>() as u64;
match index_data == self.index_data {
true => {},
false => {
match new_index_size > self.index_buffer.size() {
false => queue.write_buffer(&self.index_buffer, 0, bytemuck::cast_slice(&index_data)),
true => {
self.index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", self.name).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
});
}
}
self.num_indices = index_data.len() as u32;
self.index_data = index_data;
}
}
}
pub fn set_texture(&mut self, device: &Device, layout: &BindGroupLayout, texture: &Texture) {
self.texture = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
},
],
label: Some(format!("{} Texture Bind Group", self.name).as_str()),
});
}
}

View file

@ -1,7 +1,7 @@
mod batch;
mod camera;
mod draw_info;
mod render_group;
pub mod render_context;
mod render_pass;
pub mod render_resources;
pub mod renderer;
pub mod renderer2d;
pub mod renderer2d_;

View file

@ -1,134 +0,0 @@
use std::ops::Range;
use crate::texture;
pub trait Vertex {
fn desc() -> wgpu::VertexBufferLayout<'static>;
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct ModelVertex {
pub position: [f32; 3],
pub tex_coords: [f32; 2],
pub normal: [f32; 3],
}
impl Vertex for ModelVertex {
fn desc() -> wgpu::VertexBufferLayout<'static> {
use std::mem;
wgpu::VertexBufferLayout {
array_stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x2,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float32x3,
},
],
}
}
}
pub struct Material {
#[allow(unused)]
pub name: String,
#[allow(unused)]
pub diffuse_texture: texture::Texture,
pub bind_group: wgpu::BindGroup,
}
pub struct Mesh {
#[allow(unused)]
pub name: String,
pub vertex_buffer: wgpu::Buffer,
pub index_buffer: wgpu::Buffer,
pub num_elements: u32,
pub material: usize,
}
pub struct Model {
pub meshes: Vec<Mesh>,
pub materials: Vec<Material>,
}
pub trait DrawModel<'a> {
#[allow(unused)]
fn draw_mesh(
&mut self,
mesh: &'a Mesh,
material: &'a Material,
camera_bind_group: &'a wgpu::BindGroup,
);
fn draw_mesh_instanced(
&mut self,
mesh: &'a Mesh,
material: &'a Material,
instances: Range<u32>,
camera_bind_group: &'a wgpu::BindGroup,
);
#[allow(unused)]
fn draw_model(&mut self, model: &'a Model, camera_bind_group: &'a wgpu::BindGroup);
fn draw_model_instanced(
&mut self,
model: &'a Model,
instances: Range<u32>,
camera_bind_group: &'a wgpu::BindGroup,
);
}
impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
where
'b: 'a,
{
fn draw_mesh(
&mut self,
mesh: &'b Mesh,
material: &'b Material,
camera_bind_group: &'b wgpu::BindGroup,
) {
self.draw_mesh_instanced(mesh, material, 0..1, camera_bind_group);
}
fn draw_mesh_instanced(
&mut self,
mesh: &'b Mesh,
material: &'b Material,
instances: Range<u32>,
camera_bind_group: &'b wgpu::BindGroup,
) {
self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
self.set_bind_group(0, &material.bind_group, &[]);
self.set_bind_group(1, camera_bind_group, &[]);
self.draw_indexed(0..mesh.num_elements, 0, instances);
}
fn draw_model(&mut self, model: &'b Model, camera_bind_group: &'b wgpu::BindGroup) {
self.draw_model_instanced(model, 0..1, camera_bind_group);
}
fn draw_model_instanced(
&mut self,
model: &'b Model,
instances: Range<u32>,
camera_bind_group: &'b wgpu::BindGroup,
) {
for mesh in &model.meshes {
let material = &model.materials[mesh.material];
self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group);
}
}
}

View file

@ -1,313 +0,0 @@
use crate::camera::{CameraUniform, RenderCamera};
use crate::draw_info::DrawInfo;
use crate::render_pass::{RenderPassInfo, RenderPassType};
use crate::renderer::Renderer;
use comet_colors::Color;
use comet_ecs::{Camera2D, Component, Position2D, Render, Render2D, Scene, Text, Transform2D};
use comet_log::*;
use comet_math::{p2, p3, v2, v3};
use comet_resources::texture_atlas::TextureRegion;
use comet_resources::{graphic_resource_manager::GraphicResourceManager, Texture, Vertex};
use comet_structs::ComponentSet;
use std::iter;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use wgpu::core::command::DrawKind::Draw;
use wgpu::naga::ShaderStage;
use wgpu::util::DeviceExt;
use wgpu::BufferUsages;
use winit::dpi::PhysicalSize;
use winit::window::Window;
pub struct Renderer2D<'a> {
surface: wgpu::Surface<'a>,
device: wgpu::Device,
queue: wgpu::Queue,
config: wgpu::SurfaceConfiguration,
size: PhysicalSize<u32>,
render_pipeline_layout: wgpu::PipelineLayout,
universal_render_pipeline: wgpu::RenderPipeline,
texture_bind_group_layout: wgpu::BindGroupLayout,
dummy_texture_bind_group: wgpu::BindGroup,
texture_sampler: wgpu::Sampler,
camera: RenderCamera,
camera_uniform: CameraUniform,
camera_buffer: wgpu::Buffer,
camera_bind_group: wgpu::BindGroup,
render_pass: Vec<RenderPassInfo>,
draw_info: Vec<DrawInfo>,
graphic_resource_manager: GraphicResourceManager,
delta_time: f32,
last_frame_time: Instant,
clear_color: wgpu::Color,
}
impl<'a> Renderer2D<'a> {
pub fn new(window: Arc<Window>, clear_color: Option<impl Color>) -> Renderer2D<'a> {
let size = PhysicalSize::<u32>::new(1920, 1080);
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::Backends::PRIMARY,
..Default::default()
});
let surface = instance.create_surface(window).unwrap();
let adapter = pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
force_fallback_adapter: false,
}))
.unwrap();
let (device, queue) = pollster::block_on(adapter.request_device(
&wgpu::DeviceDescriptor {
label: None,
required_features: wgpu::Features::empty(),
required_limits: wgpu::Limits::default(),
memory_hints: Default::default(),
},
None, // Trace path
))
.unwrap();
let surface_caps = surface.get_capabilities(&adapter);
let surface_format = surface_caps
.formats
.iter()
.copied()
.find(|f| f.is_srgb())
.unwrap_or(surface_caps.formats[0]);
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface_format,
width: size.width,
height: size.height,
present_mode: surface_caps.present_modes[0],
alpha_mode: surface_caps.alpha_modes[0],
view_formats: vec![],
desired_maximum_frame_latency: 2,
};
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
source: wgpu::ShaderSource::Wgsl(include_str!("base2d.wgsl").into()),
});
let graphic_resource_manager = GraphicResourceManager::new();
let diffuse_bytes = include_bytes!(r"../../../res/textures/comet_icon.png");
let diffuse_texture =
Texture::from_bytes(&device, &queue, diffuse_bytes, "comet_icon.png", false).unwrap();
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
label: Some("texture_bind_group_layout"),
});
let camera = RenderCamera::new(1.0, v2::new(2.0, 2.0), v3::new(0.0, 0.0, 0.0));
let mut camera_uniform = CameraUniform::new();
camera_uniform.update_view_proj(&camera);
let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Camera Buffer"),
contents: bytemuck::cast_slice(&[camera_uniform]),
usage: BufferUsages::UNIFORM | BufferUsages::COPY_DST,
});
let camera_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
label: Some("camera_bind_group_layout"),
});
let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &camera_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: camera_buffer.as_entire_binding(),
}],
label: Some("camera_bind_group"),
});
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&texture_bind_group_layout, &camera_bind_group_layout],
push_constant_ranges: &[],
});
let universal_render_pipeline =
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[Vertex::desc()],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: config.format,
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
}),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
multiview: None,
cache: None,
});
let mut render_pass: Vec<RenderPassInfo> = Vec::new();
/*render_pass.push(RenderPassInfo::new_engine_pass(
&device,
"Standard Render Pass".to_string(),
&texture_bind_group_layout,
&diffuse_texture,
vec![],
vec![],
));*/
let clear_color = match clear_color {
Some(color) => color.to_wgpu(),
None => wgpu::Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 1.0,
},
};
let texture_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Linear,
lod_min_clamp: 0.0,
lod_max_clamp: 100.0,
compare: None,
anisotropy_clamp: 16,
border_color: None,
..Default::default()
});
let empty_texture = device.create_texture(&wgpu::TextureDescriptor {
label: Some("Empty Texture"),
size: wgpu::Extent3d {
width: config.width,
height: config.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
usage: wgpu::TextureUsages::COPY_SRC
| wgpu::TextureUsages::COPY_DST
| wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[wgpu::TextureFormat::Bgra8UnormSrgb],
});
let dummy_texture_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(
&empty_texture.create_view(&wgpu::TextureViewDescriptor::default()),
),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture_sampler),
},
],
label: Some("dummy_texture_bind_group"),
});
let mut draw_info: Vec<DrawInfo> = Vec::new();
Self {
surface,
device,
queue,
config,
size,
render_pipeline_layout,
universal_render_pipeline,
texture_bind_group_layout,
dummy_texture_bind_group,
texture_sampler,
camera,
camera_uniform,
camera_buffer,
camera_bind_group,
render_pass,
draw_info,
graphic_resource_manager,
delta_time: 0.0,
last_frame_time: Instant::now(),
clear_color,
}
}
}

View file

@ -0,0 +1,180 @@
use crate::{batch::Batch, render_resources::RenderResources};
use comet_colors::Color;
use comet_resources::Vertex;
use std::{collections::HashMap, sync::Arc};
use winit::{dpi::PhysicalSize, window::Window};
pub struct RenderContext<'a> {
device: wgpu::Device,
queue: wgpu::Queue,
surface: wgpu::Surface<'a>,
config: wgpu::SurfaceConfiguration,
size: PhysicalSize<u32>,
scale_factor: f64,
clear_color: wgpu::Color,
render_pipelines: HashMap<String, wgpu::RenderPipeline>,
batches: HashMap<String, Batch>,
resources: RenderResources,
}
impl<'a> RenderContext<'a> {
pub fn new(window: Arc<Window>, clear_color: Option<impl Color>) -> Self {
let size = window.inner_size();
let scale_factor = window.scale_factor();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::Backends::PRIMARY,
..Default::default()
});
let surface = instance.create_surface(window).unwrap();
let adapter = pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
force_fallback_adapter: false,
}))
.unwrap();
let (device, queue) = pollster::block_on(adapter.request_device(
&wgpu::DeviceDescriptor {
label: None,
required_features: wgpu::Features::empty(),
required_limits: wgpu::Limits::default(),
memory_hints: Default::default(),
},
None,
))
.unwrap();
let surface_caps = surface.get_capabilities(&adapter);
let surface_format = surface_caps
.formats
.iter()
.copied()
.find(|f| f.is_srgb())
.unwrap_or(surface_caps.formats[0]);
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface_format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
alpha_mode: surface_caps.alpha_modes[0],
view_formats: vec![],
desired_maximum_frame_latency: 2,
};
let clear_color = match clear_color {
Some(color) => color.to_wgpu(),
None => wgpu::Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 1.0,
},
};
Self {
device,
queue,
surface,
config,
size,
scale_factor,
clear_color,
render_pipelines: HashMap::new(),
batches: HashMap::new(),
resources: RenderResources::new(),
}
}
pub fn device(&self) -> &wgpu::Device {
&self.device
}
pub fn queue(&self) -> &wgpu::Queue {
&self.queue
}
pub fn surface(&self) -> &wgpu::Surface {
&self.surface
}
pub fn configure_surface(&mut self) {
self.surface.configure(&self.device, &self.config);
}
pub fn config(&self) -> &wgpu::SurfaceConfiguration {
&self.config
}
pub fn config_mut(&mut self) -> &mut wgpu::SurfaceConfiguration {
&mut self.config
}
pub fn size(&self) -> PhysicalSize<u32> {
self.size
}
pub fn set_size(&mut self, new_size: PhysicalSize<u32>) {
self.size = new_size
}
pub fn scale_factor(&self) -> f64 {
self.scale_factor
}
pub fn set_scale_factor(&mut self, scale_factor: f64) {
self.scale_factor = scale_factor
}
pub fn clear_color(&self) -> wgpu::Color {
self.clear_color
}
pub fn insert_pipeline(&mut self, label: String, pipeline: wgpu::RenderPipeline) {
self.render_pipelines.insert(label, pipeline);
}
pub fn get_pipeline(&self, label: String) -> Option<&wgpu::RenderPipeline> {
self.render_pipelines.get(&label)
}
pub fn get_batch(&self, label: String) -> Option<&Batch> {
self.batches.get(&label)
}
pub fn get_batch_mut(&mut self, label: String) -> Option<&mut Batch> {
self.batches.get_mut(&label)
}
pub fn new_batch(&mut self, label: String, vertex_data: Vec<Vertex>, index_data: Vec<u16>) {
self.batches.insert(
label.clone(),
Batch::new(label, &self.device, vertex_data, index_data),
);
}
pub fn update_batch_buffers(
&mut self,
label: String,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>,
) {
if let Some(batch) = self.batches.get_mut(&label) {
batch.update_vertex_buffer(&self.device, &self.queue, vertex_data);
batch.update_index_buffer(&self.device, &self.queue, index_data);
} else {
let batch = Batch::new(label.clone(), &self.device, vertex_data, index_data);
self.batches.insert(label, batch);
}
}
pub fn resources(&self) -> &RenderResources {
&self.resources
}
pub fn resources_mut(&mut self) -> &mut RenderResources {
&mut self.resources
}
}

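RenderContext now owns the wgpu device, queue, surface, and configuration plus label-keyed pipelines, batches, and RenderResources, so the renderer only has to hand out labels. A sketch of registering a pass's pipeline and keeping its geometry in sync; the "sprites" label is a placeholder and the pipeline is assumed to be built elsewhere against ctx.config().format:

```rust
use comet_renderer::render_context::RenderContext;
use comet_resources::Vertex;

/// Registers a pipeline under a label once, then keeps that label's batch current.
/// `update_batch_buffers` creates the batch on first use and updates it afterwards.
fn prepare_sprite_pass(
    ctx: &mut RenderContext,
    pipeline: wgpu::RenderPipeline,
    vertices: Vec<Vertex>,
    indices: Vec<u16>,
) {
    // The same label is later used by the render pass executors to find
    // both the pipeline and the batch.
    ctx.insert_pipeline("sprites".to_string(), pipeline);
    ctx.update_batch_buffers("sprites".to_string(), vertices, indices);
}
```
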
View file

@ -1,4 +0,0 @@
pub struct RenderGroup {
pipeline: wgpu::RenderPipeline,
entities: Vec<u32>
}

View file

@ -1,338 +1,125 @@
use wgpu::{ShaderModule, BindGroup, BindGroupLayout, BufferUsages, Device, Queue, RenderPipeline, PipelineLayout, SurfaceConfiguration, TextureFormat};
use wgpu::util::DeviceExt;
use comet_resources::{Vertex, Texture};
use crate::render_context::RenderContext;
#[derive(Debug, Clone)]
pub enum RenderPassType {
Engine,
User
}
pub struct RenderPass {
pub label: String,
pub execute: Box<
dyn Fn(String, &mut RenderContext, &mut wgpu::CommandEncoder, &wgpu::TextureView)
+ Send
+ Sync,
>,
}
pub struct RenderPassInfo {
pass_name: String,
pass_type: RenderPassType,
texture_bind_group: BindGroup,
vertex_buffer: wgpu::Buffer,
index_buffer: wgpu::Buffer,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>,
num_indices: u32,
pipeline: Option<RenderPipeline>
}
impl RenderPassInfo {
pub fn new_user_pass(
device: &Device,
pass_name: String,
texture_group_layout: &BindGroupLayout,
texture: &Texture,
shader: &ShaderModule,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>,
pipeline_layout: &PipelineLayout,
config: &SurfaceConfiguration
impl RenderPass {
pub fn new(
label: String,
execute: Box<
dyn Fn(String, &mut RenderContext, &mut wgpu::CommandEncoder, &wgpu::TextureView)
+ Send
+ Sync,
>,
) -> Self {
let num_indices = index_data.len() as u32;
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", pass_name).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
Self { label, execute }
}
}
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", pass_name).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
});
let texture_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
pub fn universal_clear_execute(
label: String,
ctx: &mut RenderContext,
encoder: &mut wgpu::CommandEncoder,
view: &wgpu::TextureView,
) {
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some(format!("{} Render Pass", label.clone()).as_str()),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(ctx.clear_color()),
store: wgpu::StoreOp::Store,
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
},
],
label: Some(format!("{} Texture Bind Group", pass_name).as_str()),
});
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[Vertex::desc()],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: config.format,
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
}),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
multiview: None,
cache: None,
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
Self {
pass_name,
pass_type: RenderPassType::User,
texture_bind_group,
vertex_buffer,
index_buffer,
vertex_data,
index_data,
num_indices,
pipeline: Some(pipeline)
}
render_pass.set_pipeline(&ctx.get_pipeline(label.clone()).unwrap());
let groups = ctx.resources().get_bind_groups(&label).unwrap();
for i in 0..groups.len() {
render_pass.set_bind_group(i as u32, groups.get(i).unwrap(), &[]);
}
pub fn new_engine_pass(
device: &Device,
pass_name: String,
texture_group_layout: &BindGroupLayout,
texture: &Texture,
vertex_data: Vec<Vertex>,
index_data: Vec<u16>,
) -> Self {
let num_indices = index_data.len() as u32;
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", pass_name).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
render_pass.set_vertex_buffer(
0,
ctx.get_batch(label.clone())
.unwrap()
.vertex_buffer()
.slice(..),
);
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", pass_name).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
});
render_pass.set_index_buffer(
ctx.get_batch(label.clone())
.unwrap()
.index_buffer()
.slice(..),
wgpu::IndexFormat::Uint16,
);
let texture_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
render_pass.draw_indexed(
0..ctx.get_batch(label.clone()).unwrap().num_indices(),
0,
0..1,
);
}
pub fn universal_load_execute(
label: String,
ctx: &mut RenderContext,
encoder: &mut wgpu::CommandEncoder,
view: &wgpu::TextureView,
) {
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some(format!("{} Render Pass", label.clone()).as_str()),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Load,
store: wgpu::StoreOp::Store,
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
},
],
label: Some(format!("{} Texture Bind Group", pass_name).as_str()),
});
Self {
pass_name,
pass_type: RenderPassType::Engine,
texture_bind_group,
vertex_buffer,
index_buffer,
vertex_data,
index_data,
num_indices,
pipeline: None
}
}
pub fn pass_name(&self) -> &str {
&self.pass_name
}
pub fn pass_type(&self) -> RenderPassType {
self.pass_type.clone()
}
pub fn set_shader(&mut self, device: &Device, config: &SurfaceConfiguration, pipeline_layout: &PipelineLayout, shader: &ShaderModule) {
self.pipeline = Some(device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[Vertex::desc()],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: config.format,
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
}),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
multiview: None,
cache: None,
}));
}
pub fn texture_bind_group(&self) -> &BindGroup {
&self.texture_bind_group
}
pub fn set_texture(&mut self, device: &Device, layout: &BindGroupLayout, texture: &Texture) {
self.texture_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&texture.sampler),
},
],
label: Some(format!("{} Texture Bind Group", self.pass_name).as_str()),
});
}
pub fn vertex_buffer(&self) -> &wgpu::Buffer {
&self.vertex_buffer
}
pub fn vertex_data(&self) -> &Vec<Vertex> {
&self.vertex_data
}
pub fn set_vertex_buffer(&mut self, device: &Device, queue: &Queue, vertex_data: Vec<Vertex>) {
let new_vertex_size = vertex_data.len() as u64 * size_of::<Vertex>() as u64;
match vertex_data == self.vertex_data {
true => {},
false => {
match new_vertex_size > self.vertex_buffer.size() {
false => queue.write_buffer(&self.vertex_buffer, 0, bytemuck::cast_slice(&vertex_data)),
true => {
self.vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", self.pass_name).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
}
}
self.vertex_data = vertex_data;
}
}
}
pub fn push_to_vertex_buffer(&mut self, device: &Device, vertex_data: &mut Vec<Vertex>) {
self.vertex_data.append(vertex_data);
self.vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Vertex Buffer", self.pass_name).as_str()),
contents: bytemuck::cast_slice(&vertex_data),
usage: BufferUsages::VERTEX | BufferUsages::COPY_DST,
});
}
pub fn index_buffer(&self) -> &wgpu::Buffer {
&self.index_buffer
}
pub fn index_data(&self) -> &Vec<u16> {
&self.index_data
}
pub fn num_indices(&self) -> u32 {
self.num_indices
}
pub fn set_index_buffer(&mut self, device: &Device, queue: &Queue, index_data: Vec<u16>) {
let new_index_size = index_data.len() as u64 * size_of::<u16>() as u64;
match index_data == self.index_data {
true => {},
false => {
match new_index_size > self.index_buffer.size() {
false => queue.write_buffer(&self.index_buffer, 0, bytemuck::cast_slice(&index_data)),
true => {
self.index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", self.pass_name).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
});
}
}
self.num_indices = index_data.len() as u32;
self.index_data = index_data
}
}
}
pub fn push_to_index_buffer(&mut self, device: &Device, index_data: &mut Vec<u16>) {
self.index_data.append(index_data);
self.index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(format!("{} Index Buffer", self.pass_name).as_str()),
contents: bytemuck::cast_slice(&index_data),
usage: BufferUsages::INDEX | BufferUsages::COPY_DST,
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
self.num_indices = self.index_data.len() as u32;
render_pass.set_pipeline(&ctx.get_pipeline(label.clone()).unwrap());
let groups = ctx.resources().get_bind_groups(&label).unwrap();
for i in 0..groups.len() {
render_pass.set_bind_group(i as u32, groups.get(i).unwrap(), &[]);
}
pub fn pipeline(&self) -> Option<&RenderPipeline> {
self.pipeline.as_ref()
}
render_pass.set_vertex_buffer(
0,
ctx.get_batch(label.clone())
.unwrap()
.vertex_buffer()
.slice(..),
);
render_pass.set_index_buffer(
ctx.get_batch(label.clone())
.unwrap()
.index_buffer()
.slice(..),
wgpu::IndexFormat::Uint16,
);
render_pass.draw_indexed(
0..ctx.get_batch(label.clone()).unwrap().num_indices(),
0,
0..1,
);
}

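A RenderPass is now just a label and a boxed execute closure over the RenderContext, a command encoder, and the target view; universal_clear_execute first clears to the context's clear color, while universal_load_execute uses LoadOp::Load so it can draw on top of earlier passes (e.g. text over sprites, per the font-rendering commit). A sketch of building a pass list inside comet_renderer (render_pass is a private module); the driver loop that calls each pass with its own label lives in the suppressed renderer2d.rs diff and is assumed here:

```rust
use crate::render_pass::{universal_clear_execute, universal_load_execute, RenderPass};

/// A clearing pass for world geometry followed by a loading pass for text/UI.
/// Each label must already have a pipeline, bind groups, and a batch registered
/// in the RenderContext, since the executors unwrap those lookups.
fn default_passes() -> Vec<RenderPass> {
    vec![
        RenderPass::new("sprites".to_string(), Box::new(universal_clear_execute)),
        RenderPass::new("text".to_string(), Box::new(universal_load_execute)),
    ]
}
```
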
View file

@ -0,0 +1,133 @@
use comet_log::error;
use std::{collections::HashMap, sync::Arc};
pub struct RenderResources {
bind_groups: HashMap<String, Vec<Arc<wgpu::BindGroup>>>,
bind_group_layouts: HashMap<String, Vec<Arc<wgpu::BindGroupLayout>>>,
buffers: HashMap<String, Vec<Arc<wgpu::Buffer>>>,
samplers: HashMap<String, wgpu::Sampler>,
}
impl RenderResources {
pub fn new() -> Self {
Self {
bind_groups: HashMap::new(),
bind_group_layouts: HashMap::new(),
buffers: HashMap::new(),
samplers: HashMap::new(),
}
}
pub fn get_bind_groups(&self, label: &str) -> Option<&Vec<Arc<wgpu::BindGroup>>> {
self.bind_groups.get(label)
}
pub fn get_bind_group_layout(&self, label: &str) -> Option<&Vec<Arc<wgpu::BindGroupLayout>>> {
self.bind_group_layouts.get(label)
}
pub fn replace_bind_group_layout(
&mut self,
label: String,
pos: usize,
bind_group_layout: Arc<wgpu::BindGroupLayout>,
) {
match self.bind_group_layouts.get_mut(&label) {
None => {
error!("Render pass {} does not exist", label);
return;
}
Some(v) => {
if v.len() <= pos {
error!(
"Position {} is out of bounds for the bind group layouts of render pass {}",
pos, label
);
return;
}
v[pos] = bind_group_layout;
}
}
}
pub fn get_buffer(&self, label: &str) -> Option<&Vec<Arc<wgpu::Buffer>>> {
self.buffers.get(label)
}
pub fn get_sampler(&self, label: &str) -> Option<&wgpu::Sampler> {
self.samplers.get(label)
}
pub fn insert_bind_group(&mut self, label: String, bind_group: Arc<wgpu::BindGroup>) {
match self.bind_groups.get_mut(&label) {
None => {
self.bind_groups.insert(label, vec![bind_group]);
}
Some(v) => v.push(bind_group),
};
}
pub fn replace_bind_group(
&mut self,
label: String,
pos: usize,
bind_group: Arc<wgpu::BindGroup>,
) {
match self.bind_groups.get_mut(&label) {
None => {
error!("Render pass {} does not exist", label);
return;
}
Some(v) => {
if v.len() <= pos {
error!(
"Position {} is out of bounds for the bind groups of render pass {}",
pos, label
);
return;
}
v[pos] = bind_group;
}
}
}
pub fn insert_bind_group_layout(&mut self, label: String, layout: Arc<wgpu::BindGroupLayout>) {
match self.bind_group_layouts.get_mut(&label) {
None => {
self.bind_group_layouts.insert(label, vec![layout]);
}
Some(v) => v.push(layout),
}
}
pub fn insert_buffer(&mut self, label: String, buffer: Arc<wgpu::Buffer>) {
match self.buffers.get_mut(&label) {
None => {
self.buffers.insert(label, vec![buffer]);
}
Some(v) => v.push(buffer),
}
}
pub fn replace_buffer(&mut self, label: String, pos: usize, buffer: Arc<wgpu::Buffer>) {
match self.buffers.get_mut(&label) {
None => {
error!("Render pass {} does not exist", label);
return;
}
Some(v) => {
if v.len() <= pos {
error!(
"Position {} is out of bounds for the buffers of render pass {}",
pos, label
);
return;
}
v[pos] = buffer;
}
}
}
pub fn insert_sampler(&mut self, label: String, sampler: wgpu::Sampler) {
self.samplers.insert(label, sampler);
}
}

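RenderResources stores bind groups, bind-group layouts, buffers, and samplers in label-keyed vectors, appending with insert_* and swapping a known slot with replace_* (logging an error instead of panicking on a bad label or index). A sketch of registering what the universal pass executors later fetch via get_bind_groups; the label, the pre-built wgpu objects, and the slot ordering are assumptions for illustration:

```rust
use comet_renderer::render_resources::RenderResources;
use std::sync::Arc;

/// Registers a pass's GPU objects under its label. Insertion order matters:
/// the universal executors bind the stored groups by index (0, 1, ...).
fn register_sprite_resources(
    resources: &mut RenderResources,
    texture_bind_group: wgpu::BindGroup,
    camera_bind_group: wgpu::BindGroup,
    camera_buffer: wgpu::Buffer,
    sampler: wgpu::Sampler,
) {
    let label = "sprites".to_string();

    resources.insert_bind_group(label.clone(), Arc::new(texture_bind_group)); // slot 0
    resources.insert_bind_group(label.clone(), Arc::new(camera_bind_group)); // slot 1
    resources.insert_buffer(label.clone(), Arc::new(camera_buffer));
    resources.insert_sampler(label, sampler);
}
```
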
crates/comet_renderer/src/renderer2d.rs (1222), Executable file → Normal file

File diff suppressed because it is too large

View file

@ -1,25 +0,0 @@
struct VertexInput {
@location(0) position: vec3<f32>,
@location(1) tex_coords: vec2<f32>,
@location(2) color: vec4<f32>,
}
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) tex_coords: vec2<f32>,
@location(1) color: vec4<f32>,
}
@vertex
fn vs_main(input: VertexInput) -> VertexOutput {
var out: VertexOutput;
out.clip_position = vec4(input.position, 1.0);
out.tex_coords = input.tex_coords;
out.color = input.color;
return out;
}
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
return in.color;
}

View file

@ -1,188 +0,0 @@
mod render_context;
use render_context::*;
use crate::renderer::Renderer;
use comet_colors::Color;
use comet_resources::{graphic_resource_manager::GraphicResourceManager, Vertex};
use std::iter;
use std::sync::Arc;
use wgpu::util::DeviceExt;
use winit::dpi::PhysicalSize;
use winit::window::Window;
pub struct Renderer2D_<'a> {
render_context: RenderContext<'a>,
universal_render_pipeline: wgpu::RenderPipeline,
graphic_resource_manager: GraphicResourceManager,
vertex_vec: Vec<Vertex>,
vertex_buffer: wgpu::Buffer,
index_vec: Vec<u32>,
index_buffer: wgpu::Buffer,
num_indices: u32,
clear_color: wgpu::Color,
}
impl<'a> Renderer2D_<'a> {
pub fn new(window: Arc<Window>, clear_color: Option<impl Color>) -> Renderer2D_<'a> {
let render_context = RenderContext::new(window.clone(), clear_color);
let graphic_resource_manager = GraphicResourceManager::new();
let clear_color = match clear_color {
Some(color) => color.to_wgpu(),
None => wgpu::Color {
r: 0.0,
g: 0.0,
b: 0.0,
a: 1.0,
},
};
let universal_renderpipeline_module =
render_context
.device
.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Universal Render Pipeline Shader Module"),
source: wgpu::ShaderSource::Wgsl(include_str!("base.wgsl").into()),
});
let universal_renderpipeline_layout =
render_context
.device
.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Universal Render Pipeline Layout"),
bind_group_layouts: &[],
push_constant_ranges: &[],
});
let universal_render_pipeline =
render_context
.device
.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Universal Render Pipeline"),
layout: Some(&universal_renderpipeline_layout),
vertex: wgpu::VertexState {
module: &universal_renderpipeline_module,
entry_point: "vs_main",
buffers: &[Vertex::desc()],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &universal_renderpipeline_module,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: render_context.config.format,
blend: Some(wgpu::BlendState::ALPHA_BLENDING),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
multiview: None,
cache: None,
});
let vertex_buffer =
render_context
.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: &[],
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
});
let index_buffer =
render_context
.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: &[],
usage: wgpu::BufferUsages::INDEX | wgpu::BufferUsages::COPY_DST,
});
Self {
render_context,
universal_render_pipeline,
graphic_resource_manager,
vertex_buffer,
vertex_vec: vec![],
index_buffer,
index_vec: vec![],
num_indices: 0,
clear_color,
}
}
}
impl<'a> Renderer for Renderer2D_<'a> {
fn new(window: Arc<Window>, clear_color: Option<impl Color>) -> Renderer2D_<'a> {
Self::new(window, clear_color)
}
fn size(&self) -> PhysicalSize<u32> {
self.render_context.size()
}
fn resize(&mut self, new_size: PhysicalSize<u32>) {
self.render_context.resize(new_size)
}
fn update(&mut self) -> f32 {
self.render_context.update()
}
fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
let output = self.render_context.surface.get_current_texture()?;
let output_view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder =
self.render_context
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Universal Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &output_view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(self.clear_color),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
render_pass.set_pipeline(&self.universal_render_pipeline);
render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
render_pass.draw_indexed(0..self.num_indices, 0, 0..1);
}
self.render_context
.queue
.submit(iter::once(encoder.finish()));
output.present();
Ok(())
}
}

View file

@ -1,96 +0,0 @@
use comet_colors::Color;
use std::sync::Arc;
use std::time::Instant;
use winit::dpi::PhysicalSize;
use winit::window::Window;
pub struct RenderContext<'a> {
pub surface: wgpu::Surface<'a>,
pub device: wgpu::Device,
pub queue: wgpu::Queue,
pub config: wgpu::SurfaceConfiguration,
pub size: PhysicalSize<u32>,
pub last_frame_time: Instant,
pub delta_time: f32,
}
impl<'a> RenderContext<'a> {
pub fn new(window: Arc<Window>, clear_color: Option<impl Color>) -> RenderContext<'a> {
let size = window.inner_size();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::Backends::PRIMARY,
..Default::default()
});
let surface = instance.create_surface(window).unwrap();
let adapter = pollster::block_on(instance.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
force_fallback_adapter: false,
}))
.unwrap();
let (device, queue) = pollster::block_on(adapter.request_device(
&wgpu::DeviceDescriptor {
label: None,
required_features: wgpu::Features::empty(),
required_limits: wgpu::Limits::default(),
memory_hints: Default::default(),
},
None,
))
.unwrap();
let surface_caps = surface.get_capabilities(&adapter);
let surface_format = surface_caps
.formats
.iter()
.copied()
.find(|f| f.is_srgb())
.unwrap_or(surface_caps.formats[0]);
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface_format,
width: size.width,
height: size.height,
present_mode: surface_caps.present_modes[0],
alpha_mode: surface_caps.alpha_modes[0],
view_formats: vec![],
desired_maximum_frame_latency: 2,
};
Self {
surface,
device,
queue,
config,
size,
last_frame_time: Instant::now(),
delta_time: 0.0,
}
}
pub fn size(&self) -> PhysicalSize<u32> {
self.size
}
pub fn resize(&mut self, new_size: PhysicalSize<u32>) {
if new_size.width > 0 && new_size.height > 0 {
self.size = new_size;
self.config.width = new_size.width;
self.config.height = new_size.height;
self.surface.configure(&self.device, &self.config);
}
}
pub fn update(&mut self) -> f32 {
let now = Instant::now();
let delta_time = now.duration_since(self.last_frame_time).as_millis() as f32 / 1000.0;
self.last_frame_time = now;
self.delta_time = delta_time;
delta_time
}
}

View file

@ -1,28 +1,32 @@
use std::{
collections::HashMap, path::Path
};
use std::{collections::HashMap, path::Path};
use wgpu::{naga, Device, FilterMode, Queue, ShaderModule, TextureFormat, TextureUsages};
use wgpu::naga::ShaderStage;
use crate::{
font::Font,
texture_atlas::{TextureAtlas, TextureRegion},
Texture,
};
use comet_log::info;
use crate::{font, texture, Texture};
use crate::font::Font;
use crate::texture_atlas::{TextureAtlas, TextureRegion};
use wgpu::{
naga::{self, ShaderStage},
Device, Queue, ShaderModule,
};
pub struct GraphicResourceManager {
texture_atlas: TextureAtlas,
font_atlas: TextureAtlas,
fonts: Vec<Font>,
data_files: HashMap<String, String>,
shaders: HashMap<String, ShaderModule>
shaders: HashMap<String, ShaderModule>,
}
impl GraphicResourceManager {
pub fn new() -> Self {
Self {
texture_atlas: TextureAtlas::empty(),
font_atlas: TextureAtlas::empty(),
fonts: Vec::new(),
data_files: HashMap::new(),
shaders: HashMap::new()
shaders: HashMap::new(),
}
}
@ -30,6 +34,14 @@ impl GraphicResourceManager {
&self.texture_atlas
}
pub fn font_atlas(&self) -> &TextureAtlas {
&self.font_atlas
}
pub fn set_font_atlas(&mut self, font_atlas: TextureAtlas) {
self.font_atlas = font_atlas
}
pub fn texture_locations(&self) -> &HashMap<String, TextureRegion> {
&self.texture_atlas.textures()
}
@ -42,16 +54,19 @@ impl GraphicResourceManager {
&self.fonts
}
pub fn fonts_mut(&mut self) -> &mut Vec<Font> {
&mut self.fonts
}
pub fn get_glyph(&self, font: &str, ch: char) -> Option<&TextureRegion> {
self.fonts.iter().find(|f| f.name() == font).and_then(|f| f.get_glyph(ch))
self.fonts
.iter()
.find(|f| f.name() == font)
.and_then(|f| f.get_glyph(ch))
}
pub fn set_texture_atlas(&mut self, texture_atlas: TextureAtlas) {
self.texture_atlas = texture_atlas;
// This is just for testing purposes
//self.texture_locations.insert("normal_comet.png".to_string(), ([0,0], [15,15]));
//self.texture_locations.insert("green_comet.png".to_string(), ([0,15], [15,31]));
}
pub fn create_texture_atlas(&mut self, paths: Vec<String>) {
@ -59,10 +74,13 @@ impl GraphicResourceManager {
}
pub fn load_string(&self, file_name: &str) -> anyhow::Result<String> {
let path = Path::new(std::env::var("OUT_DIR")?.as_str())
.join("res")
.join(file_name);
let txt = std::fs::read_to_string(path)?;
let base_path = std::env::var("OUT_DIR")
.map(|p| Path::new(&p).to_path_buf())
.unwrap_or_else(|_| Path::new(".").to_path_buf());
let path = base_path.join(file_name);
let txt = std::fs::read_to_string(&path)
.map_err(|e| anyhow::anyhow!("Failed to load {}: {}", path.display(), e))?;
Ok(txt)
}
@ -91,34 +109,30 @@ impl GraphicResourceManager {
/// `shader_stage` is only needed if it is a GLSL shader, so default to None if it isn't GLSL
pub fn load_shader(
&mut self,
device: &Device,
shader_stage: Option<ShaderStage>,
file_name: &str,
device: &Device
) -> anyhow::Result<()> {
let shader_source = self.load_string(file_name)?;
let module = match file_name.split('.').last() {
Some ("wgsl") => {
device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some(file_name.clone()),
source: wgpu::ShaderSource::Wgsl(shader_source.into())
})
},
Some("wgsl") => device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some(file_name),
source: wgpu::ShaderSource::Wgsl(shader_source.into()),
}),
Some("glsl") => {
if let Some(stage) = shader_stage {
device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some(file_name.clone()),
label: Some(file_name),
source: wgpu::ShaderSource::Glsl {
shader: shader_source.into(),
stage,
defines: naga::FastHashMap::default()
}
defines: naga::FastHashMap::default(),
},
})
} else {
return Err(anyhow::anyhow!("GLSL shader needs a stage"));
}
else {
return Err(anyhow::anyhow!("GLSL shader needs a stage"))
}
}
_ => return Err(anyhow::anyhow!("Unsupported shader type")),
};
@ -127,6 +141,23 @@ impl GraphicResourceManager {
Ok(())
}
/// Loads the shader from a source code string
/// Right now only works with wgsl
pub fn load_shader_from_string(
&mut self,
device: &Device,
shader_name: &str,
shader_src: &str,
) -> anyhow::Result<()> {
let module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some(shader_name),
source: wgpu::ShaderSource::Wgsl(shader_src.into()),
});
self.shaders.insert(shader_name.to_string(), module);
Ok(())
}
pub fn get_shader(&self, shader: &str) -> Option<&ShaderModule> {
self.shaders.get(shader)
}
@ -137,112 +168,4 @@ impl GraphicResourceManager {
info!("Font {} loaded!", font.name());
self.fonts.push(font);
}
/*pub async fn load_model(
&self,
file_name: &str,
device: &wgpu::Device,
queue: &wgpu::Queue,
layout: &wgpu::BindGroupLayout,
) -> anyhow::Result<model::Model> {
let obj_text = self.load_string(file_name).await?;
let obj_cursor = Cursor::new(obj_text);
let mut obj_reader = BufReader::new(obj_cursor);
let (models, obj_materials) = tobj::load_obj_buf_async(
&mut obj_reader,
&tobj::LoadOptions {
triangulate: true,
single_index: true,
..Default::default()
},
|p| async move {
let mat_text = self.load_string(&p).await.unwrap();
tobj::load_mtl_buf(&mut BufReader::new(Cursor::new(mat_text)))
},
)
.await?;
let mut materials = Vec::new();
for m in obj_materials? {
let diffuse_texture = self.load_texture(&m.diffuse_texture, false, device, queue).await?;
let normal_texture = self.load_texture(&m.normal_texture, true, device, queue).await?;
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: None,
});
materials.push(model::Material {
name: m.name,
diffuse_texture,
bind_group,
});
}
let meshes = models
.into_iter()
.map(|m| {
let vertices = (0..m.mesh.positions.len() / 3)
.map(|i| {
if m.mesh.normals.is_empty() {
model::ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [m.mesh.texcoords[i * 2], 1.0 - m.mesh.texcoords[i * 2 + 1]],
normal: [0.0, 0.0, 0.0],
}
} else {
model::ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [m.mesh.texcoords[i * 2], 1.0 - m.mesh.texcoords[i * 2 + 1]],
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
],
}
}
})
.collect::<Vec<_>>();
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", file_name)),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsages::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", file_name)),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsages::INDEX,
});
model::Mesh {
name: file_name.to_string(),
vertex_buffer,
index_buffer,
num_elements: m.mesh.indices.len() as u32,
material: m.mesh.material_id.unwrap_or(0),
}
})
.collect::<Vec<_>>();
Ok(model::Model { meshes, materials })
}*/
}
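A minimal usage sketch of the shader-loading API above. The file names, the `include_str!` path, and the wrapping function are assumptions for illustration, not part of this change:
use wgpu::naga::ShaderStage;

fn load_shaders(device: &wgpu::Device) -> anyhow::Result<()> {
    let mut manager = GraphicResourceManager::new();

    // WGSL needs no stage, so pass None for `shader_stage`.
    manager.load_shader(device, None, "sprite.wgsl")?;

    // GLSL requires an explicit naga stage; omitting it returns an error.
    manager.load_shader(device, Some(ShaderStage::Fragment), "post.frag.glsl")?;

    // Loading from a string skips the OUT_DIR lookup entirely (WGSL only for now).
    manager.load_shader_from_string(device, "text", include_str!("text.wgsl"))?;

    assert!(manager.get_shader("text").is_some());
    Ok(())
}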

@ -2,148 +2,9 @@ pub use resources::*;
pub use texture::*;
pub use vertex::*;
pub mod font;
pub mod graphic_resource_manager;
pub mod resources;
pub mod texture;
pub mod vertex;
pub mod texture_atlas;
pub mod graphic_resource_manager;
mod material;
pub mod font;
/*use std::io::{BufReader, Cursor};
use wgpu::util::DeviceExt;
use crate::{model, texture};
pub async fn load_string(file_name: &str) -> anyhow::Result<String> {
let path = std::path::Path::new(env!("OUT_DIR"))
.join("res")
.join(file_name);
let txt = std::fs::read_to_string(path)?;
Ok(txt)
}
pub async fn load_binary(file_name: &str) -> anyhow::Result<Vec<u8>> {
let path = std::path::Path::new(env!("OUT_DIR"))
.join("res")
.join(file_name);
let data = std::fs::read(path)?;
Ok(data)
}
pub async fn load_texture(
file_name: &str,
device: &wgpu::Device,
queue: &wgpu::Queue,
) -> anyhow::Result<texture::Texture> {
let data = load_binary(file_name).await?;
texture::Texture::from_bytes(device, queue, &data, file_name)
}
pub async fn load_model(
file_name: &str,
device: &wgpu::Device,
queue: &wgpu::Queue,
layout: &wgpu::BindGroupLayout,
) -> anyhow::Result<model::Model> {
let obj_text = load_string(file_name).await?;
let obj_cursor = Cursor::new(obj_text);
let mut obj_reader = BufReader::new(obj_cursor);
let (models, obj_materials) = tobj::load_obj_buf_async(
&mut obj_reader,
&tobj::LoadOptions {
triangulate: true,
single_index: true,
..Default::default()
},
|p| async move {
let mat_text = load_string(&p).await.unwrap();
tobj::load_mtl_buf(&mut BufReader::new(Cursor::new(mat_text)))
},
)
.await?;
let mut materials = Vec::new();
for m in obj_materials? {
let diffuse_texture = load_texture(&m.diffuse_texture, device, queue).await?;
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: None,
});
materials.push(model::Material {
name: m.name,
diffuse_texture,
bind_group,
})
}
let meshes = models
.into_iter()
.map(|m| {
let vertices = (0..m.mesh.positions.len() / 3)
.map(|i| {
if m.mesh.normals.is_empty(){
model::ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [m.mesh.texcoords[i * 2], 1.0 - m.mesh.texcoords[i * 2 + 1]],
normal: [0.0, 0.0, 0.0],
}
}else{
model::ModelVertex {
position: [
m.mesh.positions[i * 3],
m.mesh.positions[i * 3 + 1],
m.mesh.positions[i * 3 + 2],
],
tex_coords: [m.mesh.texcoords[i * 2], 1.0 - m.mesh.texcoords[i * 2 + 1]],
normal: [
m.mesh.normals[i * 3],
m.mesh.normals[i * 3 + 1],
m.mesh.normals[i * 3 + 2],
],
}
}
})
.collect::<Vec<_>>();
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Vertex Buffer", file_name)),
contents: bytemuck::cast_slice(&vertices),
usage: wgpu::BufferUsages::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some(&format!("{:?} Index Buffer", file_name)),
contents: bytemuck::cast_slice(&m.mesh.indices),
usage: wgpu::BufferUsages::INDEX,
});
log::info!("Mesh: {}", m.name);
model::Mesh {
name: file_name.to_string(),
vertex_buffer,
index_buffer,
num_elements: m.mesh.indices.len() as u32,
material: m.mesh.material_id.unwrap_or(0),
}
})
.collect::<Vec<_>>();
Ok(model::Model { meshes, materials })
}*/
pub mod vertex;

@ -1,8 +0,0 @@
use crate::texture;
pub struct Material {
pub name: String,
pub diffuse_texture: texture::Texture,
pub normal_texture: texture::Texture,
pub bind_group: wgpu::BindGroup,
}

@ -1,6 +1,5 @@
use anyhow::*;
use image::{DynamicImage, GenericImageView, RgbaImage};
use wgpu::{Device, Queue};
#[derive(Debug)]
pub struct Texture {
@ -53,7 +52,7 @@ impl Texture {
texture,
view,
sampler,
size, // NEW!
size,
}
}
@ -183,19 +182,12 @@ impl Texture {
}
}
pub fn to_image(
&self,
device: &wgpu::Device,
queue: &wgpu::Queue,
) -> Result<DynamicImage> {
// Size of the texture
pub fn to_image(&self, device: &wgpu::Device, queue: &wgpu::Queue) -> Result<DynamicImage> {
let width = self.size.width;
let height = self.size.height;
// Calculate the size of the texture in bytes
let texture_size_bytes = (4 * width * height) as wgpu::BufferAddress;
// Create a buffer for reading the texture data back from the GPU
let buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Texture Readback Buffer"),
size: texture_size_bytes,
@ -203,12 +195,10 @@ impl Texture {
mapped_at_creation: false,
});
// Create a command encoder to copy the texture data to the buffer
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Texture to Buffer Encoder"),
});
// Define the copy operation from the texture to the buffer
encoder.copy_texture_to_buffer(
wgpu::ImageCopyTexture {
texture: &self.texture,
@ -227,10 +217,8 @@ impl Texture {
self.size,
);
// Submit the command to the queue
queue.submit(Some(encoder.finish()));
// Wait for the GPU to finish the operation
let buffer_slice = buffer.slice(..);
buffer_slice.map_async(wgpu::MapMode::Read, |result| {
if let Err(e) = result {
@ -238,17 +226,13 @@ impl Texture {
}
});
// Get the buffer data
let data = buffer_slice.get_mapped_range();
// Convert the raw data into an image::RgbaImage
let image = RgbaImage::from_raw(width, height, data.to_vec())
.ok_or_else(|| anyhow!("Failed to create image from raw texture data"))?;
// Unmap the buffer now that we're done with it
buffer.unmap();
// Convert the RgbaImage into a DynamicImage
Ok(DynamicImage::ImageRgba8(image))
}
}
@ -323,3 +307,4 @@ impl CubeTexture {
&self.sampler
}
}
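A hedged readback sketch built on `to_image`. The helper name and output path are invented; only `Texture::to_image` itself comes from this file:
use comet_resources::Texture;

// Dump a GPU texture to disk, e.g. to inspect how an atlas was packed.
fn dump_texture(
    texture: &Texture,
    device: &wgpu::Device,
    queue: &wgpu::Queue,
) -> anyhow::Result<()> {
    let image = texture.to_image(device, queue)?;
    image.save("texture_dump.png")?;
    Ok(())
}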

@ -1,13 +1,10 @@
use crate::font::*;
use comet_log::*;
use image::{DynamicImage, GenericImage, GenericImageView};
use std::collections::HashMap;
use std::path::Path;
use std::time::Instant;
use image::{DynamicImage, GenericImage, GenericImageView, ImageFormat};
use comet_log::*;
use wgpu::{Device, FilterMode, TextureFormat, TextureUsages};
use crate::font::GlyphData;
use crate::Texture;
#[derive(Debug)]
#[derive(Debug, Clone)]
pub struct TextureRegion {
u0: f32,
v0: f32,
@ -20,7 +17,16 @@ pub struct TextureRegion {
}
impl TextureRegion {
pub fn new(u0: f32, v0: f32, u1: f32, v1: f32, dimensions: (u32, u32), advance: f32, offset_x: f32, offset_y: f32) -> Self {
pub fn new(
u0: f32,
v0: f32,
u1: f32,
v1: f32,
dimensions: (u32, u32),
advance: f32,
offset_x: f32,
offset_y: f32,
) -> Self {
Self {
u0,
v0,
@ -29,7 +35,7 @@ impl TextureRegion {
advance,
offset_x,
offset_y,
dimensions
dimensions,
}
}
@ -66,7 +72,7 @@ impl TextureRegion {
}
}
#[derive(Debug)]
#[derive(Debug, Clone)]
pub struct TextureAtlas {
atlas: DynamicImage,
textures: HashMap<String, TextureRegion>,
@ -76,7 +82,7 @@ impl TextureAtlas {
pub fn empty() -> Self {
Self {
atlas: DynamicImage::new(1, 1, image::ColorType::Rgb8),
textures: HashMap::new()
textures: HashMap::new(),
}
}
@ -128,9 +134,7 @@ impl TextureAtlas {
}
}
pub fn from_texture_paths(
paths: Vec<String>,
) -> Self {
pub fn from_texture_paths(paths: Vec<String>) -> Self {
let mut textures: Vec<DynamicImage> = Vec::new();
let mut regions: HashMap<String, TextureRegion> = HashMap::new();
@ -143,13 +147,19 @@ impl TextureAtlas {
info!("Textures loaded!");
info!("Sorting textures by height...");
let mut texture_path_pairs: Vec<(&DynamicImage, &String)> = textures.iter().zip(paths.iter()).collect();
let mut texture_path_pairs: Vec<(&DynamicImage, &String)> =
textures.iter().zip(paths.iter()).collect();
texture_path_pairs.sort_by(|a, b| b.0.height().cmp(&a.0.height()));
let (sorted_textures, sorted_paths): (Vec<&DynamicImage>, Vec<&String>) = texture_path_pairs.into_iter().unzip();
let sorted_textures: Vec<DynamicImage> = sorted_textures.into_iter().map(|t| t.clone()).collect();
let (sorted_textures, sorted_paths): (Vec<&DynamicImage>, Vec<&String>) =
texture_path_pairs.into_iter().unzip();
let sorted_textures: Vec<DynamicImage> =
sorted_textures.into_iter().map(|t| t.clone()).collect();
let sorted_paths: Vec<String> = sorted_paths.into_iter().map(|s| s.to_string()).collect();
let (height, width) = (Self::calculate_atlas_height(&sorted_textures), Self::calculate_atlas_width(&sorted_textures));
let (height, width) = (
Self::calculate_atlas_height(&sorted_textures),
Self::calculate_atlas_width(&sorted_textures),
);
let mut base = DynamicImage::new_rgba8(width, height);
let mut previous = sorted_textures.get(0).unwrap().height();
@ -166,16 +176,18 @@ impl TextureAtlas {
}
Self::insert_texture_at(&mut base, &texture, x_offset, y_offset);
regions.insert(path.to_string(), TextureRegion::new(
x_offset as f32 / width as f32,
y_offset as f32 / height as f32,
(x_offset + texture.width()) as f32 / width as f32,
(y_offset + texture.height()) as f32 / height as f32,
texture.dimensions(),
0.0,
0.0,
0.0
));
let texel_w = 0.5 / width as f32;
let texel_h = 0.5 / height as f32;
let u0 = (x_offset as f32 + texel_w) / width as f32;
let v0 = (y_offset as f32 + texel_h) / height as f32;
let u1 = ((x_offset + texture.width()) as f32 - texel_w) / width as f32;
let v1 = ((y_offset + texture.height()) as f32 - texel_h) / height as f32;
regions.insert(
path.to_string(),
TextureRegion::new(u0, v0, u1, v1, texture.dimensions(), 0.0, 0.0, 0.0),
);
x_offset += texture.width();
}
@ -183,25 +195,28 @@ impl TextureAtlas {
TextureAtlas {
atlas: base,
textures: regions
textures: regions,
}
}
pub fn from_textures(
names: Vec<String>,
textures: Vec<DynamicImage>,
) -> Self {
pub fn from_textures(names: Vec<String>, textures: Vec<DynamicImage>) -> Self {
let mut regions: HashMap<String, TextureRegion> = HashMap::new();
info!("Sorting textures by height...");
let mut texture_path_pairs: Vec<(&DynamicImage, &String)> = textures.iter().zip(names.iter()).collect();
let mut texture_path_pairs: Vec<(&DynamicImage, &String)> =
textures.iter().zip(names.iter()).collect();
texture_path_pairs.sort_by(|a, b| b.0.height().cmp(&a.0.height()));
let (sorted_textures, sorted_paths): (Vec<&DynamicImage>, Vec<&String>) = texture_path_pairs.into_iter().unzip();
let sorted_textures: Vec<DynamicImage> = sorted_textures.into_iter().map(|t| t.clone()).collect();
let (sorted_textures, sorted_paths): (Vec<&DynamicImage>, Vec<&String>) =
texture_path_pairs.into_iter().unzip();
let sorted_textures: Vec<DynamicImage> =
sorted_textures.into_iter().map(|t| t.clone()).collect();
let sorted_paths: Vec<String> = sorted_paths.into_iter().map(|s| s.to_string()).collect();
let (height, width) = (Self::calculate_atlas_height(&sorted_textures), Self::calculate_atlas_width(&sorted_textures));
let (height, width) = (
Self::calculate_atlas_height(&sorted_textures),
Self::calculate_atlas_width(&sorted_textures),
);
let mut base = DynamicImage::new_rgba8(width, height);
let mut previous = sorted_textures.get(0).unwrap().height();
@ -218,7 +233,9 @@ impl TextureAtlas {
}
Self::insert_texture_at(&mut base, &texture, x_offset, y_offset);
regions.insert(name.to_string(), TextureRegion::new(
regions.insert(
name.to_string(),
TextureRegion::new(
x_offset as f32 / width as f32,
y_offset as f32 / height as f32,
(x_offset + texture.width()) as f32 / width as f32,
@ -226,8 +243,9 @@ impl TextureAtlas {
texture.dimensions(),
0.0,
0.0,
0.0
));
0.0,
),
);
x_offset += texture.width();
}
@ -235,7 +253,7 @@ impl TextureAtlas {
TextureAtlas {
atlas: base,
textures: regions
textures: regions,
}
}
@ -243,10 +261,10 @@ impl TextureAtlas {
glyphs.sort_by(|a, b| b.render.height().cmp(&a.render.height()));
let height = Self::calculate_atlas_height(
&glyphs.iter().map(|g| g.render.clone()).collect::<Vec<_>>()
&glyphs.iter().map(|g| g.render.clone()).collect::<Vec<_>>(),
);
let width = Self::calculate_atlas_width(
&glyphs.iter().map(|g| g.render.clone()).collect::<Vec<_>>()
&glyphs.iter().map(|g| g.render.clone()).collect::<Vec<_>>(),
);
let padding = (glyphs.len() * 3) as u32;
@ -257,7 +275,6 @@ impl TextureAtlas {
let mut x_offset: u32 = 0;
let mut y_offset: u32 = 0;
for g in glyphs.iter() {
let glyph_w = g.render.width();
let glyph_h = g.render.height();
@ -276,7 +293,10 @@ impl TextureAtlas {
let v1 = (y_offset + glyph_h) as f32 / height as f32;
let region = TextureRegion::new(
u0, v0, u1, v1,
u0,
v0,
u1,
v1,
(glyph_w, glyph_h),
g.advance,
g.offset_x,
@ -294,6 +314,106 @@ impl TextureAtlas {
}
}
pub fn from_fonts(fonts: &Vec<Font>) -> Self {
if fonts.is_empty() {
return Self::empty();
}
let mut all_glyphs: Vec<(String, DynamicImage, TextureRegion)> = Vec::new();
let mut font_indices: Vec<usize> = (0..fonts.len()).collect();
font_indices.sort_by(|&a, &b| fonts[a].name().cmp(&fonts[b].name()));
for fi in font_indices {
let font = &fonts[fi];
let font_name = font.name();
let mut glyph_names: Vec<String> = font.glyphs().textures().keys().cloned().collect();
glyph_names.sort();
for glyph_name in glyph_names {
let region = font.glyphs().textures().get(&glyph_name).unwrap();
let (u0, v0, u1, v1) = (region.u0(), region.v0(), region.u1(), region.v1());
let (width, height) = region.dimensions();
let src_x = (u0 * font.glyphs().atlas().width() as f32) as u32;
let src_y = (v0 * font.glyphs().atlas().height() as f32) as u32;
let glyph_img = DynamicImage::ImageRgba8(
font.glyphs()
.atlas()
.view(src_x, src_y, width, height)
.to_image(),
);
let key = format!("{}::{}", font_name, glyph_name);
all_glyphs.push((key, glyph_img, region.clone()));
}
}
all_glyphs.sort_by(|a, b| {
let ha = a.1.height();
let hb = b.1.height();
match hb.cmp(&ha) {
std::cmp::Ordering::Equal => a.0.cmp(&b.0),
other => other,
}
});
let textures: Vec<DynamicImage> =
all_glyphs.iter().map(|(_, img, _)| img.clone()).collect();
let atlas_height = Self::calculate_atlas_height(&textures);
let atlas_width = Self::calculate_atlas_width(&textures);
let padding = (all_glyphs.len() * 3) as u32;
let mut base = DynamicImage::new_rgba8(atlas_width + padding, atlas_height);
let mut regions = HashMap::new();
let mut current_row_height = textures[0].height();
let mut x_offset: u32 = 0;
let mut y_offset: u32 = 0;
for (key, img, original_region) in all_glyphs {
let w = img.width();
let h = img.height();
if h != current_row_height {
y_offset += current_row_height + 3;
x_offset = 0;
current_row_height = h;
}
Self::insert_texture_at(&mut base, &img, x_offset, y_offset);
let u0 = x_offset as f32 / (atlas_width + padding) as f32;
let v0 = y_offset as f32 / atlas_height as f32;
let u1 = (x_offset + w) as f32 / (atlas_width + padding) as f32;
let v1 = (y_offset + h) as f32 / atlas_height as f32;
let region = TextureRegion::new(
u0,
v0,
u1,
v1,
(w, h),
original_region.advance(),
original_region.offset_x(),
original_region.offset_y(),
);
regions.insert(key, region);
x_offset += w + 3;
}
TextureAtlas {
atlas: base,
textures: regions,
}
}
pub fn atlas(&self) -> &DynamicImage {
&self.atlas
}
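As a hedged sketch of how these constructors fit together (module paths, the texture path, and the wrapping function are assumptions based on the crate layout shown in this diff):
use comet_resources::font::Font;
use comet_resources::texture_atlas::TextureAtlas;

fn build_atlases(fonts: &Vec<Font>) -> (TextureAtlas, TextureAtlas) {
    // Pack standalone textures; regions come back with UVs inset by half a texel
    // so linear filtering does not bleed into neighbouring atlas entries.
    let texture_atlas = TextureAtlas::from_texture_paths(vec![
        "./res/textures/comet_icon.png".to_string(),
    ]);
    if let Some(region) = texture_atlas.textures().get("./res/textures/comet_icon.png") {
        println!(
            "icon UV rect: ({}, {})..({}, {})",
            region.u0(), region.v0(), region.u1(), region.v1()
        );
    }

    // Glyphs from every loaded font are merged into one atlas, keyed "FontName::glyph".
    let font_atlas = TextureAtlas::from_fonts(fonts);

    (texture_atlas, font_atlas)
}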

@ -2,7 +2,7 @@ use comet::prelude::*;
fn setup(app: &mut App, renderer: &mut Renderer2D) {
// Initialize the texture atlas
renderer.initialize_atlas();
renderer.init_atlas();
// Register components
app.register_component::<Position2D>();

@ -4,7 +4,7 @@ use winit_input_helper::WinitInputHelper;
fn setup(app: &mut App, renderer: &mut Renderer2D) {
// Takes all the textures from res/textures and puts them into a texture atlas
renderer.initialize_atlas();
renderer.init_atlas();
let camera = app.new_entity();
app.add_component(camera, Transform2D::new());
@ -14,7 +14,7 @@ fn setup(app: &mut App, renderer: &mut Renderer2D) {
app.add_component(e1, Transform2D::new());
let mut renderer2d = Render2D::with_texture("res/textures/comet_icon.png");
let renderer2d = Render2D::with_texture("res/textures/comet_icon.png");
app.add_component(e1, renderer2d);
}

@ -1,6 +1,7 @@
use comet::prelude::*;
fn setup(app: &mut App, renderer: &mut Renderer2D) {
renderer.init_atlas();
// Loading the font from the res/fonts directory with a rendered size of 77px
renderer.load_font("./res/fonts/PressStart2P-Regular.ttf", 77.0);
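Pulled together, a post-rework setup function might look like this. It is a sketch assembled from the example snippets above; only the calls shown in these examples are assumed to exist:
use comet::prelude::*;

fn setup(app: &mut App, renderer: &mut Renderer2D) {
    // Build the texture atlas from everything under res/textures...
    renderer.init_atlas();
    // ...or restrict it to specific files:
    // renderer.init_atlas_by_paths(vec!["./res/textures/comet_icon.png".to_string()]);

    // Fonts get their own atlas; 77.0 is the rasterized glyph size in pixels.
    renderer.load_font("./res/fonts/PressStart2P-Regular.ttf", 77.0);

    let camera = app.new_entity();
    app.add_component(camera, Transform2D::new());

    let sprite = app.new_entity();
    app.add_component(sprite, Transform2D::new());
    app.add_component(sprite, Render2D::with_texture("res/textures/comet_icon.png"));
}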

@ -2,7 +2,7 @@ use comet::prelude::*;
fn setup(app: &mut App, renderer: &mut Renderer2D) {
// Creating a texture atlas from the provided textures in the vector
renderer.set_texture_atlas_by_paths(vec!["./res/textures/comet_icon.png".to_string()]);
renderer.init_atlas_by_paths(vec!["./res/textures/comet_icon.png".to_string()]);
// Creating a camera entity
let cam = app.new_entity();

@ -69,6 +69,6 @@ pub mod prelude {
pub use comet_input::keyboard::Key;
pub use comet_log::*;
pub use comet_math::*;
pub use comet_renderer::renderer2d::Renderer2D;
pub use comet_renderer::{renderer::Renderer, renderer2d::Renderer2D};
pub use winit_input_helper::WinitInputHelper as InputManager;
}