initial commit

lisk77 2024-10-26 02:15:26 +02:00
commit 6154c72b0e
55 changed files with 9481 additions and 0 deletions

View file

@@ -0,0 +1,27 @@
[package]
name = "comet_renderer"
version = "0.1.0"
edition = "2021"
[dependencies]
comet_math = { path = "../comet_math" }
comet_resources = { path = "../comet_resources" }
comet_colors = { path = "../comet_colors" }
comet_log = { path = "../comet_log" }
cfg-if = "1"
anyhow = "1.0"
bytemuck = { version = "1.16", features = [ "derive" ] }
cgmath = "0.18"
env_logger = "0.10"
pollster = "0.3"
log = "0.4"
tobj = { version = "3.2", default-features = false, features = ["async"] }
wgpu = { version = "22.0" }
winit = { version = "0.29", features = ["rwh_05"] }
instant = "0.1"
[dependencies.image]
version = "0.24"
default-features = false
features = ["png", "jpeg", "hdr"]

View file

@@ -0,0 +1,321 @@
use comet_math::Point3;
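// cgmath's projections target OpenGL's clip-space depth range of [-1, 1], while wgpu
// expects depth in [0, 1]; this matrix is the usual correction applied on top of the projection.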
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: cgmath::Matrix4<f32> = cgmath::Matrix4::new(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.5,
0.0, 0.0, 0.0, 1.0,
);
const SAFE_FRAC_PI_2: f32 = std::f32::consts::FRAC_PI_2 - 0.0001;
pub struct Camera {
eye: cgmath::Point3<f32>,
target: cgmath::Point3<f32>,
up: cgmath::Vector3<f32>,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
pub fn new(
eye: cgmath::Point3<f32>,
target: cgmath::Point3<f32>,
up: cgmath::Vector3<f32>,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
) -> Self {
Self {
eye,
target,
up,
aspect,
fovy,
znear,
zfar,
}
}
pub fn build_view_projection_matrix(&self) -> cgmath::Matrix4<f32> {
// 1. The view matrix moves the world into the camera's frame of reference.
let view = cgmath::Matrix4::look_at_rh(self.eye, self.target, self.up);
// 2. The projection matrix adds perspective and the near/far clipping planes.
let proj = cgmath::perspective(cgmath::Deg(self.fovy), self.aspect, self.znear, self.zfar);
// 3. Remap from OpenGL clip space to wgpu clip space before the matrix reaches the shader.
OPENGL_TO_WGPU_MATRIX * proj * view
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct CameraUniform {
view_proj: [[f32; 4]; 4],
}
impl CameraUniform {
pub fn new() -> Self {
use cgmath::SquareMatrix;
Self {
view_proj: cgmath::Matrix4::identity().into(),
}
}
pub fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = camera.build_view_projection_matrix().into();
}
}
/*use comet_math::{Mat4, Point3, Vec3};
#[rustfmt::skip]
pub const OPENGL_TO_WGPU_MATRIX: Mat4 = Mat4::new(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0,
0.0, 0.0, 0.5, 1.0,
);
pub struct Camera {
eye: Point3,
target: Point3,
up: Vec3,
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
pub fn new(eye: Point3, target: Point3, up: Vec3, aspect: f32, fovy: f32, znear: f32, zfar: f32) -> Self {
Self {
eye,
target,
up,
aspect,
fovy,
znear,
zfar,
}
}
pub fn build_view_projection_matrix(&self) -> Mat4 {
let view = Mat4::look_at_rh(self.eye, self.target, self.up);
let proj = Mat4::perspective_matrix(self.fovy, self.aspect, self.znear, self.zfar);
(OPENGL_TO_WGPU_MATRIX * proj * view).transpose()
}
}
#[repr(C)]
#[derive(Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
pub struct CameraUniform {
view_proj: [[f32; 4]; 4],
}
impl CameraUniform {
pub fn new() -> Self {
Self {
view_proj: Mat4::IDENTITY.into(),
}
}
pub fn update_view_proj(&mut self, camera: &Camera) {
self.view_proj = camera.build_view_projection_matrix().into();
}
}*/
/*use std::f32::consts::FRAC_PI_2;
use std::time::Duration;
use winit::dpi::PhysicalPosition;
use winit::event::*;
use winit::keyboard::KeyCode;
const SAFE_FRAC_PI_2: f32 = FRAC_PI_2 - 0.0001;
#[derive(Debug)]
pub struct Camera3D {
pub position: Point3,
yaw: f32,
pitch: f32,
}
impl Camera3D {
pub fn new(
position: Point3,
yaw: f32,
pitch: f32,
) -> Self {
Self {
position: position.into(),
yaw: yaw.into(),
pitch: pitch.into(),
}
}
pub fn calc_matrix(&self) -> Mat4 {
let (sin_pitch, cos_pitch) = self.pitch.0.sin_cos();
let (sin_yaw, cos_yaw) = self.yaw.0.sin_cos();
Mat4::look_to_rh(
self.position,
Vec3::new(cos_pitch * cos_yaw, sin_pitch, cos_pitch * sin_yaw).normalize(),
Vec3::unit_y(),
)
}
}
pub struct Projection {
aspect: f32,
fovy: Rad<f32>,
znear: f32,
zfar: f32,
}
impl Projection {
pub fn new<F: Into<Rad<f32>>>(width: u32, height: u32, fovy: F, znear: f32, zfar: f32) -> Self {
Self {
aspect: width as f32 / height as f32,
fovy: fovy.into(),
znear,
zfar,
}
}
pub fn resize(&mut self, width: u32, height: u32) {
self.aspect = width as f32 / height as f32;
}
pub fn calc_matrix(&self) -> Matrix4<f32> {
// UPDATE
perspective(self.fovy, self.aspect, self.znear, self.zfar)
}
}
#[derive(Debug)]
pub struct CameraController {
amount_left: f32,
amount_right: f32,
amount_forward: f32,
amount_backward: f32,
amount_up: f32,
amount_down: f32,
rotate_horizontal: f32,
rotate_vertical: f32,
scroll: f32,
speed: f32,
sensitivity: f32,
}
impl CameraController {
pub fn new(speed: f32, sensitivity: f32) -> Self {
Self {
amount_left: 0.0,
amount_right: 0.0,
amount_forward: 0.0,
amount_backward: 0.0,
amount_up: 0.0,
amount_down: 0.0,
rotate_horizontal: 0.0,
rotate_vertical: 0.0,
scroll: 0.0,
speed,
sensitivity,
}
}
pub fn process_keyboard(&mut self, key: KeyCode, state: ElementState) -> bool {
let amount = if state == ElementState::Pressed {
1.0
} else {
0.0
};
match key {
KeyCode::KeyW | KeyCode::ArrowUp => {
self.amount_forward = amount;
true
}
KeyCode::KeyS | KeyCode::ArrowDown => {
self.amount_backward = amount;
true
}
KeyCode::KeyA | KeyCode::ArrowLeft => {
self.amount_left = amount;
true
}
KeyCode::KeyD | KeyCode::ArrowRight => {
self.amount_right = amount;
true
}
KeyCode::Space => {
self.amount_up = amount;
true
}
KeyCode::ShiftLeft => {
self.amount_down = amount;
true
}
_ => false,
}
}
pub fn process_mouse(&mut self, mouse_dx: f64, mouse_dy: f64) {
self.rotate_horizontal = mouse_dx as f32;
self.rotate_vertical = mouse_dy as f32;
}
pub fn process_scroll(&mut self, delta: &MouseScrollDelta) {
self.scroll = match delta {
// I'm assuming a line is about 100 pixels
MouseScrollDelta::LineDelta(_, scroll) => -scroll * 0.5,
MouseScrollDelta::PixelDelta(PhysicalPosition { y: scroll, .. }) => -*scroll as f32,
};
}
pub fn update_camera(&mut self, camera: &mut Camera, dt: Duration) {
let dt = dt.as_secs_f32();
// Move forward/backward and left/right
let (yaw_sin, yaw_cos) = camera.yaw.0.sin_cos();
let forward = Vector3::new(yaw_cos, 0.0, yaw_sin).normalize();
let right = Vector3::new(-yaw_sin, 0.0, yaw_cos).normalize();
camera.position += forward * (self.amount_forward - self.amount_backward) * self.speed * dt;
camera.position += right * (self.amount_right - self.amount_left) * self.speed * dt;
// Move in/out (aka. "zoom")
// Note: this isn't an actual zoom. The camera's position
// changes when zooming. I've added this to make it easier
// to get closer to an object you want to focus on.
let (pitch_sin, pitch_cos) = camera.pitch.0.sin_cos();
let scrollward =
Vector3::new(pitch_cos * yaw_cos, pitch_sin, pitch_cos * yaw_sin).normalize();
camera.position += scrollward * self.scroll * self.speed * self.sensitivity * dt;
self.scroll = 0.0;
// Move up/down. Since we don't use roll, we can just
// modify the y coordinate directly.
camera.position.y += (self.amount_up - self.amount_down) * self.speed * dt;
// Rotate
camera.yaw += Rad(self.rotate_horizontal) * self.sensitivity * dt;
camera.pitch += Rad(-self.rotate_vertical) * self.sensitivity * dt;
// If process_mouse isn't called every frame, these values
// will not get set to zero, and the camera will rotate
// when moving in a non-cardinal direction.
self.rotate_horizontal = 0.0;
self.rotate_vertical = 0.0;
// Keep the camera's angle from going too high/low.
if camera.pitch < -Rad(SAFE_FRAC_PI_2) {
camera.pitch = -Rad(SAFE_FRAC_PI_2);
} else if camera.pitch > Rad(SAFE_FRAC_PI_2) {
camera.pitch = Rad(SAFE_FRAC_PI_2);
}
}
}*/
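
For reference, a minimal sketch of how the active Camera / CameraUniform pair above could be driven from lib.rs; the values mirror the commented-out camera setup later in this commit, and the helper function itself is not part of the commit:

use cgmath::Vector3;
use crate::camera::{Camera, CameraUniform};

// Sketch only (not in this commit): build a camera and pack it for the GPU.
fn example_camera_uniform(aspect: f32) -> CameraUniform {
    let camera = Camera::new(
        (0.0, 1.0, 2.0).into(), // eye: one unit up, two units back (+z is out of the screen)
        (0.0, 0.0, 0.0).into(), // look at the origin
        Vector3::unit_y(),      // which way is "up"
        aspect,
        45.0,                   // fovy in degrees; build_view_projection_matrix wraps it in cgmath::Deg
        0.1,                    // znear
        100.0,                  // zfar
    );
    let mut uniform = CameraUniform::new();
    uniform.update_view_proj(&camera);
    // The result can then be uploaded with bytemuck::cast_slice(&[uniform]) into a
    // buffer created with UNIFORM | COPY_DST usage.
    uniform
}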

View file

@@ -0,0 +1,671 @@
mod camera;
use core::default::Default;
use std::iter;
use std::sync::Arc;
use std::time::Instant;
use cgmath::num_traits::FloatConst;
use image::GenericImageView;
use log::info;
use wgpu::Color;
use wgpu::util::DeviceExt;
use winit::{
dpi::PhysicalSize,
window::Window
};
use comet_colors::LinearRgba;
use comet_math;
use comet_math::{Mat4, Point3, Vec3};
use comet_resources::{ResourceManager, texture, Vertex, Texture};
use crate::camera::{Camera, CameraUniform};
// RAINBOW TRIANGLE
/*const VERTICES: &[Vertex] = &[
Vertex :: new ( [0.0, 0.5, 0.0], [1.0, 0.0, 0.0] ),
Vertex :: new ( [-0.5, -0.5, 0.0], [0.0, 1.0, 0.0] ),
Vertex :: new ( [0.5, -0.5, 0.0], [0.0, 0.0, 1.0] ),
];*/
// RAW PENTAGON
/*const VERTICES: &[Vertex] = &[
Vertex :: new ( [-0.0868241, 0.49240386, 0.0], [0.5, 0.0, 0.5] ), // A
Vertex :: new ( [-0.49513406, 0.06958647, 0.0], [0.5, 0.0, 0.5] ), // B
Vertex :: new ( [-0.21918549, -0.44939706, 0.0], [0.5, 0.0, 0.5] ), // C
Vertex :: new ( [0.35966998, -0.3473291, 0.0], [0.5, 0.0, 0.5] ), // D
Vertex :: new ( [0.44147372, 0.2347359, 0.0], [0.5, 0.0, 0.5] ), // E
];*/
// RAINBOW PENTAGON
/*const VERTICES: &[Vertex] = &[
Vertex :: new ( [-0.0868241, 0.49240386, 0.0], [1.0, 0.0, 0.0] ), // A
Vertex :: new ( [-0.49513406, 0.06958647, 0.0], [0.0, 0.35, 1.0] ), // B
Vertex :: new ( [-0.21918549, -0.44939706, 0.0], [0.2, 1.0, 0.2] ), // C
Vertex :: new ( [0.35966998, -0.3473291, 0.0], [1.0, 0.85, 0.2] ), // D
Vertex :: new ( [0.44147372, 0.2347359, 0.0], [1.0, 0.2, 0.6] ), // E
];
const INDICES: &[u16] = &[
0, 1, 4,
1, 2, 4,
2, 3, 4,
];*/
// RAW QUAD
/*const VERTICES: &[Vertex] = &[
Vertex :: new ( [-0.5, 0.5, 0.0], [0.0, 0.0, 1.0] ),
Vertex :: new ( [-0.5, -0.5, 0.0], [0.0, 1.0, 0.0] ),
Vertex :: new ( [0.5, -0.5, 0.0], [1.0, 0.0, 0.0] ),
Vertex :: new ( [0.5, 0.5, 0.0], [1.0, 0.0, 1.0] )
];
const INDICES: &[u16] = &[
0, 1, 3,
1, 2, 3
];*/
/*
vec![
Vertex :: new ( [-0.1, 0.1, 0.0], [0.0, 0.0] ),
Vertex :: new ( [-0.1, -0.1, 0.0], [0.0, 1.0] ),
Vertex :: new ( [0.1, -0.1, 0.0], [1.0, 1.0] ),
Vertex :: new ( [0.1, 0.1, 0.0], [1.0, 0.0] ),
], vec![
0, 1, 3,
1, 2, 3
]
*/
pub struct Projection {
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32
}
impl Projection {
pub fn new(width: u32, height: u32, fovy: f32, znear: f32, zfar: f32) -> Self {
Self {
aspect: width as f32 / height as f32,
fovy,
znear,
zfar
}
}
pub fn resize(&mut self, width: u32, height: u32) { self.aspect = width as f32 / height as f32; }
pub fn calc_matrix(&self) -> Mat4 {
Mat4::perspective_matrix(self.fovy, self.aspect, self.znear, self.zfar)
}
}
pub struct Renderer<'a> {
window: &'a Window,
surface: wgpu::Surface<'a>,
device: wgpu::Device,
queue: wgpu::Queue,
config: wgpu::SurfaceConfiguration,
size: winit::dpi::PhysicalSize<u32>,
//projection: Projection,
render_pipeline: wgpu::RenderPipeline,
last_frame_time: Instant,
deltatime: f32,
vertex_buffer: wgpu::Buffer,
vertex_data: Vec<Vertex>,
index_buffer: wgpu::Buffer,
index_data: Vec<u16>,
num_indices: u32,
clear_color: Color,
diffuse_texture: texture::Texture,
diffuse_bind_group: wgpu::BindGroup,
resource_manager: ResourceManager,
/*camera: Camera,
camera_uniform: CameraUniform,
camera_buffer: wgpu::Buffer,
camera_bind_group: wgpu::BindGroup,*/
}
impl<'a> Renderer<'a> {
pub async fn new(window: &'a Window, clear_color: Option<LinearRgba>) -> anyhow::Result<Renderer<'a>> {
let vertex_data: Vec<Vertex> = vec![];
let index_data: Vec<u16> = vec![];
let size = PhysicalSize::<u32>::new(1920, 1080); //window.inner_size();
// The instance is a handle to our GPU
// Backends::PRIMARY => Vulkan + Metal + DX12 + Browser WebGPU
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends: wgpu::Backends::PRIMARY,
..Default::default()
});
let surface = instance.create_surface(window).unwrap();
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
force_fallback_adapter: false,
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
required_features: wgpu::Features::empty(),
// WebGL doesn't support all of wgpu's features, so if
// we're building for the web we'll have to disable some.
required_limits: wgpu::Limits::default(),
memory_hints: Default::default(),
},
None, // Trace path
)
.await
.unwrap();
let surface_caps = surface.get_capabilities(&adapter);
// Shader code in this tutorial assumes an sRGB surface texture. Using a different
// one will result in all the colors coming out darker. If you want to support
// non-sRGB surfaces, you'll need to account for that when drawing to the frame.
let surface_format = surface_caps
.formats
.iter()
.copied()
.find(|f| f.is_srgb())
.unwrap_or(surface_caps.formats[0]);
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface_format,
width: size.width,
height: size.height,
present_mode: surface_caps.present_modes[0],
alpha_mode: surface_caps.alpha_modes[0],
view_formats: vec![],
desired_maximum_frame_latency: 2,
};
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()),
});
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(&vertex_data),
usage: wgpu::BufferUsages::VERTEX,
});
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(&index_data),
usage: wgpu::BufferUsages::INDEX
});
let num_indices = index_data.len() as u32;
let resource_manager = ResourceManager::new();
let diffuse_bytes = include_bytes!(r"../../../resources/textures/comet_icon.png");
let diffuse_texture =
texture::Texture::from_bytes(&device, &queue, diffuse_bytes, "comet_icon.png", false).unwrap();
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
// This should match the filterable field of the
// corresponding Texture entry above.
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
label: Some("texture_bind_group_layout"),
});
let diffuse_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
},
],
label: Some("diffuse_bind_group"),
});
/*let camera = Camera::new(
// position the camera 1 unit up and 2 units back
// +z is out of the screen
(0.0, 1.0, 2.0).into(),
// have it look at the origin
(0.0, 0.0, 0.0).into(),
// which way is "up"
cgmath::Vector3::unit_y(),
config.width as f32 / config.height as f32,
45.0,
0.1,
100.0,
);
let mut camera_uniform = CameraUniform::new();
camera_uniform.update_view_proj(&camera);
let camera_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Camera Buffer"),
contents: bytemuck::cast_slice(&[camera_uniform]),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
}
);
let camera_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}
],
label: Some("camera_bind_group_layout"),
});
let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &camera_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: camera_buffer.as_entire_binding(),
}
],
label: Some("camera_bind_group"),
});*/
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
//&camera_bind_group_layout,
],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[Vertex::desc()],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: config.format,
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::SrcAlpha,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::OneMinusSrcAlpha,
operation: wgpu::BlendOperation::Add,
},
}),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: Some(wgpu::Face::Back),
// Setting this to anything other than Fill requires Features::POLYGON_MODE_LINE
// or Features::POLYGON_MODE_POINT
polygon_mode: wgpu::PolygonMode::Fill,
// Requires Features::DEPTH_CLIP_CONTROL
unclipped_depth: false,
// Requires Features::CONSERVATIVE_RASTERIZATION
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
// If the pipeline will be used with a multiview render pass, this
// indicates how many array layers the attachments will have.
multiview: None,
// Useful for optimizing shader compilation on Android
cache: None,
});
let clear_color = match clear_color {
Some(color) => color.to_wgpu(),
None => wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}
};
Ok(Self {
window,
surface,
device,
queue,
config,
size,
//projection,
render_pipeline,
last_frame_time: Instant::now(),
deltatime: 0.0,
vertex_buffer,
vertex_data,
index_buffer,
index_data,
num_indices,
clear_color,
diffuse_texture,
diffuse_bind_group,
resource_manager,
/*camera,
camera_uniform,
camera_buffer,
camera_bind_group,*/
})
}
pub fn dt(&self) -> f32 {
self.deltatime
}
fn vertex_data_mut(&mut self) -> &mut Vec<Vertex> {
&mut self.vertex_data
}
fn index_data_mut(&mut self) -> &mut Vec<u16> {
&mut self.index_data
}
fn create_rectangle(&self, width: f32, height: f32) -> Vec<Vertex> {
let (bound_x, bound_y) =
((width / self.config.width as f32) * 0.5, (height / self.config.height as f32) * 0.5);
vec![
Vertex::new([-bound_x, bound_y, 0.0], [0.0, 0.0]),
Vertex::new([-bound_x, -bound_y, 0.0], [0.0, 1.0]),
Vertex::new([ bound_x, -bound_y, 0.0], [1.0, 1.0]),
Vertex::new([ bound_x, bound_y, 0.0], [1.0, 0.0])
]
}
pub fn display_atlas(&mut self) {
let atlas = vec![
r"C:\Users\lisk77\Code Sharing\comet-engine\resources\textures\comet-128.png".to_string(),
r"C:\Users\lisk77\Code Sharing\comet-engine\resources\textures\comet-256.png".to_string(),
];
//self.diffuse_texture = Texture::from_image(&self.device, &self.queue, atlas.atlas(), None, false).unwrap();
self.set_texture_atlas(atlas);
let (bound_x, bound_y) =
((self.diffuse_texture.size.width as f32 / self.config.width as f32) * 0.5, (self.diffuse_texture.size.height as f32 / self.config.height as f32) * 0.5);
let vertices: Vec<Vertex> = vec![
Vertex::new([-bound_x, bound_y, 0.0], [0.0, 0.0]),
Vertex::new([-bound_x, -bound_y, 0.0], [0.0, 1.0]),
Vertex::new([ bound_x, -bound_y, 0.0], [1.0, 1.0]),
Vertex::new([ bound_x, bound_y, 0.0], [1.0, 0.0])
];
/*let vertices: Vec<Vertex> = vec![
Vertex :: new ( [-1.0, 1.0, 0.0], [0.0, 0.0] ),
Vertex :: new ( [-1.0, -1.0, 0.0], [0.0, 1.0] ),
Vertex :: new ( [ 1.0, -1.0, 0.0], [1.0, 1.0]) ,
Vertex :: new ( [ 1.0, 1.0, 0.0], [1.0, 0.0] )
];*/
let indices: Vec<u16> = vec![
0, 1, 3,
1, 2, 3
];
self.set_buffers(vertices, indices)
}
pub fn set_texture_atlas(&mut self, paths: Vec<String>) {
self.resource_manager.create_texture_atlas(paths);
self.diffuse_texture = Texture::from_image(&self.device, &self.queue, self.resource_manager.texture_atlas().atlas(), None, false).unwrap();
let texture_bind_group_layout =
self.device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
// This should match the filterable field of the
// corresponding Texture entry above.
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
label: Some("texture_bind_group_layout"),
});
let diffuse_bind_group = self.device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &texture_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&self.diffuse_texture.view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&self.diffuse_texture.sampler),
},
],
label: Some("diffuse_bind_group"),
});
self.diffuse_bind_group = diffuse_bind_group;
}
pub(crate) fn set_buffers(&mut self, new_vertex_buffer: Vec<Vertex>, new_index_buffer: Vec<u16>) {
// Only rebuild the GPU buffers whose CPU-side data actually changed; the previous
// early return would skip the index update whenever the vertex data was unchanged.
if new_vertex_buffer != self.vertex_data {
self.vertex_buffer = self.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Updated Vertex Buffer"),
contents: bytemuck::cast_slice(&new_vertex_buffer),
usage: wgpu::BufferUsages::VERTEX,
});
self.vertex_data = new_vertex_buffer;
}
if new_index_buffer != self.index_data {
self.index_buffer = self.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Updated Index Buffer"),
contents: bytemuck::cast_slice(&new_index_buffer),
usage: wgpu::BufferUsages::INDEX,
});
self.num_indices = new_index_buffer.len() as u32;
self.index_data = new_index_buffer;
}
}
pub(crate) fn push_to_buffers(&mut self, new_vertex_buffer: &mut Vec<Vertex>, new_index_buffer: &mut Vec<u16>) {
self.vertex_data.append(new_vertex_buffer);
self.index_data.append(new_index_buffer);
self.vertex_buffer = self.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Updated Vertex Buffer"),
contents: bytemuck::cast_slice(&self.vertex_data),
usage: wgpu::BufferUsages::VERTEX,
});
self.index_buffer = self.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Updated Index Buffer"),
contents: bytemuck::cast_slice(&self.index_data),
usage: wgpu::BufferUsages::INDEX,
});
self.num_indices = self.index_data.len() as u32;
}
pub fn clear_buffers(&mut self) {
self.vertex_data = vec![];
self.index_data = vec![];
self.vertex_buffer = self.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Updated Vertex Buffer"),
contents: bytemuck::cast_slice(&self.vertex_data),
usage: wgpu::BufferUsages::VERTEX,
});
self.index_buffer = self.device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Updated Index Buffer"),
contents: bytemuck::cast_slice(&self.index_data),
usage: wgpu::BufferUsages::INDEX,
});
self.num_indices = self.index_data.len() as u32;
}
pub fn draw_texture_at(&mut self, texture_path: &str, position: Point3) {
let region = self.resource_manager.texture_locations().get(texture_path).unwrap();
let (dim_x, dim_y) = region.dimensions();
let (bound_x, bound_y) =
((dim_x as f32 / self.config.width as f32) * 0.5, (dim_y as f32 / self.config.height as f32) * 0.5);
/*let bound_x = dim_x as f32 / self.config.width as f32 * 0.5;
let bound_y = bound_x;*/
let vertices: &mut Vec<Vertex> = &mut vec![
Vertex::new([-bound_x + position.x(), bound_y + position.y(), 0.0 + position.z()], [region.x0(), region.y0()]),
Vertex::new([-bound_x + position.x(), -bound_y + position.y(), 0.0 + position.z()], [region.x0(), region.y1()]),
Vertex::new([ bound_x + position.x(), -bound_y + position.y(), 0.0 + position.z()], [region.x1(), region.y1()]),
Vertex::new([ bound_x + position.x(), bound_y + position.y(), 0.0 + position.z()], [region.x1(), region.y0()])
];
let buffer_size = self.vertex_data.len() as u16;
let indices: &mut Vec<u16> = &mut vec![
0 + buffer_size, 1 + buffer_size, 3 + buffer_size,
1 + buffer_size, 2 + buffer_size, 3 + buffer_size
];
self.push_to_buffers(vertices, indices)
}
pub fn window(&self) -> &Window {
&self.window
}
pub fn size(&self) -> PhysicalSize<u32> {
self.size
}
pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
if new_size.width > 0 && new_size.height > 0 {
//self.projection.resize(new_size.width, new_size.height);
self.size = new_size;
self.config.width = new_size.width;
self.config.height = new_size.height;
self.surface.configure(&self.device, &self.config);
}
}
pub fn update(&mut self) {
let now = Instant::now();
self.deltatime = now.duration_since(self.last_frame_time).as_secs_f32(); // Time delta in seconds
self.last_frame_time = now;
}
pub fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
let output = self.surface.get_current_texture()?;
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(self.clear_color),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.diffuse_bind_group, &[]);
//render_pass.set_bind_group(1, &self.camera_bind_group, &[]);
render_pass.set_vertex_buffer(0, self.vertex_buffer.slice(..));
render_pass.set_index_buffer(self.index_buffer.slice(..), wgpu::IndexFormat::Uint16);
render_pass.draw_indexed(0..self.num_indices, 0, 0..1);
}
self.queue.submit(iter::once(encoder.finish()));
output.present();
Ok(())
}
}
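
As a usage sketch (not part of this commit), the async constructor can be driven with pollster and a winit 0.29 window; the per-frame calls condense what an event loop's redraw handler would do, and the draw_texture_at line is hypothetical since it requires a texture atlas to be set first:

use comet_renderer::Renderer;
use winit::{event_loop::EventLoop, window::WindowBuilder};

fn main() -> anyhow::Result<()> {
    let event_loop = EventLoop::new().expect("failed to create event loop");
    let window = WindowBuilder::new()
        .with_title("comet")
        .build(&event_loop)
        .expect("failed to create window");

    // Renderer::new is async, so block on it with pollster (already a dependency).
    let mut renderer = pollster::block_on(Renderer::new(&window, None))?;

    // Roughly what a redraw handler would do each frame:
    renderer.update();        // refresh the frame timer / delta time
    renderer.clear_buffers(); // start from an empty vertex/index batch
    // renderer.draw_texture_at("comet_icon.png", position); // hypothetical: needs set_texture_atlas first
    match renderer.render() {
        Ok(()) => {}
        Err(wgpu::SurfaceError::Lost) => renderer.resize(renderer.size()),
        Err(e) => eprintln!("render error: {e:?}"),
    }
    Ok(())
}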

View file

@@ -0,0 +1,134 @@
use std::ops::Range;
use crate::texture;
pub trait Vertex {
fn desc() -> wgpu::VertexBufferLayout<'static>;
}
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct ModelVertex {
pub position: [f32; 3],
pub tex_coords: [f32; 2],
pub normal: [f32; 3],
}
impl Vertex for ModelVertex {
fn desc() -> wgpu::VertexBufferLayout<'static> {
use std::mem;
wgpu::VertexBufferLayout {
array_stride: mem::size_of::<ModelVertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &[
wgpu::VertexAttribute {
offset: 0,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
shader_location: 1,
format: wgpu::VertexFormat::Float32x2,
},
wgpu::VertexAttribute {
offset: mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
shader_location: 2,
format: wgpu::VertexFormat::Float32x3,
},
],
}
}
}
pub struct Material {
#[allow(unused)]
pub name: String,
#[allow(unused)]
pub diffuse_texture: texture::Texture,
pub bind_group: wgpu::BindGroup,
}
pub struct Mesh {
#[allow(unused)]
pub name: String,
pub vertex_buffer: wgpu::Buffer,
pub index_buffer: wgpu::Buffer,
pub num_elements: u32,
pub material: usize,
}
pub struct Model {
pub meshes: Vec<Mesh>,
pub materials: Vec<Material>,
}
pub trait DrawModel<'a> {
#[allow(unused)]
fn draw_mesh(
&mut self,
mesh: &'a Mesh,
material: &'a Material,
camera_bind_group: &'a wgpu::BindGroup,
);
fn draw_mesh_instanced(
&mut self,
mesh: &'a Mesh,
material: &'a Material,
instances: Range<u32>,
camera_bind_group: &'a wgpu::BindGroup,
);
#[allow(unused)]
fn draw_model(&mut self, model: &'a Model, camera_bind_group: &'a wgpu::BindGroup);
fn draw_model_instanced(
&mut self,
model: &'a Model,
instances: Range<u32>,
camera_bind_group: &'a wgpu::BindGroup,
);
}
impl<'a, 'b> DrawModel<'b> for wgpu::RenderPass<'a>
where
'b: 'a,
{
fn draw_mesh(
&mut self,
mesh: &'b Mesh,
material: &'b Material,
camera_bind_group: &'b wgpu::BindGroup,
) {
self.draw_mesh_instanced(mesh, material, 0..1, camera_bind_group);
}
fn draw_mesh_instanced(
&mut self,
mesh: &'b Mesh,
material: &'b Material,
instances: Range<u32>,
camera_bind_group: &'b wgpu::BindGroup,
) {
self.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
self.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
self.set_bind_group(0, &material.bind_group, &[]);
self.set_bind_group(1, camera_bind_group, &[]);
self.draw_indexed(0..mesh.num_elements, 0, instances);
}
fn draw_model(&mut self, model: &'b Model, camera_bind_group: &'b wgpu::BindGroup) {
self.draw_model_instanced(model, 0..1, camera_bind_group);
}
fn draw_model_instanced(
&mut self,
model: &'b Model,
instances: Range<u32>,
camera_bind_group: &'b wgpu::BindGroup,
) {
for mesh in &model.meshes {
let material = &model.materials[mesh.material];
self.draw_mesh_instanced(mesh, material, instances.clone(), camera_bind_group);
}
}
}
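
A short sketch of how the trait is meant to be called from a render pass; the import path and the surrounding setup (a loaded Model, camera bind group, pipeline) are assumptions, only the trait methods come from this file:

// Assumed import path for the trait and types defined above.
use comet_resources::model::{DrawModel, Model};

fn draw_scene<'a>(
    render_pass: &mut wgpu::RenderPass<'a>,
    model: &'a Model,
    camera_bind_group: &'a wgpu::BindGroup,
    instance_count: u32,
) {
    // DrawModel is implemented for wgpu::RenderPass, so models draw directly;
    // draw_model / draw_mesh are the single-instance (0..1) shortcuts.
    render_pass.draw_model_instanced(model, 0..instance_count, camera_bind_group);
}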

View file

@@ -0,0 +1,38 @@
// Vertex shader
/*struct CameraUniform {
view_proj: mat4x4<f32>,
};
@group(1) @binding(0) // 1.
var<uniform> camera: CameraUniform;*/
struct VertexInput {
@location(0) position: vec3<f32>,
@location(1) tex_coords: vec2<f32>,
}
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) tex_coords: vec2<f32>,
}
@vertex
fn vs_main(
model: VertexInput,
) -> VertexOutput {
var out: VertexOutput;
out.tex_coords = model.tex_coords;
out.clip_position = /*camera.view_proj **/ vec4<f32>(model.position, 1.0);
return out;
}
// Fragment shader
@group(0) @binding(0)
var t_diffuse: texture_2d<f32>;
@group(0) @binding(1)
var s_diffuse: sampler;
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
return textureSample(t_diffuse, s_diffuse, in.tex_coords);
}
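
The two @location inputs above have to match the vertex buffer layout handed to the pipeline (Vertex::desc() from comet_resources, which this excerpt doesn't show). A sketch of a layout that would fit, assuming position is packed first and tex_coords second:

use std::mem;

// Hypothetical: the layout Vertex::desc() would need to describe for this shader,
// with vec3<f32> position at @location(0) and vec2<f32> tex_coords at @location(1).
fn quad_vertex_layout() -> wgpu::VertexBufferLayout<'static> {
    wgpu::VertexBufferLayout {
        array_stride: mem::size_of::<[f32; 5]>() as wgpu::BufferAddress, // 3 + 2 floats per vertex
        step_mode: wgpu::VertexStepMode::Vertex,
        attributes: &[
            wgpu::VertexAttribute {
                offset: 0,
                shader_location: 0,
                format: wgpu::VertexFormat::Float32x3,
            },
            wgpu::VertexAttribute {
                offset: mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
                shader_location: 1,
                format: wgpu::VertexFormat::Float32x2,
            },
        ],
    }
}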

View file

@@ -0,0 +1,254 @@
use anyhow::*;
use image::GenericImageView;
pub struct Texture {
#[allow(unused)]
pub texture: wgpu::Texture,
pub view: wgpu::TextureView,
pub sampler: wgpu::Sampler,
pub size: wgpu::Extent3d,
}
impl Texture {
pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
pub fn create_depth_texture(
device: &wgpu::Device,
config: &wgpu::SurfaceConfiguration,
label: &str,
) -> Self {
let size = wgpu::Extent3d {
width: config.width.max(1),
height: config.height.max(1),
depth_or_array_layers: 1,
};
let desc = wgpu::TextureDescriptor {
label: Some(label),
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: Self::DEPTH_FORMAT,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[Self::DEPTH_FORMAT],
};
let texture = device.create_texture(&desc);
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Linear,
mipmap_filter: wgpu::FilterMode::Nearest,
compare: Some(wgpu::CompareFunction::LessEqual),
lod_min_clamp: 0.0,
lod_max_clamp: 100.0,
..Default::default()
});
Self {
texture,
view,
sampler,
size,
}
}
#[allow(dead_code)]
pub fn from_bytes(
device: &wgpu::Device,
queue: &wgpu::Queue,
bytes: &[u8],
label: &str,
is_normal_map: bool,
) -> Result<Self> {
let img = image::load_from_memory(bytes)?;
Self::from_image(device, queue, &img, Some(label), is_normal_map)
}
pub fn from_image(
device: &wgpu::Device,
queue: &wgpu::Queue,
img: &image::DynamicImage,
label: Option<&str>,
is_normal_map: bool,
) -> Result<Self> {
let dimensions = img.dimensions();
let rgba = img.to_rgba8();
let format = if is_normal_map {
wgpu::TextureFormat::Rgba8Unorm
} else {
wgpu::TextureFormat::Rgba8UnormSrgb
};
let usage = wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST;
let size = wgpu::Extent3d {
width: img.width(),
height: img.height(),
depth_or_array_layers: 1,
};
let texture = Self::create_2d_texture(
device,
size.width,
size.height,
format,
usage,
wgpu::FilterMode::Linear,
label,
);
queue.write_texture(
wgpu::ImageCopyTexture {
aspect: wgpu::TextureAspect::All,
texture: &texture.texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
},
&rgba,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: Some(4 * dimensions.0),
rows_per_image: Some(dimensions.1),
},
size,
);
Ok(texture)
}
pub(crate) fn create_2d_texture(
device: &wgpu::Device,
width: u32,
height: u32,
format: wgpu::TextureFormat,
usage: wgpu::TextureUsages,
mag_filter: wgpu::FilterMode,
label: Option<&str>,
) -> Self {
let size = wgpu::Extent3d {
width,
height,
depth_or_array_layers: 1,
};
Self::create_texture(
device,
label,
size,
format,
usage,
wgpu::TextureDimension::D2,
mag_filter,
)
}
pub fn create_texture(
device: &wgpu::Device,
label: Option<&str>,
size: wgpu::Extent3d,
format: wgpu::TextureFormat,
usage: wgpu::TextureUsages,
dimension: wgpu::TextureDimension,
mag_filter: wgpu::FilterMode,
) -> Self {
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size,
mip_level_count: 1,
sample_count: 1,
dimension,
format,
usage,
view_formats: &[],
});
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Self {
texture,
view,
sampler,
size,
}
}
}
pub struct CubeTexture {
texture: wgpu::Texture,
sampler: wgpu::Sampler,
view: wgpu::TextureView,
}
impl CubeTexture {
pub fn create_2d(
device: &wgpu::Device,
width: u32,
height: u32,
format: wgpu::TextureFormat,
mip_level_count: u32,
usage: wgpu::TextureUsages,
mag_filter: wgpu::FilterMode,
label: Option<&str>,
) -> Self {
let texture = device.create_texture(&wgpu::TextureDescriptor {
label,
size: wgpu::Extent3d {
width,
height,
// A cube has 6 sides, so we need 6 layers
depth_or_array_layers: 6,
},
mip_level_count,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format,
usage,
view_formats: &[],
});
let view = texture.create_view(&wgpu::TextureViewDescriptor {
label,
dimension: Some(wgpu::TextureViewDimension::Cube),
array_layer_count: Some(6),
..Default::default()
});
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
label,
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
Self {
texture,
sampler,
view,
}
}
pub fn texture(&self) -> &wgpu::Texture {
&self.texture
}
pub fn view(&self) -> &wgpu::TextureView {
&self.view
}
pub fn sampler(&self) -> &wgpu::Sampler {
&self.sampler
}
}
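
create_depth_texture and DEPTH_FORMAT are not wired into the renderer yet (its pipeline sets depth_stencil: None). If depth testing were enabled, the render pass attachment would look roughly like this sketch; the helper is hypothetical, and the pipeline would also need a matching wgpu::DepthStencilState with format Texture::DEPTH_FORMAT:

// Hypothetical helper: build the depth attachment for a render pass from the
// texture returned by Texture::create_depth_texture.
fn depth_attachment(depth_texture: &Texture) -> wgpu::RenderPassDepthStencilAttachment<'_> {
    wgpu::RenderPassDepthStencilAttachment {
        view: &depth_texture.view,
        depth_ops: Some(wgpu::Operations {
            load: wgpu::LoadOp::Clear(1.0), // clear to the far plane
            store: wgpu::StoreOp::Store,
        }),
        stencil_ops: None,
    }
}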