fix(resources): load_string is no longer build.rs dependent

lisk77 2025-10-31 01:10:31 +01:00
parent c2776e1bc4
commit fafc7d22a4
2 changed files with 480 additions and 495 deletions


@@ -1,248 +1,248 @@
use std::{collections::HashMap, path::Path};

use crate::font::Font;
use crate::texture_atlas::{TextureAtlas, TextureRegion};
use crate::{font, texture, Texture};
use comet_log::info;
use wgpu::naga::ShaderStage;
use wgpu::{naga, Device, FilterMode, Queue, ShaderModule, TextureFormat, TextureUsages};

pub struct GraphicResourceManager {
    texture_atlas: TextureAtlas,
    fonts: Vec<Font>,
    data_files: HashMap<String, String>,
    shaders: HashMap<String, ShaderModule>,
}

impl GraphicResourceManager {
    pub fn new() -> Self {
        Self {
            texture_atlas: TextureAtlas::empty(),
            fonts: Vec::new(),
            data_files: HashMap::new(),
            shaders: HashMap::new(),
        }
    }

    pub fn texture_atlas(&self) -> &TextureAtlas {
        &self.texture_atlas
    }

    pub fn texture_locations(&self) -> &HashMap<String, TextureRegion> {
        &self.texture_atlas.textures()
    }

    pub fn data_files(&self) -> &HashMap<String, String> {
        &self.data_files
    }

    pub fn fonts(&self) -> &Vec<Font> {
        &self.fonts
    }

    pub fn get_glyph(&self, font: &str, ch: char) -> Option<&TextureRegion> {
        self.fonts
            .iter()
            .find(|f| f.name() == font)
            .and_then(|f| f.get_glyph(ch))
    }

    pub fn set_texture_atlas(&mut self, texture_atlas: TextureAtlas) {
        self.texture_atlas = texture_atlas;
        // This is just for testing purposes
        //self.texture_locations.insert("normal_comet.png".to_string(), ([0,0], [15,15]));
        //self.texture_locations.insert("green_comet.png".to_string(), ([0,15], [15,31]));
    }

    pub fn create_texture_atlas(&mut self, paths: Vec<String>) {
        self.texture_atlas = TextureAtlas::from_texture_paths(paths)
    }

    pub fn load_string(&self, file_name: &str) -> anyhow::Result<String> {
        let base_path = std::env::var("OUT_DIR")
            .map(|p| Path::new(&p).to_path_buf())
            .unwrap_or_else(|_| Path::new(".").to_path_buf());

        let path = base_path.join(file_name);
        let txt = std::fs::read_to_string(&path)
            .map_err(|e| anyhow::anyhow!("Failed to load {}: {}", path.display(), e))?;
        Ok(txt)
    }
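
A minimal usage sketch of the new behaviour, not part of the commit itself; the `sprite.wgsl` file name and the wrapping function are assumptions. With `OUT_DIR` set (a build.rs-driven build) the file is resolved as `<OUT_DIR>/sprite.wgsl`; without it, the loader now falls back to the current working directory instead of returning an error.

fn load_sprite_shader_source() -> anyhow::Result<String> {
    // Hypothetical caller; a real one would reuse a long-lived manager.
    let manager = GraphicResourceManager::new();
    // Reads "<OUT_DIR>/sprite.wgsl" when OUT_DIR is set, "./sprite.wgsl" otherwise.
    manager.load_string("sprite.wgsl")
}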
    pub fn load_binary(&self, file_name: &str) -> anyhow::Result<Vec<u8>> {
        let path = Path::new(std::env::var("OUT_DIR")?.as_str())
            .join("res")
            .join(file_name);
        let data = std::fs::read(path)?;
        Ok(data)
    }

    pub fn load_texture(
        &self,
        file_name: &str,
        is_normal_map: bool,
        device: &Device,
        queue: &Queue,
    ) -> anyhow::Result<Texture> {
        let data = self.load_binary(file_name)?;
        Texture::from_bytes(device, queue, &data, file_name, is_normal_map)
    }

    /// `file_name` is the full name, so with the extension
    /// `shader_stage` is only needed if it is a GLSL shader, so default to None if it isn't GLSL
    pub fn load_shader(
        &mut self,
        shader_stage: Option<ShaderStage>,
        file_name: &str,
        device: &Device,
    ) -> anyhow::Result<()> {
        let shader_source = self.load_string(file_name)?;

        let module = match file_name.split('.').last() {
            Some("wgsl") => device.create_shader_module(wgpu::ShaderModuleDescriptor {
                label: Some(file_name),
                source: wgpu::ShaderSource::Wgsl(shader_source.into()),
            }),
            Some("glsl") => {
                if let Some(stage) = shader_stage {
                    device.create_shader_module(wgpu::ShaderModuleDescriptor {
                        label: Some(file_name),
                        source: wgpu::ShaderSource::Glsl {
                            shader: shader_source.into(),
                            stage,
                            defines: naga::FastHashMap::default(),
                        },
                    })
                } else {
                    return Err(anyhow::anyhow!("GLSL shader needs a stage"));
                }
            }
            _ => return Err(anyhow::anyhow!("Unsupported shader type")),
        };

        self.shaders.insert(file_name.to_string(), module);
        Ok(())
    }
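
A hedged usage sketch for the shader loader above; the file names, the mutable manager reference, and the fragment stage are illustrative assumptions rather than part of this commit:

fn load_example_shaders(
    manager: &mut GraphicResourceManager,
    device: &wgpu::Device,
) -> anyhow::Result<()> {
    // WGSL modules declare their own entry points, so no stage is required.
    manager.load_shader(None, "sprite.wgsl", device)?;
    // GLSL sources are single-stage, so the stage has to be passed explicitly.
    manager.load_shader(Some(ShaderStage::Fragment), "post.glsl", device)?;

    // Compiled modules are cached under their file name.
    let _sprite = manager.get_shader("sprite.wgsl");
    Ok(())
}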
    pub fn get_shader(&self, shader: &str) -> Option<&ShaderModule> {
        self.shaders.get(shader)
    }

    pub fn load_font(&mut self, path: &str, size: f32) {
        info!("Loading font: {}", path);
        let font = Font::new(path, size);
        info!("Font {} loaded!", font.name());
        self.fonts.push(font);
    }

    /*pub async fn load_model(
        &self,
        file_name: &str,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        layout: &wgpu::BindGroupLayout,
    ) -> anyhow::Result<model::Model> {
        let obj_text = self.load_string(file_name).await?;
        let obj_cursor = Cursor::new(obj_text);
        let mut obj_reader = BufReader::new(obj_cursor);

        let (models, obj_materials) = tobj::load_obj_buf_async(
            &mut obj_reader,
            &tobj::LoadOptions {
                triangulate: true,
                single_index: true,
                ..Default::default()
            },
            |p| async move {
                let mat_text = self.load_string(&p).await.unwrap();
                tobj::load_mtl_buf(&mut BufReader::new(Cursor::new(mat_text)))
            },
        )
        .await?;

        let mut materials = Vec::new();
        for m in obj_materials? {
            let diffuse_texture = self.load_texture(&m.diffuse_texture, false, device, queue).await?;
            let normal_texture = self.load_texture(&m.normal_texture, true, device, queue).await?;
            let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
                layout,
                entries: &[
                    wgpu::BindGroupEntry {
                        binding: 0,
                        resource: wgpu::BindingResource::TextureView(&diffuse_texture.view),
                    },
                    wgpu::BindGroupEntry {
                        binding: 1,
                        resource: wgpu::BindingResource::Sampler(&diffuse_texture.sampler),
                    },
                ],
                label: None,
            });

            materials.push(model::Material {
                name: m.name,
                diffuse_texture,
                bind_group,
            });
        }

        let meshes = models
            .into_iter()
            .map(|m| {
                let vertices = (0..m.mesh.positions.len() / 3)
                    .map(|i| {
                        if m.mesh.normals.is_empty() {
                            model::ModelVertex {
                                position: [
                                    m.mesh.positions[i * 3],
                                    m.mesh.positions[i * 3 + 1],
                                    m.mesh.positions[i * 3 + 2],
                                ],
                                tex_coords: [m.mesh.texcoords[i * 2], 1.0 - m.mesh.texcoords[i * 2 + 1]],
                                normal: [0.0, 0.0, 0.0],
                            }
                        } else {
                            model::ModelVertex {
                                position: [
                                    m.mesh.positions[i * 3],
                                    m.mesh.positions[i * 3 + 1],
                                    m.mesh.positions[i * 3 + 2],
                                ],
                                tex_coords: [m.mesh.texcoords[i * 2], 1.0 - m.mesh.texcoords[i * 2 + 1]],
                                normal: [
                                    m.mesh.normals[i * 3],
                                    m.mesh.normals[i * 3 + 1],
                                    m.mesh.normals[i * 3 + 2],
                                ],
                            }
                        }
                    })
                    .collect::<Vec<_>>();

                let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
                    label: Some(&format!("{:?} Vertex Buffer", file_name)),
                    contents: bytemuck::cast_slice(&vertices),
                    usage: wgpu::BufferUsages::VERTEX,
                });
                let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
                    label: Some(&format!("{:?} Index Buffer", file_name)),
                    contents: bytemuck::cast_slice(&m.mesh.indices),
                    usage: wgpu::BufferUsages::INDEX,
                });

                model::Mesh {
                    name: file_name.to_string(),
                    vertex_buffer,
                    index_buffer,
                    num_elements: m.mesh.indices.len() as u32,
                    material: m.mesh.material_id.unwrap_or(0),
                }
            })
            .collect::<Vec<_>>();

        Ok(model::Model { meshes, materials })
    }*/
}


@@ -1,325 +1,310 @@
use anyhow::*;
use image::{DynamicImage, GenericImageView, RgbaImage};
use wgpu::{Device, Queue};

#[derive(Debug)]
pub struct Texture {
    #[allow(unused)]
    pub texture: wgpu::Texture,
    pub view: wgpu::TextureView,
    pub sampler: wgpu::Sampler,
    pub size: wgpu::Extent3d,
}

impl Texture {
    pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;

    pub fn create_depth_texture(
        device: &wgpu::Device,
        config: &wgpu::SurfaceConfiguration,
        label: &str,
    ) -> Self {
        let size = wgpu::Extent3d {
            width: config.width.max(1),
            height: config.height.max(1),
            depth_or_array_layers: 1,
        };
        let desc = wgpu::TextureDescriptor {
            label: Some(label),
            size,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: Self::DEPTH_FORMAT,
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
            view_formats: &[Self::DEPTH_FORMAT],
        };
        let texture = device.create_texture(&desc);

        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter: wgpu::FilterMode::Linear,
            min_filter: wgpu::FilterMode::Linear,
            mipmap_filter: wgpu::FilterMode::Nearest,
            compare: Some(wgpu::CompareFunction::LessEqual),
            lod_min_clamp: 0.0,
            lod_max_clamp: 100.0,
            ..Default::default()
        });

        Self {
            texture,
            view,
            sampler,
            size,
        }
    }
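
A hedged sketch of how this depth texture is typically attached to a render pass. The command encoder, the color view, and the exact field names (which shift slightly between wgpu releases) are assumptions, not part of this file:

fn clear_pass_with_depth(
    encoder: &mut wgpu::CommandEncoder,
    color_view: &wgpu::TextureView,
    depth_texture: &Texture,
) {
    // Dropping the pass at the end of the function ends it; draws would go here.
    let _pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
        label: Some("Main Pass"),
        color_attachments: &[Some(wgpu::RenderPassColorAttachment {
            view: color_view,
            resolve_target: None,
            ops: wgpu::Operations {
                load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
                store: wgpu::StoreOp::Store,
            },
        })],
        depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
            view: &depth_texture.view,
            depth_ops: Some(wgpu::Operations {
                // Clear to the far plane; the LessEqual sampler compare works against this.
                load: wgpu::LoadOp::Clear(1.0),
                store: wgpu::StoreOp::Store,
            }),
            stencil_ops: None,
        }),
        timestamp_writes: None,
        occlusion_query_set: None,
    });
}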
    #[allow(dead_code)]
    pub fn from_bytes(
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        bytes: &[u8],
        label: &str,
        is_normal_map: bool,
    ) -> Result<Self> {
        let img = image::load_from_memory(bytes)?;
        Self::from_image(device, queue, &img, Some(label), is_normal_map)
    }

    pub fn from_image(
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        img: &image::DynamicImage,
        label: Option<&str>,
        is_normal_map: bool,
    ) -> Result<Self> {
        let dimensions = img.dimensions();
        let rgba = img.to_rgba8();

        let format = if is_normal_map {
            wgpu::TextureFormat::Rgba8Unorm
        } else {
            wgpu::TextureFormat::Rgba8UnormSrgb
        };
        let usage = wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST;
        let size = wgpu::Extent3d {
            width: img.width(),
            height: img.height(),
            depth_or_array_layers: 1,
        };
        let texture = Self::create_2d_texture(
            device,
            size.width,
            size.height,
            format,
            usage,
            wgpu::FilterMode::Nearest,
            label,
        );

        queue.write_texture(
            wgpu::ImageCopyTexture {
                aspect: wgpu::TextureAspect::All,
                texture: &texture.texture,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
            },
            &rgba,
            wgpu::ImageDataLayout {
                offset: 0,
                bytes_per_row: Some(4 * dimensions.0),
                rows_per_image: Some(dimensions.1),
            },
            size,
        );

        Ok(texture)
    }
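
A small usage sketch for the constructors above; the embedded asset path is an assumption for illustration only:

fn load_logo_texture(device: &wgpu::Device, queue: &wgpu::Queue) -> Result<Texture> {
    // Any `&[u8]` holding a supported image format works; embedding is just for the example.
    let bytes = include_bytes!("../assets/logo.png");
    // `false`: treat the pixels as sRGB color data rather than a normal map.
    Texture::from_bytes(device, queue, bytes, "logo.png", false)
}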
    pub(crate) fn create_2d_texture(
        device: &wgpu::Device,
        width: u32,
        height: u32,
        format: wgpu::TextureFormat,
        usage: wgpu::TextureUsages,
        mag_filter: wgpu::FilterMode,
        label: Option<&str>,
    ) -> Self {
        let size = wgpu::Extent3d {
            width,
            height,
            depth_or_array_layers: 1,
        };
        Self::create_texture(
            device,
            label,
            size,
            format,
            usage,
            wgpu::TextureDimension::D2,
            mag_filter,
        )
    }

    pub fn create_texture(
        device: &wgpu::Device,
        label: Option<&str>,
        size: wgpu::Extent3d,
        format: wgpu::TextureFormat,
        usage: wgpu::TextureUsages,
        dimension: wgpu::TextureDimension,
        mag_filter: wgpu::FilterMode,
    ) -> Self {
        let texture = device.create_texture(&wgpu::TextureDescriptor {
            label,
            size,
            mip_level_count: 1,
            sample_count: 1,
            dimension,
            format,
            usage,
            view_formats: &[],
        });

        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter,
            min_filter: wgpu::FilterMode::Nearest,
            mipmap_filter: wgpu::FilterMode::Nearest,
            ..Default::default()
        });

        Self {
            texture,
            view,
            sampler,
            size, // NEW!
        }
    }
    pub fn to_image(&self, device: &wgpu::Device, queue: &wgpu::Queue) -> Result<DynamicImage> {
        let width = self.size.width;
        let height = self.size.height;

        let texture_size_bytes = (4 * width * height) as wgpu::BufferAddress;

        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Texture Readback Buffer"),
            size: texture_size_bytes,
            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
            mapped_at_creation: false,
        });

        let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
            label: Some("Texture to Buffer Encoder"),
        });

        encoder.copy_texture_to_buffer(
            wgpu::ImageCopyTexture {
                texture: &self.texture,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            wgpu::ImageCopyBuffer {
                buffer: &buffer,
                layout: wgpu::ImageDataLayout {
                    offset: 0,
                    bytes_per_row: Some(4 * width),
                    rows_per_image: Some(height),
                },
            },
            self.size,
        );

        queue.submit(Some(encoder.finish()));

        let buffer_slice = buffer.slice(..);
        buffer_slice.map_async(wgpu::MapMode::Read, |result| {
            if let Err(e) = result {
                eprintln!("Failed to map buffer: {:?}", e);
            }
        });

        let data = buffer_slice.get_mapped_range();

        let image = RgbaImage::from_raw(width, height, data.to_vec())
            .ok_or_else(|| anyhow!("Failed to create image from raw texture data"))?;

        buffer.unmap();

        Ok(DynamicImage::ImageRgba8(image))
    }
}
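
One caveat on the readback above: `map_async` only schedules the mapping, so most wgpu versions require the device to be polled before `get_mapped_range` is valid. A hedged helper sketch, not part of this commit, and the exact poll API (`Maintain::Wait` here) varies across wgpu releases:

/// Block until a read-mapping request has completed. Sketch only.
fn block_on_buffer_map(device: &wgpu::Device, slice: wgpu::BufferSlice<'_>) {
    slice.map_async(wgpu::MapMode::Read, |result| {
        if let Err(e) = result {
            eprintln!("Failed to map buffer: {:?}", e);
        }
    });
    // Drive the device until the mapping callback has fired.
    device.poll(wgpu::Maintain::Wait);
}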
pub struct CubeTexture {
    texture: wgpu::Texture,
    sampler: wgpu::Sampler,
    view: wgpu::TextureView,
}

impl CubeTexture {
    pub fn create_2d(
        device: &wgpu::Device,
        width: u32,
        height: u32,
        format: wgpu::TextureFormat,
        mip_level_count: u32,
        usage: wgpu::TextureUsages,
        mag_filter: wgpu::FilterMode,
        label: Option<&str>,
    ) -> Self {
        let texture = device.create_texture(&wgpu::TextureDescriptor {
            label,
            size: wgpu::Extent3d {
                width,
                height,
                // A cube has 6 sides, so we need 6 layers
                depth_or_array_layers: 6,
            },
            mip_level_count,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format,
            usage,
            view_formats: &[],
        });

        let view = texture.create_view(&wgpu::TextureViewDescriptor {
            label,
            dimension: Some(wgpu::TextureViewDimension::Cube),
            array_layer_count: Some(6),
            ..Default::default()
        });

        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
            label,
            address_mode_u: wgpu::AddressMode::ClampToEdge,
            address_mode_v: wgpu::AddressMode::ClampToEdge,
            address_mode_w: wgpu::AddressMode::ClampToEdge,
            mag_filter,
            min_filter: wgpu::FilterMode::Nearest,
            mipmap_filter: wgpu::FilterMode::Nearest,
            ..Default::default()
        });

        Self {
            texture,
            sampler,
            view,
        }
    }

    pub fn texture(&self) -> &wgpu::Texture {
        &self.texture
    }

    pub fn view(&self) -> &wgpu::TextureView {
        &self.view
    }

    pub fn sampler(&self) -> &wgpu::Sampler {
        &self.sampler
    }
}