renderling/texture.rs

1//! Wrapper around [`wgpu::Texture`].
2use core::sync::atomic::AtomicUsize;
3use std::{
4    ops::Deref,
5    sync::{Arc, LazyLock, Mutex},
6};
7
8use craballoc::runtime::WgpuRuntime;
9use glam::{Mat4, UVec2};
10use image::{
11    load_from_memory, DynamicImage, GenericImage, GenericImageView, ImageBuffer, ImageError, Luma,
12    PixelWithColorType, Rgba32FImage,
13};
14use mips::MipMapGenerator;
15use snafu::prelude::*;
16
17use crate::{
18    atlas::{AtlasImage, AtlasImageFormat},
19    camera::Camera,
20};
21
22pub mod mips;
23
24/// Enumeration of errors produced by [`Texture`].
25#[derive(Debug, Snafu)]
26pub enum TextureError {
27    #[snafu(display("Unable to load '{}' image from memory: {}", label, source))]
28    Loading { source: ImageError, label: String },
29
30    #[snafu(display("Image buffer '{}' unsupported color type: {:?}", label, color_type))]
31    UnsupportedColorType {
32        color_type: image::ExtendedColorType,
33        label: String,
34    },
35
36    #[snafu(display("Could not map buffer"))]
37    CouldNotMapBuffer { source: wgpu::BufferAsyncError },
38
39    #[snafu(display("Could not convert image buffer"))]
40    CouldNotConvertImageBuffer,
41
42    #[snafu(display("Could not create an image buffer"))]
43    CouldNotCreateImageBuffer,
44
45    #[snafu(display("Unsupported format: {format:#?}"))]
46    UnsupportedFormat { format: wgpu::TextureFormat },
47
48    #[snafu(display("Buffer async error: {source}"))]
49    BufferAsync { source: wgpu::BufferAsyncError },
50
51    #[snafu(display("Driver poll error: {source}"))]
52    Poll { source: wgpu::PollError },
53}
54
55type Result<T, E = TextureError> = std::result::Result<T, E>;
56
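/// Returns `(channels, bytes_per_subpixel)` for the texture formats this module
/// knows how to read back.
///
/// For example, `Rgba16Float` has 4 channels of 2 bytes each. A sketch:
///
/// ```ignore
/// let (channels, bytes) =
///     wgpu_texture_format_channels_and_subpixel_bytes(wgpu::TextureFormat::Rgba16Float)?;
/// assert_eq!((channels, bytes), (4, 2));
/// ```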
57pub fn wgpu_texture_format_channels_and_subpixel_bytes(
58    format: wgpu::TextureFormat,
59) -> Result<(u32, u32)> {
60    Ok(match format {
61        wgpu::TextureFormat::Depth32Float => (1, 4),
62        wgpu::TextureFormat::R32Float => (1, 4),
63        wgpu::TextureFormat::Rg16Float => (2, 2),
64        wgpu::TextureFormat::Rgba8Unorm => (4, 1),
65        wgpu::TextureFormat::Rgba16Float => (4, 2),
66        wgpu::TextureFormat::Rgba32Float => (4, 4),
67        wgpu::TextureFormat::Rgba8UnormSrgb => (4, 1),
68        wgpu::TextureFormat::R8Unorm => (1, 1),
69        f => UnsupportedFormatSnafu { format: f }.fail()?,
70    })
71}
72
73    /// Like [`wgpu_texture_format_channels_and_subpixel_bytes`], but panics instead of
    /// returning an error.
    ///
    /// ## Panics
    /// Panics if `format` is unsupported.
74pub fn wgpu_texture_format_channels_and_subpixel_bytes_todo(
75    format: wgpu::TextureFormat,
76) -> (u32, u32) {
77    wgpu_texture_format_channels_and_subpixel_bytes(format).unwrap()
78}
79
80static NEXT_TEXTURE_ID: LazyLock<Arc<AtomicUsize>> = LazyLock::new(|| Arc::new(0.into()));
81
82pub(crate) fn get_next_texture_id() -> usize {
83    NEXT_TEXTURE_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed)
84}
85
86/// A texture living on the GPU.
87#[derive(Debug, Clone)]
88pub struct Texture {
89    pub texture: Arc<wgpu::Texture>,
90    // TODO: revisit whether we really need to create view and sampler for textures
91    // automatically
92    pub view: Arc<wgpu::TextureView>,
93    pub sampler: Arc<wgpu::Sampler>,
94    pub(crate) id: usize,
95}
96
97impl Texture {
98    /// Returns the id of this texture.
99    ///
100    /// The id is a monotonically increasing count of all textures created.
101    ///
102    /// This can be used to determine whether a texture has been
103    /// replaced by another, for example to invalidate a cached
104    /// [`wgpu::BindGroup`].
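    ///
    /// For example, a cached bind group can be treated as stale when the id
    /// changes. A sketch (the `CachedBindGroup` type here is hypothetical):
    ///
    /// ```ignore
    /// struct CachedBindGroup {
    ///     texture_id: usize,
    ///     bindgroup: wgpu::BindGroup,
    /// }
    ///
    /// fn is_stale(cache: &CachedBindGroup, texture: &Texture) -> bool {
    ///     cache.texture_id != texture.id()
    /// }
    /// ```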
105    pub fn id(&self) -> usize {
106        self.id
107    }
108
109    pub fn width(&self) -> u32 {
110        self.texture.width()
111    }
112
113    pub fn height(&self) -> u32 {
114        self.texture.height()
115    }
116
117    pub fn size(&self) -> UVec2 {
118        UVec2::new(self.width(), self.height())
119    }
120
121    /// Create a cubemap texture from 6 faces.
    ///
    /// `face_textures` must contain `6 * mip_levels` entries in face-major order:
    /// all mip levels of face 0 (largest first), then all mip levels of face 1,
    /// and so on. Mip level 0 of each entry is copied into the corresponding
    /// layer and mip of the cubemap.
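    ///
    /// A sketch of building the `face_textures` slice, mirroring what
    /// `render_cubemap` does in this module (`faces` is a hypothetical
    /// `Vec<Texture>` holding the six rendered faces):
    ///
    /// ```ignore
    /// let mut face_textures = Vec::new();
    /// for mut face in faces {
    ///     let mips = face.generate_mips(&runtime, None, mip_levels);
    ///     face_textures.push(face);
    ///     face_textures.extend(mips);
    /// }
    /// ```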
122    pub fn new_cubemap_texture(
123        runtime: impl AsRef<WgpuRuntime>,
124        label: Option<impl AsRef<str>>,
125        texture_size: u32,
126        face_textures: &[Texture],
127        image_format: wgpu::TextureFormat,
128        mip_levels: u32,
129    ) -> Self {
130        let label = label.as_ref().map(|s| s.as_ref());
131        let WgpuRuntime { device, queue } = runtime.as_ref();
132        let size = wgpu::Extent3d {
133            width: texture_size,
134            height: texture_size,
135            depth_or_array_layers: 6,
136        };
137        let cubemap_texture = device.create_texture(&wgpu::TextureDescriptor {
138            label: None,
139            size,
140            mip_level_count: mip_levels,
141            sample_count: 1,
142            dimension: wgpu::TextureDimension::D2,
143            format: image_format,
144            usage: wgpu::TextureUsages::TEXTURE_BINDING
145                | wgpu::TextureUsages::COPY_DST
146                | wgpu::TextureUsages::COPY_SRC,
147            view_formats: &[],
148        });
149
150        let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
151            label: Some("texture_buffer_copy_encoder"),
152        });
153
154        for i in 0..6 {
155            for mip_level in 0..mip_levels as usize {
156                let mip_size = texture_size >> mip_level;
157                let index = i * mip_levels as usize + mip_level;
158                let texture = &face_textures[index].texture;
159                encoder.copy_texture_to_texture(
160                    wgpu::TexelCopyTextureInfo {
161                        texture,
162                        mip_level: 0,
163                        origin: wgpu::Origin3d::ZERO,
164                        aspect: wgpu::TextureAspect::All,
165                    },
166                    wgpu::TexelCopyTextureInfo {
167                        texture: &cubemap_texture,
168                        mip_level: mip_level as u32,
169                        origin: wgpu::Origin3d {
170                            x: 0,
171                            y: 0,
172                            z: i as u32,
173                        },
174                        aspect: wgpu::TextureAspect::All,
175                    },
176                    wgpu::Extent3d {
177                        width: mip_size,
178                        height: mip_size,
179                        depth_or_array_layers: 1,
180                    },
181                );
182            }
183        }
184        queue.submit([encoder.finish()]);
185
186        let view = cubemap_texture.create_view(&wgpu::TextureViewDescriptor {
187            dimension: Some(wgpu::TextureViewDimension::Cube),
188            label,
189            ..Default::default()
190        });
191
192        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
193            address_mode_u: wgpu::AddressMode::ClampToEdge,
194            address_mode_v: wgpu::AddressMode::ClampToEdge,
195            address_mode_w: wgpu::AddressMode::ClampToEdge,
196            mag_filter: wgpu::FilterMode::Linear,
197            min_filter: wgpu::FilterMode::Linear,
198            mipmap_filter: wgpu::FilterMode::Linear,
199            label,
200            ..Default::default()
201        });
202
203        Texture {
204            texture: cubemap_texture.into(),
205            view: view.into(),
206            sampler: sampler.into(),
207            id: get_next_texture_id(),
208        }
209    }
210
211    /// Create a new texture.
212    #[allow(clippy::too_many_arguments)]
213    pub fn new_with(
214        runtime: impl AsRef<WgpuRuntime>,
215        label: Option<&str>,
216        usage: Option<wgpu::TextureUsages>,
217        sampler: Option<wgpu::Sampler>,
218        format: wgpu::TextureFormat,
219        color_channels: u32,
220        color_channel_bytes: u32,
221        width: u32,
222        height: u32,
223        mip_level_count: u32,
224        data: &[u8],
225    ) -> Self {
226        let runtime = runtime.as_ref();
227        let device = &runtime.device;
228        let queue = &runtime.queue;
229        let mip_level_count = 1.max(mip_level_count);
230        let size = wgpu::Extent3d {
231            width,
232            height,
233            depth_or_array_layers: 1,
234        };
235
236        let texture = device.create_texture(&wgpu::TextureDescriptor {
237            label,
238            size,
239            mip_level_count,
240            sample_count: 1,
241            dimension: wgpu::TextureDimension::D2,
242            format,
243            usage: usage
244                .unwrap_or(wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST),
245            view_formats: &[],
246        });
247
248        if !data.is_empty() {
249            queue.write_texture(
250                wgpu::TexelCopyTextureInfo {
251                    texture: &texture,
252                    mip_level: 0,
253                    origin: wgpu::Origin3d::ZERO,
254                    aspect: wgpu::TextureAspect::All,
255                },
256                data,
257                wgpu::TexelCopyBufferLayout {
258                    offset: 0,
259                    bytes_per_row: Some(color_channels * color_channel_bytes * width),
260                    rows_per_image: None,
261                },
262                size,
263            );
264        }
265
266        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
267        let sampler = sampler.unwrap_or_else(|| {
268            device.create_sampler(&wgpu::SamplerDescriptor {
269                address_mode_u: wgpu::AddressMode::ClampToEdge,
270                address_mode_v: wgpu::AddressMode::ClampToEdge,
271                address_mode_w: wgpu::AddressMode::ClampToEdge,
272                mag_filter: wgpu::FilterMode::Linear,
273                min_filter: wgpu::FilterMode::Linear,
274                mipmap_filter: wgpu::FilterMode::Linear,
275                ..Default::default()
276            })
277        });
278
279        Texture {
280            texture: Arc::new(texture),
281            view: Arc::new(view),
282            sampler: Arc::new(sampler),
283            id: get_next_texture_id(),
284        }
285    }
286
287    /// Create a new texture.
288    ///
289    /// This defaults the format to `Rgba8UnormSrgb` and assumes a pixel is 1
290    /// byte per channel.
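    ///
    /// A sketch creating a 1x1 white RGBA texture (assumes a `runtime: WgpuRuntime`
    /// is at hand):
    ///
    /// ```ignore
    /// let pixel: [u8; 4] = [255, 255, 255, 255];
    /// let texture = Texture::new(&runtime, Some("white"), None, 4, 1, 1, &pixel);
    /// assert_eq!(texture.size(), glam::UVec2::ONE);
    /// ```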
291    #[allow(clippy::too_many_arguments)]
292    pub fn new(
293        runtime: impl AsRef<WgpuRuntime>,
294        label: Option<&str>,
295        usage: Option<wgpu::TextureUsages>,
296        color_channels: u32,
297        width: u32,
298        height: u32,
299        data: &[u8],
300    ) -> Self {
301        let runtime = runtime.as_ref();
302        Self::new_with(
303            runtime,
304            label,
305            usage,
306            None,
307            wgpu::TextureFormat::Rgba8UnormSrgb,
308            color_channels,
309            1,
310            width,
311            height,
312            1,
313            data,
314        )
315    }
316
317    pub fn from_image_bytes(
318        runtime: impl AsRef<WgpuRuntime>,
319        bytes: &[u8],
320        label: &str,
321    ) -> Result<Self> {
322        let img = load_from_memory(bytes).with_context(|_| LoadingSnafu {
323            label: label.to_string(),
324        })?;
325
326        match img {
327            DynamicImage::ImageLuma8(b) => {
328                Self::from_image_buffer(runtime, &b, Some(label), None, None)
329            }
330            DynamicImage::ImageLumaA8(b) => {
331                Self::from_image_buffer(runtime, &b, Some(label), None, None)
332            }
333            DynamicImage::ImageRgb8(b) => {
334                Self::from_image_buffer(runtime, &b, Some(label), None, None)
335            }
336            DynamicImage::ImageRgba8(b) => {
337                Self::from_image_buffer(runtime, &b, Some(label), None, None)
338            }
339            img => Self::from_image_buffer(runtime, &img.to_rgba8(), Some(label), None, None),
340        }
341    }
342
343    pub fn from_dynamic_image(
344        runtime: impl AsRef<WgpuRuntime>,
345        dyn_img: image::DynamicImage,
346        label: Option<&str>,
347        usage: Option<wgpu::TextureUsages>,
348        mip_level_count: u32,
349    ) -> Self {
350        let runtime = runtime.as_ref();
351        let device = &runtime.device;
352        let queue = &runtime.queue;
353        let mip_level_count = mip_level_count.max(1);
354        let dimensions = dyn_img.dimensions();
355
356        let size = wgpu::Extent3d {
357            width: dimensions.0,
358            height: dimensions.1,
359            depth_or_array_layers: 1,
360        };
361
362        let (img, format, channels) = match dyn_img {
363            img @ DynamicImage::ImageLuma8(_) => (img, wgpu::TextureFormat::R8Unorm, 1),
364            img @ DynamicImage::ImageRgba8(_) => (img, wgpu::TextureFormat::Rgba8UnormSrgb, 4),
365            img @ DynamicImage::ImageLuma16(_) => (img, wgpu::TextureFormat::R16Unorm, 1),
366            img @ DynamicImage::ImageRgba16(_) => (img, wgpu::TextureFormat::Rgba16Unorm, 4),
367            img @ DynamicImage::ImageRgba32F(_) => (img, wgpu::TextureFormat::Rgba32Float, 4),
368            img => {
369                let rgba8 = DynamicImage::ImageRgba8(img.into_rgba8());
370                (rgba8, wgpu::TextureFormat::Rgba8UnormSrgb, 4)
371            }
372        };
373
374        let texture = device.create_texture(&wgpu::TextureDescriptor {
375            label,
376            size,
377            mip_level_count,
378            sample_count: 1,
379            dimension: wgpu::TextureDimension::D2,
380            format,
381            usage: usage
382                .unwrap_or(wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST),
383            view_formats: &[],
384        });
385
386        queue.write_texture(
387            wgpu::TexelCopyTextureInfo {
388                texture: &texture,
389                mip_level: 0,
390                origin: wgpu::Origin3d::ZERO,
391                aspect: wgpu::TextureAspect::All,
392            },
393            img.as_bytes(),
394            wgpu::TexelCopyBufferLayout {
395                offset: 0,
396                bytes_per_row: Some(channels * dimensions.0),
397                rows_per_image: Some(dimensions.1),
398            },
399            size,
400        );
401
402        Self::from_wgpu_tex(device, texture, None, None)
403    }
404
405    pub fn from_image_buffer<P>(
406        runtime: impl AsRef<WgpuRuntime>,
407        img: &ImageBuffer<P, Vec<u8>>,
408        label: Option<&str>,
409        usage: Option<wgpu::TextureUsages>,
410        mip_level_count: Option<u32>,
411    ) -> Result<Self>
412    where
413        P: PixelWithColorType,
414        ImageBuffer<P, Vec<u8>>: GenericImage + Deref<Target = [u8]>,
415    {
416        let runtime = runtime.as_ref();
417        let dimensions = img.dimensions();
418
419        let size = wgpu::Extent3d {
420            width: dimensions.0,
421            height: dimensions.1,
422            depth_or_array_layers: 1,
423        };
424
425        let texture = runtime.device.create_texture(&wgpu::TextureDescriptor {
426            label,
427            size,
428            mip_level_count: 1,
429            sample_count: 1,
430            dimension: wgpu::TextureDimension::D2,
431            format: {
432                ensure!(
433                    P::COLOR_TYPE == image::ExtendedColorType::Rgba8,
434                    UnsupportedColorTypeSnafu {
435                        color_type: P::COLOR_TYPE,
436                        label: label
437                            .map(ToString::to_string)
438                            .unwrap_or_else(|| "unknown".to_string()),
439                    }
440                );
441                wgpu::TextureFormat::Rgba8UnormSrgb
442            },
443            usage: usage
444                .unwrap_or(wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST),
445            view_formats: &[],
446        });
447
448        runtime.queue.write_texture(
449            wgpu::TexelCopyTextureInfo {
450                texture: &texture,
451                mip_level: 0,
452                origin: wgpu::Origin3d::ZERO,
453                aspect: wgpu::TextureAspect::All,
454            },
455            img.deref(),
456            wgpu::TexelCopyBufferLayout {
457                offset: 0,
458                bytes_per_row: Some(P::CHANNEL_COUNT as u32 * dimensions.0),
459                rows_per_image: Some(dimensions.1),
460            },
461            size,
462        );
463
464        Ok(Self::from_wgpu_tex(
465            &runtime.device,
466            texture,
467            None,
468            mip_level_count,
469        ))
470    }
471
472    pub fn from_wgpu_tex(
473        device: &wgpu::Device,
474        texture: impl Into<Arc<wgpu::Texture>>,
475        sampler: Option<wgpu::SamplerDescriptor>,
476        mip_level_count: Option<u32>,
477    ) -> Self {
478        let texture = texture.into();
479        let view = Arc::new(texture.create_view(&wgpu::TextureViewDescriptor {
480            mip_level_count,
481            ..Default::default()
482        }));
483        let sampler_descriptor = sampler.unwrap_or_else(|| wgpu::SamplerDescriptor {
484            address_mode_u: wgpu::AddressMode::ClampToEdge,
485            address_mode_v: wgpu::AddressMode::ClampToEdge,
486            address_mode_w: wgpu::AddressMode::ClampToEdge,
487            mag_filter: wgpu::FilterMode::Linear,
488            min_filter: wgpu::FilterMode::Linear,
489            mipmap_filter: wgpu::FilterMode::Linear,
490            ..Default::default()
491        });
492        let sampler = Arc::new(device.create_sampler(&sampler_descriptor));
493
494        Self {
495            texture,
496            view,
497            sampler,
498            id: get_next_texture_id(),
499        }
500    }
501
502    pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
503
504    pub fn create_depth_texture(
505        device: &wgpu::Device,
506        width: u32,
507        height: u32,
508        multisample_count: u32,
509        label: Option<&str>,
510    ) -> Self {
511        let size = wgpu::Extent3d {
512            width,
513            height,
514            depth_or_array_layers: 1,
515        };
516        let desc = wgpu::TextureDescriptor {
517            label,
518            size,
519            mip_level_count: 1,
520            sample_count: multisample_count,
521            dimension: wgpu::TextureDimension::D2,
522            format: Self::DEPTH_FORMAT,
523            usage: wgpu::TextureUsages::RENDER_ATTACHMENT
524                | wgpu::TextureUsages::TEXTURE_BINDING
525                | wgpu::TextureUsages::COPY_SRC,
526            view_formats: &[],
527        };
528        let texture = device.create_texture(&desc);
529
530        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
531        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
532            address_mode_u: wgpu::AddressMode::ClampToEdge,
533            address_mode_v: wgpu::AddressMode::ClampToEdge,
534            address_mode_w: wgpu::AddressMode::ClampToEdge,
535            mag_filter: wgpu::FilterMode::Linear,
536            min_filter: wgpu::FilterMode::Linear,
537            mipmap_filter: wgpu::FilterMode::Nearest,
538            compare: Some(wgpu::CompareFunction::LessEqual),
539            lod_min_clamp: 0.0,
540            lod_max_clamp: 100.0,
541            ..Default::default()
542        });
543
544        Self {
545            texture: Arc::new(texture),
546            view: Arc::new(view),
547            sampler: Arc::new(sampler),
548            id: get_next_texture_id(),
549        }
550    }
551
552    pub fn create_depth_texture_for_shadow_map(
553        device: &wgpu::Device,
554        width: u32,
555        height: u32,
556        multisample_count: u32,
557        label: Option<&str>,
558        is_point_light: bool,
559    ) -> Self {
560        let size = wgpu::Extent3d {
561            width,
562            height,
563            depth_or_array_layers: if is_point_light { 6 } else { 1 },
564        };
565        let desc = wgpu::TextureDescriptor {
566            label,
567            size,
568            mip_level_count: 1,
569            sample_count: multisample_count,
570            dimension: wgpu::TextureDimension::D2,
571            format: Self::DEPTH_FORMAT,
572            usage: wgpu::TextureUsages::RENDER_ATTACHMENT
573                | wgpu::TextureUsages::TEXTURE_BINDING
574                | wgpu::TextureUsages::COPY_SRC,
575            view_formats: &[],
576        };
577        let texture = device.create_texture(&desc);
578
579        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
580        let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
581            address_mode_u: wgpu::AddressMode::ClampToEdge,
582            address_mode_v: wgpu::AddressMode::ClampToEdge,
583            address_mode_w: wgpu::AddressMode::ClampToEdge,
584            mag_filter: wgpu::FilterMode::Linear,
585            min_filter: wgpu::FilterMode::Linear,
586            mipmap_filter: wgpu::FilterMode::Nearest,
587            compare: Some(wgpu::CompareFunction::LessEqual),
588            lod_min_clamp: 0.0,
589            lod_max_clamp: 100.0,
590            ..Default::default()
591        });
592
593        Self {
594            texture: Arc::new(texture),
595            view: Arc::new(view),
596            sampler: Arc::new(sampler),
597            id: get_next_texture_id(),
598        }
599    }
600
601    /// Read the texture from the GPU.
602    ///
603    /// To read the texture you must provide the width, height, the number of
604    /// color/alpha channels and the number of bytes in the underlying
605    /// subpixel type (usually u8=1, u16=2 or f32=4).
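    ///
    /// A sketch of reading back an `Rgba8UnormSrgb` texture (4 channels, 1 byte per
    /// subpixel); the texture must have been created with `COPY_SRC` usage:
    ///
    /// ```ignore
    /// let copied = Texture::read(
    ///     &runtime,
    ///     &texture.texture,
    ///     texture.width() as usize,
    ///     texture.height() as usize,
    ///     4, // color channels (RGBA)
    ///     1, // bytes per subpixel (u8)
    /// );
    /// let pixels = copied.pixels(&runtime.device).await?;
    /// ```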
606    // TODO: remove width and height from these calls, as they can be obtained
607    // from Texture::size()
608    pub fn read(
609        runtime: impl AsRef<WgpuRuntime>,
610        texture: &wgpu::Texture,
611        width: usize,
612        height: usize,
613        channels: usize,
614        subpixel_bytes: usize,
615    ) -> CopiedTextureBuffer {
616        CopiedTextureBuffer::read_from(
617            runtime,
618            texture,
619            width,
620            height,
621            channels,
622            subpixel_bytes,
623            0,
624            None,
625        )
626    }
627
628    pub async fn read_hdr_image(
629        &self,
630        runtime: impl AsRef<WgpuRuntime>,
631    ) -> Result<Rgba32FImage, TextureError> {
632        let runtime = runtime.as_ref();
633        let width = self.width();
634        let height = self.height();
635        let copied = Texture::read(
636            runtime,
637            &self.texture,
638            width as usize,
639            height as usize,
640            4,
641            2,
642        );
643
644        let pixels = copied.pixels(&runtime.device).await?;
645        let pixels = bytemuck::cast_slice::<u8, u16>(pixels.as_slice())
646            .iter()
647            .map(|p| half::f16::from_bits(*p).to_f32())
648            .collect::<Vec<_>>();
649        assert_eq!((width * height * 4) as usize, pixels.len());
650        let img: image::Rgba32FImage = image::ImageBuffer::from_vec(width, height, pixels)
651            .context(CouldNotCreateImageBufferSnafu)?;
652        Ok(img)
653    }
654
655    /// Generate `mip_levels - 1` mipmaps for this texture.
656    ///
657    /// ## Note
658    /// Ensure that `self` has only one mip level; otherwise generation will
659    /// sample from an empty mip.
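    ///
    /// A sketch, given a mutable `texture` created with a single mip level (as in
    /// the `generate_mipmaps` test at the bottom of this module):
    ///
    /// ```ignore
    /// let mips = texture.generate_mips(&runtime, None, 5);
    /// assert_eq!(mips.len(), 4); // mips 1..=4; `texture` itself remains mip 0
    /// ```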
660    pub fn generate_mips(
661        &mut self,
662        runtime: impl AsRef<WgpuRuntime>,
663        _label: Option<&str>,
664        mip_levels: u32,
665    ) -> Vec<Self> {
666        let runtime = runtime.as_ref();
667        let generator = MipMapGenerator::new(&runtime.device, self.texture.format());
668        // UNWRAP: safe because we know the formats match.
669        generator.generate(runtime, self, mip_levels).unwrap()
670    }
671
672    pub const HDR_TEXTURE_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba16Float;
673
674    /// Create a new HDR texture.
675    pub fn create_hdr_texture(
676        device: &wgpu::Device,
677        width: u32,
678        height: u32,
679        multisample_count: u32,
680    ) -> Texture {
681        // * The hdr texture is what we render to in most cases
682        // * we also read from it to calculate bloom
683        // * we also write the bloom mix result back to it
684        // * we also read the texture in tests
685        let usage = wgpu::TextureUsages::RENDER_ATTACHMENT
686            | wgpu::TextureUsages::TEXTURE_BINDING
687            | wgpu::TextureUsages::COPY_DST
688            | wgpu::TextureUsages::COPY_SRC;
689        let texture = Arc::new(device.create_texture(&wgpu::TextureDescriptor {
690            label: Some("hdr"),
691            size: wgpu::Extent3d {
692                width,
693                height,
694                depth_or_array_layers: 1,
695            },
696            mip_level_count: 1,
697            sample_count: multisample_count,
698            dimension: wgpu::TextureDimension::D2,
699            format: Self::HDR_TEXTURE_FORMAT,
700            usage,
701            view_formats: &[],
702        }));
703        let sampler = Arc::new(device.create_sampler(&wgpu::SamplerDescriptor {
704            address_mode_u: wgpu::AddressMode::ClampToEdge,
705            address_mode_v: wgpu::AddressMode::ClampToEdge,
706            address_mode_w: wgpu::AddressMode::ClampToEdge,
707            mag_filter: wgpu::FilterMode::Nearest,
708            min_filter: wgpu::FilterMode::Nearest,
709            mipmap_filter: wgpu::FilterMode::Nearest,
710            ..Default::default()
711        }));
712        let view = Arc::new(texture.create_view(&wgpu::TextureViewDescriptor::default()));
713        Texture {
714            texture,
715            view,
716            sampler,
717            id: get_next_texture_id(),
718        }
719    }
720
721    #[allow(clippy::too_many_arguments)]
722    pub(crate) fn render_cubemap(
723        runtime: impl AsRef<WgpuRuntime>,
724        label: &str,
725        pipeline: &wgpu::RenderPipeline,
726        mut buffer_upkeep: impl FnMut(),
727        camera: &Camera,
728        bindgroup: &wgpu::BindGroup,
729        views: [Mat4; 6],
730        texture_size: u32,
731        mip_levels: Option<u32>,
732    ) -> Self {
733        let runtime = runtime.as_ref();
734        let device = &runtime.device;
735        let queue = &runtime.queue;
736        let mut cubemap_faces = Vec::new();
737        let mip_levels = mip_levels.unwrap_or(1);
738
739        // Render every cube face.
740        for (i, view) in views.iter().enumerate() {
741            let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
742                label: Some(&format!("{label}-cubemap{i}")),
743            });
744
745            let mut cubemap_face = Texture::new_with(
746                runtime,
747                Some(&format!("{label}-cubemap{i}")),
748                Some(
749                    wgpu::TextureUsages::RENDER_ATTACHMENT
750                        | wgpu::TextureUsages::COPY_SRC
751                        | wgpu::TextureUsages::COPY_DST
752                        | wgpu::TextureUsages::TEXTURE_BINDING,
753                ),
754                None,
755                wgpu::TextureFormat::Rgba16Float,
756                4,
757                2,
758                texture_size,
759                texture_size,
760                1,
761                &[],
762            );
763
764            // update the view to point at one of the cube faces
765            camera.set_view(*view);
766            buffer_upkeep();
767
768            {
769                let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
770                    label: Some(&format!("{label}-cubemap{i}")),
771                    color_attachments: &[Some(wgpu::RenderPassColorAttachment {
772                        view: &cubemap_face.view,
773                        resolve_target: None,
774                        ops: wgpu::Operations {
775                            load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
776                            store: wgpu::StoreOp::Store,
777                        },
778                        depth_slice: None,
779                    })],
780                    depth_stencil_attachment: None,
781                    ..Default::default()
782                });
783
784                render_pass.set_pipeline(pipeline);
785                render_pass.set_bind_group(0, Some(bindgroup), &[]);
786                render_pass.draw(0..36, 0..1);
787            }
788
789            queue.submit([encoder.finish()]);
790            let mips = cubemap_face.generate_mips(
791                runtime,
792                Some(&format!("{label}-cubemap mips")),
793                mip_levels,
794            );
795            cubemap_faces.push(cubemap_face);
796            cubemap_faces.extend(mips);
797        }
798
799        Texture::new_cubemap_texture(
800            runtime,
801            Some(format!("{label}-cubemap")),
802            texture_size,
803            cubemap_faces.as_slice(),
804            wgpu::TextureFormat::Rgba16Float,
805            mip_levels,
806        )
807    }
808}
809
810pub async fn read_depth_texture_to_image(
811    runtime: impl AsRef<WgpuRuntime>,
812    width: usize,
813    height: usize,
814    texture: &wgpu::Texture,
815) -> Result<Option<image::GrayImage>> {
816    let depth_copied_buffer = Texture::read(runtime.as_ref(), texture, width, height, 1, 4);
817    let pixels = depth_copied_buffer.pixels(&runtime.as_ref().device).await?;
818    let pixels = bytemuck::cast_slice::<u8, f32>(&pixels)
819        .iter()
820        .copied()
821        .map(|f| {
822            // Depth is stored as Depth32Float with values normalized to 0.0..=1.0, so scale to u8
823            (255.0 * f) as u8
824        })
825        .collect::<Vec<u8>>();
826    Ok(image::GrayImage::from_raw(
827        width as u32,
828        height as u32,
829        pixels,
830    ))
831}
832
833pub async fn read_depth_texture_f32(
834    runtime: impl AsRef<WgpuRuntime>,
835    width: usize,
836    height: usize,
837    texture: &wgpu::Texture,
838) -> Result<Option<image::ImageBuffer<Luma<f32>, Vec<f32>>>> {
839    let depth_copied_buffer = Texture::read(runtime.as_ref(), texture, width, height, 1, 4);
840    let pixels = depth_copied_buffer.pixels(&runtime.as_ref().device).await?;
841    let pixels = bytemuck::cast_slice::<u8, f32>(&pixels).to_vec();
842    Ok(image::ImageBuffer::from_raw(
843        width as u32,
844        height as u32,
845        pixels,
846    ))
847}
848
849/// A depth texture.
850pub struct DepthTexture {
851    pub(crate) runtime: WgpuRuntime,
852    pub(crate) texture: Arc<wgpu::Texture>,
853}
854
855impl Deref for DepthTexture {
856    type Target = wgpu::Texture;
857
858    fn deref(&self) -> &Self::Target {
859        &self.texture
860    }
861}
862
863impl DepthTexture {
864    pub fn new(runtime: impl AsRef<WgpuRuntime>, texture: impl Into<Arc<wgpu::Texture>>) -> Self {
865        Self {
866            runtime: runtime.as_ref().clone(),
867            texture: texture.into(),
868        }
869    }
870
871    pub fn try_new_from(
872        runtime: impl AsRef<WgpuRuntime>,
873        value: Texture,
874    ) -> Result<Self, TextureError> {
875        let format = value.texture.format();
876        if format != wgpu::TextureFormat::Depth32Float {
877            return UnsupportedFormatSnafu { format }.fail();
878        }
879
880        Ok(Self {
881            runtime: runtime.as_ref().clone(),
882            texture: value.texture,
883        })
884    }
885
886    /// Converts the depth texture into an image.
887    ///
888    /// Assumes the format is a single-channel, 32-bit float (`Depth32Float`).
889    ///
890    /// ## Panics
891    /// This may panic if the depth texture has a multisample count greater than
892    /// 1.
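    ///
    /// A sketch (`depth_texture` is a hypothetical `Texture` in `Depth32Float`
    /// format created with `COPY_SRC` usage, as in [`Texture::create_depth_texture`]):
    ///
    /// ```ignore
    /// let depth = DepthTexture::try_new_from(&runtime, depth_texture)?;
    /// if let Some(img) = depth.read_image().await? {
    ///     img.save("depth.png")?;
    /// }
    /// ```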
893    pub async fn read_image(&self) -> Result<Option<image::GrayImage>> {
894        read_depth_texture_to_image(
895            &self.runtime,
896            self.width() as usize,
897            self.height() as usize,
898            &self.texture,
899        )
900        .await
901    }
902}
903
904/// Helper for retrieving an image from a texture.
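///
/// Rows copied from a texture into a buffer must be padded to
/// `wgpu::COPY_BYTES_PER_ROW_ALIGNMENT` (256 bytes). For example, a 100-pixel-wide
/// RGBA8 row is 400 unpadded bytes and pads up to 512. A sketch:
///
/// ```ignore
/// let dims = BufferDimensions::new(4, 1, 100, 100);
/// assert_eq!(dims.unpadded_bytes_per_row, 400);
/// assert_eq!(dims.padded_bytes_per_row, 512);
/// ```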
905#[derive(Clone, Copy, Debug)]
906pub struct BufferDimensions {
907    pub width: usize,
908    pub height: usize,
909    pub unpadded_bytes_per_row: usize,
910    pub padded_bytes_per_row: usize,
911}
912
913impl BufferDimensions {
914    pub fn new(channels: usize, subpixel_bytes: usize, width: usize, height: usize) -> Self {
915        let bytes_per_pixel = channels * subpixel_bytes;
916        let unpadded_bytes_per_row = width * bytes_per_pixel;
917        let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as usize;
918        let padded_bytes_per_row_padding = (align - unpadded_bytes_per_row % align) % align;
919        let padded_bytes_per_row = unpadded_bytes_per_row + padded_bytes_per_row_padding;
920        Self {
921            width,
922            height,
923            unpadded_bytes_per_row,
924            padded_bytes_per_row,
925        }
926    }
927}
928
929/// A buffer that is being mapped.
930///
931/// This implements `Future<Output = Result<Vec<u8>, wgpu::BufferAsyncError>>`.
932pub struct MappedBuffer<'a> {
933    waker: Arc<Mutex<Option<std::task::Waker>>>,
934    result: Arc<Mutex<Option<Result<(), wgpu::BufferAsyncError>>>>,
935    dimensions: BufferDimensions,
936    buffer_slice: wgpu::BufferSlice<'a>,
937}
938
939impl std::future::Future for MappedBuffer<'_> {
940    type Output = Result<Vec<u8>, wgpu::BufferAsyncError>;
941
942    fn poll(
943        self: core::pin::Pin<&mut Self>,
944        cx: &mut core::task::Context<'_>,
945    ) -> core::task::Poll<Self::Output> {
946        let this = self.deref();
947        if let Some(result) = this.result.lock().unwrap().take() {
948            std::task::Poll::Ready(result.map(|()| {
949                let padded_buffer = this.buffer_slice.get_mapped_range();
950                let mut unpadded_buffer = vec![];
951                // from the padded_buffer we write just the unpadded bytes into the
952                // unpadded_buffer
953                for chunk in padded_buffer.chunks(self.dimensions.padded_bytes_per_row) {
954                    unpadded_buffer
955                        .extend_from_slice(&chunk[..self.dimensions.unpadded_bytes_per_row]);
956                }
957                unpadded_buffer
958            }))
959        } else {
960            let waker = cx.waker().clone();
961            *this.waker.lock().unwrap() = Some(waker);
962            std::task::Poll::Pending
963        }
964    }
965}
966
967/// Helper for retrieving a rendered frame.
968pub struct CopiedTextureBuffer {
969    pub format: wgpu::TextureFormat,
970    pub dimensions: BufferDimensions,
971    pub buffer: wgpu::Buffer,
972}
973
974impl CopiedTextureBuffer {
975    /// Return a mapped buffer that can be `await`ed for data from the GPU.
976    fn get_mapped_buffer(&self) -> MappedBuffer<'_> {
977        let buffer_slice = self.buffer.slice(..);
978        let waker: Arc<Mutex<Option<std::task::Waker>>> = Default::default();
979        let result = Arc::new(Mutex::new(None));
980        buffer_slice.map_async(wgpu::MapMode::Read, {
981            let waker = waker.clone();
982            let result = result.clone();
983            move |res| {
984                let mut result = result.lock().unwrap();
985                *result = Some(res);
986                if let Some(waker) = waker.lock().unwrap().take() {
987                    waker.wake();
988                }
989            }
990        });
991        MappedBuffer {
992            result,
993            waker,
994            buffer_slice,
995            dimensions: self.dimensions,
996        }
997    }
998
999    /// Access the raw unpadded pixels of the buffer.
1000    ///
1001    /// This calls `wgpu::Device::poll`.
1002    pub async fn pixels(&self, device: &wgpu::Device) -> Result<Vec<u8>> {
1003        let buffer = self.get_mapped_buffer();
1004        device.poll(wgpu::PollType::Wait).context(PollSnafu)?;
1005        buffer.await.context(BufferAsyncSnafu)
1006    }
1007
1008    /// Convert the post render buffer into an RgbaImage.
1009    pub async fn convert_to_rgba(self) -> Result<image::RgbaImage, TextureError> {
1010        let fut_buffer = self.get_mapped_buffer();
1011        let pixels = fut_buffer.await.context(BufferAsyncSnafu)?;
1012        let mut img_buffer: image::ImageBuffer<image::Rgba<u8>, Vec<u8>> =
1013            image::ImageBuffer::from_raw(
1014                self.dimensions.width as u32,
1015                self.dimensions.height as u32,
1016                pixels,
1017            )
1018            .context(CouldNotConvertImageBufferSnafu)?;
1019        if self.format.is_srgb() {
1020            log::trace!("applying linear transfer to srgb pixels");
1021            // Convert back to linear
1022            img_buffer.pixels_mut().for_each(|p| {
1023                crate::color::linear_xfer_u8(&mut p.0[0]);
1024                crate::color::linear_xfer_u8(&mut p.0[1]);
1025                crate::color::linear_xfer_u8(&mut p.0[2]);
1026                crate::color::linear_xfer_u8(&mut p.0[3]);
1027            });
1028        }
1029        Ok(image::DynamicImage::ImageRgba8(img_buffer).to_rgba8())
1030    }
1031
1032    /// Convert the post render buffer into an image.
1033    ///
1034    /// `Sp` is the sub-pixel type. eg, `u8` or `f32`
1035    ///
1036    /// `P` is the pixel type. eg, `Rgba<u8>` or `Rgba<f32>`
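    ///
    /// A sketch decoding a copy of an `Rgba32Float` texture (`rgba32f_texture` is a
    /// hypothetical `wgpu::Texture` created with `COPY_SRC` usage):
    ///
    /// ```ignore
    /// let copied = CopiedTextureBuffer::new(&runtime, &rgba32f_texture)?;
    /// let img = copied.into_image::<f32, image::Rgba<f32>>(&runtime.device).await?;
    /// ```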
1037    pub async fn into_image<Sp, P>(
1038        self,
1039        device: &wgpu::Device,
1040    ) -> Result<image::DynamicImage, TextureError>
1041    where
1042        Sp: bytemuck::AnyBitPattern,
1043        P: image::Pixel<Subpixel = Sp>,
1044        image::DynamicImage: From<image::ImageBuffer<P, Vec<Sp>>>,
1045    {
1046        let pixels = self.pixels(device).await?;
1047        let coerced_pixels: &[Sp] = bytemuck::cast_slice(&pixels);
1048        let img_buffer: image::ImageBuffer<P, Vec<Sp>> = image::ImageBuffer::from_raw(
1049            self.dimensions.width as u32,
1050            self.dimensions.height as u32,
1051            coerced_pixels.to_vec(),
1052        )
1053        .context(CouldNotConvertImageBufferSnafu)?;
1054        Ok(image::DynamicImage::from(img_buffer))
1055    }
1056
1057    /// Convert the post render buffer into an internal-format [`AtlasImage`].
1058    pub async fn into_atlas_image(self, device: &wgpu::Device) -> Result<AtlasImage, TextureError> {
1059        let pixels = self.pixels(device).await?;
1060        let img = AtlasImage {
1061            pixels,
1062            size: UVec2::new(self.dimensions.width as u32, self.dimensions.height as u32),
1063            format: AtlasImageFormat::from_wgpu_texture_format(self.format).context(
1064                UnsupportedFormatSnafu {
1065                    format: self.format,
1066                },
1067            )?,
1068            apply_linear_transfer: false,
1069        };
1070        Ok(img)
1071    }
1072
1073    /// Convert the post render buffer into an RgbaImage.
1074    ///
1075    /// Ensures that the pixels are in the given color space by applying the
1076    /// correct transfer function if needed.
1077    ///
1078    /// Assumes the texture is in `Rgba8` format.
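    ///
    /// For example, to get sRGB-encoded pixels regardless of the source format
    /// (a sketch):
    ///
    /// ```ignore
    /// let img = copied.into_rgba(&runtime.device, false).await?;
    /// img.save("frame.png")?;
    /// ```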
1079    pub async fn into_rgba(
1080        self,
1081        device: &wgpu::Device,
1082        // `true` - the resulting image will be in a linear color space
1083        // `false` - the resulting image will be in an sRGB color space
1084        linear: bool,
1085    ) -> Result<image::RgbaImage, TextureError> {
1086        let format = self.format;
1087        let mut img_buffer = self
1088            .into_image::<u8, image::Rgba<u8>>(device)
1089            .await?
1090            .into_rgba8();
1091        let linear_xfer = format.is_srgb() && linear;
1092        let opto_xfer = !format.is_srgb() && !linear;
1093        let should_xfer = linear_xfer || opto_xfer;
1094
1095        if should_xfer {
1096            let f = if linear_xfer {
1097                log::trace!(
1098                    "converting by applying linear transfer fn to srgb pixels (sRGB -> linear)"
1099                );
1100                crate::color::linear_xfer_u8
1101            } else {
1102                log::trace!(
1103                    "converting by applying opto transfer fn to linear pixels (linear -> sRGB)"
1104                );
1105                crate::color::opto_xfer_u8
1106            };
1107            // Apply the chosen transfer function to each channel
1108            img_buffer.pixels_mut().for_each(|p| {
1109                f(&mut p.0[0]);
1110                f(&mut p.0[1]);
1111                f(&mut p.0[2]);
1112                f(&mut p.0[3]);
1113            });
1114        }
1115
1116        Ok(img_buffer)
1117    }
1118
1119    /// Convert the post render buffer into an RgbaImage.
1120    ///
1121    /// Ensures that the pixels are in a linear color space by applying the
1122    /// linear transfer if the texture this buffer was copied from was sRGB.
1123    pub async fn into_linear_rgba(
1124        self,
1125        device: &wgpu::Device,
1126    ) -> Result<image::RgbaImage, TextureError> {
1127        let format = self.format;
1128        let mut img_buffer = self
1129            .into_image::<u8, image::Rgba<u8>>(device)
1130            .await?
1131            .into_rgba8();
1132        if format.is_srgb() {
1133            log::trace!(
1134                "converting by applying linear transfer fn to srgb pixels (sRGB -> linear)"
1135            );
1136            // Convert back to linear
1137            img_buffer.pixels_mut().for_each(|p| {
1138                crate::color::linear_xfer_u8(&mut p.0[0]);
1139                crate::color::linear_xfer_u8(&mut p.0[1]);
1140                crate::color::linear_xfer_u8(&mut p.0[2]);
1141                crate::color::linear_xfer_u8(&mut p.0[3]);
1142            });
1143        }
1144
1145        Ok(img_buffer)
1146    }
1147
1148    /// Convert the post render buffer into an RgbaImage.
1149    ///
1150    /// Ensures that the pixels are in an sRGB color space by applying the
1151    /// opto transfer function if the texture this buffer was copied from was linear.
1152    pub async fn into_srgba(self, device: &wgpu::Device) -> Result<image::RgbaImage, TextureError> {
1153        let format = self.format;
1154        let mut img_buffer = self
1155            .into_image::<u8, image::Rgba<u8>>(device)
1156            .await?
1157            .into_rgba8();
1158        if !format.is_srgb() {
1159            log::warn!("converting by applying opto transfer fn to linear pixels (linear -> sRGB)");
1160            // Convert to sRGB
1161            img_buffer.pixels_mut().for_each(|p| {
1162                crate::color::opto_xfer_u8(&mut p.0[0]);
1163                crate::color::opto_xfer_u8(&mut p.0[1]);
1164                crate::color::opto_xfer_u8(&mut p.0[2]);
1165                crate::color::opto_xfer_u8(&mut p.0[3]);
1166            });
1167        }
1168
1169        Ok(img_buffer)
1170    }
1171
1172    /// Read the texture from the GPU.
1173    ///
1174    /// To read the texture you must provide the width, height, the number of
1175    /// color/alpha channels and the number of bytes in the underlying
1176    /// subpixel type (usually u8=1, u16=2 or f32=4).
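    ///
    /// A sketch reading mip level 2 of a 512x512 `Rgba8UnormSrgb` texture created
    /// with `COPY_SRC` usage:
    ///
    /// ```ignore
    /// let copied = CopiedTextureBuffer::read_from(
    ///     &runtime,
    ///     &texture,
    ///     128, // width of mip 2 (512 >> 2)
    ///     128, // height of mip 2
    ///     4,   // color channels (RGBA)
    ///     1,   // bytes per subpixel (u8)
    ///     2,   // mip level
    ///     None,
    /// );
    /// ```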
1177    #[allow(clippy::too_many_arguments)]
1178    pub fn read_from(
1179        runtime: impl AsRef<WgpuRuntime>,
1180        texture: &wgpu::Texture,
1181        width: usize,
1182        height: usize,
1183        channels: usize,
1184        subpixel_bytes: usize,
1185        mip_level: u32,
1186        origin: Option<wgpu::Origin3d>,
1187    ) -> CopiedTextureBuffer {
1188        let runtime = runtime.as_ref();
1189        let device = &runtime.device;
1190        let queue = &runtime.queue;
1191        let dimensions = BufferDimensions::new(channels, subpixel_bytes, width, height);
1192        // The output buffer lets us read the texture data back as an array
1193        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
1194            label: Some("Texture::read buffer"),
1195            size: (dimensions.padded_bytes_per_row * dimensions.height) as u64,
1196            usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
1197            mapped_at_creation: false,
1198        });
1199        let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
1200            label: Some("post render screen capture encoder"),
1201        });
1202        let mut source = texture.as_image_copy();
1203        source.mip_level = mip_level;
1204        if let Some(origin) = origin {
1205            source.origin = origin;
1206        }
1207        // Copy the data from the texture to the buffer
1208        encoder.copy_texture_to_buffer(
1209            source,
1210            wgpu::TexelCopyBufferInfo {
1211                buffer: &buffer,
1212                layout: wgpu::TexelCopyBufferLayout {
1213                    offset: 0,
1214                    bytes_per_row: Some(dimensions.padded_bytes_per_row as u32),
1215                    rows_per_image: None,
1216                },
1217            },
1218            wgpu::Extent3d {
1219                width: dimensions.width as u32,
1220                height: dimensions.height as u32,
1221                depth_or_array_layers: 1,
1222            },
1223        );
1224        queue.submit(std::iter::once(encoder.finish()));
1225
1226        CopiedTextureBuffer {
1227            dimensions,
1228            buffer,
1229            format: texture.format(),
1230        }
1231    }
1232
1233    /// Copy the entire texture into a buffer, at mip `0`.
1234    ///
1235    /// Attempts to figure out the parameters to [`CopiedTextureBuffer::read_from`].
1236    pub fn new(runtime: impl AsRef<WgpuRuntime>, texture: &wgpu::Texture) -> Result<Self> {
1237        let (channels, subpixel_bytes) =
1238            wgpu_texture_format_channels_and_subpixel_bytes(texture.format())?;
1239        Ok(Self::read_from(
1240            runtime,
1241            texture,
1242            texture.width() as usize,
1243            texture.height() as usize,
1244            channels as usize,
1245            subpixel_bytes as usize,
1246            0,
1247            None,
1248        ))
1249    }
1250}
1251
1252#[cfg(test)]
1253mod test {
1254    use crate::{context::Context, test::BlockOnFuture, texture::CopiedTextureBuffer};
1255
1256    use super::Texture;
1257
1258    #[test]
1259    fn generate_mipmaps() {
1260        let r = Context::headless(10, 10).block();
1261        let img = image::open("../../img/sandstone.png").unwrap();
1262        let width = img.width();
1263        let height = img.height();
1264        let mip_level_count = 5;
1265        let mut texture = Texture::from_dynamic_image(
1266            &r,
1267            img,
1268            Some("sandstone"),
1269            Some(
1270                wgpu::TextureUsages::COPY_SRC
1271                    | wgpu::TextureUsages::TEXTURE_BINDING
1272                    | wgpu::TextureUsages::COPY_DST,
1273            ),
1274            1,
1275        );
1276        let mips = texture.generate_mips(&r, None, mip_level_count);
1277
1278        let (channels, subpixel_bytes) =
1279            super::wgpu_texture_format_channels_and_subpixel_bytes_todo(texture.texture.format());
1280        for (level, mip) in mips.into_iter().enumerate() {
1281            let mip_level = level + 1;
1282            let mip_width = width >> mip_level;
1283            let mip_height = height >> mip_level;
1284            // save out the mips
1285            let copied_buffer = CopiedTextureBuffer::read_from(
1286                &r,
1287                &mip.texture,
1288                mip_width as usize,
1289                mip_height as usize,
1290                channels as usize,
1291                subpixel_bytes as usize,
1292                0,
1293                None,
1294            );
1295            let pixels = copied_buffer.pixels(r.get_device()).block().unwrap();
1296            assert_eq!((mip_width * mip_height * 4) as usize, pixels.len());
1297            let img: image::RgbaImage =
1298                image::ImageBuffer::from_vec(mip_width, mip_height, pixels).unwrap();
1299            let img = image::DynamicImage::from(img);
1300            let img = img.to_rgba8();
1301            img_diff::assert_img_eq(&format!("texture/sandstone_mip{mip_level}.png"), img);
1302        }
1303    }
1304}