author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 14:29:10 +0000
commit     2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
tree       b80bf8bf13c3766139fbacc530efd0dd9d54394c /third_party/rust/gfx-backend-vulkan
parent     Initial commit. (diff)
Adding upstream version 86.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/gfx-backend-vulkan')
-rw-r--r--  third_party/rust/gfx-backend-vulkan/.cargo-checksum.json |    1
-rw-r--r--  third_party/rust/gfx-backend-vulkan/Cargo.toml           |   39
-rw-r--r--  third_party/rust/gfx-backend-vulkan/README.md            |   13
-rw-r--r--  third_party/rust/gfx-backend-vulkan/src/command.rs       | 1145
-rw-r--r--  third_party/rust/gfx-backend-vulkan/src/conv.rs          |  670
-rw-r--r--  third_party/rust/gfx-backend-vulkan/src/device.rs        | 2325
-rw-r--r--  third_party/rust/gfx-backend-vulkan/src/info.rs          |    5
-rw-r--r--  third_party/rust/gfx-backend-vulkan/src/lib.rs           | 1575
-rw-r--r--  third_party/rust/gfx-backend-vulkan/src/native.rs        |  178
-rw-r--r--  third_party/rust/gfx-backend-vulkan/src/pool.rs          |   60
-rw-r--r--  third_party/rust/gfx-backend-vulkan/src/window.rs        |  584
11 files changed, 6595 insertions(+), 0 deletions(-)
diff --git a/third_party/rust/gfx-backend-vulkan/.cargo-checksum.json b/third_party/rust/gfx-backend-vulkan/.cargo-checksum.json
new file mode 100644
index 0000000000..7445a57fae
--- /dev/null
+++ b/third_party/rust/gfx-backend-vulkan/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"6ea39aa0d638db071fdac520b928d0dac0e477c52f1d4769f29827b7b04ee251","README.md":"8cc42e022567870c58a53ff1cb6f94e961482e789fe5e22f9960408a43cf8405","src/command.rs":"0d2a662565827218b58218196d558c7b69dcc55dcd3f6f2a19505d70a0aaae40","src/conv.rs":"c3848c33771fdaad12d6376376887f669e9f6348d49d3188fda3b5e4166ca124","src/device.rs":"61a80ea7e4d7b9415d29166441197ed62c40cedee28774fa6030d2c71a3861bd","src/info.rs":"4a21b54f85ff73c538ca2f57f4d371eb862b5a28f126cd0ecafd37fc6dfd1318","src/lib.rs":"7e0e76813aca8df75d7ef8d9318949aead57c9e296f84717df16d36ce983c67f","src/native.rs":"38cd1c65eda7bf44b9f13a5bc24945bee0dd36651a023298a6b4dd9ff06d5bd0","src/pool.rs":"8420db4bf73faa144b94cb7867b0d01973bf4d7ac3d1ccb8ac952fd18500074a","src/window.rs":"9565dbc4d45f7257e22bee823dee7a9b139c5eae782eda0c6568a970aa79cef5"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/gfx-backend-vulkan/Cargo.toml b/third_party/rust/gfx-backend-vulkan/Cargo.toml
new file mode 100644
index 0000000000..38020fa28a
--- /dev/null
+++ b/third_party/rust/gfx-backend-vulkan/Cargo.toml
@@ -0,0 +1,39 @@
+[package]
+name = "gfx-backend-vulkan"
+version = "0.6.5"
+description = "Vulkan API backend for gfx-rs"
+homepage = "https://github.com/gfx-rs/gfx"
+repository = "https://github.com/gfx-rs/gfx"
+keywords = ["graphics", "gamedev"]
+license = "MIT OR Apache-2.0"
+authors = ["The Gfx-rs Developers"]
+readme = "README.md"
+documentation = "https://docs.rs/gfx-backend-vulkan"
+workspace = "../../.."
+edition = "2018"
+
+[features]
+default = []
+use-rtld-next = ["shared_library"]
+
+[lib]
+name = "gfx_backend_vulkan"
+
+[dependencies]
+arrayvec = "0.5"
+byteorder = "1"
+log = { version = "0.4" }
+lazy_static = "1"
+shared_library = { version = "0.1.9", optional = true }
+ash = "0.31"
+hal = { path = "../../hal", version = "0.6", package = "gfx-hal" }
+smallvec = "1.0"
+raw-window-handle = "0.3"
+inplace_it = "0.3.2"
+
+[target.'cfg(windows)'.dependencies]
+winapi = { version = "0.3", features = ["libloaderapi", "windef", "winuser"] }
+[target.'cfg(target_os = "macos")'.dependencies]
+objc = "0.2.5"
+core-graphics-types = "0.1"
+
diff --git a/third_party/rust/gfx-backend-vulkan/README.md b/third_party/rust/gfx-backend-vulkan/README.md
new file mode 100644
index 0000000000..0e8420ecb8
--- /dev/null
+++ b/third_party/rust/gfx-backend-vulkan/README.md
@@ -0,0 +1,13 @@
+# gfx-backend-vulkan
+
+[Vulkan](https://www.khronos.org/vulkan/) backend for gfx-rs.
+
+## Normalized Coordinates
+
+Render | Depth | Texture
+-------|-------|--------
+![render_coordinates](../../../info/vk_render_coordinates.png) | ![depth_coordinates](../../../info/dx_depth_coordinates.png) | ![texture_coordinates](../../../info/dx_texture_coordinates.png)
+
+## Mirroring
+
+HAL is modelled after Vulkan, so everything should be 1:1.
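+
+As a small illustration, here is a minimal sketch of the kind of 1:1
+translation this backend performs (illustrative only, not the crate's actual
+code; the real, exhaustive mapping is `map_image_layout` in `src/conv.rs`):
+
+```rust
+use ash::vk;
+use hal::image::Layout;
+
+// Every HAL image layout has exactly one Vulkan counterpart,
+// so the conversion is a plain one-to-one match.
+fn to_vk(layout: Layout) -> vk::ImageLayout {
+    match layout {
+        Layout::General => vk::ImageLayout::GENERAL,
+        Layout::Present => vk::ImageLayout::PRESENT_SRC_KHR,
+        // ...remaining variants elided in this sketch.
+        _ => vk::ImageLayout::UNDEFINED,
+    }
+}
+```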
diff --git a/third_party/rust/gfx-backend-vulkan/src/command.rs b/third_party/rust/gfx-backend-vulkan/src/command.rs
new file mode 100644
index 0000000000..18fdd0e606
--- /dev/null
+++ b/third_party/rust/gfx-backend-vulkan/src/command.rs
@@ -0,0 +1,1145 @@
+use ash::{version::DeviceV1_0, vk};
+use smallvec::SmallVec;
+use std::{borrow::Borrow, ffi::CString, mem, ops::Range, slice, sync::Arc};
+
+use inplace_it::inplace_or_alloc_array;
+
+use crate::{conv, native as n, Backend, DebugMessenger, RawDevice};
+use hal::{
+ buffer, command as com,
+ format::Aspects,
+ image::{Filter, Layout, SubresourceRange},
+ memory, pso, query, DrawCount, IndexCount, IndexType, InstanceCount, TaskCount, VertexCount,
+ VertexOffset, WorkGroupCount,
+};
+
+#[derive(Debug)]
+pub struct CommandBuffer {
+ pub raw: vk::CommandBuffer,
+ pub device: Arc<RawDevice>,
+}
+
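+// Unpacks a packed u32 debug color (assumed 0xRRGGBBAA byte order) into
+// normalized [r, g, b, a] floats for debug-label calls.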
+fn debug_color(color: u32) -> [f32; 4] {
+ let mut result = [0.0; 4];
+ for (i, c) in result.iter_mut().enumerate() {
+ *c = ((color >> (24 - i * 8)) & 0xFF) as f32 / 255.0;
+ }
+ result
+}
+
+fn map_subpass_contents(contents: com::SubpassContents) -> vk::SubpassContents {
+ match contents {
+ com::SubpassContents::Inline => vk::SubpassContents::INLINE,
+ com::SubpassContents::SecondaryBuffers => vk::SubpassContents::SECONDARY_COMMAND_BUFFERS,
+ }
+}
+
+fn map_buffer_image_regions<T>(
+ _image: &n::Image,
+ regions: T,
+) -> impl ExactSizeIterator<Item = vk::BufferImageCopy>
+where
+ T: IntoIterator,
+ T::Item: Borrow<com::BufferImageCopy>,
+ T::IntoIter: ExactSizeIterator,
+{
+ regions.into_iter().map(|region| {
+ let r = region.borrow();
+ let image_subresource = conv::map_subresource_layers(&r.image_layers);
+ vk::BufferImageCopy {
+ buffer_offset: r.buffer_offset,
+ buffer_row_length: r.buffer_width,
+ buffer_image_height: r.buffer_height,
+ image_subresource,
+ image_offset: conv::map_offset(r.image_offset),
+ image_extent: conv::map_extent(r.image_extent),
+ }
+ })
+}
+
+struct BarrierSet {
+ global: SmallVec<[vk::MemoryBarrier; 4]>,
+ buffer: SmallVec<[vk::BufferMemoryBarrier; 4]>,
+ image: SmallVec<[vk::ImageMemoryBarrier; 4]>,
+}
+
+fn destructure_barriers<'a, T>(barriers: T) -> BarrierSet
+where
+ T: IntoIterator,
+ T::Item: Borrow<memory::Barrier<'a, Backend>>,
+{
+ let mut global: SmallVec<[vk::MemoryBarrier; 4]> = SmallVec::new();
+ let mut buffer: SmallVec<[vk::BufferMemoryBarrier; 4]> = SmallVec::new();
+ let mut image: SmallVec<[vk::ImageMemoryBarrier; 4]> = SmallVec::new();
+
+ for barrier in barriers {
+ match *barrier.borrow() {
+ memory::Barrier::AllBuffers(ref access) => {
+ global.push(
+ vk::MemoryBarrier::builder()
+ .src_access_mask(conv::map_buffer_access(access.start))
+ .dst_access_mask(conv::map_buffer_access(access.end))
+ .build(),
+ );
+ }
+ memory::Barrier::AllImages(ref access) => {
+ global.push(
+ vk::MemoryBarrier::builder()
+ .src_access_mask(conv::map_image_access(access.start))
+ .dst_access_mask(conv::map_image_access(access.end))
+ .build(),
+ );
+ }
+ memory::Barrier::Buffer {
+ ref states,
+ target,
+ ref range,
+ ref families,
+ } => {
+ let families = match families {
+ Some(f) => f.start.0 as u32..f.end.0 as u32,
+ None => vk::QUEUE_FAMILY_IGNORED..vk::QUEUE_FAMILY_IGNORED,
+ };
+ buffer.push(
+ vk::BufferMemoryBarrier::builder()
+ .src_access_mask(conv::map_buffer_access(states.start))
+ .dst_access_mask(conv::map_buffer_access(states.end))
+ .src_queue_family_index(families.start)
+ .dst_queue_family_index(families.end)
+ .buffer(target.raw)
+ .offset(range.offset)
+ .size(range.size.unwrap_or(vk::WHOLE_SIZE))
+ .build(),
+ );
+ }
+ memory::Barrier::Image {
+ ref states,
+ target,
+ ref range,
+ ref families,
+ } => {
+ let subresource_range = conv::map_subresource_range(range);
+ let families = match families {
+ Some(f) => f.start.0 as u32..f.end.0 as u32,
+ None => vk::QUEUE_FAMILY_IGNORED..vk::QUEUE_FAMILY_IGNORED,
+ };
+ image.push(
+ vk::ImageMemoryBarrier::builder()
+ .src_access_mask(conv::map_image_access(states.start.0))
+ .dst_access_mask(conv::map_image_access(states.end.0))
+ .old_layout(conv::map_image_layout(states.start.1))
+ .new_layout(conv::map_image_layout(states.end.1))
+ .src_queue_family_index(families.start)
+ .dst_queue_family_index(families.end)
+ .image(target.raw)
+ .subresource_range(subresource_range)
+ .build(),
+ );
+ }
+ }
+ }
+
+ BarrierSet {
+ global,
+ buffer,
+ image,
+ }
+}
+
+impl CommandBuffer {
+ fn bind_descriptor_sets<I, J>(
+ &mut self,
+ bind_point: vk::PipelineBindPoint,
+ layout: &n::PipelineLayout,
+ first_set: usize,
+ sets: I,
+ offsets: J,
+ ) where
+ I: IntoIterator,
+ I::Item: Borrow<n::DescriptorSet>,
+ I::IntoIter: ExactSizeIterator,
+ J: IntoIterator,
+ J::Item: Borrow<com::DescriptorSetOffset>,
+ J::IntoIter: ExactSizeIterator,
+ {
+ let sets = sets.into_iter().map(|set| set.borrow().raw);
+ let dynamic_offsets = offsets.into_iter().map(|offset| *offset.borrow());
+
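+ // inplace_or_alloc_array (from the inplace_it crate) builds the slice in a
+ // stack buffer for small lengths and only falls back to a heap allocation
+ // for large ones, so no Vec is needed in the common case.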
+ inplace_or_alloc_array(sets.len(), |uninit_guard| {
+ let sets = uninit_guard.init_with_iter(sets);
+ inplace_or_alloc_array(dynamic_offsets.len(), |uninit_guard| {
+ let dynamic_offsets = uninit_guard.init_with_iter(dynamic_offsets);
+
+ unsafe {
+ self.device.raw.cmd_bind_descriptor_sets(
+ self.raw,
+ bind_point,
+ layout.raw,
+ first_set as u32,
+ &sets,
+ &dynamic_offsets,
+ );
+ }
+ });
+ });
+ }
+}
+
+impl com::CommandBuffer<Backend> for CommandBuffer {
+ unsafe fn begin(
+ &mut self,
+ flags: com::CommandBufferFlags,
+ info: com::CommandBufferInheritanceInfo<Backend>,
+ ) {
+ let inheritance_info = vk::CommandBufferInheritanceInfo::builder()
+ .render_pass(
+ info.subpass
+ .map_or(vk::RenderPass::null(), |subpass| subpass.main_pass.raw),
+ )
+ .subpass(info.subpass.map_or(0, |subpass| subpass.index as u32))
+ .framebuffer(
+ info.framebuffer
+ .map_or(vk::Framebuffer::null(), |buffer| buffer.raw),
+ )
+ .occlusion_query_enable(info.occlusion_query_enable)
+ .query_flags(conv::map_query_control_flags(info.occlusion_query_flags))
+ .pipeline_statistics(conv::map_pipeline_statistics(info.pipeline_statistics));
+
+ let info = vk::CommandBufferBeginInfo::builder()
+ .flags(conv::map_command_buffer_flags(flags))
+ .inheritance_info(&inheritance_info);
+
+ assert_eq!(
+ Ok(()),
+ self.device.raw.begin_command_buffer(self.raw, &info)
+ );
+ }
+
+ unsafe fn finish(&mut self) {
+ assert_eq!(Ok(()), self.device.raw.end_command_buffer(self.raw));
+ }
+
+ unsafe fn reset(&mut self, release_resources: bool) {
+ let flags = if release_resources {
+ vk::CommandBufferResetFlags::RELEASE_RESOURCES
+ } else {
+ vk::CommandBufferResetFlags::empty()
+ };
+
+ assert_eq!(
+ Ok(()),
+ self.device.raw.reset_command_buffer(self.raw, flags)
+ );
+ }
+
+ unsafe fn begin_render_pass<T>(
+ &mut self,
+ render_pass: &n::RenderPass,
+ frame_buffer: &n::Framebuffer,
+ render_area: pso::Rect,
+ clear_values: T,
+ first_subpass: com::SubpassContents,
+ ) where
+ T: IntoIterator,
+ T::Item: Borrow<com::ClearValue>,
+ T::IntoIter: ExactSizeIterator,
+ {
+ let render_area = conv::map_rect(&render_area);
+
+ // Vulkan wants one clear value per attachment (even those that don't need clears),
+ // but can receive fewer clear values than the total number of attachments.
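+ // 64 - leading_zeros is the index of the highest set bit plus one, i.e. how
+ // many attachment slots need a (possibly zeroed) clear value.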
+ let clear_value_count = 64 - render_pass.clear_attachments_mask.leading_zeros() as u32;
+ let mut clear_value_iter = clear_values.into_iter();
+ let raw_clear_values = (0..clear_value_count).map(|i| {
+ if render_pass.clear_attachments_mask & (1 << i) != 0 {
+ // Vulkan and HAL share the same memory layout
+ let next = clear_value_iter.next().unwrap();
+ mem::transmute(*next.borrow())
+ } else {
+ mem::zeroed()
+ }
+ });
+
+ inplace_or_alloc_array(raw_clear_values.len(), |uninit_guard| {
+ let raw_clear_values = uninit_guard.init_with_iter(raw_clear_values);
+
+ let info = vk::RenderPassBeginInfo::builder()
+ .render_pass(render_pass.raw)
+ .framebuffer(frame_buffer.raw)
+ .render_area(render_area)
+ .clear_values(&raw_clear_values);
+
+ let contents = map_subpass_contents(first_subpass);
+ self.device
+ .raw
+ .cmd_begin_render_pass(self.raw, &info, contents);
+ });
+ }
+
+ unsafe fn next_subpass(&mut self, contents: com::SubpassContents) {
+ let contents = map_subpass_contents(contents);
+ self.device.raw.cmd_next_subpass(self.raw, contents);
+ }
+
+ unsafe fn end_render_pass(&mut self) {
+ self.device.raw.cmd_end_render_pass(self.raw);
+ }
+
+ unsafe fn pipeline_barrier<'a, T>(
+ &mut self,
+ stages: Range<pso::PipelineStage>,
+ dependencies: memory::Dependencies,
+ barriers: T,
+ ) where
+ T: IntoIterator,
+ T::Item: Borrow<memory::Barrier<'a, Backend>>,
+ {
+ let BarrierSet {
+ global,
+ buffer,
+ image,
+ } = destructure_barriers(barriers);
+
+ self.device.raw.cmd_pipeline_barrier(
+ self.raw, // commandBuffer
+ conv::map_pipeline_stage(stages.start),
+ conv::map_pipeline_stage(stages.end),
+ mem::transmute(dependencies),
+ &global,
+ &buffer,
+ &image,
+ );
+ }
+
+ unsafe fn fill_buffer(&mut self, buffer: &n::Buffer, range: buffer::SubRange, data: u32) {
+ self.device.raw.cmd_fill_buffer(
+ self.raw,
+ buffer.raw,
+ range.offset,
+ range.size.unwrap_or(vk::WHOLE_SIZE),
+ data,
+ );
+ }
+
+ unsafe fn update_buffer(&mut self, buffer: &n::Buffer, offset: buffer::Offset, data: &[u8]) {
+ self.device
+ .raw
+ .cmd_update_buffer(self.raw, buffer.raw, offset, data);
+ }
+
+ unsafe fn clear_image<T>(
+ &mut self,
+ image: &n::Image,
+ layout: Layout,
+ value: com::ClearValue,
+ subresource_ranges: T,
+ ) where
+ T: IntoIterator,
+ T::Item: Borrow<SubresourceRange>,
+ {
+ let mut color_ranges = Vec::new();
+ let mut ds_ranges = Vec::new();
+
+ for subresource_range in subresource_ranges {
+ let sub = subresource_range.borrow();
+ let aspect_ds = sub.aspects & (Aspects::DEPTH | Aspects::STENCIL);
+ let vk_range = conv::map_subresource_range(sub);
+ if sub.aspects.contains(Aspects::COLOR) {
+ color_ranges.push(vk::ImageSubresourceRange {
+ aspect_mask: conv::map_image_aspects(Aspects::COLOR),
+ ..vk_range
+ });
+ }
+ if !aspect_ds.is_empty() {
+ ds_ranges.push(vk::ImageSubresourceRange {
+ aspect_mask: conv::map_image_aspects(aspect_ds),
+ ..vk_range
+ });
+ }
+ }
+
+ // Vulkan and HAL share the same memory layout
+ let color_value = mem::transmute(value.color);
+ let depth_stencil_value = vk::ClearDepthStencilValue {
+ depth: value.depth_stencil.depth,
+ stencil: value.depth_stencil.stencil,
+ };
+
+ if !color_ranges.is_empty() {
+ self.device.raw.cmd_clear_color_image(
+ self.raw,
+ image.raw,
+ conv::map_image_layout(layout),
+ &color_value,
+ &color_ranges,
+ )
+ }
+ if !ds_ranges.is_empty() {
+ self.device.raw.cmd_clear_depth_stencil_image(
+ self.raw,
+ image.raw,
+ conv::map_image_layout(layout),
+ &depth_stencil_value,
+ &ds_ranges,
+ )
+ }
+ }
+
+ unsafe fn clear_attachments<T, U>(&mut self, clears: T, rects: U)
+ where
+ T: IntoIterator,
+ T::Item: Borrow<com::AttachmentClear>,
+ T::IntoIter: ExactSizeIterator,
+ U: IntoIterator,
+ U::Item: Borrow<pso::ClearRect>,
+ U::IntoIter: ExactSizeIterator,
+ {
+ let clears = clears.into_iter().map(|clear| match *clear.borrow() {
+ com::AttachmentClear::Color { index, value } => vk::ClearAttachment {
+ aspect_mask: vk::ImageAspectFlags::COLOR,
+ color_attachment: index as _,
+ clear_value: vk::ClearValue {
+ color: mem::transmute(value),
+ },
+ },
+ com::AttachmentClear::DepthStencil { depth, stencil } => vk::ClearAttachment {
+ aspect_mask: if depth.is_some() {
+ vk::ImageAspectFlags::DEPTH
+ } else {
+ vk::ImageAspectFlags::empty()
+ } | if stencil.is_some() {
+ vk::ImageAspectFlags::STENCIL
+ } else {
+ vk::ImageAspectFlags::empty()
+ },
+ color_attachment: 0,
+ clear_value: vk::ClearValue {
+ depth_stencil: vk::ClearDepthStencilValue {
+ depth: depth.unwrap_or_default(),
+ stencil: stencil.unwrap_or_default(),
+ },
+ },
+ },
+ });
+
+ let rects = rects
+ .into_iter()
+ .map(|rect| conv::map_clear_rect(rect.borrow()));
+
+ inplace_or_alloc_array(clears.len(), |uninit_guard| {
+ let clears = uninit_guard.init_with_iter(clears);
+
+ inplace_or_alloc_array(rects.len(), |uninit_guard| {
+ let rects = uninit_guard.init_with_iter(rects);
+
+ self.device
+ .raw
+ .cmd_clear_attachments(self.raw, &clears, &rects);
+ });
+ });
+ }
+
+ unsafe fn resolve_image<T>(
+ &mut self,
+ src: &n::Image,
+ src_layout: Layout,
+ dst: &n::Image,
+ dst_layout: Layout,
+ regions: T,
+ ) where
+ T: IntoIterator,
+ T::Item: Borrow<com::ImageResolve>,
+ T::IntoIter: ExactSizeIterator,
+ {
+ let regions = regions.into_iter().map(|region| {
+ let r = region.borrow();
+ vk::ImageResolve {
+ src_subresource: conv::map_subresource_layers(&r.src_subresource),
+ src_offset: conv::map_offset(r.src_offset),
+ dst_subresource: conv::map_subresource_layers(&r.dst_subresource),
+ dst_offset: conv::map_offset(r.dst_offset),
+ extent: conv::map_extent(r.extent),
+ }
+ });
+
+ inplace_or_alloc_array(regions.len(), |uninit_guard| {
+ let regions = uninit_guard.init_with_iter(regions);
+
+ self.device.raw.cmd_resolve_image(
+ self.raw,
+ src.raw,
+ conv::map_image_layout(src_layout),
+ dst.raw,
+ conv::map_image_layout(dst_layout),
+ &regions,
+ );
+ });
+ }
+
+ unsafe fn blit_image<T>(
+ &mut self,
+ src: &n::Image,
+ src_layout: Layout,
+ dst: &n::Image,
+ dst_layout: Layout,
+ filter: Filter,
+ regions: T,
+ ) where
+ T: IntoIterator,
+ T::Item: Borrow<com::ImageBlit>,
+ T::IntoIter: ExactSizeIterator,
+ {
+ let regions = regions.into_iter().map(|region| {
+ let r = region.borrow();
+ vk::ImageBlit {
+ src_subresource: conv::map_subresource_layers(&r.src_subresource),
+ src_offsets: [
+ conv::map_offset(r.src_bounds.start),
+ conv::map_offset(r.src_bounds.end),
+ ],
+ dst_subresource: conv::map_subresource_layers(&r.dst_subresource),
+ dst_offsets: [
+ conv::map_offset(r.dst_bounds.start),
+ conv::map_offset(r.dst_bounds.end),
+ ],
+ }
+ });
+
+ inplace_or_alloc_array(regions.len(), |uninit_guard| {
+ let regions = uninit_guard.init_with_iter(regions);
+
+ self.device.raw.cmd_blit_image(
+ self.raw,
+ src.raw,
+ conv::map_image_layout(src_layout),
+ dst.raw,
+ conv::map_image_layout(dst_layout),
+ &regions,
+ conv::map_filter(filter),
+ );
+ });
+ }
+
+ unsafe fn bind_index_buffer(
+ &mut self,
+ buffer: &n::Buffer,
+ sub: buffer::SubRange,
+ ty: IndexType,
+ ) {
+ self.device.raw.cmd_bind_index_buffer(
+ self.raw,
+ buffer.raw,
+ sub.offset,
+ conv::map_index_type(ty),
+ );
+ }
+
+ unsafe fn bind_vertex_buffers<I, T>(&mut self, first_binding: pso::BufferIndex, buffers: I)
+ where
+ I: IntoIterator<Item = (T, buffer::SubRange)>,
+ T: Borrow<n::Buffer>,
+ {
+ let (buffers, offsets): (SmallVec<[vk::Buffer; 16]>, SmallVec<[vk::DeviceSize; 16]>) =
+ buffers
+ .into_iter()
+ .map(|(buffer, sub)| (buffer.borrow().raw, sub.offset))
+ .unzip();
+
+ self.device
+ .raw
+ .cmd_bind_vertex_buffers(self.raw, first_binding, &buffers, &offsets);
+ }
+
+ unsafe fn set_viewports<T>(&mut self, first_viewport: u32, viewports: T)
+ where
+ T: IntoIterator,
+ T::Item: Borrow<pso::Viewport>,
+ T::IntoIter: ExactSizeIterator,
+ {
+ let viewports = viewports
+ .into_iter()
+ .map(|viewport| self.device.map_viewport(viewport.borrow()));
+
+ inplace_or_alloc_array(viewports.len(), |uninit_guard| {
+ let viewports = uninit_guard.init_with_iter(viewports);
+ self.device
+ .raw
+ .cmd_set_viewport(self.raw, first_viewport, &viewports);
+ });
+ }
+
+ unsafe fn set_scissors<T>(&mut self, first_scissor: u32, scissors: T)
+ where
+ T: IntoIterator,
+ T::Item: Borrow<pso::Rect>,
+ T::IntoIter: ExactSizeIterator,
+ {
+ let scissors = scissors
+ .into_iter()
+ .map(|scissor| conv::map_rect(scissor.borrow()));
+
+ inplace_or_alloc_array(scissors.len(), |uninit_guard| {
+ let scissors = uninit_guard.init_with_iter(scissors);
+
+ self.device
+ .raw
+ .cmd_set_scissor(self.raw, first_scissor, &scissors);
+ });
+ }
+
+ unsafe fn set_stencil_reference(&mut self, faces: pso::Face, value: pso::StencilValue) {
+ // Vulkan and HAL share the same face bit flags
+ self.device
+ .raw
+ .cmd_set_stencil_reference(self.raw, mem::transmute(faces), value);
+ }
+
+ unsafe fn set_stencil_read_mask(&mut self, faces: pso::Face, value: pso::StencilValue) {
+ // Vulkan and HAL share the same face bit flags
+ self.device
+ .raw
+ .cmd_set_stencil_compare_mask(self.raw, mem::transmute(faces), value);
+ }
+
+ unsafe fn set_stencil_write_mask(&mut self, faces: pso::Face, value: pso::StencilValue) {
+ // Vulkan and HAL share the same face bit flags
+ self.device
+ .raw
+ .cmd_set_stencil_write_mask(self.raw, mem::transmute(faces), value);
+ }
+
+ unsafe fn set_blend_constants(&mut self, color: pso::ColorValue) {
+ self.device.raw.cmd_set_blend_constants(self.raw, &color);
+ }
+
+ unsafe fn set_depth_bounds(&mut self, bounds: Range<f32>) {
+ self.device
+ .raw
+ .cmd_set_depth_bounds(self.raw, bounds.start, bounds.end);
+ }
+
+ unsafe fn set_line_width(&mut self, width: f32) {
+ self.device.raw.cmd_set_line_width(self.raw, width);
+ }
+
+ unsafe fn set_depth_bias(&mut self, depth_bias: pso::DepthBias) {
+ self.device.raw.cmd_set_depth_bias(
+ self.raw,
+ depth_bias.const_factor,
+ depth_bias.clamp,
+ depth_bias.slope_factor,
+ );
+ }
+
+ unsafe fn bind_graphics_pipeline(&mut self, pipeline: &n::GraphicsPipeline) {
+ self.device
+ .raw
+ .cmd_bind_pipeline(self.raw, vk::PipelineBindPoint::GRAPHICS, pipeline.0)
+ }
+
+ unsafe fn bind_graphics_descriptor_sets<I, J>(
+ &mut self,
+ layout: &n::PipelineLayout,
+ first_set: usize,
+ sets: I,
+ offsets: J,
+ ) where
+ I: IntoIterator,
+ I::Item: Borrow<n::DescriptorSet>,
+ I::IntoIter: ExactSizeIterator,
+ J: IntoIterator,
+ J::Item: Borrow<com::DescriptorSetOffset>,
+ J::IntoIter: ExactSizeIterator,
+ {
+ self.bind_descriptor_sets(
+ vk::PipelineBindPoint::GRAPHICS,
+ layout,
+ first_set,
+ sets,
+ offsets,
+ );
+ }
+
+ unsafe fn bind_compute_pipeline(&mut self, pipeline: &n::ComputePipeline) {
+ self.device
+ .raw
+ .cmd_bind_pipeline(self.raw, vk::PipelineBindPoint::COMPUTE, pipeline.0)
+ }
+
+ unsafe fn bind_compute_descriptor_sets<I, J>(
+ &mut self,
+ layout: &n::PipelineLayout,
+ first_set: usize,
+ sets: I,
+ offsets: J,
+ ) where
+ I: IntoIterator,
+ I::Item: Borrow<n::DescriptorSet>,
+ I::IntoIter: ExactSizeIterator,
+ J: IntoIterator,
+ J::Item: Borrow<com::DescriptorSetOffset>,
+ J::IntoIter: ExactSizeIterator,
+ {
+ self.bind_descriptor_sets(
+ vk::PipelineBindPoint::COMPUTE,
+ layout,
+ first_set,
+ sets,
+ offsets,
+ );
+ }
+
+ unsafe fn dispatch(&mut self, count: WorkGroupCount) {
+ self.device
+ .raw
+ .cmd_dispatch(self.raw, count[0], count[1], count[2])
+ }
+
+ unsafe fn dispatch_indirect(&mut self, buffer: &n::Buffer, offset: buffer::Offset) {
+ self.device
+ .raw
+ .cmd_dispatch_indirect(self.raw, buffer.raw, offset)
+ }
+
+ unsafe fn copy_buffer<T>(&mut self, src: &n::Buffer, dst: &n::Buffer, regions: T)
+ where
+ T: IntoIterator,
+ T::Item: Borrow<com::BufferCopy>,
+ T::IntoIter: ExactSizeIterator,
+ {
+ let regions = regions.into_iter().map(|region| {
+ let region = region.borrow();
+ vk::BufferCopy {
+ src_offset: region.src,
+ dst_offset: region.dst,
+ size: region.size,
+ }
+ });
+
+ inplace_or_alloc_array(regions.len(), |uninit_guard| {
+ let regions = uninit_guard.init_with_iter(regions);
+ self.device
+ .raw
+ .cmd_copy_buffer(self.raw, src.raw, dst.raw, &regions)
+ })
+ }
+
+ unsafe fn copy_image<T>(
+ &mut self,
+ src: &n::Image,
+ src_layout: Layout,
+ dst: &n::Image,
+ dst_layout: Layout,
+ regions: T,
+ ) where
+ T: IntoIterator,
+ T::Item: Borrow<com::ImageCopy>,
+ T::IntoIter: ExactSizeIterator,
+ {
+ let regions = regions.into_iter().map(|region| {
+ let r = region.borrow();
+ vk::ImageCopy {
+ src_subresource: conv::map_subresource_layers(&r.src_subresource),
+ src_offset: conv::map_offset(r.src_offset),
+ dst_subresource: conv::map_subresource_layers(&r.dst_subresource),
+ dst_offset: conv::map_offset(r.dst_offset),
+ extent: conv::map_extent(r.extent),
+ }
+ });
+
+ inplace_or_alloc_array(regions.len(), |uninit_guard| {
+ let regions = uninit_guard.init_with_iter(regions);
+ self.device.raw.cmd_copy_image(
+ self.raw,
+ src.raw,
+ conv::map_image_layout(src_layout),
+ dst.raw,
+ conv::map_image_layout(dst_layout),
+ &regions,
+ );
+ });
+ }
+
+ unsafe fn copy_buffer_to_image<T>(
+ &mut self,
+ src: &n::Buffer,
+ dst: &n::Image,
+ dst_layout: Layout,
+ regions: T,
+ ) where
+ T: IntoIterator,
+ T::Item: Borrow<com::BufferImageCopy>,
+ T::IntoIter: ExactSizeIterator,
+ {
+ let regions_iter = map_buffer_image_regions(dst, regions);
+
+ inplace_or_alloc_array(regions_iter.len(), |uninit_guard| {
+ let regions = uninit_guard.init_with_iter(regions_iter);
+
+ self.device.raw.cmd_copy_buffer_to_image(
+ self.raw,
+ src.raw,
+ dst.raw,
+ conv::map_image_layout(dst_layout),
+ &regions,
+ );
+ });
+ }
+
+ unsafe fn copy_image_to_buffer<T>(
+ &mut self,
+ src: &n::Image,
+ src_layout: Layout,
+ dst: &n::Buffer,
+ regions: T,
+ ) where
+ T: IntoIterator,
+ T::Item: Borrow<com::BufferImageCopy>,
+ T::IntoIter: ExactSizeIterator,
+ {
+ let regions_iter = map_buffer_image_regions(src, regions);
+
+ inplace_or_alloc_array(regions_iter.len(), |uninit_guard| {
+ let regions = uninit_guard.init_with_iter(regions_iter);
+
+ self.device.raw.cmd_copy_image_to_buffer(
+ self.raw,
+ src.raw,
+ conv::map_image_layout(src_layout),
+ dst.raw,
+ &regions,
+ );
+ });
+ }
+
+ unsafe fn draw(&mut self, vertices: Range<VertexCount>, instances: Range<InstanceCount>) {
+ self.device.raw.cmd_draw(
+ self.raw,
+ vertices.end - vertices.start,
+ instances.end - instances.start,
+ vertices.start,
+ instances.start,
+ )
+ }
+
+ unsafe fn draw_indexed(
+ &mut self,
+ indices: Range<IndexCount>,
+ base_vertex: VertexOffset,
+ instances: Range<InstanceCount>,
+ ) {
+ self.device.raw.cmd_draw_indexed(
+ self.raw,
+ indices.end - indices.start,
+ instances.end - instances.start,
+ indices.start,
+ base_vertex,
+ instances.start,
+ )
+ }
+
+ unsafe fn draw_indirect(
+ &mut self,
+ buffer: &n::Buffer,
+ offset: buffer::Offset,
+ draw_count: DrawCount,
+ stride: u32,
+ ) {
+ self.device
+ .raw
+ .cmd_draw_indirect(self.raw, buffer.raw, offset, draw_count, stride)
+ }
+
+ unsafe fn draw_indexed_indirect(
+ &mut self,
+ buffer: &n::Buffer,
+ offset: buffer::Offset,
+ draw_count: DrawCount,
+ stride: u32,
+ ) {
+ self.device
+ .raw
+ .cmd_draw_indexed_indirect(self.raw, buffer.raw, offset, draw_count, stride)
+ }
+
+ unsafe fn draw_mesh_tasks(&mut self, task_count: TaskCount, first_task: TaskCount) {
+ self.device
+ .extension_fns
+ .mesh_shaders
+ .as_ref()
+ .expect("Draw command not supported. You must request feature MESH_SHADER.")
+ .cmd_draw_mesh_tasks(self.raw, task_count, first_task);
+ }
+
+ unsafe fn draw_mesh_tasks_indirect(
+ &mut self,
+ buffer: &n::Buffer,
+ offset: buffer::Offset,
+ draw_count: hal::DrawCount,
+ stride: u32,
+ ) {
+ self.device
+ .extension_fns
+ .mesh_shaders
+ .as_ref()
+ .expect("Draw command not supported. You must request feature MESH_SHADER.")
+ .cmd_draw_mesh_tasks_indirect(self.raw, buffer.raw, offset, draw_count, stride);
+ }
+
+ unsafe fn draw_mesh_tasks_indirect_count(
+ &mut self,
+ buffer: &n::Buffer,
+ offset: buffer::Offset,
+ count_buffer: &n::Buffer,
+ count_buffer_offset: buffer::Offset,
+ max_draw_count: DrawCount,
+ stride: u32,
+ ) {
+ self.device
+ .extension_fns
+ .mesh_shaders
+ .as_ref()
+ .expect("Draw command not supported. You must request feature MESH_SHADER.")
+ .cmd_draw_mesh_tasks_indirect_count(
+ self.raw,
+ buffer.raw,
+ offset,
+ count_buffer.raw,
+ count_buffer_offset,
+ max_draw_count,
+ stride,
+ );
+ }
+
+ unsafe fn draw_indirect_count(
+ &mut self,
+ buffer: &n::Buffer,
+ offset: buffer::Offset,
+ count_buffer: &n::Buffer,
+ count_buffer_offset: buffer::Offset,
+ max_draw_count: DrawCount,
+ stride: u32,
+ ) {
+ self.device
+ .extension_fns
+ .draw_indirect_count
+ .as_ref()
+ .expect("Feature DRAW_INDIRECT_COUNT must be enabled to call draw_indirect_count")
+ .cmd_draw_indirect_count(
+ self.raw,
+ buffer.raw,
+ offset,
+ count_buffer.raw,
+ count_buffer_offset,
+ max_draw_count,
+ stride,
+ );
+ }
+
+ unsafe fn draw_indexed_indirect_count(
+ &mut self,
+ buffer: &n::Buffer,
+ offset: buffer::Offset,
+ count_buffer: &n::Buffer,
+ count_buffer_offset: buffer::Offset,
+ max_draw_count: DrawCount,
+ stride: u32,
+ ) {
+ self.device
+ .extension_fns
+ .draw_indirect_count
+ .as_ref()
+ .expect(
+ "Feature DRAW_INDIRECT_COUNT must be enabled to call draw_indexed_indirect_count",
+ )
+ .cmd_draw_indexed_indirect_count(
+ self.raw,
+ buffer.raw,
+ offset,
+ count_buffer.raw,
+ count_buffer_offset,
+ max_draw_count,
+ stride,
+ );
+ }
+
+ unsafe fn set_event(&mut self, event: &n::Event, stage_mask: pso::PipelineStage) {
+ self.device.raw.cmd_set_event(
+ self.raw,
+ event.0,
+ vk::PipelineStageFlags::from_raw(stage_mask.bits()),
+ )
+ }
+
+ unsafe fn reset_event(&mut self, event: &n::Event, stage_mask: pso::PipelineStage) {
+ self.device.raw.cmd_reset_event(
+ self.raw,
+ event.0,
+ vk::PipelineStageFlags::from_raw(stage_mask.bits()),
+ )
+ }
+
+ unsafe fn wait_events<'a, I, J>(
+ &mut self,
+ events: I,
+ stages: Range<pso::PipelineStage>,
+ barriers: J,
+ ) where
+ I: IntoIterator,
+ I::Item: Borrow<n::Event>,
+ I::IntoIter: ExactSizeIterator,
+ J: IntoIterator,
+ J::Item: Borrow<memory::Barrier<'a, Backend>>,
+ {
+ let events = events.into_iter().map(|e| e.borrow().0);
+
+ let BarrierSet {
+ global,
+ buffer,
+ image,
+ } = destructure_barriers(barriers);
+
+ inplace_or_alloc_array(events.len(), |uninit_guard| {
+ let events = uninit_guard.init_with_iter(events);
+
+ self.device.raw.cmd_wait_events(
+ self.raw,
+ &events,
+ vk::PipelineStageFlags::from_raw(stages.start.bits()),
+ vk::PipelineStageFlags::from_raw(stages.end.bits()),
+ &global,
+ &buffer,
+ &image,
+ )
+ })
+ }
+
+ unsafe fn begin_query(&mut self, query: query::Query<Backend>, flags: query::ControlFlags) {
+ self.device.raw.cmd_begin_query(
+ self.raw,
+ query.pool.0,
+ query.id,
+ conv::map_query_control_flags(flags),
+ )
+ }
+
+ unsafe fn end_query(&mut self, query: query::Query<Backend>) {
+ self.device
+ .raw
+ .cmd_end_query(self.raw, query.pool.0, query.id)
+ }
+
+ unsafe fn reset_query_pool(&mut self, pool: &n::QueryPool, queries: Range<query::Id>) {
+ self.device.raw.cmd_reset_query_pool(
+ self.raw,
+ pool.0,
+ queries.start,
+ queries.end - queries.start,
+ )
+ }
+
+ unsafe fn copy_query_pool_results(
+ &mut self,
+ pool: &n::QueryPool,
+ queries: Range<query::Id>,
+ buffer: &n::Buffer,
+ offset: buffer::Offset,
+ stride: buffer::Offset,
+ flags: query::ResultFlags,
+ ) {
+ //TODO: use safer wrapper
+ self.device.raw.fp_v1_0().cmd_copy_query_pool_results(
+ self.raw,
+ pool.0,
+ queries.start,
+ queries.end - queries.start,
+ buffer.raw,
+ offset,
+ stride,
+ conv::map_query_result_flags(flags),
+ );
+ }
+
+ unsafe fn write_timestamp(&mut self, stage: pso::PipelineStage, query: query::Query<Backend>) {
+ self.device.raw.cmd_write_timestamp(
+ self.raw,
+ conv::map_pipeline_stage(stage),
+ query.pool.0,
+ query.id,
+ )
+ }
+
+ unsafe fn push_compute_constants(
+ &mut self,
+ layout: &n::PipelineLayout,
+ offset: u32,
+ constants: &[u32],
+ ) {
+ self.device.raw.cmd_push_constants(
+ self.raw,
+ layout.raw,
+ vk::ShaderStageFlags::COMPUTE,
+ offset,
+ slice::from_raw_parts(constants.as_ptr() as _, constants.len() * 4),
+ );
+ }
+
+ unsafe fn push_graphics_constants(
+ &mut self,
+ layout: &n::PipelineLayout,
+ stages: pso::ShaderStageFlags,
+ offset: u32,
+ constants: &[u32],
+ ) {
+ self.device.raw.cmd_push_constants(
+ self.raw,
+ layout.raw,
+ conv::map_stage_flags(stages),
+ offset,
+ slice::from_raw_parts(constants.as_ptr() as _, constants.len() * 4),
+ );
+ }
+
+ unsafe fn execute_commands<'a, T, I>(&mut self, buffers: I)
+ where
+ T: 'a + Borrow<CommandBuffer>,
+ I: IntoIterator<Item = &'a T>,
+ I::IntoIter: ExactSizeIterator,
+ {
+ let command_buffers = buffers.into_iter().map(|b| b.borrow().raw);
+
+ inplace_or_alloc_array(command_buffers.len(), |uninit_guard| {
+ let command_buffers = uninit_guard.init_with_iter(command_buffers);
+ self.device
+ .raw
+ .cmd_execute_commands(self.raw, &command_buffers);
+ });
+ }
+
+ unsafe fn insert_debug_marker(&mut self, name: &str, color: u32) {
+ if let Some(&DebugMessenger::Utils(ref ext, _)) = self.device.debug_messenger() {
+ let cstr = CString::new(name).unwrap();
+ let label = vk::DebugUtilsLabelEXT::builder()
+ .label_name(&cstr)
+ .color(debug_color(color))
+ .build();
+ ext.cmd_insert_debug_utils_label(self.raw, &label);
+ }
+ }
+ unsafe fn begin_debug_marker(&mut self, name: &str, color: u32) {
+ if let Some(&DebugMessenger::Utils(ref ext, _)) = self.device.debug_messenger() {
+ let cstr = CString::new(name).unwrap();
+ let label = vk::DebugUtilsLabelEXT::builder()
+ .label_name(&cstr)
+ .color(debug_color(color))
+ .build();
+ ext.cmd_begin_debug_utils_label(self.raw, &label);
+ }
+ }
+ unsafe fn end_debug_marker(&mut self) {
+ if let Some(&DebugMessenger::Utils(ref ext, _)) = self.device.debug_messenger() {
+ ext.cmd_end_debug_utils_label(self.raw);
+ }
+ }
+}
diff --git a/third_party/rust/gfx-backend-vulkan/src/conv.rs b/third_party/rust/gfx-backend-vulkan/src/conv.rs
new file mode 100644
index 0000000000..342b18738a
--- /dev/null
+++ b/third_party/rust/gfx-backend-vulkan/src/conv.rs
@@ -0,0 +1,670 @@
+use crate::native as n;
+
+use ash::vk;
+
+use hal::{
+ buffer, command, format, image,
+ memory::Segment,
+ pass, pso, query,
+ window::{CompositeAlphaMode, PresentMode},
+ Features, IndexType,
+};
+
+use smallvec::SmallVec;
+
+use std::{borrow::Borrow, mem};
+
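+// Many HAL enums and bitflags are declared with Vulkan's numeric values,
+// which is what makes the from_raw casts and transmutes in this module sound.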
+pub fn map_format(format: format::Format) -> vk::Format {
+ vk::Format::from_raw(format as i32)
+}
+
+pub fn map_vk_format(vk_format: vk::Format) -> Option<format::Format> {
+ if (vk_format.as_raw() as usize) < format::NUM_FORMATS && vk_format != vk::Format::UNDEFINED {
+ Some(unsafe { mem::transmute(vk_format) })
+ } else {
+ None
+ }
+}
+
+pub fn map_tiling(tiling: image::Tiling) -> vk::ImageTiling {
+ vk::ImageTiling::from_raw(tiling as i32)
+}
+
+pub fn map_component(component: format::Component) -> vk::ComponentSwizzle {
+ use hal::format::Component::*;
+ match component {
+ Zero => vk::ComponentSwizzle::ZERO,
+ One => vk::ComponentSwizzle::ONE,
+ R => vk::ComponentSwizzle::R,
+ G => vk::ComponentSwizzle::G,
+ B => vk::ComponentSwizzle::B,
+ A => vk::ComponentSwizzle::A,
+ }
+}
+
+pub fn map_swizzle(swizzle: format::Swizzle) -> vk::ComponentMapping {
+ vk::ComponentMapping {
+ r: map_component(swizzle.0),
+ g: map_component(swizzle.1),
+ b: map_component(swizzle.2),
+ a: map_component(swizzle.3),
+ }
+}
+
+pub fn map_index_type(index_type: IndexType) -> vk::IndexType {
+ match index_type {
+ IndexType::U16 => vk::IndexType::UINT16,
+ IndexType::U32 => vk::IndexType::UINT32,
+ }
+}
+
+pub fn map_image_layout(layout: image::Layout) -> vk::ImageLayout {
+ use hal::image::Layout as Il;
+ match layout {
+ Il::General => vk::ImageLayout::GENERAL,
+ Il::ColorAttachmentOptimal => vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL,
+ Il::DepthStencilAttachmentOptimal => vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
+ Il::DepthStencilReadOnlyOptimal => vk::ImageLayout::DEPTH_STENCIL_READ_ONLY_OPTIMAL,
+ Il::ShaderReadOnlyOptimal => vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
+ Il::TransferSrcOptimal => vk::ImageLayout::TRANSFER_SRC_OPTIMAL,
+ Il::TransferDstOptimal => vk::ImageLayout::TRANSFER_DST_OPTIMAL,
+ Il::Undefined => vk::ImageLayout::UNDEFINED,
+ Il::Preinitialized => vk::ImageLayout::PREINITIALIZED,
+ Il::Present => vk::ImageLayout::PRESENT_SRC_KHR,
+ }
+}
+
+pub fn map_image_aspects(aspects: format::Aspects) -> vk::ImageAspectFlags {
+ vk::ImageAspectFlags::from_raw(aspects.bits() as u32)
+}
+
+pub fn map_offset(offset: image::Offset) -> vk::Offset3D {
+ vk::Offset3D {
+ x: offset.x,
+ y: offset.y,
+ z: offset.z,
+ }
+}
+
+pub fn map_extent(offset: image::Extent) -> vk::Extent3D {
+ vk::Extent3D {
+ width: offset.width,
+ height: offset.height,
+ depth: offset.depth,
+ }
+}
+
+pub fn map_subresource(sub: &image::Subresource) -> vk::ImageSubresource {
+ vk::ImageSubresource {
+ aspect_mask: map_image_aspects(sub.aspects),
+ mip_level: sub.level as _,
+ array_layer: sub.layer as _,
+ }
+}
+
+pub fn map_subresource_layers(sub: &image::SubresourceLayers) -> vk::ImageSubresourceLayers {
+ vk::ImageSubresourceLayers {
+ aspect_mask: map_image_aspects(sub.aspects),
+ mip_level: sub.level as _,
+ base_array_layer: sub.layers.start as _,
+ layer_count: (sub.layers.end - sub.layers.start) as _,
+ }
+}
+
+pub fn map_subresource_range(range: &image::SubresourceRange) -> vk::ImageSubresourceRange {
+ vk::ImageSubresourceRange {
+ aspect_mask: map_image_aspects(range.aspects),
+ base_mip_level: range.level_start as _,
+ level_count: range
+ .level_count
+ .map_or(vk::REMAINING_MIP_LEVELS, |c| c as _),
+ base_array_layer: range.layer_start as _,
+ layer_count: range
+ .layer_count
+ .map_or(vk::REMAINING_ARRAY_LAYERS, |c| c as _),
+ }
+}
+
+pub fn map_attachment_load_op(op: pass::AttachmentLoadOp) -> vk::AttachmentLoadOp {
+ use hal::pass::AttachmentLoadOp as Alo;
+ match op {
+ Alo::Load => vk::AttachmentLoadOp::LOAD,
+ Alo::Clear => vk::AttachmentLoadOp::CLEAR,
+ Alo::DontCare => vk::AttachmentLoadOp::DONT_CARE,
+ }
+}
+
+pub fn map_attachment_store_op(op: pass::AttachmentStoreOp) -> vk::AttachmentStoreOp {
+ use hal::pass::AttachmentStoreOp as Aso;
+ match op {
+ Aso::Store => vk::AttachmentStoreOp::STORE,
+ Aso::DontCare => vk::AttachmentStoreOp::DONT_CARE,
+ }
+}
+
+pub fn map_buffer_access(access: buffer::Access) -> vk::AccessFlags {
+ vk::AccessFlags::from_raw(access.bits())
+}
+
+pub fn map_image_access(access: image::Access) -> vk::AccessFlags {
+ vk::AccessFlags::from_raw(access.bits())
+}
+
+pub fn map_pipeline_stage(stage: pso::PipelineStage) -> vk::PipelineStageFlags {
+ vk::PipelineStageFlags::from_raw(stage.bits())
+}
+
+pub fn map_buffer_usage(usage: buffer::Usage) -> vk::BufferUsageFlags {
+ vk::BufferUsageFlags::from_raw(usage.bits())
+}
+
+pub fn map_image_usage(usage: image::Usage) -> vk::ImageUsageFlags {
+ vk::ImageUsageFlags::from_raw(usage.bits())
+}
+
+pub fn map_vk_image_usage(usage: vk::ImageUsageFlags) -> image::Usage {
+ image::Usage::from_bits_truncate(usage.as_raw())
+}
+
+pub fn map_descriptor_type(ty: pso::DescriptorType) -> vk::DescriptorType {
+ match ty {
+ pso::DescriptorType::Sampler => vk::DescriptorType::SAMPLER,
+ pso::DescriptorType::Image { ty } => match ty {
+ pso::ImageDescriptorType::Sampled { with_sampler } => match with_sampler {
+ true => vk::DescriptorType::COMBINED_IMAGE_SAMPLER,
+ false => vk::DescriptorType::SAMPLED_IMAGE,
+ },
+ pso::ImageDescriptorType::Storage { .. } => vk::DescriptorType::STORAGE_IMAGE,
+ },
+ pso::DescriptorType::Buffer { ty, format } => match ty {
+ pso::BufferDescriptorType::Storage { .. } => match format {
+ pso::BufferDescriptorFormat::Structured { dynamic_offset } => {
+ match dynamic_offset {
+ true => vk::DescriptorType::STORAGE_BUFFER_DYNAMIC,
+ false => vk::DescriptorType::STORAGE_BUFFER,
+ }
+ }
+ pso::BufferDescriptorFormat::Texel => vk::DescriptorType::STORAGE_TEXEL_BUFFER,
+ },
+ pso::BufferDescriptorType::Uniform => match format {
+ pso::BufferDescriptorFormat::Structured { dynamic_offset } => {
+ match dynamic_offset {
+ true => vk::DescriptorType::UNIFORM_BUFFER_DYNAMIC,
+ false => vk::DescriptorType::UNIFORM_BUFFER,
+ }
+ }
+ pso::BufferDescriptorFormat::Texel => vk::DescriptorType::UNIFORM_TEXEL_BUFFER,
+ },
+ },
+ pso::DescriptorType::InputAttachment => vk::DescriptorType::INPUT_ATTACHMENT,
+ }
+}
+
+pub fn map_stage_flags(stages: pso::ShaderStageFlags) -> vk::ShaderStageFlags {
+ vk::ShaderStageFlags::from_raw(stages.bits())
+}
+
+pub fn map_filter(filter: image::Filter) -> vk::Filter {
+ vk::Filter::from_raw(filter as i32)
+}
+
+pub fn map_mip_filter(filter: image::Filter) -> vk::SamplerMipmapMode {
+ vk::SamplerMipmapMode::from_raw(filter as i32)
+}
+
+pub fn map_wrap(wrap: image::WrapMode) -> vk::SamplerAddressMode {
+ use hal::image::WrapMode as Wm;
+ match wrap {
+ Wm::Tile => vk::SamplerAddressMode::REPEAT,
+ Wm::Mirror => vk::SamplerAddressMode::MIRRORED_REPEAT,
+ Wm::Clamp => vk::SamplerAddressMode::CLAMP_TO_EDGE,
+ Wm::Border => vk::SamplerAddressMode::CLAMP_TO_BORDER,
+ Wm::MirrorClamp => vk::SamplerAddressMode::MIRROR_CLAMP_TO_EDGE,
+ }
+}
+
+pub fn map_border_color(border_color: image::BorderColor) -> vk::BorderColor {
+ match border_color {
+ image::BorderColor::TransparentBlack => vk::BorderColor::FLOAT_TRANSPARENT_BLACK,
+ image::BorderColor::OpaqueBlack => vk::BorderColor::FLOAT_OPAQUE_BLACK,
+ image::BorderColor::OpaqueWhite => vk::BorderColor::FLOAT_OPAQUE_WHITE,
+ }
+}
+
+pub fn map_topology(ia: &pso::InputAssemblerDesc) -> vk::PrimitiveTopology {
+ match (ia.primitive, ia.with_adjacency) {
+ (pso::Primitive::PointList, false) => vk::PrimitiveTopology::POINT_LIST,
+ (pso::Primitive::PointList, true) => panic!("Points can't have adjacency info"),
+ (pso::Primitive::LineList, false) => vk::PrimitiveTopology::LINE_LIST,
+ (pso::Primitive::LineList, true) => vk::PrimitiveTopology::LINE_LIST_WITH_ADJACENCY,
+ (pso::Primitive::LineStrip, false) => vk::PrimitiveTopology::LINE_STRIP,
+ (pso::Primitive::LineStrip, true) => vk::PrimitiveTopology::LINE_STRIP_WITH_ADJACENCY,
+ (pso::Primitive::TriangleList, false) => vk::PrimitiveTopology::TRIANGLE_LIST,
+ (pso::Primitive::TriangleList, true) => vk::PrimitiveTopology::TRIANGLE_LIST_WITH_ADJACENCY,
+ (pso::Primitive::TriangleStrip, false) => vk::PrimitiveTopology::TRIANGLE_STRIP,
+ (pso::Primitive::TriangleStrip, true) => {
+ vk::PrimitiveTopology::TRIANGLE_STRIP_WITH_ADJACENCY
+ }
+ (pso::Primitive::PatchList(_), false) => vk::PrimitiveTopology::PATCH_LIST,
+ (pso::Primitive::PatchList(_), true) => panic!("Patches can't have adjacency info"),
+ }
+}
+
+pub fn map_cull_face(cf: pso::Face) -> vk::CullModeFlags {
+ match cf {
+ pso::Face::NONE => vk::CullModeFlags::NONE,
+ pso::Face::FRONT => vk::CullModeFlags::FRONT,
+ pso::Face::BACK => vk::CullModeFlags::BACK,
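+ // pso::Face is a bitflag, so the only remaining combination is FRONT | BACK.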
+ _ => vk::CullModeFlags::FRONT_AND_BACK,
+ }
+}
+
+pub fn map_front_face(ff: pso::FrontFace) -> vk::FrontFace {
+ match ff {
+ pso::FrontFace::Clockwise => vk::FrontFace::CLOCKWISE,
+ pso::FrontFace::CounterClockwise => vk::FrontFace::COUNTER_CLOCKWISE,
+ }
+}
+
+pub fn map_comparison(fun: pso::Comparison) -> vk::CompareOp {
+ use hal::pso::Comparison::*;
+ match fun {
+ Never => vk::CompareOp::NEVER,
+ Less => vk::CompareOp::LESS,
+ LessEqual => vk::CompareOp::LESS_OR_EQUAL,
+ Equal => vk::CompareOp::EQUAL,
+ GreaterEqual => vk::CompareOp::GREATER_OR_EQUAL,
+ Greater => vk::CompareOp::GREATER,
+ NotEqual => vk::CompareOp::NOT_EQUAL,
+ Always => vk::CompareOp::ALWAYS,
+ }
+}
+
+pub fn map_stencil_op(op: pso::StencilOp) -> vk::StencilOp {
+ use hal::pso::StencilOp::*;
+ match op {
+ Keep => vk::StencilOp::KEEP,
+ Zero => vk::StencilOp::ZERO,
+ Replace => vk::StencilOp::REPLACE,
+ IncrementClamp => vk::StencilOp::INCREMENT_AND_CLAMP,
+ IncrementWrap => vk::StencilOp::INCREMENT_AND_WRAP,
+ DecrementClamp => vk::StencilOp::DECREMENT_AND_CLAMP,
+ DecrementWrap => vk::StencilOp::DECREMENT_AND_WRAP,
+ Invert => vk::StencilOp::INVERT,
+ }
+}
+
+pub fn map_stencil_side(side: &pso::StencilFace) -> vk::StencilOpState {
+ vk::StencilOpState {
+ fail_op: map_stencil_op(side.op_fail),
+ pass_op: map_stencil_op(side.op_pass),
+ depth_fail_op: map_stencil_op(side.op_depth_fail),
+ compare_op: map_comparison(side.fun),
+ compare_mask: !0,
+ write_mask: !0,
+ reference: 0,
+ }
+}
+
+pub fn map_blend_factor(factor: pso::Factor) -> vk::BlendFactor {
+ use hal::pso::Factor::*;
+ match factor {
+ Zero => vk::BlendFactor::ZERO,
+ One => vk::BlendFactor::ONE,
+ SrcColor => vk::BlendFactor::SRC_COLOR,
+ OneMinusSrcColor => vk::BlendFactor::ONE_MINUS_SRC_COLOR,
+ DstColor => vk::BlendFactor::DST_COLOR,
+ OneMinusDstColor => vk::BlendFactor::ONE_MINUS_DST_COLOR,
+ SrcAlpha => vk::BlendFactor::SRC_ALPHA,
+ OneMinusSrcAlpha => vk::BlendFactor::ONE_MINUS_SRC_ALPHA,
+ DstAlpha => vk::BlendFactor::DST_ALPHA,
+ OneMinusDstAlpha => vk::BlendFactor::ONE_MINUS_DST_ALPHA,
+ ConstColor => vk::BlendFactor::CONSTANT_COLOR,
+ OneMinusConstColor => vk::BlendFactor::ONE_MINUS_CONSTANT_COLOR,
+ ConstAlpha => vk::BlendFactor::CONSTANT_ALPHA,
+ OneMinusConstAlpha => vk::BlendFactor::ONE_MINUS_CONSTANT_ALPHA,
+ SrcAlphaSaturate => vk::BlendFactor::SRC_ALPHA_SATURATE,
+ Src1Color => vk::BlendFactor::SRC1_COLOR,
+ OneMinusSrc1Color => vk::BlendFactor::ONE_MINUS_SRC1_COLOR,
+ Src1Alpha => vk::BlendFactor::SRC1_ALPHA,
+ OneMinusSrc1Alpha => vk::BlendFactor::ONE_MINUS_SRC1_ALPHA,
+ }
+}
+
+pub fn map_blend_op(operation: pso::BlendOp) -> (vk::BlendOp, vk::BlendFactor, vk::BlendFactor) {
+ use hal::pso::BlendOp::*;
+ match operation {
+ Add { src, dst } => (
+ vk::BlendOp::ADD,
+ map_blend_factor(src),
+ map_blend_factor(dst),
+ ),
+ Sub { src, dst } => (
+ vk::BlendOp::SUBTRACT,
+ map_blend_factor(src),
+ map_blend_factor(dst),
+ ),
+ RevSub { src, dst } => (
+ vk::BlendOp::REVERSE_SUBTRACT,
+ map_blend_factor(src),
+ map_blend_factor(dst),
+ ),
+ Min => (
+ vk::BlendOp::MIN,
+ vk::BlendFactor::ZERO,
+ vk::BlendFactor::ZERO,
+ ),
+ Max => (
+ vk::BlendOp::MAX,
+ vk::BlendFactor::ZERO,
+ vk::BlendFactor::ZERO,
+ ),
+ }
+}
+
+pub fn map_pipeline_statistics(
+ statistics: query::PipelineStatistic,
+) -> vk::QueryPipelineStatisticFlags {
+ vk::QueryPipelineStatisticFlags::from_raw(statistics.bits())
+}
+
+pub fn map_query_control_flags(flags: query::ControlFlags) -> vk::QueryControlFlags {
+ // Safe due to equivalence of HAL values and Vulkan values
+ vk::QueryControlFlags::from_raw(flags.bits() & vk::QueryControlFlags::all().as_raw())
+}
+
+pub fn map_query_result_flags(flags: query::ResultFlags) -> vk::QueryResultFlags {
+ vk::QueryResultFlags::from_raw(flags.bits() & vk::QueryResultFlags::all().as_raw())
+}
+
+pub fn map_image_features(features: vk::FormatFeatureFlags) -> format::ImageFeature {
+ format::ImageFeature::from_bits_truncate(features.as_raw())
+}
+
+pub fn map_buffer_features(features: vk::FormatFeatureFlags) -> format::BufferFeature {
+ format::BufferFeature::from_bits_truncate(features.as_raw())
+}
+
+pub(crate) fn map_device_features(features: Features) -> crate::DeviceCreationFeatures {
+ crate::DeviceCreationFeatures {
+ // vk::PhysicalDeviceFeatures is a struct composed of Bool32's while
+ // Features is a bitfield so we need to map everything manually
+ core: vk::PhysicalDeviceFeatures::builder()
+ .robust_buffer_access(features.contains(Features::ROBUST_BUFFER_ACCESS))
+ .full_draw_index_uint32(features.contains(Features::FULL_DRAW_INDEX_U32))
+ .image_cube_array(features.contains(Features::IMAGE_CUBE_ARRAY))
+ .independent_blend(features.contains(Features::INDEPENDENT_BLENDING))
+ .geometry_shader(features.contains(Features::GEOMETRY_SHADER))
+ .tessellation_shader(features.contains(Features::TESSELLATION_SHADER))
+ .sample_rate_shading(features.contains(Features::SAMPLE_RATE_SHADING))
+ .dual_src_blend(features.contains(Features::DUAL_SRC_BLENDING))
+ .logic_op(features.contains(Features::LOGIC_OP))
+ .multi_draw_indirect(features.contains(Features::MULTI_DRAW_INDIRECT))
+ .draw_indirect_first_instance(features.contains(Features::DRAW_INDIRECT_FIRST_INSTANCE))
+ .depth_clamp(features.contains(Features::DEPTH_CLAMP))
+ .depth_bias_clamp(features.contains(Features::DEPTH_BIAS_CLAMP))
+ .fill_mode_non_solid(features.contains(Features::NON_FILL_POLYGON_MODE))
+ .depth_bounds(features.contains(Features::DEPTH_BOUNDS))
+ .wide_lines(features.contains(Features::LINE_WIDTH))
+ .large_points(features.contains(Features::POINT_SIZE))
+ .alpha_to_one(features.contains(Features::ALPHA_TO_ONE))
+ .multi_viewport(features.contains(Features::MULTI_VIEWPORTS))
+ .sampler_anisotropy(features.contains(Features::SAMPLER_ANISOTROPY))
+ .texture_compression_etc2(features.contains(Features::FORMAT_ETC2))
+ .texture_compression_astc_ldr(features.contains(Features::FORMAT_ASTC_LDR))
+ .texture_compression_bc(features.contains(Features::FORMAT_BC))
+ .occlusion_query_precise(features.contains(Features::PRECISE_OCCLUSION_QUERY))
+ .pipeline_statistics_query(features.contains(Features::PIPELINE_STATISTICS_QUERY))
+ .vertex_pipeline_stores_and_atomics(
+ features.contains(Features::VERTEX_STORES_AND_ATOMICS),
+ )
+ .fragment_stores_and_atomics(features.contains(Features::FRAGMENT_STORES_AND_ATOMICS))
+ .shader_tessellation_and_geometry_point_size(
+ features.contains(Features::SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE),
+ )
+ .shader_image_gather_extended(features.contains(Features::SHADER_IMAGE_GATHER_EXTENDED))
+ .shader_storage_image_extended_formats(
+ features.contains(Features::SHADER_STORAGE_IMAGE_EXTENDED_FORMATS),
+ )
+ .shader_storage_image_multisample(
+ features.contains(Features::SHADER_STORAGE_IMAGE_MULTISAMPLE),
+ )
+ .shader_storage_image_read_without_format(
+ features.contains(Features::SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT),
+ )
+ .shader_storage_image_write_without_format(
+ features.contains(Features::SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT),
+ )
+ .shader_uniform_buffer_array_dynamic_indexing(
+ features.contains(Features::SHADER_UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING),
+ )
+ .shader_sampled_image_array_dynamic_indexing(
+ features.contains(Features::SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING),
+ )
+ .shader_storage_buffer_array_dynamic_indexing(
+ features.contains(Features::SHADER_STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING),
+ )
+ .shader_storage_image_array_dynamic_indexing(
+ features.contains(Features::SHADER_STORAGE_IMAGE_ARRAY_DYNAMIC_INDEXING),
+ )
+ .shader_clip_distance(features.contains(Features::SHADER_CLIP_DISTANCE))
+ .shader_cull_distance(features.contains(Features::SHADER_CULL_DISTANCE))
+ .shader_float64(features.contains(Features::SHADER_FLOAT64))
+ .shader_int64(features.contains(Features::SHADER_INT64))
+ .shader_int16(features.contains(Features::SHADER_INT16))
+ .shader_resource_residency(features.contains(Features::SHADER_RESOURCE_RESIDENCY))
+ .shader_resource_min_lod(features.contains(Features::SHADER_RESOURCE_MIN_LOD))
+ .sparse_binding(features.contains(Features::SPARSE_BINDING))
+ .sparse_residency_buffer(features.contains(Features::SPARSE_RESIDENCY_BUFFER))
+ .sparse_residency_image2_d(features.contains(Features::SPARSE_RESIDENCY_IMAGE_2D))
+ .sparse_residency_image3_d(features.contains(Features::SPARSE_RESIDENCY_IMAGE_3D))
+ .sparse_residency2_samples(features.contains(Features::SPARSE_RESIDENCY_2_SAMPLES))
+ .sparse_residency4_samples(features.contains(Features::SPARSE_RESIDENCY_4_SAMPLES))
+ .sparse_residency8_samples(features.contains(Features::SPARSE_RESIDENCY_8_SAMPLES))
+ .sparse_residency16_samples(features.contains(Features::SPARSE_RESIDENCY_16_SAMPLES))
+ .sparse_residency_aliased(features.contains(Features::SPARSE_RESIDENCY_ALIASED))
+ .variable_multisample_rate(features.contains(Features::VARIABLE_MULTISAMPLE_RATE))
+ .inherited_queries(features.contains(Features::INHERITED_QUERIES))
+ .build(),
+ descriptor_indexing: if features.intersects(
+ Features::SAMPLED_TEXTURE_DESCRIPTOR_INDEXING
+ | Features::STORAGE_TEXTURE_DESCRIPTOR_INDEXING
+ | Features::UNSIZED_DESCRIPTOR_ARRAY,
+ ) {
+ Some(
+ vk::PhysicalDeviceDescriptorIndexingFeaturesEXT::builder()
+ .shader_sampled_image_array_non_uniform_indexing(
+ features.contains(Features::SAMPLED_TEXTURE_DESCRIPTOR_INDEXING),
+ )
+ .shader_storage_image_array_non_uniform_indexing(
+ features.contains(Features::STORAGE_TEXTURE_DESCRIPTOR_INDEXING),
+ )
+ .runtime_descriptor_array(features.contains(Features::UNSIZED_DESCRIPTOR_ARRAY))
+ .build(),
+ )
+ } else {
+ None
+ },
+ mesh_shaders: if features.intersects(Features::TASK_SHADER | Features::MESH_SHADER) {
+ Some(
+ vk::PhysicalDeviceMeshShaderFeaturesNV::builder()
+ .task_shader(features.contains(Features::TASK_SHADER))
+ .mesh_shader(features.contains(Features::MESH_SHADER))
+ .build(),
+ )
+ } else {
+ None
+ },
+ }
+}
+
+pub fn map_memory_ranges<'a, I>(ranges: I) -> SmallVec<[vk::MappedMemoryRange; 4]>
+where
+ I: IntoIterator,
+ I::Item: Borrow<(&'a n::Memory, Segment)>,
+{
+ ranges
+ .into_iter()
+ .map(|range| {
+ let &(ref memory, ref segment) = range.borrow();
+ vk::MappedMemoryRange::builder()
+ .memory(memory.raw)
+ .offset(segment.offset)
+ .size(segment.size.unwrap_or(vk::WHOLE_SIZE))
+ .build()
+ })
+ .collect()
+}
+
+pub fn map_command_buffer_flags(flags: command::CommandBufferFlags) -> vk::CommandBufferUsageFlags {
+ // Safe due to equivalence of HAL values and Vulkan values
+ vk::CommandBufferUsageFlags::from_raw(flags.bits())
+}
+
+pub fn map_command_buffer_level(level: command::Level) -> vk::CommandBufferLevel {
+ match level {
+ command::Level::Primary => vk::CommandBufferLevel::PRIMARY,
+ command::Level::Secondary => vk::CommandBufferLevel::SECONDARY,
+ }
+}
+
+pub fn map_view_kind(
+ kind: image::ViewKind,
+ ty: vk::ImageType,
+ is_cube: bool,
+) -> Option<vk::ImageViewType> {
+ use crate::image::ViewKind::*;
+ use crate::vk::ImageType;
+
+ Some(match (ty, kind) {
+ (ImageType::TYPE_1D, D1) => vk::ImageViewType::TYPE_1D,
+ (ImageType::TYPE_1D, D1Array) => vk::ImageViewType::TYPE_1D_ARRAY,
+ (ImageType::TYPE_2D, D2) => vk::ImageViewType::TYPE_2D,
+ (ImageType::TYPE_2D, D2Array) => vk::ImageViewType::TYPE_2D_ARRAY,
+ (ImageType::TYPE_3D, D3) => vk::ImageViewType::TYPE_3D,
+ (ImageType::TYPE_2D, Cube) if is_cube => vk::ImageViewType::CUBE,
+ (ImageType::TYPE_2D, CubeArray) if is_cube => vk::ImageViewType::CUBE_ARRAY,
+ (ImageType::TYPE_3D, Cube) if is_cube => vk::ImageViewType::CUBE,
+ (ImageType::TYPE_3D, CubeArray) if is_cube => vk::ImageViewType::CUBE_ARRAY,
+ _ => return None,
+ })
+}
+
+pub fn map_rect(rect: &pso::Rect) -> vk::Rect2D {
+ vk::Rect2D {
+ offset: vk::Offset2D {
+ x: rect.x as _,
+ y: rect.y as _,
+ },
+ extent: vk::Extent2D {
+ width: rect.w as _,
+ height: rect.h as _,
+ },
+ }
+}
+
+pub fn map_clear_rect(rect: &pso::ClearRect) -> vk::ClearRect {
+ vk::ClearRect {
+ base_array_layer: rect.layers.start as _,
+ layer_count: (rect.layers.end - rect.layers.start) as _,
+ rect: map_rect(&rect.rect),
+ }
+}
+
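+// flip_y produces a negative-height (Y-up) viewport as allowed by
+// VK_KHR_maintenance1; shift_y then moves the origin down by the height so
+// the flipped viewport covers the same rectangle.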
+pub fn map_viewport(vp: &pso::Viewport, flip_y: bool, shift_y: bool) -> vk::Viewport {
+ vk::Viewport {
+ x: vp.rect.x as _,
+ y: if shift_y {
+ vp.rect.y + vp.rect.h
+ } else {
+ vp.rect.y
+ } as _,
+ width: vp.rect.w as _,
+ height: if flip_y { -vp.rect.h } else { vp.rect.h } as _,
+ min_depth: vp.depth.start,
+ max_depth: vp.depth.end,
+ }
+}
+
+pub fn map_view_capabilities(caps: image::ViewCapabilities) -> vk::ImageCreateFlags {
+ vk::ImageCreateFlags::from_raw(caps.bits())
+}
+
+pub fn map_present_mode(mode: PresentMode) -> vk::PresentModeKHR {
+ if mode == PresentMode::IMMEDIATE {
+ vk::PresentModeKHR::IMMEDIATE
+ } else if mode == PresentMode::MAILBOX {
+ vk::PresentModeKHR::MAILBOX
+ } else if mode == PresentMode::FIFO {
+ vk::PresentModeKHR::FIFO
+ } else if mode == PresentMode::RELAXED {
+ vk::PresentModeKHR::FIFO_RELAXED
+ } else {
+ panic!("Unexpected present mode {:?}", mode)
+ }
+}
+
+pub fn map_vk_present_mode(mode: vk::PresentModeKHR) -> PresentMode {
+ if mode == vk::PresentModeKHR::IMMEDIATE {
+ PresentMode::IMMEDIATE
+ } else if mode == vk::PresentModeKHR::MAILBOX {
+ PresentMode::MAILBOX
+ } else if mode == vk::PresentModeKHR::FIFO {
+ PresentMode::FIFO
+ } else if mode == vk::PresentModeKHR::FIFO_RELAXED {
+ PresentMode::RELAXED
+ } else {
+ warn!("Unrecognized present mode {:?}", mode);
+ PresentMode::IMMEDIATE
+ }
+}
+
+pub fn map_composite_alpha_mode(
+ composite_alpha_mode: CompositeAlphaMode,
+) -> vk::CompositeAlphaFlagsKHR {
+ vk::CompositeAlphaFlagsKHR::from_raw(composite_alpha_mode.bits())
+}
+
+pub fn map_vk_composite_alpha(composite_alpha: vk::CompositeAlphaFlagsKHR) -> CompositeAlphaMode {
+ CompositeAlphaMode::from_bits_truncate(composite_alpha.as_raw())
+}
+
+pub fn map_descriptor_pool_create_flags(
+ flags: pso::DescriptorPoolCreateFlags,
+) -> vk::DescriptorPoolCreateFlags {
+ vk::DescriptorPoolCreateFlags::from_raw(flags.bits())
+}
+
+pub fn map_memory_properties(flags: vk::MemoryPropertyFlags) -> hal::memory::Properties {
+ use crate::memory::Properties;
+ let mut properties = Properties::empty();
+
+ if flags.contains(vk::MemoryPropertyFlags::DEVICE_LOCAL) {
+ properties |= Properties::DEVICE_LOCAL;
+ }
+ if flags.contains(vk::MemoryPropertyFlags::HOST_VISIBLE) {
+ properties |= Properties::CPU_VISIBLE;
+ }
+ if flags.contains(vk::MemoryPropertyFlags::HOST_COHERENT) {
+ properties |= Properties::COHERENT;
+ }
+ if flags.contains(vk::MemoryPropertyFlags::HOST_CACHED) {
+ properties |= Properties::CPU_CACHED;
+ }
+ if flags.contains(vk::MemoryPropertyFlags::LAZILY_ALLOCATED) {
+ properties |= Properties::LAZILY_ALLOCATED;
+ }
+
+ properties
+}
+
+pub fn map_memory_heap_flags(flags: vk::MemoryHeapFlags) -> hal::memory::HeapFlags {
+ use hal::memory::HeapFlags;
+ let mut hal_flags = HeapFlags::empty();
+
+ if flags.contains(vk::MemoryHeapFlags::DEVICE_LOCAL) {
+ hal_flags |= HeapFlags::DEVICE_LOCAL;
+ }
+
+ hal_flags
+}
diff --git a/third_party/rust/gfx-backend-vulkan/src/device.rs b/third_party/rust/gfx-backend-vulkan/src/device.rs
new file mode 100644
index 0000000000..8c8a19e01f
--- /dev/null
+++ b/third_party/rust/gfx-backend-vulkan/src/device.rs
@@ -0,0 +1,2325 @@
+use arrayvec::ArrayVec;
+use ash::extensions::khr;
+use ash::version::DeviceV1_0;
+use ash::vk;
+use ash::vk::Handle;
+use smallvec::SmallVec;
+
+use hal::{
+ memory::{Requirements, Segment},
+ pool::CommandPoolCreateFlags,
+ pso::VertexInputRate,
+ window::SwapchainConfig,
+ {buffer, device as d, format, image, pass, pso, query, queue}, {Features, MemoryTypeId},
+};
+
+use std::borrow::Borrow;
+use std::ffi::{CStr, CString};
+use std::ops::Range;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::{mem, ptr};
+
+use crate::pool::RawCommandPool;
+use crate::{command as cmd, conv, native as n, window as w};
+use crate::{Backend as B, DebugMessenger, Device};
+
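+// Owns the storage that the raw `vk::*CreateInfo` structs below point into
+// (entry-point strings, specialization entries, and so on). Instances are
+// pinned before `initialize` runs so those internal pointers stay valid for
+// as long as the create info is in use.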
+#[derive(Debug, Default)]
+struct GraphicsPipelineInfoBuf {
+    // 10 is the maximum number of dynamic states
+ dynamic_states: ArrayVec<[vk::DynamicState; 10]>,
+
+    // 5 is the number of shader stages
+ c_strings: ArrayVec<[CString; 5]>,
+ stages: ArrayVec<[vk::PipelineShaderStageCreateInfo; 5]>,
+ specializations: ArrayVec<[vk::SpecializationInfo; 5]>,
+ specialization_entries: ArrayVec<[SmallVec<[vk::SpecializationMapEntry; 4]>; 5]>,
+
+ vertex_bindings: Vec<vk::VertexInputBindingDescription>,
+ vertex_attributes: Vec<vk::VertexInputAttributeDescription>,
+ blend_states: Vec<vk::PipelineColorBlendAttachmentState>,
+
+ sample_mask: [u32; 2],
+ vertex_input_state: vk::PipelineVertexInputStateCreateInfo,
+ input_assembly_state: vk::PipelineInputAssemblyStateCreateInfo,
+ tessellation_state: Option<vk::PipelineTessellationStateCreateInfo>,
+ viewport_state: vk::PipelineViewportStateCreateInfo,
+ rasterization_state: vk::PipelineRasterizationStateCreateInfo,
+ multisample_state: vk::PipelineMultisampleStateCreateInfo,
+ depth_stencil_state: vk::PipelineDepthStencilStateCreateInfo,
+ color_blend_state: vk::PipelineColorBlendStateCreateInfo,
+ pipeline_dynamic_state: vk::PipelineDynamicStateCreateInfo,
+ viewport: vk::Viewport,
+ scissor: vk::Rect2D,
+}
+impl GraphicsPipelineInfoBuf {
+ unsafe fn add_stage<'a>(
+ &mut self,
+ stage: vk::ShaderStageFlags,
+ source: &pso::EntryPoint<'a, B>,
+ ) {
+ let string = CString::new(source.entry).unwrap();
+ self.c_strings.push(string);
+ let name = self.c_strings.last().unwrap().as_c_str();
+
+ self.specialization_entries.push(
+ source
+ .specialization
+ .constants
+ .iter()
+ .map(|c| vk::SpecializationMapEntry {
+ constant_id: c.id,
+ offset: c.range.start as _,
+ size: (c.range.end - c.range.start) as _,
+ })
+ .collect(),
+ );
+ let map_entries = self.specialization_entries.last().unwrap();
+
+ self.specializations.push(vk::SpecializationInfo {
+ map_entry_count: map_entries.len() as _,
+ p_map_entries: map_entries.as_ptr(),
+ data_size: source.specialization.data.len() as _,
+ p_data: source.specialization.data.as_ptr() as _,
+ });
+
+ self.stages.push(
+ vk::PipelineShaderStageCreateInfo::builder()
+ .flags(vk::PipelineShaderStageCreateFlags::empty())
+ .stage(stage)
+ .module(source.module.raw)
+ .name(name)
+ .specialization_info(self.specializations.last().unwrap())
+ .build(),
+ )
+ }
+
+ unsafe fn initialize<'a>(
+ this: &mut Pin<&mut Self>,
+ device: &Device,
+ desc: &pso::GraphicsPipelineDesc<'a, B>,
+ ) {
+ let mut this = Pin::get_mut(this.as_mut()); // use into_inner when it gets stable
+
+ match desc.primitive_assembler {
+ pso::PrimitiveAssemblerDesc::Vertex {
+ ref buffers,
+ ref attributes,
+ ref input_assembler,
+ ref vertex,
+ ref tessellation,
+ ref geometry,
+ } => {
+ // Vertex stage
+ // vertex shader is required
+ this.add_stage(vk::ShaderStageFlags::VERTEX, vertex);
+
+ // Geometry stage
+ if let Some(ref entry) = geometry {
+ this.add_stage(vk::ShaderStageFlags::GEOMETRY, entry);
+ }
+ // Tessellation stage
+ if let Some(ts) = tessellation {
+ this.add_stage(vk::ShaderStageFlags::TESSELLATION_CONTROL, &ts.0);
+ this.add_stage(vk::ShaderStageFlags::TESSELLATION_EVALUATION, &ts.1);
+ }
+ this.vertex_bindings = buffers.iter().map(|vbuf| {
+ vk::VertexInputBindingDescription {
+ binding: vbuf.binding,
+ stride: vbuf.stride as u32,
+ input_rate: match vbuf.rate {
+ VertexInputRate::Vertex => vk::VertexInputRate::VERTEX,
+ VertexInputRate::Instance(divisor) => {
+ debug_assert_eq!(divisor, 1, "Custom vertex rate divisors not supported in Vulkan backend without extension");
+ vk::VertexInputRate::INSTANCE
+ },
+ },
+ }
+ }).collect();
+
+ this.vertex_attributes = attributes
+ .iter()
+ .map(|attr| vk::VertexInputAttributeDescription {
+ location: attr.location as u32,
+ binding: attr.binding as u32,
+ format: conv::map_format(attr.element.format),
+ offset: attr.element.offset as u32,
+ })
+ .collect();
+
+ this.vertex_input_state = vk::PipelineVertexInputStateCreateInfo::builder()
+ .flags(vk::PipelineVertexInputStateCreateFlags::empty())
+ .vertex_binding_descriptions(&this.vertex_bindings)
+ .vertex_attribute_descriptions(&this.vertex_attributes)
+ .build();
+
+ this.input_assembly_state = vk::PipelineInputAssemblyStateCreateInfo::builder()
+ .flags(vk::PipelineInputAssemblyStateCreateFlags::empty())
+ .topology(conv::map_topology(&input_assembler))
+ .primitive_restart_enable(input_assembler.restart_index.is_some())
+ .build();
+ }
+ pso::PrimitiveAssemblerDesc::Mesh { ref task, ref mesh } => {
+ this.vertex_bindings = Vec::new();
+ this.vertex_attributes = Vec::new();
+ this.vertex_input_state = vk::PipelineVertexInputStateCreateInfo::default();
+ this.input_assembly_state = vk::PipelineInputAssemblyStateCreateInfo::default();
+
+ // Task stage, optional
+ if let Some(ref entry) = task {
+ this.add_stage(vk::ShaderStageFlags::TASK_NV, entry);
+ }
+
+ // Mesh stage
+ this.add_stage(vk::ShaderStageFlags::MESH_NV, mesh);
+ }
+ };
+
+        // Fragment stage (optional)
+ if let Some(ref entry) = desc.fragment {
+ this.add_stage(vk::ShaderStageFlags::FRAGMENT, entry);
+ }
+
+ let depth_bias = match desc.rasterizer.depth_bias {
+ Some(pso::State::Static(db)) => db,
+ Some(pso::State::Dynamic) => {
+ this.dynamic_states.push(vk::DynamicState::DEPTH_BIAS);
+ pso::DepthBias::default()
+ }
+ None => pso::DepthBias::default(),
+ };
+
+ let polygon_mode = match desc.rasterizer.polygon_mode {
+ pso::PolygonMode::Point => vk::PolygonMode::POINT,
+ pso::PolygonMode::Line => vk::PolygonMode::LINE,
+ pso::PolygonMode::Fill => vk::PolygonMode::FILL,
+ };
+
+ let line_width = match desc.rasterizer.line_width {
+ pso::State::Static(w) => w,
+ pso::State::Dynamic => {
+ this.dynamic_states.push(vk::DynamicState::LINE_WIDTH);
+ 1.0
+ }
+ };
+
+ this.rasterization_state = vk::PipelineRasterizationStateCreateInfo::builder()
+ .flags(vk::PipelineRasterizationStateCreateFlags::empty())
+ .depth_clamp_enable(if desc.rasterizer.depth_clamping {
+ if device.shared.features.contains(Features::DEPTH_CLAMP) {
+ true
+ } else {
+ warn!("Depth clamping was requested on a device with disabled feature");
+ false
+ }
+ } else {
+ false
+ })
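+            // Rasterization can be discarded entirely when the pipeline
+            // produces no fragment output: no fragment shader and no
+            // depth/stencil usage.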
+ .rasterizer_discard_enable(
+ desc.fragment.is_none()
+ && desc.depth_stencil.depth.is_none()
+ && desc.depth_stencil.stencil.is_none(),
+ )
+ .polygon_mode(polygon_mode)
+ .cull_mode(conv::map_cull_face(desc.rasterizer.cull_face))
+ .front_face(conv::map_front_face(desc.rasterizer.front_face))
+ .depth_bias_enable(desc.rasterizer.depth_bias.is_some())
+ .depth_bias_constant_factor(depth_bias.const_factor)
+ .depth_bias_clamp(depth_bias.clamp)
+ .depth_bias_slope_factor(depth_bias.slope_factor)
+ .line_width(line_width)
+ .build();
+
+ this.tessellation_state = {
+ if let pso::PrimitiveAssemblerDesc::Vertex {
+ input_assembler, ..
+ } = &desc.primitive_assembler
+ {
+ if let pso::Primitive::PatchList(patch_control_points) = input_assembler.primitive {
+ Some(
+ vk::PipelineTessellationStateCreateInfo::builder()
+ .flags(vk::PipelineTessellationStateCreateFlags::empty())
+ .patch_control_points(patch_control_points as _)
+ .build(),
+ )
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+ };
+
+ this.viewport_state = {
+ let scissors = match desc.baked_states.scissor {
+ Some(ref rect) => {
+ this.scissor = conv::map_rect(rect);
+ Some([this.scissor])
+ }
+ None => {
+ this.dynamic_states.push(vk::DynamicState::SCISSOR);
+ None
+ }
+ };
+ let viewports = match desc.baked_states.viewport {
+ Some(ref vp) => {
+ this.viewport = device.shared.map_viewport(vp);
+ Some([this.viewport])
+ }
+ None => {
+ this.dynamic_states.push(vk::DynamicState::VIEWPORT);
+ None
+ }
+ };
+
+ let mut builder = vk::PipelineViewportStateCreateInfo::builder()
+ .flags(vk::PipelineViewportStateCreateFlags::empty());
+ if let Some(scissors) = &scissors {
+ builder = builder.scissors(scissors);
+ }
+ if let Some(viewports) = &viewports {
+ builder = builder.viewports(viewports);
+ }
+ builder.build()
+ };
+
+ this.multisample_state = match desc.multisampling {
+ Some(ref ms) => {
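+                // `pSampleMask` is an array of 32-bit words, so the 64-bit
+                // hal sample mask is split into its low and high halves.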
+ this.sample_mask = [
+ (ms.sample_mask & 0xFFFFFFFF) as u32,
+ ((ms.sample_mask >> 32) & 0xFFFFFFFF) as u32,
+ ];
+ vk::PipelineMultisampleStateCreateInfo::builder()
+ .flags(vk::PipelineMultisampleStateCreateFlags::empty())
+ .rasterization_samples(vk::SampleCountFlags::from_raw(
+ (ms.rasterization_samples as u32) & vk::SampleCountFlags::all().as_raw(),
+ ))
+ .sample_shading_enable(ms.sample_shading.is_some())
+ .min_sample_shading(ms.sample_shading.unwrap_or(0.0))
+ .sample_mask(&this.sample_mask)
+ .alpha_to_coverage_enable(ms.alpha_coverage)
+ .alpha_to_one_enable(ms.alpha_to_one)
+ .build()
+ }
+ None => vk::PipelineMultisampleStateCreateInfo::builder()
+ .flags(vk::PipelineMultisampleStateCreateFlags::empty())
+ .rasterization_samples(vk::SampleCountFlags::TYPE_1)
+ .build(),
+ };
+
+ let depth_stencil = desc.depth_stencil;
+ let (depth_test_enable, depth_write_enable, depth_compare_op) = match depth_stencil.depth {
+ Some(ref depth) => (true, depth.write as _, conv::map_comparison(depth.fun)),
+ None => (false, false, vk::CompareOp::NEVER),
+ };
+ let (stencil_test_enable, front, back) = match depth_stencil.stencil {
+ Some(ref stencil) => {
+ let mut front = conv::map_stencil_side(&stencil.faces.front);
+ let mut back = conv::map_stencil_side(&stencil.faces.back);
+ match stencil.read_masks {
+ pso::State::Static(ref sides) => {
+ front.compare_mask = sides.front;
+ back.compare_mask = sides.back;
+ }
+ pso::State::Dynamic => {
+ this.dynamic_states
+ .push(vk::DynamicState::STENCIL_COMPARE_MASK);
+ }
+ }
+ match stencil.write_masks {
+ pso::State::Static(ref sides) => {
+ front.write_mask = sides.front;
+ back.write_mask = sides.back;
+ }
+ pso::State::Dynamic => {
+ this.dynamic_states
+ .push(vk::DynamicState::STENCIL_WRITE_MASK);
+ }
+ }
+ match stencil.reference_values {
+ pso::State::Static(ref sides) => {
+ front.reference = sides.front;
+ back.reference = sides.back;
+ }
+ pso::State::Dynamic => {
+ this.dynamic_states
+ .push(vk::DynamicState::STENCIL_REFERENCE);
+ }
+ }
+ (true, front, back)
+ }
+ None => mem::zeroed(),
+ };
+ let (min_depth_bounds, max_depth_bounds) = match desc.baked_states.depth_bounds {
+ Some(ref range) => (range.start, range.end),
+ None => {
+ this.dynamic_states.push(vk::DynamicState::DEPTH_BOUNDS);
+ (0.0, 1.0)
+ }
+ };
+
+ this.depth_stencil_state = vk::PipelineDepthStencilStateCreateInfo::builder()
+ .flags(vk::PipelineDepthStencilStateCreateFlags::empty())
+ .depth_test_enable(depth_test_enable)
+ .depth_write_enable(depth_write_enable)
+ .depth_compare_op(depth_compare_op)
+ .depth_bounds_test_enable(depth_stencil.depth_bounds)
+ .stencil_test_enable(stencil_test_enable)
+ .front(front)
+ .back(back)
+ .min_depth_bounds(min_depth_bounds)
+ .max_depth_bounds(max_depth_bounds)
+ .build();
+
+ this.blend_states = desc
+ .blender
+ .targets
+ .iter()
+ .map(|color_desc| {
+ let color_write_mask =
+ vk::ColorComponentFlags::from_raw(color_desc.mask.bits() as _);
+ match color_desc.blend {
+ Some(ref bs) => {
+ let (color_blend_op, src_color_blend_factor, dst_color_blend_factor) =
+ conv::map_blend_op(bs.color);
+ let (alpha_blend_op, src_alpha_blend_factor, dst_alpha_blend_factor) =
+ conv::map_blend_op(bs.alpha);
+ vk::PipelineColorBlendAttachmentState {
+ color_write_mask,
+ blend_enable: vk::TRUE,
+ src_color_blend_factor,
+ dst_color_blend_factor,
+ color_blend_op,
+ src_alpha_blend_factor,
+ dst_alpha_blend_factor,
+ alpha_blend_op,
+ }
+ }
+ None => vk::PipelineColorBlendAttachmentState {
+ color_write_mask,
+ ..mem::zeroed()
+ },
+ }
+ })
+ .collect();
+
+ this.color_blend_state = vk::PipelineColorBlendStateCreateInfo::builder()
+ .flags(vk::PipelineColorBlendStateCreateFlags::empty())
+ .logic_op_enable(false) // TODO
+ .logic_op(vk::LogicOp::CLEAR)
+ .attachments(&this.blend_states) // TODO:
+ .blend_constants(match desc.baked_states.blend_color {
+ Some(value) => value,
+ None => {
+ this.dynamic_states.push(vk::DynamicState::BLEND_CONSTANTS);
+ [0.0; 4]
+ }
+ })
+ .build();
+
+ this.pipeline_dynamic_state = vk::PipelineDynamicStateCreateInfo::builder()
+ .flags(vk::PipelineDynamicStateCreateFlags::empty())
+ .dynamic_states(&this.dynamic_states)
+ .build();
+ }
+}
+
+#[derive(Debug, Default)]
+struct ComputePipelineInfoBuf {
+ c_string: CString,
+ specialization: vk::SpecializationInfo,
+ entries: SmallVec<[vk::SpecializationMapEntry; 4]>,
+}
+impl ComputePipelineInfoBuf {
+ unsafe fn initialize<'a>(this: &mut Pin<&mut Self>, desc: &pso::ComputePipelineDesc<'a, B>) {
+ let mut this = Pin::get_mut(this.as_mut()); // use into_inner when it gets stable
+
+ this.c_string = CString::new(desc.shader.entry).unwrap();
+ this.entries = desc
+ .shader
+ .specialization
+ .constants
+ .iter()
+ .map(|c| vk::SpecializationMapEntry {
+ constant_id: c.id,
+ offset: c.range.start as _,
+ size: (c.range.end - c.range.start) as _,
+ })
+ .collect();
+ this.specialization = vk::SpecializationInfo {
+ map_entry_count: this.entries.len() as _,
+ p_map_entries: this.entries.as_ptr(),
+ data_size: desc.shader.specialization.data.len() as _,
+ p_data: desc.shader.specialization.data.as_ptr() as _,
+ };
+ }
+}
+
+impl d::Device<B> for Device {
+ unsafe fn allocate_memory(
+ &self,
+ mem_type: MemoryTypeId,
+ size: u64,
+ ) -> Result<n::Memory, d::AllocationError> {
+ let info = vk::MemoryAllocateInfo::builder()
+ .allocation_size(size)
+ .memory_type_index(self.get_ash_memory_type_index(mem_type));
+
+ let result = self.shared.raw.allocate_memory(&info, None);
+
+ match result {
+ Ok(memory) => Ok(n::Memory { raw: memory }),
+ Err(vk::Result::ERROR_TOO_MANY_OBJECTS) => Err(d::AllocationError::TooManyObjects),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn create_command_pool(
+ &self,
+ family: queue::QueueFamilyId,
+ create_flags: CommandPoolCreateFlags,
+ ) -> Result<RawCommandPool, d::OutOfMemory> {
+ let mut flags = vk::CommandPoolCreateFlags::empty();
+ if create_flags.contains(CommandPoolCreateFlags::TRANSIENT) {
+ flags |= vk::CommandPoolCreateFlags::TRANSIENT;
+ }
+ if create_flags.contains(CommandPoolCreateFlags::RESET_INDIVIDUAL) {
+ flags |= vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER;
+ }
+
+ let info = vk::CommandPoolCreateInfo::builder()
+ .flags(flags)
+ .queue_family_index(family.0 as _);
+
+ let result = self.shared.raw.create_command_pool(&info, None);
+
+ match result {
+ Ok(pool) => Ok(RawCommandPool {
+ raw: pool,
+ device: self.shared.clone(),
+ }),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn destroy_command_pool(&self, pool: RawCommandPool) {
+ self.shared.raw.destroy_command_pool(pool.raw, None);
+ }
+
+ unsafe fn create_render_pass<'a, IA, IS, ID>(
+ &self,
+ attachments: IA,
+ subpasses: IS,
+ dependencies: ID,
+ ) -> Result<n::RenderPass, d::OutOfMemory>
+ where
+ IA: IntoIterator,
+ IA::Item: Borrow<pass::Attachment>,
+ IA::IntoIter: ExactSizeIterator,
+ IS: IntoIterator,
+ IS::Item: Borrow<pass::SubpassDesc<'a>>,
+ IS::IntoIter: ExactSizeIterator,
+ ID: IntoIterator,
+ ID::Item: Borrow<pass::SubpassDependency>,
+ ID::IntoIter: ExactSizeIterator,
+ {
+ let attachments = attachments.into_iter().map(|attachment| {
+ let attachment = attachment.borrow();
+ vk::AttachmentDescription {
+ flags: vk::AttachmentDescriptionFlags::empty(), // TODO: may even alias!
+ format: attachment
+ .format
+ .map_or(vk::Format::UNDEFINED, conv::map_format),
+ samples: vk::SampleCountFlags::from_raw(
+ (attachment.samples as u32) & vk::SampleCountFlags::all().as_raw(),
+ ),
+ load_op: conv::map_attachment_load_op(attachment.ops.load),
+ store_op: conv::map_attachment_store_op(attachment.ops.store),
+ stencil_load_op: conv::map_attachment_load_op(attachment.stencil_ops.load),
+ stencil_store_op: conv::map_attachment_store_op(attachment.stencil_ops.store),
+ initial_layout: conv::map_image_layout(attachment.layouts.start),
+ final_layout: conv::map_image_layout(attachment.layouts.end),
+ }
+ });
+
+ let dependencies = dependencies.into_iter().map(|subpass_dep| {
+ let sdep = subpass_dep.borrow();
+ // TODO: checks
+ vk::SubpassDependency {
+ src_subpass: sdep
+ .passes
+ .start
+ .map_or(vk::SUBPASS_EXTERNAL, |id| id as u32),
+ dst_subpass: sdep.passes.end.map_or(vk::SUBPASS_EXTERNAL, |id| id as u32),
+ src_stage_mask: conv::map_pipeline_stage(sdep.stages.start),
+ dst_stage_mask: conv::map_pipeline_stage(sdep.stages.end),
+ src_access_mask: conv::map_image_access(sdep.accesses.start),
+ dst_access_mask: conv::map_image_access(sdep.accesses.end),
+ dependency_flags: mem::transmute(sdep.flags),
+ }
+ });
+
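+        // Record which attachments use a CLEAR load op as a bitmask; the
+        // render pass keeps it so clear values can be matched to attachments
+        // when recording begins.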
+ let (clear_attachments_mask, result) =
+ inplace_it::inplace_or_alloc_array(attachments.len(), |uninit_guard| {
+ let attachments = uninit_guard.init_with_iter(attachments);
+
+ let clear_attachments_mask = attachments
+ .iter()
+ .enumerate()
+ .filter_map(|(i, at)| {
+ if at.load_op == vk::AttachmentLoadOp::CLEAR
+ || at.stencil_load_op == vk::AttachmentLoadOp::CLEAR
+ {
+ Some(1 << i as u64)
+ } else {
+ None
+ }
+ })
+ .sum();
+
+ let attachment_refs = subpasses
+ .into_iter()
+ .map(|subpass| {
+ let subpass = subpass.borrow();
+ fn make_ref(
+ &(id, layout): &pass::AttachmentRef,
+ ) -> vk::AttachmentReference {
+ vk::AttachmentReference {
+ attachment: id as _,
+ layout: conv::map_image_layout(layout),
+ }
+ }
+ let colors = subpass.colors.iter().map(make_ref).collect::<Box<[_]>>();
+ let depth_stencil = subpass.depth_stencil.map(make_ref);
+ let inputs = subpass.inputs.iter().map(make_ref).collect::<Box<[_]>>();
+ let preserves = subpass
+ .preserves
+ .iter()
+ .map(|&id| id as u32)
+ .collect::<Box<[_]>>();
+ let resolves = subpass.resolves.iter().map(make_ref).collect::<Box<[_]>>();
+
+ (colors, depth_stencil, inputs, preserves, resolves)
+ })
+ .collect::<Box<[_]>>();
+
+ let subpasses = attachment_refs
+ .iter()
+ .map(|(colors, depth_stencil, inputs, preserves, resolves)| {
+ vk::SubpassDescription {
+ flags: vk::SubpassDescriptionFlags::empty(),
+ pipeline_bind_point: vk::PipelineBindPoint::GRAPHICS,
+ input_attachment_count: inputs.len() as u32,
+ p_input_attachments: inputs.as_ptr(),
+ color_attachment_count: colors.len() as u32,
+ p_color_attachments: colors.as_ptr(),
+ p_resolve_attachments: if resolves.is_empty() {
+ ptr::null()
+ } else {
+ resolves.as_ptr()
+ },
+ p_depth_stencil_attachment: match depth_stencil {
+ Some(ref aref) => aref as *const _,
+ None => ptr::null(),
+ },
+ preserve_attachment_count: preserves.len() as u32,
+ p_preserve_attachments: preserves.as_ptr(),
+ }
+ })
+ .collect::<Box<[_]>>();
+
+ let result =
+ inplace_it::inplace_or_alloc_array(dependencies.len(), |uninit_guard| {
+ let dependencies = uninit_guard.init_with_iter(dependencies);
+
+ let info = vk::RenderPassCreateInfo::builder()
+ .flags(vk::RenderPassCreateFlags::empty())
+ .attachments(&attachments)
+ .subpasses(&subpasses)
+ .dependencies(&dependencies);
+
+ self.shared.raw.create_render_pass(&info, None)
+ });
+
+ (clear_attachments_mask, result)
+ });
+
+ match result {
+ Ok(renderpass) => Ok(n::RenderPass {
+ raw: renderpass,
+ clear_attachments_mask,
+ }),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn create_pipeline_layout<IS, IR>(
+ &self,
+ sets: IS,
+ push_constant_ranges: IR,
+ ) -> Result<n::PipelineLayout, d::OutOfMemory>
+ where
+ IS: IntoIterator,
+ IS::Item: Borrow<n::DescriptorSetLayout>,
+ IS::IntoIter: ExactSizeIterator,
+ IR: IntoIterator,
+ IR::Item: Borrow<(pso::ShaderStageFlags, Range<u32>)>,
+ IR::IntoIter: ExactSizeIterator,
+ {
+ let set_layouts = sets.into_iter().map(|set| set.borrow().raw);
+
+ let push_constant_ranges = push_constant_ranges.into_iter().map(|range| {
+ let &(s, ref r) = range.borrow();
+ vk::PushConstantRange {
+ stage_flags: conv::map_stage_flags(s),
+ offset: r.start,
+ size: r.end - r.start,
+ }
+ });
+
+ let result = inplace_it::inplace_or_alloc_array(set_layouts.len(), |uninit_guard| {
+ let set_layouts = uninit_guard.init_with_iter(set_layouts);
+
+            // TODO: set_layouts doesn't implement fmt::Debug; submit a PR?
+ // debug!("create_pipeline_layout {:?}", set_layouts);
+
+ inplace_it::inplace_or_alloc_array(push_constant_ranges.len(), |uninit_guard| {
+ let push_constant_ranges = uninit_guard.init_with_iter(push_constant_ranges);
+
+ let info = vk::PipelineLayoutCreateInfo::builder()
+ .flags(vk::PipelineLayoutCreateFlags::empty())
+ .set_layouts(&set_layouts)
+ .push_constant_ranges(&push_constant_ranges);
+
+ self.shared.raw.create_pipeline_layout(&info, None)
+ })
+ });
+
+ match result {
+ Ok(raw) => Ok(n::PipelineLayout { raw }),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn create_pipeline_cache(
+ &self,
+ data: Option<&[u8]>,
+ ) -> Result<n::PipelineCache, d::OutOfMemory> {
+ let info =
+ vk::PipelineCacheCreateInfo::builder().flags(vk::PipelineCacheCreateFlags::empty());
+ let info = if let Some(d) = data {
+ info.initial_data(d)
+ } else {
+ info
+ };
+
+ let result = self.shared.raw.create_pipeline_cache(&info, None);
+
+ match result {
+ Ok(raw) => Ok(n::PipelineCache { raw }),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn get_pipeline_cache_data(
+ &self,
+ cache: &n::PipelineCache,
+ ) -> Result<Vec<u8>, d::OutOfMemory> {
+ let result = self.shared.raw.get_pipeline_cache_data(cache.raw);
+
+ match result {
+ Ok(data) => Ok(data),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn destroy_pipeline_cache(&self, cache: n::PipelineCache) {
+ self.shared.raw.destroy_pipeline_cache(cache.raw, None);
+ }
+
+ unsafe fn merge_pipeline_caches<I>(
+ &self,
+ target: &n::PipelineCache,
+ sources: I,
+ ) -> Result<(), d::OutOfMemory>
+ where
+ I: IntoIterator,
+ I::Item: Borrow<n::PipelineCache>,
+ I::IntoIter: ExactSizeIterator,
+ {
+ let caches = sources.into_iter().map(|s| s.borrow().raw);
+
+ let result = inplace_it::inplace_or_alloc_array(caches.len(), |uninit_guard| {
+ let caches = uninit_guard.init_with_iter(caches);
+
+ self.shared.raw.fp_v1_0().merge_pipeline_caches(
+ self.shared.raw.handle(),
+ target.raw,
+ caches.len() as u32,
+ caches.as_ptr(),
+ )
+ });
+
+ match result {
+ vk::Result::SUCCESS => Ok(()),
+ vk::Result::ERROR_OUT_OF_HOST_MEMORY => Err(d::OutOfMemory::Host),
+ vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn create_graphics_pipeline<'a>(
+ &self,
+ desc: &pso::GraphicsPipelineDesc<'a, B>,
+ cache: Option<&n::PipelineCache>,
+ ) -> Result<n::GraphicsPipeline, pso::CreationError> {
+ debug!("create_graphics_pipeline {:?}", desc);
+
+ let mut buf = GraphicsPipelineInfoBuf::default();
+ let mut buf = Pin::new(&mut buf);
+ GraphicsPipelineInfoBuf::initialize(&mut buf, self, desc);
+
+ let info = {
+ let (base_handle, base_index) = match desc.parent {
+ pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1),
+ pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _),
+ pso::BasePipeline::None => (vk::Pipeline::null(), -1),
+ };
+
+ let mut flags = vk::PipelineCreateFlags::empty();
+ match desc.parent {
+ pso::BasePipeline::None => (),
+ _ => {
+ flags |= vk::PipelineCreateFlags::DERIVATIVE;
+ }
+ }
+ if desc
+ .flags
+ .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION)
+ {
+ flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION;
+ }
+ if desc
+ .flags
+ .contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES)
+ {
+ flags |= vk::PipelineCreateFlags::ALLOW_DERIVATIVES;
+ }
+
+ let builder = vk::GraphicsPipelineCreateInfo::builder()
+ .flags(flags)
+ .stages(&buf.stages)
+ .vertex_input_state(&buf.vertex_input_state)
+ .input_assembly_state(&buf.input_assembly_state)
+ .rasterization_state(&buf.rasterization_state);
+ let builder = match buf.tessellation_state.as_ref() {
+ Some(t) => builder.tessellation_state(t),
+ None => builder,
+ };
+ builder
+ .viewport_state(&buf.viewport_state)
+ .multisample_state(&buf.multisample_state)
+ .depth_stencil_state(&buf.depth_stencil_state)
+ .color_blend_state(&buf.color_blend_state)
+ .dynamic_state(&buf.pipeline_dynamic_state)
+ .layout(desc.layout.raw)
+ .render_pass(desc.subpass.main_pass.raw)
+ .subpass(desc.subpass.index as _)
+ .base_pipeline_handle(base_handle)
+ .base_pipeline_index(base_index)
+ };
+
+ let mut pipeline = vk::Pipeline::null();
+
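+        // The raw vkCreateGraphicsPipelines entry point is used so a single
+        // pipeline can be created directly, without collecting results into a
+        // Vec as ash's safe wrapper does.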
+ match self.shared.raw.fp_v1_0().create_graphics_pipelines(
+ self.shared.raw.handle(),
+ cache.map_or(vk::PipelineCache::null(), |cache| cache.raw),
+ 1,
+ &*info,
+ ptr::null(),
+ &mut pipeline,
+ ) {
+ vk::Result::SUCCESS => Ok(n::GraphicsPipeline(pipeline)),
+ vk::Result::ERROR_OUT_OF_HOST_MEMORY => Err(d::OutOfMemory::Host.into()),
+ vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device.into()),
+ _ => Err(pso::CreationError::Other),
+ }
+ }
+
+ unsafe fn create_graphics_pipelines<'a, T>(
+ &self,
+ descs: T,
+ cache: Option<&n::PipelineCache>,
+ ) -> Vec<Result<n::GraphicsPipeline, pso::CreationError>>
+ where
+ T: IntoIterator,
+ T::Item: Borrow<pso::GraphicsPipelineDesc<'a, B>>,
+ {
+ debug!("create_graphics_pipelines:");
+
+ let mut bufs: Pin<Box<[_]>> = descs
+ .into_iter()
+ .enumerate()
+ .inspect(|(idx, desc)| debug!("# {} {:?}", idx, desc.borrow()))
+ .map(|(_, desc)| (desc, GraphicsPipelineInfoBuf::default()))
+ .collect::<Box<[_]>>()
+ .into();
+
+ for (desc, buf) in bufs.as_mut().get_unchecked_mut() {
+ let desc: &T::Item = desc;
+ GraphicsPipelineInfoBuf::initialize(&mut Pin::new_unchecked(buf), self, desc.borrow());
+ }
+
+ let infos: Vec<_> = bufs
+ .iter()
+ .map(|(desc, buf)| {
+ let desc = desc.borrow();
+
+ let (base_handle, base_index) = match desc.parent {
+ pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1),
+ pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _),
+ pso::BasePipeline::None => (vk::Pipeline::null(), -1),
+ };
+
+ let mut flags = vk::PipelineCreateFlags::empty();
+ match desc.parent {
+ pso::BasePipeline::None => (),
+ _ => {
+ flags |= vk::PipelineCreateFlags::DERIVATIVE;
+ }
+ }
+ if desc
+ .flags
+ .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION)
+ {
+ flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION;
+ }
+ if desc
+ .flags
+ .contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES)
+ {
+ flags |= vk::PipelineCreateFlags::ALLOW_DERIVATIVES;
+ }
+
+ let builder = vk::GraphicsPipelineCreateInfo::builder()
+ .flags(flags)
+ .stages(&buf.stages)
+ .vertex_input_state(&buf.vertex_input_state)
+ .input_assembly_state(&buf.input_assembly_state)
+ .rasterization_state(&buf.rasterization_state);
+ let builder = match buf.tessellation_state.as_ref() {
+ Some(t) => builder.tessellation_state(t),
+ None => builder,
+ };
+ builder
+ .viewport_state(&buf.viewport_state)
+ .multisample_state(&buf.multisample_state)
+ .depth_stencil_state(&buf.depth_stencil_state)
+ .color_blend_state(&buf.color_blend_state)
+ .dynamic_state(&buf.pipeline_dynamic_state)
+ .layout(desc.layout.raw)
+ .render_pass(desc.subpass.main_pass.raw)
+ .subpass(desc.subpass.index as _)
+ .base_pipeline_handle(base_handle)
+ .base_pipeline_index(base_index)
+ .build()
+ })
+ .collect();
+
+ let (pipelines, error) = if infos.is_empty() {
+ (Vec::new(), None)
+ } else {
+ match self.shared.raw.create_graphics_pipelines(
+ cache.map_or(vk::PipelineCache::null(), |cache| cache.raw),
+ &infos,
+ None,
+ ) {
+ Ok(pipelines) => (pipelines, None),
+ Err((pipelines, error)) => (pipelines, Some(error)),
+ }
+ };
+
+ pipelines
+ .into_iter()
+ .map(|pso| {
+ if pso == vk::Pipeline::null() {
+ match error {
+ Some(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => {
+ Err(d::OutOfMemory::Host.into())
+ }
+ Some(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => {
+ Err(d::OutOfMemory::Device.into())
+ }
+ _ => unreachable!(),
+ }
+ } else {
+ Ok(n::GraphicsPipeline(pso))
+ }
+ })
+ .collect()
+ }
+
+ unsafe fn create_compute_pipeline<'a>(
+ &self,
+ desc: &pso::ComputePipelineDesc<'a, B>,
+ cache: Option<&n::PipelineCache>,
+ ) -> Result<n::ComputePipeline, pso::CreationError> {
+ let mut buf = ComputePipelineInfoBuf::default();
+ let mut buf = Pin::new(&mut buf);
+ ComputePipelineInfoBuf::initialize(&mut buf, desc);
+
+ let info = {
+ let stage = vk::PipelineShaderStageCreateInfo::builder()
+ .flags(vk::PipelineShaderStageCreateFlags::empty())
+ .stage(vk::ShaderStageFlags::COMPUTE)
+ .module(desc.shader.module.raw)
+ .name(buf.c_string.as_c_str())
+ .specialization_info(&buf.specialization);
+
+ let (base_handle, base_index) = match desc.parent {
+ pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1),
+ pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _),
+ pso::BasePipeline::None => (vk::Pipeline::null(), -1),
+ };
+
+ let mut flags = vk::PipelineCreateFlags::empty();
+ match desc.parent {
+ pso::BasePipeline::None => (),
+ _ => {
+ flags |= vk::PipelineCreateFlags::DERIVATIVE;
+ }
+ }
+ if desc
+ .flags
+ .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION)
+ {
+ flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION;
+ }
+ if desc
+ .flags
+ .contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES)
+ {
+ flags |= vk::PipelineCreateFlags::ALLOW_DERIVATIVES;
+ }
+
+ vk::ComputePipelineCreateInfo::builder()
+ .flags(flags)
+ .stage(*stage)
+ .layout(desc.layout.raw)
+ .base_pipeline_handle(base_handle)
+ .base_pipeline_index(base_index)
+ .build()
+ };
+
+ let mut pipeline = vk::Pipeline::null();
+
+ match self.shared.raw.fp_v1_0().create_compute_pipelines(
+ self.shared.raw.handle(),
+ cache.map_or(vk::PipelineCache::null(), |cache| cache.raw),
+ 1,
+ &info,
+ ptr::null(),
+ &mut pipeline,
+ ) {
+ vk::Result::SUCCESS => Ok(n::ComputePipeline(pipeline)),
+ vk::Result::ERROR_OUT_OF_HOST_MEMORY => Err(d::OutOfMemory::Host.into()),
+ vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device.into()),
+ _ => Err(pso::CreationError::Other),
+ }
+ }
+
+ unsafe fn create_compute_pipelines<'a, T>(
+ &self,
+ descs: T,
+ cache: Option<&n::PipelineCache>,
+ ) -> Vec<Result<n::ComputePipeline, pso::CreationError>>
+ where
+ T: IntoIterator,
+ T::Item: Borrow<pso::ComputePipelineDesc<'a, B>>,
+ {
+ let mut bufs: Pin<Box<[_]>> = descs
+ .into_iter()
+ .map(|desc| (desc, ComputePipelineInfoBuf::default()))
+ .collect::<Box<[_]>>()
+ .into();
+
+ for (desc, buf) in bufs.as_mut().get_unchecked_mut() {
+ let desc: &T::Item = desc;
+ ComputePipelineInfoBuf::initialize(&mut Pin::new_unchecked(buf), desc.borrow());
+ }
+
+ let infos: Vec<_> = bufs
+ .iter()
+ .map(|(desc, buf)| {
+ let desc = desc.borrow();
+
+ let stage = vk::PipelineShaderStageCreateInfo::builder()
+ .flags(vk::PipelineShaderStageCreateFlags::empty())
+ .stage(vk::ShaderStageFlags::COMPUTE)
+ .module(desc.shader.module.raw)
+ .name(buf.c_string.as_c_str())
+ .specialization_info(&buf.specialization);
+
+ let (base_handle, base_index) = match desc.parent {
+ pso::BasePipeline::Pipeline(pipeline) => (pipeline.0, -1),
+ pso::BasePipeline::Index(index) => (vk::Pipeline::null(), index as _),
+ pso::BasePipeline::None => (vk::Pipeline::null(), -1),
+ };
+
+ let mut flags = vk::PipelineCreateFlags::empty();
+ match desc.parent {
+ pso::BasePipeline::None => (),
+ _ => {
+ flags |= vk::PipelineCreateFlags::DERIVATIVE;
+ }
+ }
+ if desc
+ .flags
+ .contains(pso::PipelineCreationFlags::DISABLE_OPTIMIZATION)
+ {
+ flags |= vk::PipelineCreateFlags::DISABLE_OPTIMIZATION;
+ }
+ if desc
+ .flags
+ .contains(pso::PipelineCreationFlags::ALLOW_DERIVATIVES)
+ {
+ flags |= vk::PipelineCreateFlags::ALLOW_DERIVATIVES;
+ }
+
+ vk::ComputePipelineCreateInfo::builder()
+ .flags(flags)
+ .stage(*stage)
+ .layout(desc.layout.raw)
+ .base_pipeline_handle(base_handle)
+ .base_pipeline_index(base_index)
+ .build()
+ })
+ .collect();
+
+ let (pipelines, error) = if infos.is_empty() {
+ (Vec::new(), None)
+ } else {
+ match self.shared.raw.create_compute_pipelines(
+ cache.map_or(vk::PipelineCache::null(), |cache| cache.raw),
+ &infos,
+ None,
+ ) {
+ Ok(pipelines) => (pipelines, None),
+ Err((pipelines, error)) => (pipelines, Some(error)),
+ }
+ };
+
+ pipelines
+ .into_iter()
+ .map(|pso| {
+ if pso == vk::Pipeline::null() {
+ match error {
+ Some(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => {
+ Err(d::OutOfMemory::Host.into())
+ }
+ Some(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => {
+ Err(d::OutOfMemory::Device.into())
+ }
+ _ => unreachable!(),
+ }
+ } else {
+ Ok(n::ComputePipeline(pso))
+ }
+ })
+ .collect()
+ }
+
+ unsafe fn create_framebuffer<T>(
+ &self,
+ renderpass: &n::RenderPass,
+ attachments: T,
+ extent: image::Extent,
+ ) -> Result<n::Framebuffer, d::OutOfMemory>
+ where
+ T: IntoIterator,
+ T::Item: Borrow<n::ImageView>,
+ {
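+        // Image views owned by a swapchain point back at the surface's
+        // framebuffer list; framebuffers built from such views are registered
+        // there (and destroyed with the surface), so they are marked as not
+        // `owned` below.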
+ let mut framebuffers_ptr = None;
+ let mut raw_attachments = SmallVec::<[_; 4]>::new();
+ for attachment in attachments {
+ let at = attachment.borrow();
+ raw_attachments.push(at.view);
+ match at.owner {
+ n::ImageViewOwner::User => {}
+ n::ImageViewOwner::Surface(ref fbo_ptr) => {
+ framebuffers_ptr = Some(Arc::clone(&fbo_ptr.0));
+ }
+ }
+ }
+
+ let info = vk::FramebufferCreateInfo::builder()
+ .flags(vk::FramebufferCreateFlags::empty())
+ .render_pass(renderpass.raw)
+ .attachments(&raw_attachments)
+ .width(extent.width)
+ .height(extent.height)
+ .layers(extent.depth);
+
+ let result = self.shared.raw.create_framebuffer(&info, None);
+
+ match result {
+ Ok(raw) => Ok(n::Framebuffer {
+ raw,
+ owned: match framebuffers_ptr {
+ Some(fbo_ptr) => {
+ fbo_ptr.lock().unwrap().framebuffers.push(raw);
+ false
+ }
+ None => true,
+ },
+ }),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn create_shader_module(
+ &self,
+ spirv_data: &[u32],
+ ) -> Result<n::ShaderModule, d::ShaderError> {
+ let info = vk::ShaderModuleCreateInfo::builder()
+ .flags(vk::ShaderModuleCreateFlags::empty())
+ .code(spirv_data);
+
+ let module = self.shared.raw.create_shader_module(&info, None);
+
+ match module {
+ Ok(raw) => Ok(n::ShaderModule { raw }),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ Err(_) => {
+ Err(d::ShaderError::CompilationFailed(String::new())) // TODO
+ }
+ }
+ }
+
+ unsafe fn create_sampler(
+ &self,
+ desc: &image::SamplerDesc,
+ ) -> Result<n::Sampler, d::AllocationError> {
+ use hal::pso::Comparison;
+
+ let (anisotropy_enable, max_anisotropy) =
+ desc.anisotropy_clamp.map_or((false, 1.0), |aniso| {
+ if self.shared.features.contains(Features::SAMPLER_ANISOTROPY) {
+ (true, aniso as f32)
+ } else {
+ warn!(
+ "Anisotropy({}) was requested on a device with disabled feature",
+ aniso
+ );
+ (false, 1.0)
+ }
+ });
+ let info = vk::SamplerCreateInfo::builder()
+ .flags(vk::SamplerCreateFlags::empty())
+ .mag_filter(conv::map_filter(desc.mag_filter))
+ .min_filter(conv::map_filter(desc.min_filter))
+ .mipmap_mode(conv::map_mip_filter(desc.mip_filter))
+ .address_mode_u(conv::map_wrap(desc.wrap_mode.0))
+ .address_mode_v(conv::map_wrap(desc.wrap_mode.1))
+ .address_mode_w(conv::map_wrap(desc.wrap_mode.2))
+ .mip_lod_bias(desc.lod_bias.0)
+ .anisotropy_enable(anisotropy_enable)
+ .max_anisotropy(max_anisotropy)
+ .compare_enable(desc.comparison.is_some())
+ .compare_op(conv::map_comparison(
+ desc.comparison.unwrap_or(Comparison::Never),
+ ))
+ .min_lod(desc.lod_range.start.0)
+ .max_lod(desc.lod_range.end.0)
+ .border_color(conv::map_border_color(desc.border))
+ .unnormalized_coordinates(!desc.normalized);
+
+ let result = self.shared.raw.create_sampler(&info, None);
+
+ match result {
+ Ok(sampler) => Ok(n::Sampler(sampler)),
+ Err(vk::Result::ERROR_TOO_MANY_OBJECTS) => Err(d::AllocationError::TooManyObjects),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn create_buffer(
+ &self,
+ size: u64,
+ usage: buffer::Usage,
+ ) -> Result<n::Buffer, buffer::CreationError> {
+ let info = vk::BufferCreateInfo::builder()
+ .flags(vk::BufferCreateFlags::empty()) // TODO:
+ .size(size)
+ .usage(conv::map_buffer_usage(usage))
+ .sharing_mode(vk::SharingMode::EXCLUSIVE); // TODO:
+
+ let result = self.shared.raw.create_buffer(&info, None);
+
+ match result {
+ Ok(raw) => Ok(n::Buffer { raw }),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn get_buffer_requirements(&self, buffer: &n::Buffer) -> Requirements {
+ let req = self.shared.raw.get_buffer_memory_requirements(buffer.raw);
+
+ Requirements {
+ size: req.size,
+ alignment: req.alignment,
+ type_mask: self.filter_memory_requirements(req.memory_type_bits),
+ }
+ }
+
+ unsafe fn bind_buffer_memory(
+ &self,
+ memory: &n::Memory,
+ offset: u64,
+ buffer: &mut n::Buffer,
+ ) -> Result<(), d::BindError> {
+ let result = self
+ .shared
+ .raw
+ .bind_buffer_memory(buffer.raw, memory.raw, offset);
+
+ match result {
+ Ok(()) => Ok(()),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn create_buffer_view(
+ &self,
+ buffer: &n::Buffer,
+ format: Option<format::Format>,
+ range: buffer::SubRange,
+ ) -> Result<n::BufferView, buffer::ViewCreationError> {
+ let info = vk::BufferViewCreateInfo::builder()
+ .flags(vk::BufferViewCreateFlags::empty())
+ .buffer(buffer.raw)
+ .format(format.map_or(vk::Format::UNDEFINED, conv::map_format))
+ .offset(range.offset)
+ .range(range.size.unwrap_or(vk::WHOLE_SIZE));
+
+ let result = self.shared.raw.create_buffer_view(&info, None);
+
+ match result {
+ Ok(raw) => Ok(n::BufferView { raw }),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn create_image(
+ &self,
+ kind: image::Kind,
+ mip_levels: image::Level,
+ format: format::Format,
+ tiling: image::Tiling,
+ usage: image::Usage,
+ view_caps: image::ViewCapabilities,
+ ) -> Result<n::Image, image::CreationError> {
+ let flags = conv::map_view_capabilities(view_caps);
+ let extent = conv::map_extent(kind.extent());
+ let array_layers = kind.num_layers();
+ let samples = kind.num_samples() as u32;
+ let image_type = match kind {
+ image::Kind::D1(..) => vk::ImageType::TYPE_1D,
+ image::Kind::D2(..) => vk::ImageType::TYPE_2D,
+ image::Kind::D3(..) => vk::ImageType::TYPE_3D,
+ };
+
+ let info = vk::ImageCreateInfo::builder()
+ .flags(flags)
+ .image_type(image_type)
+ .format(conv::map_format(format))
+ .extent(extent.clone())
+ .mip_levels(mip_levels as u32)
+ .array_layers(array_layers as u32)
+ .samples(vk::SampleCountFlags::from_raw(
+ samples & vk::SampleCountFlags::all().as_raw(),
+ ))
+ .tiling(conv::map_tiling(tiling))
+ .usage(conv::map_image_usage(usage))
+ .sharing_mode(vk::SharingMode::EXCLUSIVE) // TODO:
+ .initial_layout(vk::ImageLayout::UNDEFINED);
+
+ let result = self.shared.raw.create_image(&info, None);
+
+ match result {
+ Ok(raw) => Ok(n::Image {
+ raw,
+ ty: image_type,
+ flags,
+ extent,
+ }),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn get_image_requirements(&self, image: &n::Image) -> Requirements {
+ let req = self.shared.raw.get_image_memory_requirements(image.raw);
+
+ Requirements {
+ size: req.size,
+ alignment: req.alignment,
+ type_mask: self.filter_memory_requirements(req.memory_type_bits),
+ }
+ }
+
+ unsafe fn get_image_subresource_footprint(
+ &self,
+ image: &n::Image,
+ subresource: image::Subresource,
+ ) -> image::SubresourceFootprint {
+ let sub = conv::map_subresource(&subresource);
+ let layout = self.shared.raw.get_image_subresource_layout(image.raw, sub);
+
+ image::SubresourceFootprint {
+ slice: layout.offset..layout.offset + layout.size,
+ row_pitch: layout.row_pitch,
+ array_pitch: layout.array_pitch,
+ depth_pitch: layout.depth_pitch,
+ }
+ }
+
+ unsafe fn bind_image_memory(
+ &self,
+ memory: &n::Memory,
+ offset: u64,
+ image: &mut n::Image,
+ ) -> Result<(), d::BindError> {
+ // TODO: error handling
+ // TODO: check required type
+ let result = self
+ .shared
+ .raw
+ .bind_image_memory(image.raw, memory.raw, offset);
+
+ match result {
+ Ok(()) => Ok(()),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn create_image_view(
+ &self,
+ image: &n::Image,
+ kind: image::ViewKind,
+ format: format::Format,
+ swizzle: format::Swizzle,
+ range: image::SubresourceRange,
+ ) -> Result<n::ImageView, image::ViewCreationError> {
+ let is_cube = image
+ .flags
+ .intersects(vk::ImageCreateFlags::CUBE_COMPATIBLE);
+ let info = vk::ImageViewCreateInfo::builder()
+ .flags(vk::ImageViewCreateFlags::empty())
+ .image(image.raw)
+ .view_type(match conv::map_view_kind(kind, image.ty, is_cube) {
+ Some(ty) => ty,
+ None => return Err(image::ViewCreationError::BadKind(kind)),
+ })
+ .format(conv::map_format(format))
+ .components(conv::map_swizzle(swizzle))
+ .subresource_range(conv::map_subresource_range(&range));
+
+ let result = self.shared.raw.create_image_view(&info, None);
+
+ match result {
+ Ok(view) => Ok(n::ImageView {
+ image: image.raw,
+ view,
+ range,
+ owner: n::ImageViewOwner::User,
+ }),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn create_descriptor_pool<T>(
+ &self,
+ max_sets: usize,
+ descriptor_pools: T,
+ flags: pso::DescriptorPoolCreateFlags,
+ ) -> Result<n::DescriptorPool, d::OutOfMemory>
+ where
+ T: IntoIterator,
+ T::Item: Borrow<pso::DescriptorRangeDesc>,
+ T::IntoIter: ExactSizeIterator,
+ {
+ let pools = descriptor_pools.into_iter().map(|pool| {
+ let pool = pool.borrow();
+ vk::DescriptorPoolSize {
+ ty: conv::map_descriptor_type(pool.ty),
+ descriptor_count: pool.count as u32,
+ }
+ });
+
+ let result = inplace_it::inplace_or_alloc_array(pools.len(), |uninit_guard| {
+ let pools = uninit_guard.init_with_iter(pools);
+
+ let info = vk::DescriptorPoolCreateInfo::builder()
+ .flags(conv::map_descriptor_pool_create_flags(flags))
+ .max_sets(max_sets as u32)
+ .pool_sizes(&pools);
+
+ self.shared.raw.create_descriptor_pool(&info, None)
+ });
+
+ match result {
+ Ok(pool) => Ok(n::DescriptorPool {
+ raw: pool,
+ device: self.shared.clone(),
+ set_free_vec: Vec::new(),
+ }),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn create_descriptor_set_layout<I, J>(
+ &self,
+ binding_iter: I,
+ immutable_sampler_iter: J,
+ ) -> Result<n::DescriptorSetLayout, d::OutOfMemory>
+ where
+ I: IntoIterator,
+ I::Item: Borrow<pso::DescriptorSetLayoutBinding>,
+ J: IntoIterator,
+ J::Item: Borrow<n::Sampler>,
+ J::IntoIter: ExactSizeIterator,
+ {
+ let immutable_samplers = immutable_sampler_iter.into_iter().map(|is| is.borrow().0);
+ let mut sampler_offset = 0;
+
+ let mut bindings = binding_iter
+ .into_iter()
+ .map(|b| b.borrow().clone())
+ .collect::<Vec<_>>();
+ // Sorting will come handy in `write_descriptor_sets`.
+ bindings.sort_by_key(|b| b.binding);
+
+ let result = inplace_it::inplace_or_alloc_array(immutable_samplers.len(), |uninit_guard| {
+ let immutable_samplers = uninit_guard.init_with_iter(immutable_samplers);
+
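+            // Bindings with immutable samplers consume `count` entries from
+            // the flattened sampler array, tracked via `sampler_offset`.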
+ let raw_bindings = bindings.iter().map(|b| vk::DescriptorSetLayoutBinding {
+ binding: b.binding,
+ descriptor_type: conv::map_descriptor_type(b.ty),
+ descriptor_count: b.count as _,
+ stage_flags: conv::map_stage_flags(b.stage_flags),
+ p_immutable_samplers: if b.immutable_samplers {
+ let slice = &immutable_samplers[sampler_offset..];
+ sampler_offset += b.count;
+ slice.as_ptr()
+ } else {
+ ptr::null()
+ },
+ });
+
+ inplace_it::inplace_or_alloc_array(raw_bindings.len(), |uninit_guard| {
+ let raw_bindings = uninit_guard.init_with_iter(raw_bindings);
+
+                // TODO: raw_bindings doesn't implement fmt::Debug
+ // debug!("create_descriptor_set_layout {:?}", raw_bindings);
+
+ let info = vk::DescriptorSetLayoutCreateInfo::builder()
+ .flags(vk::DescriptorSetLayoutCreateFlags::empty())
+ .bindings(&raw_bindings);
+
+ self.shared.raw.create_descriptor_set_layout(&info, None)
+ })
+ });
+
+ match result {
+ Ok(layout) => Ok(n::DescriptorSetLayout {
+ raw: layout,
+ bindings: Arc::new(bindings),
+ }),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn write_descriptor_sets<'a, I, J>(&self, write_iter: I)
+ where
+ I: IntoIterator<Item = pso::DescriptorSetWrite<'a, B, J>>,
+ J: IntoIterator,
+ J::Item: Borrow<pso::Descriptor<'a, B>>,
+ {
+ let mut raw_writes = Vec::<vk::WriteDescriptorSet>::new();
+ let mut image_infos = Vec::new();
+ let mut buffer_infos = Vec::new();
+ let mut texel_buffer_views = Vec::new();
+
+ for sw in write_iter {
+            // gfx-hal allows the type and stages to differ between descriptors
+            // within a single write, while Vulkan requires them to be the same.
+ let mut last_type = vk::DescriptorType::SAMPLER;
+ let mut last_stages = pso::ShaderStageFlags::empty();
+
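+            // `set.bindings` was sorted by binding index in
+            // `create_descriptor_set_layout`, which makes this binary search
+            // valid.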
+ let mut binding_pos = sw
+ .set
+ .bindings
+ .binary_search_by_key(&sw.binding, |b| b.binding)
+ .expect("Descriptor set writes don't match the set layout!");
+ let mut array_offset = sw.array_offset;
+
+ for descriptor in sw.descriptors {
+ let layout_binding = &sw.set.bindings[binding_pos];
+ array_offset += 1;
+ if array_offset == layout_binding.count {
+ array_offset = 0;
+ binding_pos += 1;
+ }
+
+ let descriptor_type = conv::map_descriptor_type(layout_binding.ty);
+ if descriptor_type == last_type && layout_binding.stage_flags == last_stages {
+ raw_writes.last_mut().unwrap().descriptor_count += 1;
+ } else {
+ last_type = descriptor_type;
+ last_stages = layout_binding.stage_flags;
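+                    // Stash the starting indices into the info arrays in the
+                    // pointer fields for now; they are patched into real
+                    // pointers below, once the arrays stop reallocating.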
+ raw_writes.push(vk::WriteDescriptorSet {
+ s_type: vk::StructureType::WRITE_DESCRIPTOR_SET,
+ p_next: ptr::null(),
+ dst_set: sw.set.raw,
+ dst_binding: layout_binding.binding,
+ dst_array_element: if layout_binding.binding == sw.binding {
+ sw.array_offset as _
+ } else {
+ 0
+ },
+ descriptor_count: 1,
+ descriptor_type,
+ p_image_info: image_infos.len() as _,
+ p_buffer_info: buffer_infos.len() as _,
+ p_texel_buffer_view: texel_buffer_views.len() as _,
+ });
+ }
+
+ match *descriptor.borrow() {
+ pso::Descriptor::Sampler(sampler) => {
+ image_infos.push(
+ vk::DescriptorImageInfo::builder()
+ .sampler(sampler.0)
+ .image_view(vk::ImageView::null())
+ .image_layout(vk::ImageLayout::GENERAL)
+ .build(),
+ );
+ }
+ pso::Descriptor::Image(view, layout) => {
+ image_infos.push(
+ vk::DescriptorImageInfo::builder()
+ .sampler(vk::Sampler::null())
+ .image_view(view.view)
+ .image_layout(conv::map_image_layout(layout))
+ .build(),
+ );
+ }
+ pso::Descriptor::CombinedImageSampler(view, layout, sampler) => {
+ image_infos.push(
+ vk::DescriptorImageInfo::builder()
+ .sampler(sampler.0)
+ .image_view(view.view)
+ .image_layout(conv::map_image_layout(layout))
+ .build(),
+ );
+ }
+ pso::Descriptor::Buffer(buffer, ref sub) => {
+ buffer_infos.push(
+ vk::DescriptorBufferInfo::builder()
+ .buffer(buffer.raw)
+ .offset(sub.offset)
+ .range(sub.size.unwrap_or(vk::WHOLE_SIZE))
+ .build(),
+ );
+ }
+ pso::Descriptor::TexelBuffer(view) => {
+ texel_buffer_views.push(view.raw);
+ }
+ }
+ }
+ }
+
+ // Patch the pointers now that we have all the storage allocated.
+ for raw in &mut raw_writes {
+ use crate::vk::DescriptorType as Dt;
+ match raw.descriptor_type {
+ Dt::SAMPLER
+ | Dt::SAMPLED_IMAGE
+ | Dt::STORAGE_IMAGE
+ | Dt::COMBINED_IMAGE_SAMPLER
+ | Dt::INPUT_ATTACHMENT => {
+ raw.p_buffer_info = ptr::null();
+ raw.p_texel_buffer_view = ptr::null();
+ raw.p_image_info = image_infos[raw.p_image_info as usize..].as_ptr();
+ }
+ Dt::UNIFORM_TEXEL_BUFFER | Dt::STORAGE_TEXEL_BUFFER => {
+ raw.p_buffer_info = ptr::null();
+ raw.p_image_info = ptr::null();
+ raw.p_texel_buffer_view =
+ texel_buffer_views[raw.p_texel_buffer_view as usize..].as_ptr();
+ }
+ Dt::UNIFORM_BUFFER
+ | Dt::STORAGE_BUFFER
+ | Dt::STORAGE_BUFFER_DYNAMIC
+ | Dt::UNIFORM_BUFFER_DYNAMIC => {
+ raw.p_image_info = ptr::null();
+ raw.p_texel_buffer_view = ptr::null();
+ raw.p_buffer_info = buffer_infos[raw.p_buffer_info as usize..].as_ptr();
+ }
+ _ => panic!("unknown descriptor type"),
+ }
+ }
+
+ self.shared.raw.update_descriptor_sets(&raw_writes, &[]);
+ }
+
+ unsafe fn copy_descriptor_sets<'a, I>(&self, copies: I)
+ where
+ I: IntoIterator,
+ I::Item: Borrow<pso::DescriptorSetCopy<'a, B>>,
+ I::IntoIter: ExactSizeIterator,
+ {
+ let copies = copies.into_iter().map(|copy| {
+ let c = copy.borrow();
+ vk::CopyDescriptorSet::builder()
+ .src_set(c.src_set.raw)
+ .src_binding(c.src_binding as u32)
+ .src_array_element(c.src_array_offset as u32)
+ .dst_set(c.dst_set.raw)
+ .dst_binding(c.dst_binding as u32)
+ .dst_array_element(c.dst_array_offset as u32)
+ .descriptor_count(c.count as u32)
+ .build()
+ });
+
+ inplace_it::inplace_or_alloc_array(copies.len(), |uninit_guard| {
+ let copies = uninit_guard.init_with_iter(copies);
+
+ self.shared.raw.update_descriptor_sets(&[], &copies);
+ });
+ }
+
+ unsafe fn map_memory(
+ &self,
+ memory: &n::Memory,
+ segment: Segment,
+ ) -> Result<*mut u8, d::MapError> {
+ let result = self.shared.raw.map_memory(
+ memory.raw,
+ segment.offset,
+ segment.size.unwrap_or(vk::WHOLE_SIZE),
+ vk::MemoryMapFlags::empty(),
+ );
+
+ match result {
+ Ok(ptr) => Ok(ptr as *mut _),
+ Err(vk::Result::ERROR_MEMORY_MAP_FAILED) => Err(d::MapError::MappingFailed),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn unmap_memory(&self, memory: &n::Memory) {
+ self.shared.raw.unmap_memory(memory.raw)
+ }
+
+ unsafe fn flush_mapped_memory_ranges<'a, I>(&self, ranges: I) -> Result<(), d::OutOfMemory>
+ where
+ I: IntoIterator,
+ I::Item: Borrow<(&'a n::Memory, Segment)>,
+ {
+ let ranges = conv::map_memory_ranges(ranges);
+ let result = self.shared.raw.flush_mapped_memory_ranges(&ranges);
+
+ match result {
+ Ok(()) => Ok(()),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn invalidate_mapped_memory_ranges<'a, I>(&self, ranges: I) -> Result<(), d::OutOfMemory>
+ where
+ I: IntoIterator,
+ I::Item: Borrow<(&'a n::Memory, Segment)>,
+ {
+ let ranges = conv::map_memory_ranges(ranges);
+ let result = self.shared.raw.invalidate_mapped_memory_ranges(&ranges);
+
+ match result {
+ Ok(()) => Ok(()),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device),
+ _ => unreachable!(),
+ }
+ }
+
+ fn create_semaphore(&self) -> Result<n::Semaphore, d::OutOfMemory> {
+ let info = vk::SemaphoreCreateInfo::builder().flags(vk::SemaphoreCreateFlags::empty());
+
+ let result = unsafe { self.shared.raw.create_semaphore(&info, None) };
+
+ match result {
+ Ok(semaphore) => Ok(n::Semaphore(semaphore)),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ fn create_fence(&self, signaled: bool) -> Result<n::Fence, d::OutOfMemory> {
+ let info = vk::FenceCreateInfo::builder().flags(if signaled {
+ vk::FenceCreateFlags::SIGNALED
+ } else {
+ vk::FenceCreateFlags::empty()
+ });
+
+ let result = unsafe { self.shared.raw.create_fence(&info, None) };
+
+ match result {
+ Ok(fence) => Ok(n::Fence(fence)),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn reset_fences<I>(&self, fences: I) -> Result<(), d::OutOfMemory>
+ where
+ I: IntoIterator,
+ I::Item: Borrow<n::Fence>,
+ I::IntoIter: ExactSizeIterator,
+ {
+ let fences = fences.into_iter().map(|fence| fence.borrow().0);
+
+ let result = inplace_it::inplace_or_alloc_array(fences.len(), |uninit_guard| {
+ let fences = uninit_guard.init_with_iter(fences);
+ self.shared.raw.reset_fences(&fences)
+ });
+
+ match result {
+ Ok(()) => Ok(()),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn wait_for_fences<I>(
+ &self,
+ fences: I,
+ wait: d::WaitFor,
+ timeout_ns: u64,
+ ) -> Result<bool, d::OomOrDeviceLost>
+ where
+ I: IntoIterator,
+ I::Item: Borrow<n::Fence>,
+ I::IntoIter: ExactSizeIterator,
+ {
+ let fences = fences.into_iter().map(|fence| fence.borrow().0);
+
+ let all = match wait {
+ d::WaitFor::Any => false,
+ d::WaitFor::All => true,
+ };
+
+ let result = inplace_it::inplace_or_alloc_array(fences.len(), |uninit_guard| {
+ let fences = uninit_guard.init_with_iter(fences);
+ self.shared.raw.wait_for_fences(&fences, all, timeout_ns)
+ });
+
+ match result {
+ Ok(()) => Ok(true),
+ Err(vk::Result::TIMEOUT) => Ok(false),
+ Err(vk::Result::ERROR_DEVICE_LOST) => Err(d::DeviceLost.into()),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn get_fence_status(&self, fence: &n::Fence) -> Result<bool, d::DeviceLost> {
+ let result = self.shared.raw.get_fence_status(fence.0);
+ match result {
+ Ok(ok) => Ok(ok),
+            Err(vk::Result::NOT_READY) => Ok(false), // TODO: shouldn't be needed
+ Err(vk::Result::ERROR_DEVICE_LOST) => Err(d::DeviceLost),
+ _ => unreachable!(),
+ }
+ }
+
+ fn create_event(&self) -> Result<n::Event, d::OutOfMemory> {
+ let info = vk::EventCreateInfo::builder().flags(vk::EventCreateFlags::empty());
+
+ let result = unsafe { self.shared.raw.create_event(&info, None) };
+ match result {
+ Ok(e) => Ok(n::Event(e)),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn get_event_status(&self, event: &n::Event) -> Result<bool, d::OomOrDeviceLost> {
+ let result = self.shared.raw.get_event_status(event.0);
+ match result {
+ Ok(b) => Ok(b),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ Err(vk::Result::ERROR_DEVICE_LOST) => Err(d::DeviceLost.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn set_event(&self, event: &n::Event) -> Result<(), d::OutOfMemory> {
+ let result = self.shared.raw.set_event(event.0);
+ match result {
+ Ok(()) => Ok(()),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn reset_event(&self, event: &n::Event) -> Result<(), d::OutOfMemory> {
+ let result = self.shared.raw.reset_event(event.0);
+ match result {
+ Ok(()) => Ok(()),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn free_memory(&self, memory: n::Memory) {
+ self.shared.raw.free_memory(memory.raw, None);
+ }
+
+ unsafe fn create_query_pool(
+ &self,
+ ty: query::Type,
+ query_count: query::Id,
+ ) -> Result<n::QueryPool, query::CreationError> {
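+ // Pipeline-statistics flags are only meaningful for PIPELINE_STATISTICS pools; the other query types pass an empty mask.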
+ let (query_type, pipeline_statistics) = match ty {
+ query::Type::Occlusion => (
+ vk::QueryType::OCCLUSION,
+ vk::QueryPipelineStatisticFlags::empty(),
+ ),
+ query::Type::PipelineStatistics(statistics) => (
+ vk::QueryType::PIPELINE_STATISTICS,
+ conv::map_pipeline_statistics(statistics),
+ ),
+ query::Type::Timestamp => (
+ vk::QueryType::TIMESTAMP,
+ vk::QueryPipelineStatisticFlags::empty(),
+ ),
+ };
+
+ let info = vk::QueryPoolCreateInfo::builder()
+ .flags(vk::QueryPoolCreateFlags::empty())
+ .query_type(query_type)
+ .query_count(query_count)
+ .pipeline_statistics(pipeline_statistics);
+
+ let result = self.shared.raw.create_query_pool(&info, None);
+
+ match result {
+ Ok(pool) => Ok(n::QueryPool(pool)),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host.into()),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn get_query_pool_results(
+ &self,
+ pool: &n::QueryPool,
+ queries: Range<query::Id>,
+ data: &mut [u8],
+ stride: buffer::Offset,
+ flags: query::ResultFlags,
+ ) -> Result<bool, d::OomOrDeviceLost> {
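+ // Call the raw vkGetQueryPoolResults function pointer so the caller-provided byte buffer and stride are passed through untouched.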
+ let result = self.shared.raw.fp_v1_0().get_query_pool_results(
+ self.shared.raw.handle(),
+ pool.0,
+ queries.start,
+ queries.end - queries.start,
+ data.len(),
+ data.as_mut_ptr() as *mut _,
+ stride,
+ conv::map_query_result_flags(flags),
+ );
+
+ match result {
+ vk::Result::SUCCESS => Ok(true),
+ vk::Result::NOT_READY => Ok(false),
+ vk::Result::ERROR_DEVICE_LOST => Err(d::DeviceLost.into()),
+ vk::Result::ERROR_OUT_OF_HOST_MEMORY => Err(d::OutOfMemory::Host.into()),
+ vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => Err(d::OutOfMemory::Device.into()),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn destroy_query_pool(&self, pool: n::QueryPool) {
+ self.shared.raw.destroy_query_pool(pool.0, None);
+ }
+
+ unsafe fn destroy_shader_module(&self, module: n::ShaderModule) {
+ self.shared.raw.destroy_shader_module(module.raw, None);
+ }
+
+ unsafe fn destroy_render_pass(&self, rp: n::RenderPass) {
+ self.shared.raw.destroy_render_pass(rp.raw, None);
+ }
+
+ unsafe fn destroy_pipeline_layout(&self, pl: n::PipelineLayout) {
+ self.shared.raw.destroy_pipeline_layout(pl.raw, None);
+ }
+
+ unsafe fn destroy_graphics_pipeline(&self, pipeline: n::GraphicsPipeline) {
+ self.shared.raw.destroy_pipeline(pipeline.0, None);
+ }
+
+ unsafe fn destroy_compute_pipeline(&self, pipeline: n::ComputePipeline) {
+ self.shared.raw.destroy_pipeline(pipeline.0, None);
+ }
+
+ unsafe fn destroy_framebuffer(&self, fb: n::Framebuffer) {
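+ // Framebuffers that came from a surface's internal cache are not owned here and are cleaned up with the surface; only destroy owned ones.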
+ if fb.owned {
+ self.shared.raw.destroy_framebuffer(fb.raw, None);
+ }
+ }
+
+ unsafe fn destroy_buffer(&self, buffer: n::Buffer) {
+ self.shared.raw.destroy_buffer(buffer.raw, None);
+ }
+
+ unsafe fn destroy_buffer_view(&self, view: n::BufferView) {
+ self.shared.raw.destroy_buffer_view(view.raw, None);
+ }
+
+ unsafe fn destroy_image(&self, image: n::Image) {
+ self.shared.raw.destroy_image(image.raw, None);
+ }
+
+ unsafe fn destroy_image_view(&self, view: n::ImageView) {
+ match view.owner {
+ n::ImageViewOwner::User => {
+ self.shared.raw.destroy_image_view(view.view, None);
+ }
+ n::ImageViewOwner::Surface(_fbo_cache) => {
+ //TODO: mark as deleted?
+ }
+ }
+ }
+
+ unsafe fn destroy_sampler(&self, sampler: n::Sampler) {
+ self.shared.raw.destroy_sampler(sampler.0, None);
+ }
+
+ unsafe fn destroy_descriptor_pool(&self, pool: n::DescriptorPool) {
+ self.shared.raw.destroy_descriptor_pool(pool.raw, None);
+ }
+
+ unsafe fn destroy_descriptor_set_layout(&self, layout: n::DescriptorSetLayout) {
+ self.shared
+ .raw
+ .destroy_descriptor_set_layout(layout.raw, None);
+ }
+
+ unsafe fn destroy_fence(&self, fence: n::Fence) {
+ self.shared.raw.destroy_fence(fence.0, None);
+ }
+
+ unsafe fn destroy_semaphore(&self, semaphore: n::Semaphore) {
+ self.shared.raw.destroy_semaphore(semaphore.0, None);
+ }
+
+ unsafe fn destroy_event(&self, event: n::Event) {
+ self.shared.raw.destroy_event(event.0, None);
+ }
+
+ fn wait_idle(&self) -> Result<(), d::OutOfMemory> {
+ match unsafe { self.shared.raw.device_wait_idle() } {
+ Ok(()) => Ok(()),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(d::OutOfMemory::Host),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(d::OutOfMemory::Device),
+ _ => unreachable!(),
+ }
+ }
+
+ unsafe fn set_image_name(&self, image: &mut n::Image, name: &str) {
+ self.set_object_name(vk::ObjectType::IMAGE, image.raw.as_raw(), name)
+ }
+
+ unsafe fn set_buffer_name(&self, buffer: &mut n::Buffer, name: &str) {
+ self.set_object_name(vk::ObjectType::BUFFER, buffer.raw.as_raw(), name)
+ }
+
+ unsafe fn set_command_buffer_name(&self, command_buffer: &mut cmd::CommandBuffer, name: &str) {
+ self.set_object_name(
+ vk::ObjectType::COMMAND_BUFFER,
+ command_buffer.raw.as_raw(),
+ name,
+ )
+ }
+
+ unsafe fn set_semaphore_name(&self, semaphore: &mut n::Semaphore, name: &str) {
+ self.set_object_name(vk::ObjectType::SEMAPHORE, semaphore.0.as_raw(), name)
+ }
+
+ unsafe fn set_fence_name(&self, fence: &mut n::Fence, name: &str) {
+ self.set_object_name(vk::ObjectType::FENCE, fence.0.as_raw(), name)
+ }
+
+ unsafe fn set_framebuffer_name(&self, framebuffer: &mut n::Framebuffer, name: &str) {
+ self.set_object_name(vk::ObjectType::FRAMEBUFFER, framebuffer.raw.as_raw(), name)
+ }
+
+ unsafe fn set_render_pass_name(&self, render_pass: &mut n::RenderPass, name: &str) {
+ self.set_object_name(vk::ObjectType::RENDER_PASS, render_pass.raw.as_raw(), name)
+ }
+
+ unsafe fn set_descriptor_set_name(&self, descriptor_set: &mut n::DescriptorSet, name: &str) {
+ self.set_object_name(
+ vk::ObjectType::DESCRIPTOR_SET,
+ descriptor_set.raw.as_raw(),
+ name,
+ )
+ }
+
+ unsafe fn set_descriptor_set_layout_name(
+ &self,
+ descriptor_set_layout: &mut n::DescriptorSetLayout,
+ name: &str,
+ ) {
+ self.set_object_name(
+ vk::ObjectType::DESCRIPTOR_SET_LAYOUT,
+ descriptor_set_layout.raw.as_raw(),
+ name,
+ )
+ }
+
+ unsafe fn set_pipeline_layout_name(&self, pipeline_layout: &mut n::PipelineLayout, name: &str) {
+ self.set_object_name(
+ vk::ObjectType::PIPELINE_LAYOUT,
+ pipeline_layout.raw.as_raw(),
+ name,
+ )
+ }
+
+ unsafe fn set_compute_pipeline_name(
+ &self,
+ compute_pipeline: &mut n::ComputePipeline,
+ name: &str,
+ ) {
+ self.set_object_name(vk::ObjectType::PIPELINE, compute_pipeline.0.as_raw(), name)
+ }
+
+ unsafe fn set_graphics_pipeline_name(
+ &self,
+ graphics_pipeline: &mut n::GraphicsPipeline,
+ name: &str,
+ ) {
+ self.set_object_name(vk::ObjectType::PIPELINE, graphics_pipeline.0.as_raw(), name)
+ }
+}
+
+impl Device {
+ /// We only expose the subset of Ash memory types whose flags we understand.
+ /// This function filters an Ash memory-type mask down to the corresponding HAL mask.
+ fn filter_memory_requirements(&self, ash_mask: u32) -> u32 {
+ let mut hal_index = 0;
+ let mut mask = 0;
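+ // Walk the 32 possible Vulkan memory types, compacting the bits of known types into a dense HAL mask.
+ // Example: with valid_ash_memory_types = 0b1011, an ash_mask of 0b1010 yields the HAL mask 0b110.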
+ for ash_index in 0..32 {
+ if self.valid_ash_memory_types & (1 << ash_index) != 0 {
+ if ash_mask & (1 << ash_index) != 0 {
+ mask |= 1 << hal_index;
+ }
+ hal_index += 1;
+ }
+ }
+ mask
+ }
+
+ fn get_ash_memory_type_index(&self, hal_type: MemoryTypeId) -> u32 {
+ let mut hal_count = hal_type.0;
+ for ash_index in 0..32 {
+ if self.valid_ash_memory_types & (1 << ash_index) != 0 {
+ if hal_count == 0 {
+ return ash_index;
+ }
+ hal_count -= 1;
+ }
+ }
+ panic!("Unable to get Ash memory type for {:?}", hal_type);
+ }
+
+ unsafe fn set_object_name(&self, object_type: vk::ObjectType, object_handle: u64, name: &str) {
+ let instance = &self.shared.instance;
+ if let Some(DebugMessenger::Utils(ref debug_utils_ext, _)) = instance.debug_messenger {
+ // Keep variables outside the if-else block to ensure they do not
+ // go out of scope while we hold a pointer to them
+ let mut buffer: [u8; 64] = [0u8; 64];
+ let buffer_vec: Vec<u8>;
+
+ // Append a null terminator to the string
+ let name_cstr = if name.len() < 64 {
+ // Common case, string is very small. Allocate a copy on the stack.
+ std::ptr::copy_nonoverlapping(name.as_ptr(), buffer.as_mut_ptr(), name.len());
+ // Add null terminator
+ buffer[name.len()] = 0;
+ CStr::from_bytes_with_nul(&buffer[..name.len() + 1]).unwrap()
+ } else {
+ // Less common case, the string is large.
+ // This requires a heap allocation.
+ buffer_vec = name
+ .as_bytes()
+ .iter()
+ .cloned()
+ .chain(std::iter::once(0))
+ .collect::<Vec<u8>>();
+ CStr::from_bytes_with_nul(&buffer_vec).unwrap()
+ };
+ let _result = debug_utils_ext.debug_utils_set_object_name(
+ self.shared.raw.handle(),
+ &vk::DebugUtilsObjectNameInfoEXT::builder()
+ .object_type(object_type)
+ .object_handle(object_handle)
+ .object_name(name_cstr),
+ );
+ }
+ }
+
+ pub(crate) unsafe fn create_swapchain(
+ &self,
+ surface: &mut w::Surface,
+ config: SwapchainConfig,
+ provided_old_swapchain: Option<w::Swapchain>,
+ ) -> Result<(w::Swapchain, Vec<n::Image>), hal::window::CreationError> {
+ let functor = khr::Swapchain::new(&surface.raw.instance.inner, &self.shared.raw);
+
+ let old_swapchain = match provided_old_swapchain {
+ Some(osc) => osc.raw,
+ None => vk::SwapchainKHR::null(),
+ };
+
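+ // A few fields are fixed here rather than taken from SwapchainConfig: sRGB nonlinear color space,
+ // exclusive sharing (a single queue family), and identity pre-transform.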
+ let info = vk::SwapchainCreateInfoKHR::builder()
+ .flags(vk::SwapchainCreateFlagsKHR::empty())
+ .surface(surface.raw.handle)
+ .min_image_count(config.image_count)
+ .image_format(conv::map_format(config.format))
+ .image_color_space(vk::ColorSpaceKHR::SRGB_NONLINEAR)
+ .image_extent(vk::Extent2D {
+ width: config.extent.width,
+ height: config.extent.height,
+ })
+ .image_array_layers(1)
+ .image_usage(conv::map_image_usage(config.image_usage))
+ .image_sharing_mode(vk::SharingMode::EXCLUSIVE)
+ .pre_transform(vk::SurfaceTransformFlagsKHR::IDENTITY)
+ .composite_alpha(conv::map_composite_alpha_mode(config.composite_alpha_mode))
+ .present_mode(conv::map_present_mode(config.present_mode))
+ .clipped(true)
+ .old_swapchain(old_swapchain);
+
+ let result = functor.create_swapchain(&info, None);
+
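+ // The old swapchain only needs to outlive the create call; release it now whether or not creation succeeded.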
+ if old_swapchain != vk::SwapchainKHR::null() {
+ functor.destroy_swapchain(old_swapchain, None)
+ }
+
+ let swapchain_raw = match result {
+ Ok(swapchain_raw) => swapchain_raw,
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => {
+ return Err(d::OutOfMemory::Host.into());
+ }
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => {
+ return Err(d::OutOfMemory::Device.into());
+ }
+ Err(vk::Result::ERROR_DEVICE_LOST) => return Err(d::DeviceLost.into()),
+ Err(vk::Result::ERROR_SURFACE_LOST_KHR) => return Err(d::SurfaceLost.into()),
+ Err(vk::Result::ERROR_NATIVE_WINDOW_IN_USE_KHR) => return Err(d::WindowInUse.into()),
+ _ => unreachable!("Unexpected result - driver bug? {:?}", result),
+ };
+
+ let result = functor.get_swapchain_images(swapchain_raw);
+
+ let backbuffer_images = match result {
+ Ok(backbuffer_images) => backbuffer_images,
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => {
+ return Err(d::OutOfMemory::Host.into());
+ }
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => {
+ return Err(d::OutOfMemory::Device.into());
+ }
+ _ => unreachable!(),
+ };
+
+ let extent = vk::Extent3D {
+ width: config.extent.width,
+ height: config.extent.height,
+ depth: 1,
+ };
+ let swapchain = w::Swapchain {
+ raw: swapchain_raw,
+ functor,
+ vendor_id: self.vendor_id,
+ extent,
+ };
+
+ let images = backbuffer_images
+ .into_iter()
+ .map(|image| n::Image {
+ raw: image,
+ ty: vk::ImageType::TYPE_2D,
+ flags: vk::ImageCreateFlags::empty(),
+ extent,
+ })
+ .collect();
+
+ Ok((swapchain, images))
+ }
+}
+
+#[test]
+fn test_send_sync() {
+ fn foo<T: Send + Sync>() {}
+ foo::<Device>()
+}
diff --git a/third_party/rust/gfx-backend-vulkan/src/info.rs b/third_party/rust/gfx-backend-vulkan/src/info.rs
new file mode 100644
index 0000000000..1e02a6f962
--- /dev/null
+++ b/third_party/rust/gfx-backend-vulkan/src/info.rs
@@ -0,0 +1,5 @@
+pub mod intel {
+ pub const VENDOR: u32 = 0x8086;
+ pub const DEVICE_KABY_LAKE_MASK: u32 = 0x5900;
+ pub const DEVICE_SKY_LAKE_MASK: u32 = 0x1900;
+}
diff --git a/third_party/rust/gfx-backend-vulkan/src/lib.rs b/third_party/rust/gfx-backend-vulkan/src/lib.rs
new file mode 100644
index 0000000000..150fcac866
--- /dev/null
+++ b/third_party/rust/gfx-backend-vulkan/src/lib.rs
@@ -0,0 +1,1575 @@
+#![allow(non_snake_case)]
+
+#[macro_use]
+extern crate log;
+#[macro_use]
+extern crate lazy_static;
+
+#[cfg(target_os = "macos")]
+#[macro_use]
+extern crate objc;
+
+use ash::extensions::{
+ self,
+ ext::{DebugReport, DebugUtils},
+ khr::DrawIndirectCount,
+};
+use ash::extensions::{khr::Swapchain, nv::MeshShader};
+use ash::version::{DeviceV1_0, EntryV1_0, InstanceV1_0};
+use ash::vk;
+#[cfg(not(feature = "use-rtld-next"))]
+use ash::{Entry, LoadingError};
+
+use hal::{
+ adapter,
+ device::{CreationError as DeviceCreationError, DeviceLost, OutOfMemory, SurfaceLost},
+ format, image, memory,
+ pso::{PatchSize, PipelineStage},
+ queue,
+ window::{PresentError, Suboptimal},
+ Features, Hints, Limits,
+};
+
+use std::borrow::{Borrow, Cow};
+use std::ffi::{CStr, CString};
+use std::sync::Arc;
+use std::{fmt, mem, slice};
+
+#[cfg(feature = "use-rtld-next")]
+use ash::{EntryCustom, LoadingError};
+#[cfg(feature = "use-rtld-next")]
+use shared_library::dynamic_library::{DynamicLibrary, SpecialHandles};
+
+mod command;
+mod conv;
+mod device;
+mod info;
+mod native;
+mod pool;
+mod window;
+
+// `CStr` values cannot be constructed in constants yet; until `const fn` support lands we rely on `lazy_static`.
+lazy_static! {
+ static ref LAYERS: Vec<&'static CStr> = if cfg!(debug_assertions) {
+ vec![CStr::from_bytes_with_nul(b"VK_LAYER_KHRONOS_validation\0").unwrap()]
+ } else {
+ vec![]
+ };
+ static ref EXTENSIONS: Vec<&'static CStr> = if cfg!(debug_assertions) {
+ vec![
+ DebugUtils::name(),
+ DebugReport::name(),
+ *KHR_GET_PHYSICAL_DEVICE_PROPERTIES2,
+ ]
+ } else {
+ vec![*KHR_GET_PHYSICAL_DEVICE_PROPERTIES2]
+ };
+ static ref DEVICE_EXTENSIONS: Vec<&'static CStr> = vec![extensions::khr::Swapchain::name()];
+ static ref SURFACE_EXTENSIONS: Vec<&'static CStr> = vec![
+ extensions::khr::Surface::name(),
+ // Platform-specific WSI extensions
+ #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
+ extensions::khr::XlibSurface::name(),
+ #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
+ extensions::khr::XcbSurface::name(),
+ #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
+ extensions::khr::WaylandSurface::name(),
+ #[cfg(target_os = "android")]
+ extensions::khr::AndroidSurface::name(),
+ #[cfg(target_os = "windows")]
+ extensions::khr::Win32Surface::name(),
+ #[cfg(target_os = "macos")]
+ extensions::mvk::MacOSSurface::name(),
+ ];
+ static ref AMD_NEGATIVE_VIEWPORT_HEIGHT: &'static CStr =
+ CStr::from_bytes_with_nul(b"VK_AMD_negative_viewport_height\0").unwrap();
+ static ref KHR_MAINTENANCE1: &'static CStr =
+ CStr::from_bytes_with_nul(b"VK_KHR_maintenance1\0").unwrap();
+ static ref KHR_MAINTENANCE3: &'static CStr =
+ CStr::from_bytes_with_nul(b"VK_KHR_maintenance3\0").unwrap();
+ static ref KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE: &'static CStr =
+ CStr::from_bytes_with_nul(b"VK_KHR_sampler_mirror_clamp_to_edge\0").unwrap();
+ static ref KHR_GET_PHYSICAL_DEVICE_PROPERTIES2: &'static CStr =
+ CStr::from_bytes_with_nul(b"VK_KHR_get_physical_device_properties2\0").unwrap();
+ static ref KHR_DRAW_INDIRECT_COUNT: &'static CStr =
+ CStr::from_bytes_with_nul(b"VK_KHR_draw_indirect_count\0").unwrap();
+ static ref EXT_DESCRIPTOR_INDEXING: &'static CStr =
+ CStr::from_bytes_with_nul(b"VK_EXT_descriptor_indexing\0").unwrap();
+ static ref MESH_SHADER: &'static CStr = MeshShader::name();
+}
+
+#[cfg(not(feature = "use-rtld-next"))]
+lazy_static! {
+ // Entry function pointers
+ pub static ref VK_ENTRY: Result<Entry, LoadingError> = Entry::new();
+}
+
+#[cfg(feature = "use-rtld-next")]
+lazy_static! {
+ // Entry function pointers
+ pub static ref VK_ENTRY: Result<EntryCustom<V1_0, ()>, LoadingError>
+ = EntryCustom::new_custom(
+ || Ok(()),
+ |_, name| unsafe {
+ DynamicLibrary::symbol_special(SpecialHandles::Next, &*name.to_string_lossy())
+ .unwrap_or(ptr::null_mut())
+ }
+ );
+}
+
+pub struct RawInstance {
+ inner: ash::Instance,
+ debug_messenger: Option<DebugMessenger>,
+ get_physical_device_properties: Option<vk::KhrGetPhysicalDeviceProperties2Fn>,
+}
+
+pub enum DebugMessenger {
+ Utils(DebugUtils, vk::DebugUtilsMessengerEXT),
+ Report(DebugReport, vk::DebugReportCallbackEXT),
+}
+
+impl Drop for RawInstance {
+ fn drop(&mut self) {
+ unsafe {
+ #[cfg(debug_assertions)]
+ {
+ match self.debug_messenger {
+ Some(DebugMessenger::Utils(ref ext, callback)) => {
+ ext.destroy_debug_utils_messenger(callback, None)
+ }
+ Some(DebugMessenger::Report(ref ext, callback)) => {
+ ext.destroy_debug_report_callback(callback, None)
+ }
+ None => {}
+ }
+ }
+
+ self.inner.destroy_instance(None);
+ }
+ }
+}
+
+pub struct Instance {
+ pub raw: Arc<RawInstance>,
+
+ /// Supported extensions of this instance.
+ pub extensions: Vec<&'static CStr>,
+}
+
+impl fmt::Debug for Instance {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.write_str("Instance")
+ }
+}
+
+fn map_queue_type(flags: vk::QueueFlags) -> queue::QueueType {
+ if flags.contains(vk::QueueFlags::GRAPHICS | vk::QueueFlags::COMPUTE) {
+ // TRANSFER_BIT optional
+ queue::QueueType::General
+ } else if flags.contains(vk::QueueFlags::GRAPHICS) {
+ // TRANSFER_BIT optional
+ queue::QueueType::Graphics
+ } else if flags.contains(vk::QueueFlags::COMPUTE) {
+ // TRANSFER_BIT optional
+ queue::QueueType::Compute
+ } else if flags.contains(vk::QueueFlags::TRANSFER) {
+ queue::QueueType::Transfer
+ } else {
+ // TODO: present-only queues?
+ unimplemented!()
+ }
+}
+
+unsafe fn display_debug_utils_label_ext(
+ label_structs: *mut vk::DebugUtilsLabelEXT,
+ count: usize,
+) -> Option<String> {
+ if count == 0 {
+ return None;
+ }
+
+ Some(
+ slice::from_raw_parts::<vk::DebugUtilsLabelEXT>(label_structs, count)
+ .iter()
+ .flat_map(|dul_obj| {
+ dul_obj
+ .p_label_name
+ .as_ref()
+ .map(|lbl| CStr::from_ptr(lbl).to_string_lossy().into_owned())
+ })
+ .collect::<Vec<String>>()
+ .join(", "),
+ )
+}
+
+unsafe fn display_debug_utils_object_name_info_ext(
+ info_structs: *mut vk::DebugUtilsObjectNameInfoEXT,
+ count: usize,
+) -> Option<String> {
+ if count == 0 {
+ return None;
+ }
+
+ //TODO: use the color field of vk::DebugUtilsLabelEXT in a meaningful way?
+ Some(
+ slice::from_raw_parts::<vk::DebugUtilsObjectNameInfoEXT>(info_structs, count)
+ .iter()
+ .map(|obj_info| {
+ let object_name = obj_info
+ .p_object_name
+ .as_ref()
+ .map(|name| CStr::from_ptr(name).to_string_lossy().into_owned());
+
+ match object_name {
+ Some(name) => format!(
+ "(type: {:?}, hndl: {}, name: {})",
+ obj_info.object_type,
+ &obj_info.object_handle.to_string(),
+ name
+ ),
+ None => format!(
+ "(type: {:?}, hndl: {})",
+ obj_info.object_type,
+ &obj_info.object_handle.to_string()
+ ),
+ }
+ })
+ .collect::<Vec<String>>()
+ .join(", "),
+ )
+}
+
+unsafe extern "system" fn debug_utils_messenger_callback(
+ message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,
+ message_type: vk::DebugUtilsMessageTypeFlagsEXT,
+ p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT,
+ _user_data: *mut std::os::raw::c_void,
+) -> vk::Bool32 {
+ let callback_data = *p_callback_data;
+
+ let message_severity = match message_severity {
+ vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => log::Level::Error,
+ vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => log::Level::Warn,
+ vk::DebugUtilsMessageSeverityFlagsEXT::INFO => log::Level::Info,
+ vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => log::Level::Trace,
+ _ => log::Level::Warn,
+ };
+ let message_type = &format!("{:?}", message_type);
+ let message_id_number: i32 = callback_data.message_id_number as i32;
+
+ let message_id_name = if callback_data.p_message_id_name.is_null() {
+ Cow::from("")
+ } else {
+ CStr::from_ptr(callback_data.p_message_id_name).to_string_lossy()
+ };
+
+ let message = if callback_data.p_message.is_null() {
+ Cow::from("")
+ } else {
+ CStr::from_ptr(callback_data.p_message).to_string_lossy()
+ };
+
+ let additional_info: [(&str, Option<String>); 3] = [
+ (
+ "queue info",
+ display_debug_utils_label_ext(
+ callback_data.p_queue_labels as *mut _,
+ callback_data.queue_label_count as usize,
+ ),
+ ),
+ (
+ "cmd buf info",
+ display_debug_utils_label_ext(
+ callback_data.p_cmd_buf_labels as *mut _,
+ callback_data.cmd_buf_label_count as usize,
+ ),
+ ),
+ (
+ "object info",
+ display_debug_utils_object_name_info_ext(
+ callback_data.p_objects as *mut _,
+ callback_data.object_count as usize,
+ ),
+ ),
+ ];
+
+ log!(message_severity, "{}\n", {
+ let mut msg = format!(
+ "\n{} [{} ({})] : {}",
+ message_type,
+ message_id_name,
+ &message_id_number.to_string(),
+ message
+ );
+
+ #[allow(array_into_iter)]
+ for (info_label, info) in additional_info.into_iter() {
+ match info {
+ Some(data) => {
+ msg = format!("{}\n{}: {}", msg, info_label, data);
+ }
+ None => {}
+ }
+ }
+
+ msg
+ });
+
+ vk::FALSE
+}
+
+unsafe extern "system" fn debug_report_callback(
+ type_: vk::DebugReportFlagsEXT,
+ _: vk::DebugReportObjectTypeEXT,
+ _object: u64,
+ _location: usize,
+ _msg_code: i32,
+ layer_prefix: *const std::os::raw::c_char,
+ description: *const std::os::raw::c_char,
+ _user_data: *mut std::os::raw::c_void,
+) -> vk::Bool32 {
+ let level = match type_ {
+ vk::DebugReportFlagsEXT::ERROR => log::Level::Error,
+ vk::DebugReportFlagsEXT::WARNING => log::Level::Warn,
+ vk::DebugReportFlagsEXT::INFORMATION => log::Level::Info,
+ vk::DebugReportFlagsEXT::DEBUG => log::Level::Debug,
+ _ => log::Level::Warn,
+ };
+
+ let layer_prefix = CStr::from_ptr(layer_prefix).to_str().unwrap();
+ let description = CStr::from_ptr(description).to_str().unwrap();
+ log!(level, "[{}] {}", layer_prefix, description);
+ vk::FALSE
+}
+
+impl hal::Instance<Backend> for Instance {
+ fn create(name: &str, version: u32) -> Result<Self, hal::UnsupportedBackend> {
+ // TODO: return errors instead of panicking
+ let entry = VK_ENTRY.as_ref().map_err(|e| {
+ info!("Missing Vulkan entry points: {:?}", e);
+ hal::UnsupportedBackend
+ })?;
+
+ let app_name = CString::new(name).unwrap();
+ let app_info = vk::ApplicationInfo::builder()
+ .application_name(app_name.as_c_str())
+ .application_version(version)
+ .engine_name(CStr::from_bytes_with_nul(b"gfx-rs\0").unwrap())
+ .engine_version(1)
+ .api_version(vk::make_version(1, 0, 0));
+
+ let instance_extensions = entry
+ .enumerate_instance_extension_properties()
+ .map_err(|e| {
+ info!("Unable to enumerate instance extensions: {:?}", e);
+ hal::UnsupportedBackend
+ })?;
+
+ let instance_layers = entry.enumerate_instance_layer_properties().map_err(|e| {
+ info!("Unable to enumerate instance layers: {:?}", e);
+ hal::UnsupportedBackend
+ })?;
+
+ // Check our extensions against the available extensions
+ let extensions = SURFACE_EXTENSIONS
+ .iter()
+ .chain(EXTENSIONS.iter())
+ .filter_map(|&ext| {
+ instance_extensions
+ .iter()
+ .find(|inst_ext| unsafe {
+ CStr::from_ptr(inst_ext.extension_name.as_ptr()) == ext
+ })
+ .map(|_| ext)
+ .or_else(|| {
+ info!("Unable to find extension: {}", ext.to_string_lossy());
+ None
+ })
+ })
+ .collect::<Vec<&CStr>>();
+
+ // Check requested layers against the available layers
+ let layers = LAYERS
+ .iter()
+ .filter_map(|&layer| {
+ instance_layers
+ .iter()
+ .find(|inst_layer| unsafe {
+ CStr::from_ptr(inst_layer.layer_name.as_ptr()) == layer
+ })
+ .map(|_| layer)
+ .or_else(|| {
+ warn!("Unable to find layer: {}", layer.to_string_lossy());
+ None
+ })
+ })
+ .collect::<Vec<&CStr>>();
+
+ let instance = {
+ let cstrings = layers
+ .iter()
+ .chain(extensions.iter())
+ .map(|&s| CString::from(s))
+ .collect::<Vec<_>>();
+
+ let str_pointers = cstrings.iter().map(|s| s.as_ptr()).collect::<Vec<_>>();
+
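+ // Layers and extensions share one vector of C strings: the first `layers.len()` pointers name layers, the remainder extensions.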
+ let create_info = vk::InstanceCreateInfo::builder()
+ .flags(vk::InstanceCreateFlags::empty())
+ .application_info(&app_info)
+ .enabled_layer_names(&str_pointers[..layers.len()])
+ .enabled_extension_names(&str_pointers[layers.len()..]);
+
+ unsafe { entry.create_instance(&create_info, None) }.map_err(|e| {
+ warn!("Unable to create Vulkan instance: {:?}", e);
+ hal::UnsupportedBackend
+ })?
+ };
+
+ let get_physical_device_properties = extensions
+ .iter()
+ .find(|&&ext| ext == *KHR_GET_PHYSICAL_DEVICE_PROPERTIES2)
+ .map(|_| {
+ vk::KhrGetPhysicalDeviceProperties2Fn::load(|name| unsafe {
+ std::mem::transmute(
+ entry.get_instance_proc_addr(instance.handle(), name.as_ptr()),
+ )
+ })
+ });
+
+ #[cfg(debug_assertions)]
+ let debug_messenger = {
+ // make sure VK_EXT_debug_utils is available
+ if instance_extensions.iter().any(|props| unsafe {
+ CStr::from_ptr(props.extension_name.as_ptr()) == DebugUtils::name()
+ }) {
+ let ext = DebugUtils::new(entry, &instance);
+ let info = vk::DebugUtilsMessengerCreateInfoEXT::builder()
+ .flags(vk::DebugUtilsMessengerCreateFlagsEXT::empty())
+ .message_severity(vk::DebugUtilsMessageSeverityFlagsEXT::all())
+ .message_type(vk::DebugUtilsMessageTypeFlagsEXT::all())
+ .pfn_user_callback(Some(debug_utils_messenger_callback));
+ let handle = unsafe { ext.create_debug_utils_messenger(&info, None) }.unwrap();
+ Some(DebugMessenger::Utils(ext, handle))
+ } else if instance_extensions.iter().any(|props| unsafe {
+ CStr::from_ptr(props.extension_name.as_ptr()) == DebugReport::name()
+ }) {
+ let ext = DebugReport::new(entry, &instance);
+ let info = vk::DebugReportCallbackCreateInfoEXT::builder()
+ .flags(vk::DebugReportFlagsEXT::all())
+ .pfn_callback(Some(debug_report_callback));
+ let handle = unsafe { ext.create_debug_report_callback(&info, None) }.unwrap();
+ Some(DebugMessenger::Report(ext, handle))
+ } else {
+ None
+ }
+ };
+ #[cfg(not(debug_assertions))]
+ let debug_messenger = None;
+
+ Ok(Instance {
+ raw: Arc::new(RawInstance {
+ inner: instance,
+ debug_messenger,
+ get_physical_device_properties,
+ }),
+ extensions,
+ })
+ }
+
+ fn enumerate_adapters(&self) -> Vec<adapter::Adapter<Backend>> {
+ let devices = match unsafe { self.raw.inner.enumerate_physical_devices() } {
+ Ok(devices) => devices,
+ Err(err) => {
+ error!("Could not enumerate physical devices! {}", err);
+ vec![]
+ }
+ };
+
+ devices
+ .into_iter()
+ .map(|device| {
+ let extensions =
+ unsafe { self.raw.inner.enumerate_device_extension_properties(device) }
+ .unwrap();
+ let properties = unsafe { self.raw.inner.get_physical_device_properties(device) };
+ let info = adapter::AdapterInfo {
+ name: unsafe {
+ CStr::from_ptr(properties.device_name.as_ptr())
+ .to_str()
+ .unwrap_or("Unknown")
+ .to_owned()
+ },
+ vendor: properties.vendor_id as usize,
+ device: properties.device_id as usize,
+ device_type: match properties.device_type {
+ ash::vk::PhysicalDeviceType::OTHER => adapter::DeviceType::Other,
+ ash::vk::PhysicalDeviceType::INTEGRATED_GPU => {
+ adapter::DeviceType::IntegratedGpu
+ }
+ ash::vk::PhysicalDeviceType::DISCRETE_GPU => {
+ adapter::DeviceType::DiscreteGpu
+ }
+ ash::vk::PhysicalDeviceType::VIRTUAL_GPU => adapter::DeviceType::VirtualGpu,
+ ash::vk::PhysicalDeviceType::CPU => adapter::DeviceType::Cpu,
+ _ => adapter::DeviceType::Other,
+ },
+ };
+ let physical_device = PhysicalDevice {
+ instance: self.raw.clone(),
+ handle: device,
+ extensions,
+ properties,
+ known_memory_flags: vk::MemoryPropertyFlags::DEVICE_LOCAL
+ | vk::MemoryPropertyFlags::HOST_VISIBLE
+ | vk::MemoryPropertyFlags::HOST_COHERENT
+ | vk::MemoryPropertyFlags::HOST_CACHED
+ | vk::MemoryPropertyFlags::LAZILY_ALLOCATED,
+ };
+ let queue_families = unsafe {
+ self.raw
+ .inner
+ .get_physical_device_queue_family_properties(device)
+ .into_iter()
+ .enumerate()
+ .map(|(i, properties)| QueueFamily {
+ properties,
+ device,
+ index: i as u32,
+ })
+ .collect()
+ };
+
+ adapter::Adapter {
+ info,
+ physical_device,
+ queue_families,
+ }
+ })
+ .collect()
+ }
+
+ unsafe fn create_surface(
+ &self,
+ has_handle: &impl raw_window_handle::HasRawWindowHandle,
+ ) -> Result<window::Surface, hal::window::InitError> {
+ use raw_window_handle::RawWindowHandle;
+
+ match has_handle.raw_window_handle() {
+ #[cfg(all(
+ unix,
+ not(target_os = "android"),
+ not(target_os = "macos"),
+ not(target_os = "solaris")
+ ))]
+ RawWindowHandle::Wayland(handle)
+ if self
+ .extensions
+ .contains(&extensions::khr::WaylandSurface::name()) =>
+ {
+ Ok(self.create_surface_from_wayland(handle.display, handle.surface))
+ }
+ #[cfg(all(
+ unix,
+ not(target_os = "android"),
+ not(target_os = "macos"),
+ not(target_os = "solaris")
+ ))]
+ RawWindowHandle::Xlib(handle)
+ if self
+ .extensions
+ .contains(&extensions::khr::XlibSurface::name()) =>
+ {
+ Ok(self.create_surface_from_xlib(handle.display as *mut _, handle.window))
+ }
+ #[cfg(all(
+ unix,
+ not(target_os = "android"),
+ not(target_os = "macos"),
+ not(target_os = "ios")
+ ))]
+ RawWindowHandle::Xcb(handle)
+ if self
+ .extensions
+ .contains(&extensions::khr::XcbSurface::name()) =>
+ {
+ Ok(self.create_surface_from_xcb(handle.connection as *mut _, handle.window))
+ }
+ #[cfg(target_os = "android")]
+ RawWindowHandle::Android(handle) => {
+ Ok(self.create_surface_android(handle.a_native_window))
+ }
+ #[cfg(windows)]
+ RawWindowHandle::Windows(handle) => {
+ use winapi::um::libloaderapi::GetModuleHandleW;
+
+ let hinstance = GetModuleHandleW(std::ptr::null());
+ Ok(self.create_surface_from_hwnd(hinstance as *mut _, handle.hwnd))
+ }
+ #[cfg(target_os = "macos")]
+ RawWindowHandle::MacOS(handle) => Ok(self.create_surface_from_ns_view(handle.ns_view)),
+ _ => Err(hal::window::InitError::UnsupportedWindowHandle),
+ }
+ }
+
+ unsafe fn destroy_surface(&self, surface: window::Surface) {
+ surface
+ .raw
+ .functor
+ .destroy_surface(surface.raw.handle, None);
+ }
+}
+
+#[derive(Debug, Clone)]
+pub struct QueueFamily {
+ properties: vk::QueueFamilyProperties,
+ device: vk::PhysicalDevice,
+ index: u32,
+}
+
+impl queue::QueueFamily for QueueFamily {
+ fn queue_type(&self) -> queue::QueueType {
+ map_queue_type(self.properties.queue_flags)
+ }
+ fn max_queues(&self) -> usize {
+ self.properties.queue_count as _
+ }
+ fn id(&self) -> queue::QueueFamilyId {
+ queue::QueueFamilyId(self.index as _)
+ }
+}
+
+pub struct PhysicalDevice {
+ instance: Arc<RawInstance>,
+ handle: vk::PhysicalDevice,
+ extensions: Vec<vk::ExtensionProperties>,
+ properties: vk::PhysicalDeviceProperties,
+ known_memory_flags: vk::MemoryPropertyFlags,
+}
+
+impl PhysicalDevice {
+ fn supports_extension(&self, extension: &CStr) -> bool {
+ self.extensions
+ .iter()
+ .any(|ep| unsafe { CStr::from_ptr(ep.extension_name.as_ptr()) } == extension)
+ }
+}
+
+impl fmt::Debug for PhysicalDevice {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.write_str("PhysicalDevice")
+ }
+}
+
+pub struct DeviceCreationFeatures {
+ core: vk::PhysicalDeviceFeatures,
+ descriptor_indexing: Option<vk::PhysicalDeviceDescriptorIndexingFeaturesEXT>,
+ mesh_shaders: Option<vk::PhysicalDeviceMeshShaderFeaturesNV>,
+}
+
+impl adapter::PhysicalDevice<Backend> for PhysicalDevice {
+ unsafe fn open(
+ &self,
+ families: &[(&QueueFamily, &[queue::QueuePriority])],
+ requested_features: Features,
+ ) -> Result<adapter::Gpu<Backend>, DeviceCreationError> {
+ let family_infos = families
+ .iter()
+ .map(|&(family, priorities)| {
+ vk::DeviceQueueCreateInfo::builder()
+ .flags(vk::DeviceQueueCreateFlags::empty())
+ .queue_family_index(family.index)
+ .queue_priorities(priorities)
+ .build()
+ })
+ .collect::<Vec<_>>();
+
+ if !self.features().contains(requested_features) {
+ return Err(DeviceCreationError::MissingFeature);
+ }
+
+ let maintenance_level = if self.supports_extension(*KHR_MAINTENANCE1) {
+ 1
+ } else {
+ 0
+ };
+ let mut enabled_features = conv::map_device_features(requested_features);
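+ // Start from the always-on device extensions, then append feature-driven extras below.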
+ let enabled_extensions = DEVICE_EXTENSIONS
+ .iter()
+ .cloned()
+ .chain(
+ if requested_features.contains(Features::NDC_Y_UP) && maintenance_level == 0 {
+ Some(*AMD_NEGATIVE_VIEWPORT_HEIGHT)
+ } else {
+ None
+ },
+ )
+ .chain(match maintenance_level {
+ 0 => None,
+ 1 => Some(*KHR_MAINTENANCE1),
+ _ => unreachable!(),
+ })
+ .chain(
+ if requested_features.intersects(
+ Features::SAMPLED_TEXTURE_DESCRIPTOR_INDEXING
+ | Features::STORAGE_TEXTURE_DESCRIPTOR_INDEXING
+ | Features::UNSIZED_DESCRIPTOR_ARRAY,
+ ) {
+ vec![*KHR_MAINTENANCE3, *EXT_DESCRIPTOR_INDEXING]
+ } else {
+ vec![]
+ },
+ )
+ .chain(
+ if requested_features.intersects(Features::TASK_SHADER | Features::MESH_SHADER) {
+ Some(*MESH_SHADER)
+ } else {
+ None
+ },
+ )
+ .chain(
+ if requested_features.contains(Features::DRAW_INDIRECT_COUNT) {
+ Some(*KHR_DRAW_INDIRECT_COUNT)
+ } else {
+ None
+ },
+ );
+
+ let valid_ash_memory_types = {
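+ // Build a bitmask of the memory types whose property flags we fully understand; unknown types stay hidden from HAL.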
+ let mem_properties = self
+ .instance
+ .inner
+ .get_physical_device_memory_properties(self.handle);
+ mem_properties.memory_types[..mem_properties.memory_type_count as usize]
+ .iter()
+ .enumerate()
+ .fold(0, |u, (i, mem)| {
+ if self.known_memory_flags.contains(mem.property_flags) {
+ u | (1 << i)
+ } else {
+ u
+ }
+ })
+ };
+
+ // Create device
+ let device_raw = {
+ let cstrings = enabled_extensions.map(CString::from).collect::<Vec<_>>();
+
+ let str_pointers = cstrings.iter().map(|s| s.as_ptr()).collect::<Vec<_>>();
+
+ let info = vk::DeviceCreateInfo::builder()
+ .queue_create_infos(&family_infos)
+ .enabled_extension_names(&str_pointers)
+ .enabled_features(&enabled_features.core);
+
+ let info =
+ if let Some(ref mut descriptor_indexing) = enabled_features.descriptor_indexing {
+ info.push_next(descriptor_indexing)
+ } else {
+ info
+ };
+
+ let info = if let Some(ref mut mesh_shaders) = enabled_features.mesh_shaders {
+ info.push_next(mesh_shaders)
+ } else {
+ info
+ };
+
+ match self.instance.inner.create_device(self.handle, &info, None) {
+ Ok(device) => device,
+ Err(e) => {
+ return Err(match e {
+ vk::Result::ERROR_OUT_OF_HOST_MEMORY => {
+ DeviceCreationError::OutOfMemory(OutOfMemory::Host)
+ }
+ vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
+ DeviceCreationError::OutOfMemory(OutOfMemory::Device)
+ }
+ vk::Result::ERROR_INITIALIZATION_FAILED => {
+ DeviceCreationError::InitializationFailed
+ }
+ vk::Result::ERROR_DEVICE_LOST => DeviceCreationError::DeviceLost,
+ vk::Result::ERROR_TOO_MANY_OBJECTS => DeviceCreationError::TooManyObjects,
+ _ => unreachable!(),
+ })
+ }
+ }
+ };
+
+ let swapchain_fn = Swapchain::new(&self.instance.inner, &device_raw);
+
+ let mesh_fn =
+ if requested_features.intersects(Features::TASK_SHADER | Features::MESH_SHADER) {
+ Some(MeshShader::new(&self.instance.inner, &device_raw))
+ } else {
+ None
+ };
+
+ let indirect_count_fn = if requested_features.contains(Features::DRAW_INDIRECT_COUNT) {
+ Some(DrawIndirectCount::new(&self.instance.inner, &device_raw))
+ } else {
+ None
+ };
+
+ let device = Device {
+ shared: Arc::new(RawDevice {
+ raw: device_raw,
+ features: requested_features,
+ instance: Arc::clone(&self.instance),
+ extension_fns: DeviceExtensionFunctions {
+ mesh_shaders: mesh_fn,
+ draw_indirect_count: indirect_count_fn,
+ },
+ maintenance_level,
+ }),
+ vendor_id: self.properties.vendor_id,
+ valid_ash_memory_types,
+ };
+
+ let device_arc = Arc::clone(&device.shared);
+ let queue_groups = families
+ .into_iter()
+ .map(|&(family, ref priorities)| {
+ let mut family_raw =
+ queue::QueueGroup::new(queue::QueueFamilyId(family.index as usize));
+ for id in 0..priorities.len() {
+ let queue_raw = device_arc.raw.get_device_queue(family.index, id as _);
+ family_raw.add_queue(CommandQueue {
+ raw: Arc::new(queue_raw),
+ device: device_arc.clone(),
+ swapchain_fn: swapchain_fn.clone(),
+ });
+ }
+ family_raw
+ })
+ .collect();
+
+ Ok(adapter::Gpu {
+ device,
+ queue_groups,
+ })
+ }
+
+ fn format_properties(&self, format: Option<format::Format>) -> format::Properties {
+ let properties = unsafe {
+ self.instance.inner.get_physical_device_format_properties(
+ self.handle,
+ format.map_or(vk::Format::UNDEFINED, conv::map_format),
+ )
+ };
+
+ format::Properties {
+ linear_tiling: conv::map_image_features(properties.linear_tiling_features),
+ optimal_tiling: conv::map_image_features(properties.optimal_tiling_features),
+ buffer_features: conv::map_buffer_features(properties.buffer_features),
+ }
+ }
+
+ fn image_format_properties(
+ &self,
+ format: format::Format,
+ dimensions: u8,
+ tiling: image::Tiling,
+ usage: image::Usage,
+ view_caps: image::ViewCapabilities,
+ ) -> Option<image::FormatProperties> {
+ let format_properties = unsafe {
+ self.instance
+ .inner
+ .get_physical_device_image_format_properties(
+ self.handle,
+ conv::map_format(format),
+ match dimensions {
+ 1 => vk::ImageType::TYPE_1D,
+ 2 => vk::ImageType::TYPE_2D,
+ 3 => vk::ImageType::TYPE_3D,
+ _ => panic!("Unexpected image dimensionality: {}", dimensions),
+ },
+ conv::map_tiling(tiling),
+ conv::map_image_usage(usage),
+ conv::map_view_capabilities(view_caps),
+ )
+ };
+
+ match format_properties {
+ Ok(props) => Some(image::FormatProperties {
+ max_extent: image::Extent {
+ width: props.max_extent.width,
+ height: props.max_extent.height,
+ depth: props.max_extent.depth,
+ },
+ max_levels: props.max_mip_levels as _,
+ max_layers: props.max_array_layers as _,
+ sample_count_mask: props.sample_counts.as_raw() as _,
+ max_resource_size: props.max_resource_size as _,
+ }),
+ Err(vk::Result::ERROR_FORMAT_NOT_SUPPORTED) => None,
+ Err(other) => {
+ error!("Unexpected error in `image_format_properties`: {:?}", other);
+ None
+ }
+ }
+ }
+
+ fn memory_properties(&self) -> adapter::MemoryProperties {
+ let mem_properties = unsafe {
+ self.instance
+ .inner
+ .get_physical_device_memory_properties(self.handle)
+ };
+ let memory_heaps = mem_properties.memory_heaps[..mem_properties.memory_heap_count as usize]
+ .iter()
+ .map(|mem| adapter::MemoryHeap {
+ size: mem.size,
+ flags: conv::map_memory_heap_flags(mem.flags),
+ })
+ .collect();
+ let memory_types = mem_properties.memory_types[..mem_properties.memory_type_count as usize]
+ .iter()
+ .filter_map(|mem| {
+ if self.known_memory_flags.contains(mem.property_flags) {
+ Some(adapter::MemoryType {
+ properties: conv::map_memory_properties(mem.property_flags),
+ heap_index: mem.heap_index as usize,
+ })
+ } else {
+ warn!(
+ "Skipping memory type with unknown flags {:?}",
+ mem.property_flags
+ );
+ None
+ }
+ })
+ .collect();
+
+ adapter::MemoryProperties {
+ memory_heaps,
+ memory_types,
+ }
+ }
+
+ fn features(&self) -> Features {
+ // see https://github.com/gfx-rs/gfx/issues/1930
+ let is_windows_intel_dual_src_bug = cfg!(windows)
+ && self.properties.vendor_id == info::intel::VENDOR
+ && (self.properties.device_id & info::intel::DEVICE_KABY_LAKE_MASK
+ == info::intel::DEVICE_KABY_LAKE_MASK
+ || self.properties.device_id & info::intel::DEVICE_SKY_LAKE_MASK
+ == info::intel::DEVICE_SKY_LAKE_MASK);
+
+ let mut descriptor_indexing_features = None;
+ let features = if let Some(ref get_device_properties) =
+ self.instance.get_physical_device_properties
+ {
+ let features = vk::PhysicalDeviceFeatures::builder().build();
+ let mut features2 = vk::PhysicalDeviceFeatures2KHR::builder()
+ .features(features)
+ .build();
+
+ // Add extension infos to the p_next chain
+ if self.supports_extension(*EXT_DESCRIPTOR_INDEXING) {
+ descriptor_indexing_features =
+ Some(vk::PhysicalDeviceDescriptorIndexingFeaturesEXT::builder().build());
+
+ let mut_ref = descriptor_indexing_features.as_mut().unwrap();
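+ // Splice the descriptor-indexing struct into the front of the p_next chain.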
+ mut_ref.p_next = mem::replace(&mut features2.p_next, mut_ref as *mut _ as *mut _);
+ }
+
+ unsafe {
+ get_device_properties
+ .get_physical_device_features2_khr(self.handle, &mut features2 as *mut _);
+ }
+ features2.features
+ } else {
+ unsafe {
+ self.instance
+ .inner
+ .get_physical_device_features(self.handle)
+ }
+ };
+
+ let mut bits = Features::empty()
+ | Features::TRIANGLE_FAN
+ | Features::SEPARATE_STENCIL_REF_VALUES
+ | Features::SAMPLER_MIP_LOD_BIAS
+ | Features::SAMPLER_BORDER_COLOR
+ | Features::MUTABLE_COMPARISON_SAMPLER
+ | Features::TEXTURE_DESCRIPTOR_ARRAY;
+
+ if self.supports_extension(*AMD_NEGATIVE_VIEWPORT_HEIGHT)
+ || self.supports_extension(*KHR_MAINTENANCE1)
+ {
+ bits |= Features::NDC_Y_UP;
+ }
+ if self.supports_extension(*KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE) {
+ bits |= Features::SAMPLER_MIRROR_CLAMP_EDGE;
+ }
+ if self.supports_extension(*KHR_DRAW_INDIRECT_COUNT) {
+ bits |= Features::DRAW_INDIRECT_COUNT
+ }
+ // This is only `Some` if the descriptor-indexing extension is present
+ if let Some(ref desc_indexing) = descriptor_indexing_features {
+ if desc_indexing.shader_sampled_image_array_non_uniform_indexing != 0 {
+ bits |= Features::SAMPLED_TEXTURE_DESCRIPTOR_INDEXING;
+ }
+ if desc_indexing.shader_storage_image_array_non_uniform_indexing != 0 {
+ bits |= Features::STORAGE_TEXTURE_DESCRIPTOR_INDEXING;
+ }
+ if desc_indexing.runtime_descriptor_array != 0 {
+ bits |= Features::UNSIZED_DESCRIPTOR_ARRAY;
+ }
+ }
+
+ if features.robust_buffer_access != 0 {
+ bits |= Features::ROBUST_BUFFER_ACCESS;
+ }
+ if features.full_draw_index_uint32 != 0 {
+ bits |= Features::FULL_DRAW_INDEX_U32;
+ }
+ if features.image_cube_array != 0 {
+ bits |= Features::IMAGE_CUBE_ARRAY;
+ }
+ if features.independent_blend != 0 {
+ bits |= Features::INDEPENDENT_BLENDING;
+ }
+ if features.geometry_shader != 0 {
+ bits |= Features::GEOMETRY_SHADER;
+ }
+ if features.tessellation_shader != 0 {
+ bits |= Features::TESSELLATION_SHADER;
+ }
+ if features.sample_rate_shading != 0 {
+ bits |= Features::SAMPLE_RATE_SHADING;
+ }
+ if features.dual_src_blend != 0 && !is_windows_intel_dual_src_bug {
+ bits |= Features::DUAL_SRC_BLENDING;
+ }
+ if features.logic_op != 0 {
+ bits |= Features::LOGIC_OP;
+ }
+ if features.multi_draw_indirect != 0 {
+ bits |= Features::MULTI_DRAW_INDIRECT;
+ }
+ if features.draw_indirect_first_instance != 0 {
+ bits |= Features::DRAW_INDIRECT_FIRST_INSTANCE;
+ }
+ if features.depth_clamp != 0 {
+ bits |= Features::DEPTH_CLAMP;
+ }
+ if features.depth_bias_clamp != 0 {
+ bits |= Features::DEPTH_BIAS_CLAMP;
+ }
+ if features.fill_mode_non_solid != 0 {
+ bits |= Features::NON_FILL_POLYGON_MODE;
+ }
+ if features.depth_bounds != 0 {
+ bits |= Features::DEPTH_BOUNDS;
+ }
+ if features.wide_lines != 0 {
+ bits |= Features::LINE_WIDTH;
+ }
+ if features.large_points != 0 {
+ bits |= Features::POINT_SIZE;
+ }
+ if features.alpha_to_one != 0 {
+ bits |= Features::ALPHA_TO_ONE;
+ }
+ if features.multi_viewport != 0 {
+ bits |= Features::MULTI_VIEWPORTS;
+ }
+ if features.sampler_anisotropy != 0 {
+ bits |= Features::SAMPLER_ANISOTROPY;
+ }
+ if features.texture_compression_etc2 != 0 {
+ bits |= Features::FORMAT_ETC2;
+ }
+ if features.texture_compression_astc_ldr != 0 {
+ bits |= Features::FORMAT_ASTC_LDR;
+ }
+ if features.texture_compression_bc != 0 {
+ bits |= Features::FORMAT_BC;
+ }
+ if features.occlusion_query_precise != 0 {
+ bits |= Features::PRECISE_OCCLUSION_QUERY;
+ }
+ if features.pipeline_statistics_query != 0 {
+ bits |= Features::PIPELINE_STATISTICS_QUERY;
+ }
+ if features.vertex_pipeline_stores_and_atomics != 0 {
+ bits |= Features::VERTEX_STORES_AND_ATOMICS;
+ }
+ if features.fragment_stores_and_atomics != 0 {
+ bits |= Features::FRAGMENT_STORES_AND_ATOMICS;
+ }
+ if features.shader_tessellation_and_geometry_point_size != 0 {
+ bits |= Features::SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE;
+ }
+ if features.shader_image_gather_extended != 0 {
+ bits |= Features::SHADER_IMAGE_GATHER_EXTENDED;
+ }
+ if features.shader_storage_image_extended_formats != 0 {
+ bits |= Features::SHADER_STORAGE_IMAGE_EXTENDED_FORMATS;
+ }
+ if features.shader_storage_image_multisample != 0 {
+ bits |= Features::SHADER_STORAGE_IMAGE_MULTISAMPLE;
+ }
+ if features.shader_storage_image_read_without_format != 0 {
+ bits |= Features::SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT;
+ }
+ if features.shader_storage_image_write_without_format != 0 {
+ bits |= Features::SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT;
+ }
+ if features.shader_uniform_buffer_array_dynamic_indexing != 0 {
+ bits |= Features::SHADER_UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING;
+ }
+ if features.shader_sampled_image_array_dynamic_indexing != 0 {
+ bits |= Features::SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING;
+ }
+ if features.shader_storage_buffer_array_dynamic_indexing != 0 {
+ bits |= Features::SHADER_STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING;
+ }
+ if features.shader_storage_image_array_dynamic_indexing != 0 {
+ bits |= Features::SHADER_STORAGE_IMAGE_ARRAY_DYNAMIC_INDEXING;
+ }
+ if features.shader_clip_distance != 0 {
+ bits |= Features::SHADER_CLIP_DISTANCE;
+ }
+ if features.shader_cull_distance != 0 {
+ bits |= Features::SHADER_CULL_DISTANCE;
+ }
+ if features.shader_float64 != 0 {
+ bits |= Features::SHADER_FLOAT64;
+ }
+ if features.shader_int64 != 0 {
+ bits |= Features::SHADER_INT64;
+ }
+ if features.shader_int16 != 0 {
+ bits |= Features::SHADER_INT16;
+ }
+ if features.shader_resource_residency != 0 {
+ bits |= Features::SHADER_RESOURCE_RESIDENCY;
+ }
+ if features.shader_resource_min_lod != 0 {
+ bits |= Features::SHADER_RESOURCE_MIN_LOD;
+ }
+ if features.sparse_binding != 0 {
+ bits |= Features::SPARSE_BINDING;
+ }
+ if features.sparse_residency_buffer != 0 {
+ bits |= Features::SPARSE_RESIDENCY_BUFFER;
+ }
+ if features.sparse_residency_image2_d != 0 {
+ bits |= Features::SPARSE_RESIDENCY_IMAGE_2D;
+ }
+ if features.sparse_residency_image3_d != 0 {
+ bits |= Features::SPARSE_RESIDENCY_IMAGE_3D;
+ }
+ if features.sparse_residency2_samples != 0 {
+ bits |= Features::SPARSE_RESIDENCY_2_SAMPLES;
+ }
+ if features.sparse_residency4_samples != 0 {
+ bits |= Features::SPARSE_RESIDENCY_4_SAMPLES;
+ }
+ if features.sparse_residency8_samples != 0 {
+ bits |= Features::SPARSE_RESIDENCY_8_SAMPLES;
+ }
+ if features.sparse_residency16_samples != 0 {
+ bits |= Features::SPARSE_RESIDENCY_16_SAMPLES;
+ }
+ if features.sparse_residency_aliased != 0 {
+ bits |= Features::SPARSE_RESIDENCY_ALIASED;
+ }
+ if features.variable_multisample_rate != 0 {
+ bits |= Features::VARIABLE_MULTISAMPLE_RATE;
+ }
+ if features.inherited_queries != 0 {
+ bits |= Features::INHERITED_QUERIES;
+ }
+ if self.supports_extension(*MESH_SHADER) {
+ bits |= Features::TASK_SHADER;
+ bits |= Features::MESH_SHADER
+ }
+
+ bits
+ }
+
+ fn hints(&self) -> Hints {
+ Hints::BASE_VERTEX_INSTANCE_DRAWING
+ }
+
+ fn limits(&self) -> Limits {
+ let limits = &self.properties.limits;
+ let max_group_count = limits.max_compute_work_group_count;
+ let max_group_size = limits.max_compute_work_group_size;
+
+ Limits {
+ max_image_1d_size: limits.max_image_dimension1_d,
+ max_image_2d_size: limits.max_image_dimension2_d,
+ max_image_3d_size: limits.max_image_dimension3_d,
+ max_image_cube_size: limits.max_image_dimension_cube,
+ max_image_array_layers: limits.max_image_array_layers as _,
+ max_texel_elements: limits.max_texel_buffer_elements as _,
+ max_patch_size: limits.max_tessellation_patch_size as PatchSize,
+ max_viewports: limits.max_viewports as _,
+ max_viewport_dimensions: limits.max_viewport_dimensions,
+ max_framebuffer_extent: image::Extent {
+ width: limits.max_framebuffer_width,
+ height: limits.max_framebuffer_height,
+ depth: limits.max_framebuffer_layers,
+ },
+ max_compute_work_group_count: [
+ max_group_count[0] as _,
+ max_group_count[1] as _,
+ max_group_count[2] as _,
+ ],
+ max_compute_work_group_size: [
+ max_group_size[0] as _,
+ max_group_size[1] as _,
+ max_group_size[2] as _,
+ ],
+ max_vertex_input_attributes: limits.max_vertex_input_attributes as _,
+ max_vertex_input_bindings: limits.max_vertex_input_bindings as _,
+ max_vertex_input_attribute_offset: limits.max_vertex_input_attribute_offset as _,
+ max_vertex_input_binding_stride: limits.max_vertex_input_binding_stride as _,
+ max_vertex_output_components: limits.max_vertex_output_components as _,
+ optimal_buffer_copy_offset_alignment: limits.optimal_buffer_copy_offset_alignment as _,
+ optimal_buffer_copy_pitch_alignment: limits.optimal_buffer_copy_row_pitch_alignment
+ as _,
+ min_texel_buffer_offset_alignment: limits.min_texel_buffer_offset_alignment as _,
+ min_uniform_buffer_offset_alignment: limits.min_uniform_buffer_offset_alignment as _,
+ min_storage_buffer_offset_alignment: limits.min_storage_buffer_offset_alignment as _,
+ framebuffer_color_sample_counts: limits.framebuffer_color_sample_counts.as_raw() as _,
+ framebuffer_depth_sample_counts: limits.framebuffer_depth_sample_counts.as_raw() as _,
+ framebuffer_stencil_sample_counts: limits.framebuffer_stencil_sample_counts.as_raw()
+ as _,
+ max_color_attachments: limits.max_color_attachments as _,
+ buffer_image_granularity: limits.buffer_image_granularity,
+ non_coherent_atom_size: limits.non_coherent_atom_size as _,
+ max_sampler_anisotropy: limits.max_sampler_anisotropy,
+ min_vertex_input_binding_stride_alignment: 1,
+ max_bound_descriptor_sets: limits.max_bound_descriptor_sets as _,
+ max_compute_shared_memory_size: limits.max_compute_shared_memory_size as _,
+ max_compute_work_group_invocations: limits.max_compute_work_group_invocations as _,
+ max_descriptor_set_input_attachments: limits.max_descriptor_set_input_attachments as _,
+ max_descriptor_set_sampled_images: limits.max_descriptor_set_sampled_images as _,
+ max_descriptor_set_samplers: limits.max_descriptor_set_samplers as _,
+ max_descriptor_set_storage_buffers: limits.max_descriptor_set_storage_buffers as _,
+ max_descriptor_set_storage_buffers_dynamic: limits
+ .max_descriptor_set_storage_buffers_dynamic
+ as _,
+ max_descriptor_set_storage_images: limits.max_descriptor_set_storage_images as _,
+ max_descriptor_set_uniform_buffers: limits.max_descriptor_set_uniform_buffers as _,
+ max_descriptor_set_uniform_buffers_dynamic: limits
+ .max_descriptor_set_uniform_buffers_dynamic
+ as _,
+ max_draw_indexed_index_value: limits.max_draw_indexed_index_value,
+ max_draw_indirect_count: limits.max_draw_indirect_count,
+ max_fragment_combined_output_resources: limits.max_fragment_combined_output_resources
+ as _,
+ max_fragment_dual_source_attachments: limits.max_fragment_dual_src_attachments as _,
+ max_fragment_input_components: limits.max_fragment_input_components as _,
+ max_fragment_output_attachments: limits.max_fragment_output_attachments as _,
+ max_framebuffer_layers: limits.max_framebuffer_layers as _,
+ max_geometry_input_components: limits.max_geometry_input_components as _,
+ max_geometry_output_components: limits.max_geometry_output_components as _,
+ max_geometry_output_vertices: limits.max_geometry_output_vertices as _,
+ max_geometry_shader_invocations: limits.max_geometry_shader_invocations as _,
+ max_geometry_total_output_components: limits.max_geometry_total_output_components as _,
+ max_memory_allocation_count: limits.max_memory_allocation_count as _,
+ max_per_stage_descriptor_input_attachments: limits
+ .max_per_stage_descriptor_input_attachments
+ as _,
+ max_per_stage_descriptor_sampled_images: limits.max_per_stage_descriptor_sampled_images
+ as _,
+ max_per_stage_descriptor_samplers: limits.max_per_stage_descriptor_samplers as _,
+ max_per_stage_descriptor_storage_buffers: limits
+ .max_per_stage_descriptor_storage_buffers
+ as _,
+ max_per_stage_descriptor_storage_images: limits.max_per_stage_descriptor_storage_images
+ as _,
+ max_per_stage_descriptor_uniform_buffers: limits
+ .max_per_stage_descriptor_uniform_buffers
+ as _,
+ max_per_stage_resources: limits.max_per_stage_resources as _,
+ max_push_constants_size: limits.max_push_constants_size as _,
+ max_sampler_allocation_count: limits.max_sampler_allocation_count as _,
+ max_sampler_lod_bias: limits.max_sampler_lod_bias as _,
+ max_storage_buffer_range: limits.max_storage_buffer_range as _,
+ max_uniform_buffer_range: limits.max_uniform_buffer_range as _,
+ min_memory_map_alignment: limits.min_memory_map_alignment,
+ standard_sample_locations: limits.standard_sample_locations == ash::vk::TRUE,
+
+ // TODO: Implement Limits for Mesh Shaders
+ // Depends on VkPhysicalDeviceMeshShaderPropertiesNV, which in turn requires VkPhysicalDeviceProperties2
+ max_draw_mesh_tasks_count: 0,
+ max_task_work_group_invocations: 0,
+ max_task_work_group_size: [0; 3],
+ max_task_total_memory_size: 0,
+ max_task_output_count: 0,
+ max_mesh_work_group_invocations: 0,
+ max_mesh_work_group_size: [0; 3],
+ max_mesh_total_memory_size: 0,
+ max_mesh_output_vertices: 0,
+ max_mesh_output_primitives: 0,
+ max_mesh_multiview_view_count: 0,
+ mesh_output_per_vertex_granularity: 0,
+ mesh_output_per_primitive_granularity: 0,
+ }
+ }
+
+ fn is_valid_cache(&self, cache: &[u8]) -> bool {
+ const HEADER_SIZE: usize = 16 + vk::UUID_SIZE;
+
+ if cache.len() < HEADER_SIZE {
+ warn!("Bad cache data length {:?}", cache.len());
+ return false;
+ }
+
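+ // Vulkan pipeline cache header (all fields little-endian u32): length, version, vendor ID, device ID, then a 16-byte pipeline-cache UUID.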
+ let header_len = u32::from_le_bytes([cache[0], cache[1], cache[2], cache[3]]);
+ let header_version = u32::from_le_bytes([cache[4], cache[5], cache[6], cache[7]]);
+ let vendor_id = u32::from_le_bytes([cache[8], cache[9], cache[10], cache[11]]);
+ let device_id = u32::from_le_bytes([cache[12], cache[13], cache[14], cache[15]]);
+
+ // header length
+ if (header_len as usize) < HEADER_SIZE {
+ warn!("Bad header length {:?}", header_len);
+ return false;
+ }
+
+ // cache header version
+ if header_version != vk::PipelineCacheHeaderVersion::ONE.as_raw() as u32 {
+ warn!("Unsupported cache header version: {:?}", header_version);
+ return false;
+ }
+
+ // vendor id
+ if vendor_id != self.properties.vendor_id {
+ warn!(
+ "Vendor ID mismatch. Device: {:?}, cache: {:?}.",
+ self.properties.vendor_id, vendor_id,
+ );
+ return false;
+ }
+
+ // device id
+ if device_id != self.properties.device_id {
+ warn!(
+ "Device ID mismatch. Device: {:?}, cache: {:?}.",
+ self.properties.device_id, device_id,
+ );
+ return false;
+ }
+
+ if self.properties.pipeline_cache_uuid != cache[16..16 + vk::UUID_SIZE] {
+ warn!(
+ "Pipeline cache UUID mismatch. Device: {:?}, cache: {:?}.",
+ self.properties.pipeline_cache_uuid,
+ &cache[16..16 + vk::UUID_SIZE],
+ );
+ return false;
+ }
+ true
+ }
+}
+
+struct DeviceExtensionFunctions {
+ mesh_shaders: Option<MeshShader>,
+ draw_indirect_count: Option<DrawIndirectCount>,
+}
+
+#[doc(hidden)]
+pub struct RawDevice {
+ raw: ash::Device,
+ features: Features,
+ instance: Arc<RawInstance>,
+ extension_fns: DeviceExtensionFunctions,
+ maintenance_level: u8,
+}
+
+impl fmt::Debug for RawDevice {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "RawDevice") // TODO: Real Debug impl
+ }
+}
+impl Drop for RawDevice {
+ fn drop(&mut self) {
+ unsafe {
+ self.raw.destroy_device(None);
+ }
+ }
+}
+
+impl RawDevice {
+ fn debug_messenger(&self) -> Option<&DebugMessenger> {
+ self.instance.debug_messenger.as_ref()
+ }
+
+ fn map_viewport(&self, rect: &hal::pso::Viewport) -> vk::Viewport {
+ let flip_y = self.features.contains(hal::Features::NDC_Y_UP);
+ let shift_y = flip_y && self.maintenance_level != 0;
+ conv::map_viewport(rect, flip_y, shift_y)
+ }
+}
+
+// Need to explicitly synchronize on submission and present.
+pub type RawCommandQueue = Arc<vk::Queue>;
+
+pub struct CommandQueue {
+ raw: RawCommandQueue,
+ device: Arc<RawDevice>,
+ swapchain_fn: Swapchain,
+}
+
+impl fmt::Debug for CommandQueue {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.write_str("CommandQueue")
+ }
+}
+
+impl queue::CommandQueue<Backend> for CommandQueue {
+ unsafe fn submit<'a, T, Ic, S, Iw, Is>(
+ &mut self,
+ submission: queue::Submission<Ic, Iw, Is>,
+ fence: Option<&native::Fence>,
+ ) where
+ T: 'a + Borrow<command::CommandBuffer>,
+ Ic: IntoIterator<Item = &'a T>,
+ S: 'a + Borrow<native::Semaphore>,
+ Iw: IntoIterator<Item = (&'a S, PipelineStage)>,
+ Is: IntoIterator<Item = &'a S>,
+ {
+ //TODO: avoid heap allocations
+ let mut waits = Vec::new();
+ let mut stages = Vec::new();
+
+ let buffers = submission
+ .command_buffers
+ .into_iter()
+ .map(|cmd| cmd.borrow().raw)
+ .collect::<Vec<_>>();
+ for (semaphore, stage) in submission.wait_semaphores {
+ waits.push(semaphore.borrow().0);
+ stages.push(conv::map_pipeline_stage(stage));
+ }
+ let signals = submission
+ .signal_semaphores
+ .into_iter()
+ .map(|semaphore| semaphore.borrow().0)
+ .collect::<Vec<_>>();
+
+ let mut info = vk::SubmitInfo::builder()
+ .wait_semaphores(&waits)
+ .command_buffers(&buffers)
+ .signal_semaphores(&signals);
+ // The AMD driver crashes when the wait count is zero but the stage-mask pointer is non-null, so only set the masks when there are waits.
+ if !stages.is_empty() {
+ info = info.wait_dst_stage_mask(&stages);
+ }
+
+ let fence_raw = fence.map(|fence| fence.0).unwrap_or(vk::Fence::null());
+
+ let result = self.device.raw.queue_submit(*self.raw, &[*info], fence_raw);
+ assert_eq!(Ok(()), result);
+ }
+
+ unsafe fn present(
+ &mut self,
+ surface: &mut window::Surface,
+ image: window::SurfaceImage,
+ wait_semaphore: Option<&native::Semaphore>,
+ ) -> Result<Option<Suboptimal>, PresentError> {
+ let ssc = surface.swapchain.as_ref().unwrap();
+ let wait_semaphore = if let Some(wait_semaphore) = wait_semaphore {
+ wait_semaphore.0
+ } else {
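+ // No user-provided semaphore: submit an empty batch that signals the
+ // swapchain's internal semaphore, so the present below has one to wait on.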
+ let signals = &[ssc.semaphore.0];
+ let submit_info = vk::SubmitInfo::builder()
+ .wait_dst_stage_mask(&[vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT])
+ .signal_semaphores(signals);
+ self.device
+ .raw
+ .queue_submit(*self.raw, &[*submit_info], vk::Fence::null())
+ .unwrap();
+ ssc.semaphore.0
+ };
+
+ let wait_semaphores = &[wait_semaphore];
+ let swapchains = &[ssc.swapchain.raw];
+ let image_indices = &[image.index];
+ let present_info = vk::PresentInfoKHR::builder()
+ .wait_semaphores(wait_semaphores)
+ .swapchains(swapchains)
+ .image_indices(image_indices);
+
+ match self.swapchain_fn.queue_present(*self.raw, &present_info) {
+ Ok(true) => Ok(None),
+ Ok(false) => Ok(Some(Suboptimal)),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => {
+ Err(PresentError::OutOfMemory(OutOfMemory::Host))
+ }
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => {
+ Err(PresentError::OutOfMemory(OutOfMemory::Device))
+ }
+ Err(vk::Result::ERROR_DEVICE_LOST) => Err(PresentError::DeviceLost(DeviceLost)),
+ Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => Err(PresentError::OutOfDate),
+ Err(vk::Result::ERROR_SURFACE_LOST_KHR) => Err(PresentError::SurfaceLost(SurfaceLost)),
+ _ => panic!("Failed to present frame"),
+ }
+ }
+
+ fn wait_idle(&self) -> Result<(), OutOfMemory> {
+ match unsafe { self.device.raw.queue_wait_idle(*self.raw) } {
+ Ok(()) => Ok(()),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => Err(OutOfMemory::Host),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(OutOfMemory::Device),
+ Err(_) => unreachable!(),
+ }
+ }
+}
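+
+// A hedged usage sketch (names are illustrative): submit one command buffer
+// that waits on an acquire semaphore and signals a render semaphore, then
+// present with that same semaphore.
+//
+//     let submission = queue::Submission {
+//         command_buffers: iter::once(&cmd_buffer),
+//         wait_semaphores: iter::once((&acquire_sem, PipelineStage::COLOR_ATTACHMENT_OUTPUT)),
+//         signal_semaphores: iter::once(&render_sem),
+//     };
+//     unsafe { queue.submit(submission, Some(&fence)) };
+//     unsafe { queue.present(&mut surface, image, Some(&render_sem)) }?;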
+
+#[derive(Debug)]
+pub struct Device {
+ shared: Arc<RawDevice>,
+ vendor_id: u32,
+ valid_ash_memory_types: u32,
+}
+
+#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
+pub enum Backend {}
+impl hal::Backend for Backend {
+ type Instance = Instance;
+ type PhysicalDevice = PhysicalDevice;
+ type Device = Device;
+ type Surface = window::Surface;
+
+ type QueueFamily = QueueFamily;
+ type CommandQueue = CommandQueue;
+ type CommandBuffer = command::CommandBuffer;
+
+ type Memory = native::Memory;
+ type CommandPool = pool::RawCommandPool;
+
+ type ShaderModule = native::ShaderModule;
+ type RenderPass = native::RenderPass;
+ type Framebuffer = native::Framebuffer;
+
+ type Buffer = native::Buffer;
+ type BufferView = native::BufferView;
+ type Image = native::Image;
+ type ImageView = native::ImageView;
+ type Sampler = native::Sampler;
+
+ type ComputePipeline = native::ComputePipeline;
+ type GraphicsPipeline = native::GraphicsPipeline;
+ type PipelineLayout = native::PipelineLayout;
+ type PipelineCache = native::PipelineCache;
+ type DescriptorSetLayout = native::DescriptorSetLayout;
+ type DescriptorPool = native::DescriptorPool;
+ type DescriptorSet = native::DescriptorSet;
+
+ type Fence = native::Fence;
+ type Semaphore = native::Semaphore;
+ type Event = native::Event;
+ type QueryPool = native::QueryPool;
+}
diff --git a/third_party/rust/gfx-backend-vulkan/src/native.rs b/third_party/rust/gfx-backend-vulkan/src/native.rs
new file mode 100644
index 0000000000..7754659991
--- /dev/null
+++ b/third_party/rust/gfx-backend-vulkan/src/native.rs
@@ -0,0 +1,178 @@
+use crate::{window::FramebufferCachePtr, Backend, RawDevice};
+use ash::{version::DeviceV1_0, vk};
+use hal::{device::OutOfMemory, image::SubresourceRange, pso};
+use std::{borrow::Borrow, sync::Arc};
+
+#[derive(Debug, Hash)]
+pub struct Semaphore(pub vk::Semaphore);
+
+#[derive(Debug, Hash, PartialEq, Eq)]
+pub struct Fence(pub vk::Fence);
+
+#[derive(Debug, Hash)]
+pub struct Event(pub vk::Event);
+
+#[derive(Debug, Hash)]
+pub struct GraphicsPipeline(pub vk::Pipeline);
+
+#[derive(Debug, Hash)]
+pub struct ComputePipeline(pub vk::Pipeline);
+
+#[derive(Debug, Hash)]
+pub struct Memory {
+ pub(crate) raw: vk::DeviceMemory,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub struct Buffer {
+ pub(crate) raw: vk::Buffer,
+}
+
+unsafe impl Sync for Buffer {}
+unsafe impl Send for Buffer {}
+
+#[derive(Clone, Debug, Eq, Hash, PartialEq)]
+pub struct BufferView {
+ pub(crate) raw: vk::BufferView,
+}
+
+#[derive(Debug, Hash, PartialEq, Eq)]
+pub struct Image {
+ pub(crate) raw: vk::Image,
+ pub(crate) ty: vk::ImageType,
+ pub(crate) flags: vk::ImageCreateFlags,
+ pub(crate) extent: vk::Extent3D,
+}
+
+#[derive(Debug, Hash, PartialEq, Eq)]
+pub enum ImageViewOwner {
+ User,
+ Surface(FramebufferCachePtr),
+}
+
+#[derive(Debug, Hash, PartialEq, Eq)]
+pub struct ImageView {
+ pub(crate) image: vk::Image,
+ pub(crate) view: vk::ImageView,
+ pub(crate) range: SubresourceRange,
+ pub(crate) owner: ImageViewOwner,
+}
+
+#[derive(Debug, Hash)]
+pub struct Sampler(pub vk::Sampler);
+
+#[derive(Debug, Hash)]
+pub struct RenderPass {
+ pub raw: vk::RenderPass,
+ pub clear_attachments_mask: u64,
+}
+
+#[derive(Debug, Hash)]
+pub struct Framebuffer {
+ pub(crate) raw: vk::Framebuffer,
+ pub(crate) owned: bool,
+}
+
+#[derive(Debug)]
+pub struct DescriptorSetLayout {
+ pub(crate) raw: vk::DescriptorSetLayout,
+ pub(crate) bindings: Arc<Vec<pso::DescriptorSetLayoutBinding>>,
+}
+
+#[derive(Debug)]
+pub struct DescriptorSet {
+ pub(crate) raw: vk::DescriptorSet,
+ pub(crate) bindings: Arc<Vec<pso::DescriptorSetLayoutBinding>>,
+}
+
+#[derive(Debug, Hash)]
+pub struct PipelineLayout {
+ pub(crate) raw: vk::PipelineLayout,
+}
+
+#[derive(Debug)]
+pub struct PipelineCache {
+ pub(crate) raw: vk::PipelineCache,
+}
+
+#[derive(Debug, Eq, Hash, PartialEq)]
+pub struct ShaderModule {
+ pub(crate) raw: vk::ShaderModule,
+}
+
+#[derive(Debug)]
+pub struct DescriptorPool {
+ pub(crate) raw: vk::DescriptorPool,
+ pub(crate) device: Arc<RawDevice>,
+ /// This vec only exists to re-use allocations when `DescriptorSet`s are freed.
+ pub(crate) set_free_vec: Vec<vk::DescriptorSet>,
+}
+
+impl pso::DescriptorPool<Backend> for DescriptorPool {
+ unsafe fn allocate<I, E>(
+ &mut self,
+ layout_iter: I,
+ list: &mut E,
+ ) -> Result<(), pso::AllocationError>
+ where
+ I: IntoIterator,
+ I::Item: Borrow<DescriptorSetLayout>,
+ E: Extend<DescriptorSet>,
+ {
+ let mut raw_layouts = Vec::new();
+ let mut layout_bindings = Vec::new();
+ for layout in layout_iter {
+ raw_layouts.push(layout.borrow().raw);
+ layout_bindings.push(layout.borrow().bindings.clone());
+ }
+
+ let info = vk::DescriptorSetAllocateInfo::builder()
+ .descriptor_pool(self.raw)
+ .set_layouts(&raw_layouts);
+
+ self.device
+ .raw
+ .allocate_descriptor_sets(&info)
+ .map(|sets| {
+ list.extend(
+ sets.into_iter()
+ .zip(layout_bindings)
+ .map(|(raw, bindings)| DescriptorSet { raw, bindings }),
+ )
+ })
+ .map_err(|err| match err {
+ vk::Result::ERROR_OUT_OF_HOST_MEMORY => {
+ pso::AllocationError::OutOfMemory(OutOfMemory::Host)
+ }
+ vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
+ pso::AllocationError::OutOfMemory(OutOfMemory::Device)
+ }
+ vk::Result::ERROR_OUT_OF_POOL_MEMORY => pso::AllocationError::OutOfPoolMemory,
+ _ => pso::AllocationError::FragmentedPool,
+ })
+ }
+
+ unsafe fn free<I>(&mut self, descriptor_sets: I)
+ where
+ I: IntoIterator<Item = DescriptorSet>,
+ {
+ self.set_free_vec.clear();
+ self.set_free_vec
+ .extend(descriptor_sets.into_iter().map(|d| d.raw));
+ self.device
+ .raw
+ .free_descriptor_sets(self.raw, &self.set_free_vec);
+ }
+
+ unsafe fn reset(&mut self) {
+ assert_eq!(
+ Ok(()),
+ self.device
+ .raw
+ .reset_descriptor_pool(self.raw, vk::DescriptorPoolResetFlags::empty())
+ );
+ }
+}
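+
+// A hedged usage sketch (names are illustrative): allocate one set per
+// layout into a Vec, then recycle the whole pool at once.
+//
+//     let mut sets = Vec::new();
+//     unsafe { pool.allocate(std::iter::once(&set_layout), &mut sets) }?;
+//     // ... write descriptors and bind the sets ...
+//     unsafe { pool.reset() }; // invalidates every set from this pool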
+
+#[derive(Debug, Hash)]
+pub struct QueryPool(pub vk::QueryPool);
diff --git a/third_party/rust/gfx-backend-vulkan/src/pool.rs b/third_party/rust/gfx-backend-vulkan/src/pool.rs
new file mode 100644
index 0000000000..824a2f1f70
--- /dev/null
+++ b/third_party/rust/gfx-backend-vulkan/src/pool.rs
@@ -0,0 +1,60 @@
+use ash::version::DeviceV1_0;
+use ash::vk;
+use smallvec::SmallVec;
+use std::sync::Arc;
+
+use crate::command::CommandBuffer;
+use crate::conv;
+use crate::{Backend, RawDevice};
+use hal::{command, pool};
+
+#[derive(Debug)]
+pub struct RawCommandPool {
+ pub(crate) raw: vk::CommandPool,
+ pub(crate) device: Arc<RawDevice>,
+}
+
+impl pool::CommandPool<Backend> for RawCommandPool {
+ unsafe fn reset(&mut self, release_resources: bool) {
+ let flags = if release_resources {
+ vk::CommandPoolResetFlags::RELEASE_RESOURCES
+ } else {
+ vk::CommandPoolResetFlags::empty()
+ };
+
+ assert_eq!(Ok(()), self.device.raw.reset_command_pool(self.raw, flags));
+ }
+
+ unsafe fn allocate<E>(&mut self, num: usize, level: command::Level, list: &mut E)
+ where
+ E: Extend<CommandBuffer>,
+ {
+ let info = vk::CommandBufferAllocateInfo::builder()
+ .command_pool(self.raw)
+ .level(conv::map_command_buffer_level(level))
+ .command_buffer_count(num as u32);
+
+ let device = &self.device;
+
+ list.extend(
+ device
+ .raw
+ .allocate_command_buffers(&info)
+ .expect("Error on command buffer allocation")
+ .into_iter()
+ .map(|buffer| CommandBuffer {
+ raw: buffer,
+ device: Arc::clone(device),
+ }),
+ );
+ }
+
+ unsafe fn free<I>(&mut self, cbufs: I)
+ where
+ I: IntoIterator<Item = CommandBuffer>,
+ {
+ let buffers: SmallVec<[vk::CommandBuffer; 16]> =
+ cbufs.into_iter().map(|buffer| buffer.raw).collect();
+ self.device.raw.free_command_buffers(self.raw, &buffers);
+ }
+}
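+
+// A hedged usage sketch (names are illustrative): allocate two primary
+// command buffers, record and submit them, then return them to the pool.
+//
+//     let mut bufs: Vec<CommandBuffer> = Vec::new();
+//     unsafe { pool.allocate(2, command::Level::Primary, &mut bufs) };
+//     // ... record and submit ...
+//     unsafe { pool.free(bufs) };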
diff --git a/third_party/rust/gfx-backend-vulkan/src/window.rs b/third_party/rust/gfx-backend-vulkan/src/window.rs
new file mode 100644
index 0000000000..5add2e8e2f
--- /dev/null
+++ b/third_party/rust/gfx-backend-vulkan/src/window.rs
@@ -0,0 +1,584 @@
+use std::{
+ borrow::Borrow,
+ fmt, hash,
+ os::raw::c_void,
+ sync::{Arc, Mutex},
+ time::Instant,
+};
+
+use ash::{extensions::khr, version::DeviceV1_0 as _, vk};
+use hal::{format::Format, window as w};
+use smallvec::SmallVec;
+
+use crate::{conv, info, native};
+use crate::{
+ Backend, Device, Instance, PhysicalDevice, QueueFamily, RawDevice, RawInstance, VK_ENTRY,
+};
+
+#[derive(Debug, Default)]
+pub struct FramebufferCache {
+ // We expect exactly one framebuffer per frame, but can support more.
+ pub framebuffers: SmallVec<[vk::Framebuffer; 1]>,
+}
+
+#[derive(Debug, Default)]
+pub struct FramebufferCachePtr(pub Arc<Mutex<FramebufferCache>>);
+
+impl hash::Hash for FramebufferCachePtr {
+ fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
+ (self.0.as_ref() as *const Mutex<FramebufferCache>).hash(hasher)
+ }
+}
+impl PartialEq for FramebufferCachePtr {
+ fn eq(&self, other: &Self) -> bool {
+ Arc::ptr_eq(&self.0, &other.0)
+ }
+}
+impl Eq for FramebufferCachePtr {}
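+
+// Hash and equality above are intentionally by Arc pointer identity: two
+// `FramebufferCachePtr`s compare equal only when they share the same cache,
+// so each frame's cache acts as a distinct key.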
+
+#[derive(Debug)]
+struct SurfaceFrame {
+ image: vk::Image,
+ view: vk::ImageView,
+ framebuffers: FramebufferCachePtr,
+}
+
+#[derive(Debug)]
+pub struct SurfaceSwapchain {
+ pub(crate) swapchain: Swapchain,
+ device: Arc<RawDevice>,
+ fence: native::Fence,
+ pub(crate) semaphore: native::Semaphore,
+ frames: Vec<SurfaceFrame>,
+}
+
+impl SurfaceSwapchain {
+ unsafe fn release_resources(self, device: &ash::Device) -> Swapchain {
+ let _ = device.device_wait_idle();
+ device.destroy_fence(self.fence.0, None);
+ device.destroy_semaphore(self.semaphore.0, None);
+ for frame in self.frames {
+ device.destroy_image_view(frame.view, None);
+ for framebuffer in frame.framebuffers.0.lock().unwrap().framebuffers.drain(..) {
+ device.destroy_framebuffer(framebuffer, None);
+ }
+ }
+ self.swapchain
+ }
+}
+
+pub struct Surface {
+ // Vk (EXT) specs [29.2.7 Platform-Independent Information]
+ // For vkDestroySurfaceKHR: Host access to surface must be externally synchronized
+ pub(crate) raw: Arc<RawSurface>,
+ pub(crate) swapchain: Option<SurfaceSwapchain>,
+}
+
+impl fmt::Debug for Surface {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.write_str("Surface")
+ }
+}
+
+pub struct RawSurface {
+ pub(crate) handle: vk::SurfaceKHR,
+ pub(crate) functor: khr::Surface,
+ pub(crate) instance: Arc<RawInstance>,
+}
+
+impl Instance {
+ #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
+ pub fn create_surface_from_xlib(&self, dpy: *mut vk::Display, window: vk::Window) -> Surface {
+ let entry = VK_ENTRY
+ .as_ref()
+ .expect("Unable to load Vulkan entry points");
+
+ if !self.extensions.contains(&khr::XlibSurface::name()) {
+ panic!("Vulkan driver does not support VK_KHR_XLIB_SURFACE");
+ }
+
+ let surface = {
+ let xlib_loader = khr::XlibSurface::new(entry, &self.raw.inner);
+ let info = vk::XlibSurfaceCreateInfoKHR::builder()
+ .flags(vk::XlibSurfaceCreateFlagsKHR::empty())
+ .window(window)
+ .dpy(dpy);
+
+ unsafe { xlib_loader.create_xlib_surface(&info, None) }
+ .expect("XlibSurface::create_xlib_surface() failed")
+ };
+
+ self.create_surface_from_vk_surface_khr(surface)
+ }
+
+ #[cfg(all(unix, not(target_os = "android"), not(target_os = "macos")))]
+ pub fn create_surface_from_xcb(
+ &self,
+ connection: *mut vk::xcb_connection_t,
+ window: vk::xcb_window_t,
+ ) -> Surface {
+ let entry = VK_ENTRY
+ .as_ref()
+ .expect("Unable to load Vulkan entry points");
+
+ if !self.extensions.contains(&khr::XcbSurface::name()) {
+ panic!("Vulkan driver does not support VK_KHR_XCB_SURFACE");
+ }
+
+ let surface = {
+ let xcb_loader = khr::XcbSurface::new(entry, &self.raw.inner);
+ let info = vk::XcbSurfaceCreateInfoKHR::builder()
+ .flags(vk::XcbSurfaceCreateFlagsKHR::empty())
+ .window(window)
+ .connection(connection);
+
+ unsafe { xcb_loader.create_xcb_surface(&info, None) }
+ .expect("XcbSurface::create_xcb_surface() failed")
+ };
+
+ self.create_surface_from_vk_surface_khr(surface)
+ }
+
+ #[cfg(all(unix, not(target_os = "android")))]
+ pub fn create_surface_from_wayland(
+ &self,
+ display: *mut c_void,
+ surface: *mut c_void,
+ ) -> Surface {
+ let entry = VK_ENTRY
+ .as_ref()
+ .expect("Unable to load Vulkan entry points");
+
+ if !self.extensions.contains(&khr::WaylandSurface::name()) {
+ panic!("Vulkan driver does not support VK_KHR_WAYLAND_SURFACE");
+ }
+
+ let surface = {
+ let w_loader = khr::WaylandSurface::new(entry, &self.raw.inner);
+ let info = vk::WaylandSurfaceCreateInfoKHR::builder()
+ .flags(vk::WaylandSurfaceCreateFlagsKHR::empty())
+ .display(display)
+ .surface(surface);
+
+ unsafe { w_loader.create_wayland_surface(&info, None) }
+ .expect("WaylandSurface::create_wayland_surface() failed")
+ };
+
+ self.create_surface_from_vk_surface_khr(surface)
+ }
+
+ #[cfg(target_os = "android")]
+ pub fn create_surface_android(&self, window: *const c_void) -> Surface {
+ let entry = VK_ENTRY
+ .as_ref()
+ .expect("Unable to load Vulkan entry points");
+
+ let surface = {
+ let loader = khr::AndroidSurface::new(entry, &self.raw.inner);
+ let info = vk::AndroidSurfaceCreateInfoKHR::builder()
+ .flags(vk::AndroidSurfaceCreateFlagsKHR::empty())
+ .window(window as *mut _);
+
+ unsafe { loader.create_android_surface(&info, None) }
+ .expect("AndroidSurface::create_android_surface() failed")
+ };
+
+ self.create_surface_from_vk_surface_khr(surface)
+ }
+
+ #[cfg(windows)]
+ pub fn create_surface_from_hwnd(&self, hinstance: *mut c_void, hwnd: *mut c_void) -> Surface {
+ let entry = VK_ENTRY
+ .as_ref()
+ .expect("Unable to load Vulkan entry points");
+
+ if !self.extensions.contains(&khr::Win32Surface::name()) {
+ panic!("Vulkan driver does not support VK_KHR_WIN32_SURFACE");
+ }
+
+ let surface = {
+ let info = vk::Win32SurfaceCreateInfoKHR::builder()
+ .flags(vk::Win32SurfaceCreateFlagsKHR::empty())
+ .hinstance(hinstance)
+ .hwnd(hwnd);
+ let win32_loader = khr::Win32Surface::new(entry, &self.raw.inner);
+ unsafe {
+ win32_loader
+ .create_win32_surface(&info, None)
+ .expect("Unable to create Win32 surface")
+ }
+ };
+
+ self.create_surface_from_vk_surface_khr(surface)
+ }
+
+ #[cfg(target_os = "macos")]
+ pub fn create_surface_from_ns_view(&self, view: *mut c_void) -> Surface {
+ use ash::extensions::mvk;
+ use core_graphics_types::{base::CGFloat, geometry::CGRect};
+ use objc::runtime::{Object, BOOL, YES};
+
+ // TODO: this logic is duplicated from gfx-backend-metal, refactor?
+ unsafe {
+ let view = view as *mut Object;
+ let existing: *mut Object = msg_send![view, layer];
+ let class = class!(CAMetalLayer);
+
+ let use_current = if existing.is_null() {
+ false
+ } else {
+ let result: BOOL = msg_send![existing, isKindOfClass: class];
+ result == YES
+ };
+
+ if !use_current {
+ let layer: *mut Object = msg_send![class, new];
+ let () = msg_send![view, setLayer: layer];
+ let bounds: CGRect = msg_send![view, bounds];
+ let () = msg_send![layer, setBounds: bounds];
+
+ let window: *mut Object = msg_send![view, window];
+ if !window.is_null() {
+ let scale_factor: CGFloat = msg_send![window, backingScaleFactor];
+ let () = msg_send![layer, setContentsScale: scale_factor];
+ }
+ }
+ }
+
+ let entry = VK_ENTRY
+ .as_ref()
+ .expect("Unable to load Vulkan entry points");
+
+ if !self.extensions.contains(&mvk::MacOSSurface::name()) {
+ panic!("Vulkan driver does not support VK_MVK_MACOS_SURFACE");
+ }
+
+ let surface = {
+ let mac_os_loader = mvk::MacOSSurface::new(entry, &self.raw.inner);
+ let mut info = vk::MacOSSurfaceCreateInfoMVK::builder()
+ .flags(vk::MacOSSurfaceCreateFlagsMVK::empty());
+ if let Some(view) = unsafe { view.as_ref() } {
+ info = info.view(view);
+ }
+
+ unsafe {
+ mac_os_loader
+ .create_mac_os_surface_mvk(&info, None)
+ .expect("Unable to create macOS surface")
+ }
+ };
+
+ self.create_surface_from_vk_surface_khr(surface)
+ }
+
+ pub fn create_surface_from_vk_surface_khr(&self, surface: vk::SurfaceKHR) -> Surface {
+ let entry = VK_ENTRY
+ .as_ref()
+ .expect("Unable to load Vulkan entry points");
+
+ let functor = khr::Surface::new(entry, &self.raw.inner);
+
+ let raw = Arc::new(RawSurface {
+ handle: surface,
+ functor,
+ instance: self.raw.clone(),
+ });
+
+ Surface {
+ raw,
+ swapchain: None,
+ }
+ }
+}
+
+impl w::Surface<Backend> for Surface {
+ fn supports_queue_family(&self, queue_family: &QueueFamily) -> bool {
+ match unsafe {
+ self.raw.functor.get_physical_device_surface_support(
+ queue_family.device,
+ queue_family.index,
+ self.raw.handle,
+ )
+ } {
+ Ok(ok) => ok,
+ Err(e) => {
+ error!("get_physical_device_surface_support error {:?}", e);
+ false
+ }
+ }
+ }
+
+ fn capabilities(&self, physical_device: &PhysicalDevice) -> w::SurfaceCapabilities {
+ // Capabilities
+ let caps = unsafe {
+ self.raw
+ .functor
+ .get_physical_device_surface_capabilities(physical_device.handle, self.raw.handle)
+ }
+ .expect("Unable to query surface capabilities");
+
+ // If max_image_count is 0, the number of supported images is unlimited.
+ let max_images = if caps.max_image_count == 0 {
+ !0
+ } else {
+ caps.max_image_count
+ };
+
+ // A current extent of `0xFFFFFFFF` means the surface size is determined by the swapchain that targets it.
+ let current_extent = if caps.current_extent.width != !0 && caps.current_extent.height != !0
+ {
+ Some(w::Extent2D {
+ width: caps.current_extent.width,
+ height: caps.current_extent.height,
+ })
+ } else {
+ None
+ };
+
+ let min_extent = w::Extent2D {
+ width: caps.min_image_extent.width,
+ height: caps.min_image_extent.height,
+ };
+
+ let max_extent = w::Extent2D {
+ width: caps.max_image_extent.width,
+ height: caps.max_image_extent.height,
+ };
+
+ let raw_present_modes = unsafe {
+ self.raw
+ .functor
+ .get_physical_device_surface_present_modes(physical_device.handle, self.raw.handle)
+ }
+ .expect("Unable to query present modes");
+
+ w::SurfaceCapabilities {
+ present_modes: raw_present_modes
+ .into_iter()
+ .fold(w::PresentMode::empty(), |u, m| {
+ u | conv::map_vk_present_mode(m)
+ }),
+ composite_alpha_modes: conv::map_vk_composite_alpha(caps.supported_composite_alpha),
+ image_count: caps.min_image_count..=max_images,
+ current_extent,
+ extents: min_extent..=max_extent,
+ max_image_layers: caps.max_image_array_layers as _,
+ usage: conv::map_vk_image_usage(caps.supported_usage_flags),
+ }
+ }
+
+ fn supported_formats(&self, physical_device: &PhysicalDevice) -> Option<Vec<Format>> {
+ // Swapchain formats
+ let raw_formats = unsafe {
+ self.raw
+ .functor
+ .get_physical_device_surface_formats(physical_device.handle, self.raw.handle)
+ }
+ .expect("Unable to query surface formats");
+
+ match raw_formats[0].format {
+ // If pSurfaceFormats contains a single entry whose format is
+ // VK_FORMAT_UNDEFINED, the surface has no preferred format, and the
+ // application may use any valid VkFormat value.
+ vk::Format::UNDEFINED => None,
+ _ => Some(
+ raw_formats
+ .into_iter()
+ .filter_map(|sf| conv::map_vk_format(sf.format))
+ .collect(),
+ ),
+ }
+ }
+}
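+
+// A hedged usage sketch (names are illustrative): prefer an sRGB format,
+// falling back to the first one the surface reports.
+//
+//     let format = surface
+//         .supported_formats(&adapter.physical_device)
+//         .map_or(Format::Rgba8Srgb, |formats| {
+//             formats
+//                 .iter()
+//                 .find(|f| f.base_format().1 == ChannelType::Srgb)
+//                 .cloned()
+//                 .unwrap_or(formats[0])
+//         });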
+
+#[derive(Debug)]
+pub struct SurfaceImage {
+ pub(crate) index: w::SwapImageIndex,
+ image: native::Image,
+ view: native::ImageView,
+}
+
+impl Borrow<native::Image> for SurfaceImage {
+ fn borrow(&self) -> &native::Image {
+ &self.image
+ }
+}
+
+impl Borrow<native::ImageView> for SurfaceImage {
+ fn borrow(&self) -> &native::ImageView {
+ &self.view
+ }
+}
+
+impl w::PresentationSurface<Backend> for Surface {
+ type SwapchainImage = SurfaceImage;
+
+ unsafe fn configure_swapchain(
+ &mut self,
+ device: &Device,
+ config: w::SwapchainConfig,
+ ) -> Result<(), w::CreationError> {
+ use hal::device::Device as _;
+
+ let format = config.format;
+ let old = self
+ .swapchain
+ .take()
+ .map(|ssc| ssc.release_resources(&device.shared.raw));
+
+ let (swapchain, images) = device.create_swapchain(self, config, old)?;
+
+ self.swapchain = Some(SurfaceSwapchain {
+ swapchain,
+ device: Arc::clone(&device.shared),
+ fence: device.create_fence(false).unwrap(),
+ semaphore: device.create_semaphore().unwrap(),
+ frames: images
+ .iter()
+ .map(|image| {
+ let view = device
+ .create_image_view(
+ image,
+ hal::image::ViewKind::D2,
+ format,
+ hal::format::Swizzle::NO,
+ hal::image::SubresourceRange {
+ aspects: hal::format::Aspects::COLOR,
+ ..Default::default()
+ },
+ )
+ .unwrap();
+ SurfaceFrame {
+ image: view.image,
+ view: view.view,
+ framebuffers: Default::default(),
+ }
+ })
+ .collect(),
+ });
+
+ Ok(())
+ }
+
+ unsafe fn unconfigure_swapchain(&mut self, device: &Device) {
+ if let Some(ssc) = self.swapchain.take() {
+ let swapchain = ssc.release_resources(&device.shared.raw);
+ swapchain.functor.destroy_swapchain(swapchain.raw, None);
+ }
+ }
+
+ unsafe fn acquire_image(
+ &mut self,
+ mut timeout_ns: u64,
+ ) -> Result<(Self::SwapchainImage, Option<w::Suboptimal>), w::AcquireError> {
+ let ssc = self.swapchain.as_mut().unwrap();
+ let moment = Instant::now();
+ let (index, suboptimal) =
+ ssc.swapchain
+ .acquire_image(timeout_ns, None, Some(&ssc.fence))?;
+ timeout_ns = timeout_ns.saturating_sub(moment.elapsed().as_nanos() as u64);
+ let fences = &[ssc.fence.0];
+
+ match ssc.device.raw.wait_for_fences(fences, true, timeout_ns) {
+ Ok(()) => {
+ ssc.device.raw.reset_fences(fences).unwrap();
+ let frame = &ssc.frames[index as usize];
+ // We have just waited for the frame to be fully available on CPU.
+ // All the associated framebuffers are expected to be destroyed by now.
+ for framebuffer in frame.framebuffers.0.lock().unwrap().framebuffers.drain(..) {
+ ssc.device.raw.destroy_framebuffer(framebuffer, None);
+ }
+ let image = Self::SwapchainImage {
+ index,
+ image: native::Image {
+ raw: frame.image,
+ ty: vk::ImageType::TYPE_2D,
+ flags: vk::ImageCreateFlags::empty(),
+ extent: ssc.swapchain.extent,
+ },
+ view: native::ImageView {
+ image: frame.image,
+ view: frame.view,
+ range: hal::image::SubresourceRange {
+ aspects: hal::format::Aspects::COLOR,
+ ..Default::default()
+ },
+ owner: native::ImageViewOwner::Surface(FramebufferCachePtr(Arc::clone(
+ &frame.framebuffers.0,
+ ))),
+ },
+ };
+ Ok((image, suboptimal))
+ }
+ Err(vk::Result::NOT_READY) => Err(w::AcquireError::NotReady),
+ Err(vk::Result::TIMEOUT) => Err(w::AcquireError::Timeout),
+ Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => Err(w::AcquireError::OutOfDate),
+ Err(vk::Result::ERROR_SURFACE_LOST_KHR) => {
+ Err(w::AcquireError::SurfaceLost(hal::device::SurfaceLost))
+ }
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => {
+ Err(w::AcquireError::OutOfMemory(hal::device::OutOfMemory::Host))
+ }
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(w::AcquireError::OutOfMemory(
+ hal::device::OutOfMemory::Device,
+ )),
+ Err(vk::Result::ERROR_DEVICE_LOST) => {
+ Err(w::AcquireError::DeviceLost(hal::device::DeviceLost))
+ }
+ _ => unreachable!(),
+ }
+ }
+}
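+
+// A hedged per-frame sketch (names are illustrative): acquire with an
+// effectively infinite timeout and reconfigure when the swapchain is stale.
+//
+//     let image = match unsafe { surface.acquire_image(!0) } {
+//         Ok((image, _suboptimal)) => image,
+//         Err(_) => {
+//             unsafe { surface.configure_swapchain(&device, config).unwrap() };
+//             return; // skip this frame and retry on the next one
+//         }
+//     };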
+
+pub struct Swapchain {
+ pub(crate) raw: vk::SwapchainKHR,
+ pub(crate) functor: khr::Swapchain,
+ pub(crate) vendor_id: u32,
+ pub(crate) extent: vk::Extent3D,
+}
+
+impl fmt::Debug for Swapchain {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ fmt.write_str("Swapchain")
+ }
+}
+
+impl Swapchain {
+ unsafe fn acquire_image(
+ &mut self,
+ timeout_ns: u64,
+ semaphore: Option<&native::Semaphore>,
+ fence: Option<&native::Fence>,
+ ) -> Result<(w::SwapImageIndex, Option<w::Suboptimal>), w::AcquireError> {
+ let semaphore = semaphore.map_or(vk::Semaphore::null(), |s| s.0);
+ let fence = fence.map_or(vk::Fence::null(), |f| f.0);
+
+ // blocks until an image is available or the timeout expires
+ let index = self
+ .functor
+ .acquire_next_image(self.raw, timeout_ns, semaphore, fence);
+
+ match index {
+ // special case for Intel Vulkan returning bizarre values (ugh)
+ Ok((i, _)) if self.vendor_id == info::intel::VENDOR && i > 0x100 => {
+ Err(w::AcquireError::OutOfDate)
+ }
+ Ok((i, true)) => Ok((i, Some(w::Suboptimal))),
+ Ok((i, false)) => Ok((i, None)),
+ Err(vk::Result::NOT_READY) => Err(w::AcquireError::NotReady),
+ Err(vk::Result::TIMEOUT) => Err(w::AcquireError::Timeout),
+ Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => Err(w::AcquireError::OutOfDate),
+ Err(vk::Result::ERROR_SURFACE_LOST_KHR) => {
+ Err(w::AcquireError::SurfaceLost(hal::device::SurfaceLost))
+ }
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => {
+ Err(w::AcquireError::OutOfMemory(hal::device::OutOfMemory::Host))
+ }
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => Err(w::AcquireError::OutOfMemory(
+ hal::device::OutOfMemory::Device,
+ )),
+ Err(vk::Result::ERROR_DEVICE_LOST) => {
+ Err(w::AcquireError::DeviceLost(hal::device::DeviceLost))
+ }
+ _ => panic!("Failed to acquire image."),
+ }
+ }
+}