path: root/third_party/rust/wgpu-hal/src/vulkan
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/rust/wgpu-hal/src/vulkan
parent     Initial commit. (diff)
Adding upstream version 115.7.0esr.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/wgpu-hal/src/vulkan')
-rw-r--r--  third_party/rust/wgpu-hal/src/vulkan/adapter.rs    1746
-rw-r--r--  third_party/rust/wgpu-hal/src/vulkan/command.rs     826
-rw-r--r--  third_party/rust/wgpu-hal/src/vulkan/conv.rs        825
-rw-r--r--  third_party/rust/wgpu-hal/src/vulkan/device.rs     2032
-rw-r--r--  third_party/rust/wgpu-hal/src/vulkan/instance.rs    832
-rw-r--r--  third_party/rust/wgpu-hal/src/vulkan/mod.rs         626
6 files changed, 6887 insertions(+), 0 deletions(-)
diff --git a/third_party/rust/wgpu-hal/src/vulkan/adapter.rs b/third_party/rust/wgpu-hal/src/vulkan/adapter.rs
new file mode 100644
index 0000000000..f8f26e422f
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/vulkan/adapter.rs
@@ -0,0 +1,1746 @@
+use super::conv;
+
+use ash::{extensions::khr, vk};
+use parking_lot::Mutex;
+
+use std::{collections::BTreeMap, ffi::CStr, sync::Arc};
+
+fn depth_stencil_required_flags() -> vk::FormatFeatureFlags {
+ vk::FormatFeatureFlags::SAMPLED_IMAGE | vk::FormatFeatureFlags::DEPTH_STENCIL_ATTACHMENT
+}
+
+//TODO: const fn?
+fn indexing_features() -> wgt::Features {
+ wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING
+ | wgt::Features::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING
+}
+
+/// Aggregate of the `vk::PhysicalDevice*Features` structs used by `gfx`.
+#[derive(Debug, Default)]
+pub struct PhysicalDeviceFeatures {
+ core: vk::PhysicalDeviceFeatures,
+ pub(super) descriptor_indexing: Option<vk::PhysicalDeviceDescriptorIndexingFeaturesEXT>,
+ imageless_framebuffer: Option<vk::PhysicalDeviceImagelessFramebufferFeaturesKHR>,
+ timeline_semaphore: Option<vk::PhysicalDeviceTimelineSemaphoreFeaturesKHR>,
+ image_robustness: Option<vk::PhysicalDeviceImageRobustnessFeaturesEXT>,
+ robustness2: Option<vk::PhysicalDeviceRobustness2FeaturesEXT>,
+ depth_clip_enable: Option<vk::PhysicalDeviceDepthClipEnableFeaturesEXT>,
+ multiview: Option<vk::PhysicalDeviceMultiviewFeaturesKHR>,
+ astc_hdr: Option<vk::PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT>,
+ shader_float16: Option<(
+ vk::PhysicalDeviceShaderFloat16Int8Features,
+ vk::PhysicalDevice16BitStorageFeatures,
+ )>,
+ zero_initialize_workgroup_memory:
+ Option<vk::PhysicalDeviceZeroInitializeWorkgroupMemoryFeatures>,
+}
+
+// This is safe because the structs have `p_next: *mut c_void`, which we null out/never read.
+unsafe impl Send for PhysicalDeviceFeatures {}
+unsafe impl Sync for PhysicalDeviceFeatures {}
+
+impl PhysicalDeviceFeatures {
+ /// Add the members of `self` into `info.enabled_features` and its `p_next` chain.
+ pub fn add_to_device_create_builder<'a>(
+ &'a mut self,
+ mut info: vk::DeviceCreateInfoBuilder<'a>,
+ ) -> vk::DeviceCreateInfoBuilder<'a> {
+ info = info.enabled_features(&self.core);
+ if let Some(ref mut feature) = self.descriptor_indexing {
+ info = info.push_next(feature);
+ }
+ if let Some(ref mut feature) = self.imageless_framebuffer {
+ info = info.push_next(feature);
+ }
+ if let Some(ref mut feature) = self.timeline_semaphore {
+ info = info.push_next(feature);
+ }
+ if let Some(ref mut feature) = self.image_robustness {
+ info = info.push_next(feature);
+ }
+ if let Some(ref mut feature) = self.robustness2 {
+ info = info.push_next(feature);
+ }
+ if let Some(ref mut feature) = self.depth_clip_enable {
+ info = info.push_next(feature);
+ }
+ if let Some(ref mut feature) = self.astc_hdr {
+ info = info.push_next(feature);
+ }
+ if let Some((ref mut f16_i8_feature, ref mut _16bit_feature)) = self.shader_float16 {
+ info = info.push_next(f16_i8_feature);
+ info = info.push_next(_16bit_feature);
+ }
+ if let Some(ref mut feature) = self.zero_initialize_workgroup_memory {
+ info = info.push_next(feature);
+ }
+ info
+ }
+
+ /// Create a `PhysicalDeviceFeatures` that will be used to create a logical device.
+ ///
+ /// `requested_features` should be the same as what was used to generate `enabled_extensions`.
+ fn from_extensions_and_requested_features(
+ effective_api_version: u32,
+ enabled_extensions: &[&'static CStr],
+ requested_features: wgt::Features,
+ downlevel_flags: wgt::DownlevelFlags,
+ private_caps: &super::PrivateCapabilities,
+ ) -> Self {
+ let needs_sampled_image_non_uniform = requested_features.contains(
+ wgt::Features::TEXTURE_BINDING_ARRAY
+ | wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
+ );
+ let needs_storage_buffer_non_uniform = requested_features.contains(
+ wgt::Features::BUFFER_BINDING_ARRAY
+ | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY
+ | wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
+ );
+ let needs_uniform_buffer_non_uniform = requested_features.contains(
+ wgt::Features::TEXTURE_BINDING_ARRAY
+ | wgt::Features::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
+ );
+ let needs_storage_image_non_uniform = requested_features.contains(
+ wgt::Features::TEXTURE_BINDING_ARRAY
+ | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY
+ | wgt::Features::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
+ );
+ let needs_partially_bound =
+ requested_features.intersects(wgt::Features::PARTIALLY_BOUND_BINDING_ARRAY);
+
+ Self {
+ // vk::PhysicalDeviceFeatures is a struct composed of Bool32's while
+ // Features is a bitfield so we need to map everything manually
+ core: vk::PhysicalDeviceFeatures::builder()
+ .robust_buffer_access(private_caps.robust_buffer_access)
+ .independent_blend(downlevel_flags.contains(wgt::DownlevelFlags::INDEPENDENT_BLEND))
+ .sample_rate_shading(
+ downlevel_flags.contains(wgt::DownlevelFlags::MULTISAMPLED_SHADING),
+ )
+ .image_cube_array(
+ downlevel_flags.contains(wgt::DownlevelFlags::CUBE_ARRAY_TEXTURES),
+ )
+ .draw_indirect_first_instance(
+ requested_features.contains(wgt::Features::INDIRECT_FIRST_INSTANCE),
+ )
+ //.dual_src_blend(requested_features.contains(wgt::Features::DUAL_SRC_BLENDING))
+ .multi_draw_indirect(
+ requested_features.contains(wgt::Features::MULTI_DRAW_INDIRECT),
+ )
+ .fill_mode_non_solid(requested_features.intersects(
+ wgt::Features::POLYGON_MODE_LINE | wgt::Features::POLYGON_MODE_POINT,
+ ))
+ //.depth_bounds(requested_features.contains(wgt::Features::DEPTH_BOUNDS))
+ //.alpha_to_one(requested_features.contains(wgt::Features::ALPHA_TO_ONE))
+ //.multi_viewport(requested_features.contains(wgt::Features::MULTI_VIEWPORTS))
+ .sampler_anisotropy(
+ downlevel_flags.contains(wgt::DownlevelFlags::ANISOTROPIC_FILTERING),
+ )
+ .texture_compression_etc2(
+ requested_features.contains(wgt::Features::TEXTURE_COMPRESSION_ETC2),
+ )
+ .texture_compression_astc_ldr(
+ requested_features.contains(wgt::Features::TEXTURE_COMPRESSION_ASTC),
+ )
+ .texture_compression_bc(
+ requested_features.contains(wgt::Features::TEXTURE_COMPRESSION_BC),
+ )
+ //.occlusion_query_precise(requested_features.contains(wgt::Features::PRECISE_OCCLUSION_QUERY))
+ .pipeline_statistics_query(
+ requested_features.contains(wgt::Features::PIPELINE_STATISTICS_QUERY),
+ )
+ .vertex_pipeline_stores_and_atomics(
+ requested_features.contains(wgt::Features::VERTEX_WRITABLE_STORAGE),
+ )
+ .fragment_stores_and_atomics(
+ downlevel_flags.contains(wgt::DownlevelFlags::FRAGMENT_WRITABLE_STORAGE),
+ )
+ //.shader_image_gather_extended(
+ //.shader_storage_image_extended_formats(
+ .shader_uniform_buffer_array_dynamic_indexing(
+ requested_features.contains(wgt::Features::BUFFER_BINDING_ARRAY),
+ )
+ .shader_storage_buffer_array_dynamic_indexing(requested_features.contains(
+ wgt::Features::BUFFER_BINDING_ARRAY
+ | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY,
+ ))
+ .shader_sampled_image_array_dynamic_indexing(
+ requested_features.contains(wgt::Features::TEXTURE_BINDING_ARRAY),
+ )
+                .shader_storage_image_array_dynamic_indexing(requested_features.contains(
+ wgt::Features::TEXTURE_BINDING_ARRAY
+ | wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY,
+ ))
+ //.shader_storage_image_array_dynamic_indexing(
+ //.shader_clip_distance(requested_features.contains(wgt::Features::SHADER_CLIP_DISTANCE))
+ //.shader_cull_distance(requested_features.contains(wgt::Features::SHADER_CULL_DISTANCE))
+ .shader_float64(requested_features.contains(wgt::Features::SHADER_F64))
+ //.shader_int64(requested_features.contains(wgt::Features::SHADER_INT64))
+ .shader_int16(requested_features.contains(wgt::Features::SHADER_I16))
+ //.shader_resource_residency(requested_features.contains(wgt::Features::SHADER_RESOURCE_RESIDENCY))
+ .geometry_shader(requested_features.contains(wgt::Features::SHADER_PRIMITIVE_INDEX))
+ .build(),
+ descriptor_indexing: if requested_features.intersects(indexing_features()) {
+ Some(
+ vk::PhysicalDeviceDescriptorIndexingFeaturesEXT::builder()
+ .shader_sampled_image_array_non_uniform_indexing(
+ needs_sampled_image_non_uniform,
+ )
+ .shader_storage_image_array_non_uniform_indexing(
+ needs_storage_image_non_uniform,
+ )
+ .shader_uniform_buffer_array_non_uniform_indexing(
+ needs_uniform_buffer_non_uniform,
+ )
+ .shader_storage_buffer_array_non_uniform_indexing(
+ needs_storage_buffer_non_uniform,
+ )
+ .descriptor_binding_partially_bound(needs_partially_bound)
+ .build(),
+ )
+ } else {
+ None
+ },
+ imageless_framebuffer: if effective_api_version >= vk::API_VERSION_1_2
+ || enabled_extensions.contains(&vk::KhrImagelessFramebufferFn::name())
+ {
+ Some(
+ vk::PhysicalDeviceImagelessFramebufferFeaturesKHR::builder()
+ .imageless_framebuffer(private_caps.imageless_framebuffers)
+ .build(),
+ )
+ } else {
+ None
+ },
+ timeline_semaphore: if effective_api_version >= vk::API_VERSION_1_2
+ || enabled_extensions.contains(&vk::KhrTimelineSemaphoreFn::name())
+ {
+ Some(
+ vk::PhysicalDeviceTimelineSemaphoreFeaturesKHR::builder()
+ .timeline_semaphore(private_caps.timeline_semaphores)
+ .build(),
+ )
+ } else {
+ None
+ },
+ image_robustness: if effective_api_version >= vk::API_VERSION_1_3
+ || enabled_extensions.contains(&vk::ExtImageRobustnessFn::name())
+ {
+ Some(
+ vk::PhysicalDeviceImageRobustnessFeaturesEXT::builder()
+ .robust_image_access(private_caps.robust_image_access)
+ .build(),
+ )
+ } else {
+ None
+ },
+ robustness2: if enabled_extensions.contains(&vk::ExtRobustness2Fn::name()) {
+                // Note: enabling `robust_buffer_access2` isn't required, strictly speaking,
+ // since we can enable `robust_buffer_access` all the time. But it improves
+ // program portability, so we opt into it anyway.
+ Some(
+ vk::PhysicalDeviceRobustness2FeaturesEXT::builder()
+ .robust_buffer_access2(private_caps.robust_buffer_access)
+ .robust_image_access2(private_caps.robust_image_access)
+ .build(),
+ )
+ } else {
+ None
+ },
+ depth_clip_enable: if enabled_extensions.contains(&vk::ExtDepthClipEnableFn::name()) {
+ Some(
+ vk::PhysicalDeviceDepthClipEnableFeaturesEXT::builder()
+ .depth_clip_enable(
+ requested_features.contains(wgt::Features::DEPTH_CLIP_CONTROL),
+ )
+ .build(),
+ )
+ } else {
+ None
+ },
+ multiview: if effective_api_version >= vk::API_VERSION_1_1
+ || enabled_extensions.contains(&vk::KhrMultiviewFn::name())
+ {
+ Some(
+ vk::PhysicalDeviceMultiviewFeatures::builder()
+ .multiview(requested_features.contains(wgt::Features::MULTIVIEW))
+ .build(),
+ )
+ } else {
+ None
+ },
+ astc_hdr: if enabled_extensions.contains(&vk::ExtTextureCompressionAstcHdrFn::name()) {
+ Some(
+ vk::PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT::builder()
+ .texture_compression_astc_hdr(true)
+ .build(),
+ )
+ } else {
+ None
+ },
+ shader_float16: if requested_features.contains(wgt::Features::SHADER_F16) {
+ Some((
+ vk::PhysicalDeviceShaderFloat16Int8Features::builder()
+ .shader_float16(true)
+ .build(),
+ vk::PhysicalDevice16BitStorageFeatures::builder()
+ .storage_buffer16_bit_access(true)
+ .uniform_and_storage_buffer16_bit_access(true)
+ .build(),
+ ))
+ } else {
+ None
+ },
+ zero_initialize_workgroup_memory: if effective_api_version >= vk::API_VERSION_1_3
+ || enabled_extensions.contains(&vk::KhrZeroInitializeWorkgroupMemoryFn::name())
+ {
+ Some(
+ vk::PhysicalDeviceZeroInitializeWorkgroupMemoryFeatures::builder()
+ .shader_zero_initialize_workgroup_memory(
+ private_caps.zero_initialize_workgroup_memory,
+ )
+ .build(),
+ )
+ } else {
+ None
+ },
+ }
+ }
+
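+    /// Compute the wgpu `Features` and `DownlevelFlags` that this set of Vulkan
+    /// features, together with the device's `caps`, can expose.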
+ fn to_wgpu(
+ &self,
+ instance: &ash::Instance,
+ phd: vk::PhysicalDevice,
+ caps: &PhysicalDeviceCapabilities,
+ ) -> (wgt::Features, wgt::DownlevelFlags) {
+ use crate::auxil::db;
+ use wgt::{DownlevelFlags as Df, Features as F};
+ let mut features = F::empty()
+ | F::SPIRV_SHADER_PASSTHROUGH
+ | F::MAPPABLE_PRIMARY_BUFFERS
+ | F::PUSH_CONSTANTS
+ | F::ADDRESS_MODE_CLAMP_TO_BORDER
+ | F::ADDRESS_MODE_CLAMP_TO_ZERO
+ | F::TIMESTAMP_QUERY
+ | F::TIMESTAMP_QUERY_INSIDE_PASSES
+ | F::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES
+ | F::CLEAR_TEXTURE;
+
+ let mut dl_flags = Df::COMPUTE_SHADERS
+ | Df::BASE_VERTEX
+ | Df::READ_ONLY_DEPTH_STENCIL
+ | Df::NON_POWER_OF_TWO_MIPMAPPED_TEXTURES
+ | Df::COMPARISON_SAMPLERS
+ | Df::VERTEX_STORAGE
+ | Df::FRAGMENT_STORAGE
+ | Df::DEPTH_TEXTURE_AND_BUFFER_COPIES
+ | Df::BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED
+ | Df::UNRESTRICTED_INDEX_BUFFER
+ | Df::INDIRECT_EXECUTION
+ | Df::VIEW_FORMATS
+ | Df::UNRESTRICTED_EXTERNAL_TEXTURE_COPIES;
+
+ dl_flags.set(
+ Df::SURFACE_VIEW_FORMATS,
+ caps.supports_extension(vk::KhrSwapchainMutableFormatFn::name()),
+ );
+ dl_flags.set(Df::CUBE_ARRAY_TEXTURES, self.core.image_cube_array != 0);
+ dl_flags.set(Df::ANISOTROPIC_FILTERING, self.core.sampler_anisotropy != 0);
+ dl_flags.set(
+ Df::FRAGMENT_WRITABLE_STORAGE,
+ self.core.fragment_stores_and_atomics != 0,
+ );
+ dl_flags.set(Df::MULTISAMPLED_SHADING, self.core.sample_rate_shading != 0);
+ dl_flags.set(Df::INDEPENDENT_BLEND, self.core.independent_blend != 0);
+ dl_flags.set(
+ Df::FULL_DRAW_INDEX_UINT32,
+ self.core.full_draw_index_uint32 != 0,
+ );
+ dl_flags.set(Df::DEPTH_BIAS_CLAMP, self.core.depth_bias_clamp != 0);
+
+ features.set(
+ F::INDIRECT_FIRST_INSTANCE,
+ self.core.draw_indirect_first_instance != 0,
+ );
+ //if self.core.dual_src_blend != 0
+ features.set(F::MULTI_DRAW_INDIRECT, self.core.multi_draw_indirect != 0);
+ features.set(F::POLYGON_MODE_LINE, self.core.fill_mode_non_solid != 0);
+ features.set(F::POLYGON_MODE_POINT, self.core.fill_mode_non_solid != 0);
+ //if self.core.depth_bounds != 0 {
+ //if self.core.alpha_to_one != 0 {
+ //if self.core.multi_viewport != 0 {
+ features.set(
+ F::TEXTURE_COMPRESSION_ETC2,
+ self.core.texture_compression_etc2 != 0,
+ );
+ features.set(
+ F::TEXTURE_COMPRESSION_ASTC,
+ self.core.texture_compression_astc_ldr != 0,
+ );
+ features.set(
+ F::TEXTURE_COMPRESSION_BC,
+ self.core.texture_compression_bc != 0,
+ );
+ features.set(
+ F::PIPELINE_STATISTICS_QUERY,
+ self.core.pipeline_statistics_query != 0,
+ );
+ features.set(
+ F::VERTEX_WRITABLE_STORAGE,
+ self.core.vertex_pipeline_stores_and_atomics != 0,
+ );
+ //if self.core.shader_image_gather_extended != 0 {
+ //if self.core.shader_storage_image_extended_formats != 0 {
+ features.set(
+ F::BUFFER_BINDING_ARRAY,
+ self.core.shader_uniform_buffer_array_dynamic_indexing != 0,
+ );
+ features.set(
+ F::TEXTURE_BINDING_ARRAY,
+ self.core.shader_sampled_image_array_dynamic_indexing != 0,
+ );
+ features.set(F::SHADER_PRIMITIVE_INDEX, self.core.geometry_shader != 0);
+ if Self::all_features_supported(
+ &features,
+ &[
+ (
+ F::BUFFER_BINDING_ARRAY,
+ self.core.shader_storage_buffer_array_dynamic_indexing,
+ ),
+ (
+ F::TEXTURE_BINDING_ARRAY,
+ self.core.shader_storage_image_array_dynamic_indexing,
+ ),
+ ],
+ ) {
+ features.insert(F::STORAGE_RESOURCE_BINDING_ARRAY);
+ }
+ //if self.core.shader_storage_image_array_dynamic_indexing != 0 {
+ //if self.core.shader_clip_distance != 0 {
+ //if self.core.shader_cull_distance != 0 {
+ features.set(F::SHADER_F64, self.core.shader_float64 != 0);
+ //if self.core.shader_int64 != 0 {
+ features.set(F::SHADER_I16, self.core.shader_int16 != 0);
+
+ //if caps.supports_extension(vk::KhrSamplerMirrorClampToEdgeFn::name()) {
+ //if caps.supports_extension(vk::ExtSamplerFilterMinmaxFn::name()) {
+ features.set(
+ F::MULTI_DRAW_INDIRECT_COUNT,
+ caps.supports_extension(vk::KhrDrawIndirectCountFn::name()),
+ );
+ features.set(
+ F::CONSERVATIVE_RASTERIZATION,
+ caps.supports_extension(vk::ExtConservativeRasterizationFn::name()),
+ );
+
+ let intel_windows = caps.properties.vendor_id == db::intel::VENDOR && cfg!(windows);
+
+ if let Some(ref descriptor_indexing) = self.descriptor_indexing {
+ const STORAGE: F = F::STORAGE_RESOURCE_BINDING_ARRAY;
+ if Self::all_features_supported(
+ &features,
+ &[
+ (
+ F::TEXTURE_BINDING_ARRAY,
+ descriptor_indexing.shader_sampled_image_array_non_uniform_indexing,
+ ),
+ (
+ F::BUFFER_BINDING_ARRAY | STORAGE,
+ descriptor_indexing.shader_storage_buffer_array_non_uniform_indexing,
+ ),
+ ],
+ ) {
+ features.insert(F::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING);
+ }
+ if Self::all_features_supported(
+ &features,
+ &[
+ (
+ F::BUFFER_BINDING_ARRAY,
+ descriptor_indexing.shader_uniform_buffer_array_non_uniform_indexing,
+ ),
+ (
+ F::TEXTURE_BINDING_ARRAY | STORAGE,
+ descriptor_indexing.shader_storage_image_array_non_uniform_indexing,
+ ),
+ ],
+ ) {
+ features.insert(F::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING);
+ }
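+            // `descriptor_binding_partially_bound` is also reported by Intel's
+            // Windows drivers, but the feature is deliberately withheld there
+            // (presumably due to driver issues), hence the `!intel_windows` guard.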
+ if descriptor_indexing.descriptor_binding_partially_bound != 0 && !intel_windows {
+ features |= F::PARTIALLY_BOUND_BINDING_ARRAY;
+ }
+ }
+
+ if let Some(ref feature) = self.depth_clip_enable {
+ features.set(F::DEPTH_CLIP_CONTROL, feature.depth_clip_enable != 0);
+ }
+
+ if let Some(ref multiview) = self.multiview {
+ features.set(F::MULTIVIEW, multiview.multiview != 0);
+ }
+
+ features.set(
+ F::TEXTURE_FORMAT_16BIT_NORM,
+ is_format_16bit_norm_supported(instance, phd),
+ );
+
+ if let Some(ref astc_hdr) = self.astc_hdr {
+ features.set(
+ F::TEXTURE_COMPRESSION_ASTC_HDR,
+ astc_hdr.texture_compression_astc_hdr != 0,
+ );
+ }
+
+ if let Some((ref f16_i8, ref bit16)) = self.shader_float16 {
+ features.set(
+ F::SHADER_F16,
+ f16_i8.shader_float16 != 0
+ && bit16.storage_buffer16_bit_access != 0
+ && bit16.uniform_and_storage_buffer16_bit_access != 0,
+ );
+ }
+
+ let supports_depth_format = |format| {
+ supports_format(
+ instance,
+ phd,
+ format,
+ vk::ImageTiling::OPTIMAL,
+ depth_stencil_required_flags(),
+ )
+ };
+
+ let texture_s8 = supports_depth_format(vk::Format::S8_UINT);
+ let texture_d32 = supports_depth_format(vk::Format::D32_SFLOAT);
+ let texture_d24_s8 = supports_depth_format(vk::Format::D24_UNORM_S8_UINT);
+ let texture_d32_s8 = supports_depth_format(vk::Format::D32_SFLOAT_S8_UINT);
+
+ let stencil8 = texture_s8 || texture_d24_s8;
+ let depth24_plus_stencil8 = texture_d24_s8 || texture_d32_s8;
+
+ dl_flags.set(
+ Df::WEBGPU_TEXTURE_FORMAT_SUPPORT,
+ stencil8 && depth24_plus_stencil8 && texture_d32,
+ );
+
+ features.set(F::DEPTH32FLOAT_STENCIL8, texture_d32_s8);
+
+ let rg11b10ufloat_renderable = supports_format(
+ instance,
+ phd,
+ vk::Format::B10G11R11_UFLOAT_PACK32,
+ vk::ImageTiling::OPTIMAL,
+ vk::FormatFeatureFlags::COLOR_ATTACHMENT
+ | vk::FormatFeatureFlags::COLOR_ATTACHMENT_BLEND,
+ );
+ features.set(F::RG11B10UFLOAT_RENDERABLE, rg11b10ufloat_renderable);
+
+ (features, dl_flags)
+ }
+
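+    /// Returns `true` if, for every `(flag, support)` pair, either `flag` is
+    /// absent from `features` or the corresponding Vulkan `Bool32` is non-zero.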
+ fn all_features_supported(
+ features: &wgt::Features,
+ implications: &[(wgt::Features, vk::Bool32)],
+ ) -> bool {
+ implications
+ .iter()
+ .all(|&(flag, support)| !features.contains(flag) || support != 0)
+ }
+}
+
+/// Information gathered about a physical device's capabilities.
+#[derive(Default)]
+pub struct PhysicalDeviceCapabilities {
+ supported_extensions: Vec<vk::ExtensionProperties>,
+ properties: vk::PhysicalDeviceProperties,
+ maintenance_3: Option<vk::PhysicalDeviceMaintenance3Properties>,
+ descriptor_indexing: Option<vk::PhysicalDeviceDescriptorIndexingPropertiesEXT>,
+ driver: Option<vk::PhysicalDeviceDriverPropertiesKHR>,
+ /// The effective driver api version supported by the physical device.
+ ///
+ /// The Vulkan specification states the following in the documentation for VkPhysicalDeviceProperties:
+ /// > The value of apiVersion may be different than the version returned by vkEnumerateInstanceVersion;
+ /// > either higher or lower. In such cases, the application must not use functionality that exceeds
+ /// > the version of Vulkan associated with a given object.
+ ///
+ /// For example, a Vulkan 1.1 instance cannot use functionality added in Vulkan 1.2 even if the physical
+ /// device supports Vulkan 1.2.
+ ///
+    /// This means we cannot assume that the apiVersion reported by VkPhysicalDeviceProperties is the
+    /// version we may actually use. Instead, the effective version is the lower of the instance version
+    /// and the physical device version.
+ effective_api_version: u32,
+}
+
+// This is safe because the structs have `p_next: *mut c_void`, which we null out/never read.
+unsafe impl Send for PhysicalDeviceCapabilities {}
+unsafe impl Sync for PhysicalDeviceCapabilities {}
+
+impl PhysicalDeviceCapabilities {
+ pub fn properties(&self) -> vk::PhysicalDeviceProperties {
+ self.properties
+ }
+
+ pub fn supports_extension(&self, extension: &CStr) -> bool {
+ use crate::auxil::cstr_from_bytes_until_nul;
+ self.supported_extensions
+ .iter()
+ .any(|ep| cstr_from_bytes_until_nul(&ep.extension_name) == Some(extension))
+ }
+
+ /// Map `requested_features` to the list of Vulkan extension strings required to create the logical device.
+ fn get_required_extensions(&self, requested_features: wgt::Features) -> Vec<&'static CStr> {
+ let mut extensions = Vec::new();
+
+ // Note that quite a few extensions depend on the `VK_KHR_get_physical_device_properties2` instance extension.
+ // We enable `VK_KHR_get_physical_device_properties2` unconditionally (if available).
+
+ // Require `VK_KHR_swapchain`
+ extensions.push(vk::KhrSwapchainFn::name());
+
+ if self.effective_api_version < vk::API_VERSION_1_1 {
+ // Require either `VK_KHR_maintenance1` or `VK_AMD_negative_viewport_height`
+ if self.supports_extension(vk::KhrMaintenance1Fn::name()) {
+ extensions.push(vk::KhrMaintenance1Fn::name());
+ } else {
+ // `VK_AMD_negative_viewport_height` is obsoleted by `VK_KHR_maintenance1` and must not be enabled alongside it
+ extensions.push(vk::AmdNegativeViewportHeightFn::name());
+ }
+
+ // Optional `VK_KHR_maintenance2`
+ if self.supports_extension(vk::KhrMaintenance2Fn::name()) {
+ extensions.push(vk::KhrMaintenance2Fn::name());
+ }
+
+ // Optional `VK_KHR_maintenance3`
+ if self.supports_extension(vk::KhrMaintenance3Fn::name()) {
+ extensions.push(vk::KhrMaintenance3Fn::name());
+ }
+
+ // Require `VK_KHR_storage_buffer_storage_class`
+ extensions.push(vk::KhrStorageBufferStorageClassFn::name());
+
+ // Require `VK_KHR_multiview` if the associated feature was requested
+ if requested_features.contains(wgt::Features::MULTIVIEW) {
+ extensions.push(vk::KhrMultiviewFn::name());
+ }
+ }
+
+ if self.effective_api_version < vk::API_VERSION_1_2 {
+ // Optional `VK_KHR_image_format_list`
+ if self.supports_extension(vk::KhrImageFormatListFn::name()) {
+ extensions.push(vk::KhrImageFormatListFn::name());
+ }
+
+ // Optional `VK_KHR_imageless_framebuffer`
+ if self.supports_extension(vk::KhrImagelessFramebufferFn::name()) {
+ extensions.push(vk::KhrImagelessFramebufferFn::name());
+ // Require `VK_KHR_maintenance2` due to it being a dependency
+ if self.effective_api_version < vk::API_VERSION_1_1 {
+ extensions.push(vk::KhrMaintenance2Fn::name());
+ }
+ }
+
+ // Optional `VK_KHR_driver_properties`
+ if self.supports_extension(vk::KhrDriverPropertiesFn::name()) {
+ extensions.push(vk::KhrDriverPropertiesFn::name());
+ }
+
+ // Optional `VK_KHR_timeline_semaphore`
+ if self.supports_extension(vk::KhrTimelineSemaphoreFn::name()) {
+ extensions.push(vk::KhrTimelineSemaphoreFn::name());
+ }
+
+ // Require `VK_EXT_descriptor_indexing` if one of the associated features was requested
+ if requested_features.intersects(indexing_features()) {
+ extensions.push(vk::ExtDescriptorIndexingFn::name());
+ }
+
+ // Require `VK_KHR_shader_float16_int8` and `VK_KHR_16bit_storage` if the associated feature was requested
+ if requested_features.contains(wgt::Features::SHADER_F16) {
+ extensions.push(vk::KhrShaderFloat16Int8Fn::name());
+ // `VK_KHR_16bit_storage` requires `VK_KHR_storage_buffer_storage_class`, however we require that one already
+ if self.effective_api_version < vk::API_VERSION_1_1 {
+ extensions.push(vk::Khr16bitStorageFn::name());
+ }
+ }
+
+ //extensions.push(vk::KhrSamplerMirrorClampToEdgeFn::name());
+ //extensions.push(vk::ExtSamplerFilterMinmaxFn::name());
+ }
+
+ if self.effective_api_version < vk::API_VERSION_1_3 {
+ // Optional `VK_EXT_image_robustness`
+ if self.supports_extension(vk::ExtImageRobustnessFn::name()) {
+ extensions.push(vk::ExtImageRobustnessFn::name());
+ }
+ }
+
+ // Optional `VK_KHR_swapchain_mutable_format`
+ if self.supports_extension(vk::KhrSwapchainMutableFormatFn::name()) {
+ extensions.push(vk::KhrSwapchainMutableFormatFn::name());
+ }
+
+ // Optional `VK_EXT_robustness2`
+ if self.supports_extension(vk::ExtRobustness2Fn::name()) {
+ extensions.push(vk::ExtRobustness2Fn::name());
+ }
+
+ // Require `VK_KHR_draw_indirect_count` if the associated feature was requested
+ // Even though Vulkan 1.2 has promoted the extension to core, we must require the extension to avoid
+ // large amounts of spaghetti involved with using PhysicalDeviceVulkan12Features.
+ if requested_features.contains(wgt::Features::MULTI_DRAW_INDIRECT_COUNT) {
+ extensions.push(vk::KhrDrawIndirectCountFn::name());
+ }
+
+ // Require `VK_EXT_conservative_rasterization` if the associated feature was requested
+ if requested_features.contains(wgt::Features::CONSERVATIVE_RASTERIZATION) {
+ extensions.push(vk::ExtConservativeRasterizationFn::name());
+ }
+
+ // Require `VK_EXT_depth_clip_enable` if the associated feature was requested
+ if requested_features.contains(wgt::Features::DEPTH_CLIP_CONTROL) {
+ extensions.push(vk::ExtDepthClipEnableFn::name());
+ }
+
+ // Require `VK_KHR_portability_subset` on macOS/iOS
+ #[cfg(any(target_os = "macos", target_os = "ios"))]
+ extensions.push(vk::KhrPortabilitySubsetFn::name());
+
+ // Require `VK_EXT_texture_compression_astc_hdr` if the associated feature was requested
+ if requested_features.contains(wgt::Features::TEXTURE_COMPRESSION_ASTC_HDR) {
+ extensions.push(vk::ExtTextureCompressionAstcHdrFn::name());
+ }
+
+ extensions
+ }
+
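+    /// Translate the Vulkan device limits into wgpu `Limits`, clamping values
+    /// to the maxima that wgpu itself supports.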
+ fn to_wgpu_limits(&self) -> wgt::Limits {
+ let limits = &self.properties.limits;
+
+ let max_compute_workgroup_sizes = limits.max_compute_work_group_size;
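+        // wgpu exposes a single per-dimension dispatch limit, so take the most
+        // restrictive of Vulkan's three per-axis workgroup-count limits.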
+ let max_compute_workgroups_per_dimension = limits.max_compute_work_group_count[0]
+ .min(limits.max_compute_work_group_count[1])
+ .min(limits.max_compute_work_group_count[2]);
+
+ // Prevent very large buffers on mesa and most android devices.
+ let is_nvidia = self.properties.vendor_id == crate::auxil::db::nvidia::VENDOR;
+ let max_buffer_size =
+ if (cfg!(target_os = "linux") || cfg!(target_os = "android")) && !is_nvidia {
+ i32::MAX as u64
+ } else {
+ u64::MAX
+ };
+
+ wgt::Limits {
+ max_texture_dimension_1d: limits.max_image_dimension1_d,
+ max_texture_dimension_2d: limits.max_image_dimension2_d,
+ max_texture_dimension_3d: limits.max_image_dimension3_d,
+ max_texture_array_layers: limits.max_image_array_layers,
+ max_bind_groups: limits
+ .max_bound_descriptor_sets
+ .min(crate::MAX_BIND_GROUPS as u32),
+ max_bindings_per_bind_group: 640,
+ max_dynamic_uniform_buffers_per_pipeline_layout: limits
+ .max_descriptor_set_uniform_buffers_dynamic,
+ max_dynamic_storage_buffers_per_pipeline_layout: limits
+ .max_descriptor_set_storage_buffers_dynamic,
+ max_sampled_textures_per_shader_stage: limits.max_per_stage_descriptor_sampled_images,
+ max_samplers_per_shader_stage: limits.max_per_stage_descriptor_samplers,
+ max_storage_buffers_per_shader_stage: limits.max_per_stage_descriptor_storage_buffers,
+ max_storage_textures_per_shader_stage: limits.max_per_stage_descriptor_storage_images,
+ max_uniform_buffers_per_shader_stage: limits.max_per_stage_descriptor_uniform_buffers,
+ max_uniform_buffer_binding_size: limits
+ .max_uniform_buffer_range
+ .min(crate::auxil::MAX_I32_BINDING_SIZE),
+ max_storage_buffer_binding_size: limits
+ .max_storage_buffer_range
+ .min(crate::auxil::MAX_I32_BINDING_SIZE),
+ max_vertex_buffers: limits
+ .max_vertex_input_bindings
+ .min(crate::MAX_VERTEX_BUFFERS as u32),
+ max_vertex_attributes: limits.max_vertex_input_attributes,
+ max_vertex_buffer_array_stride: limits.max_vertex_input_binding_stride,
+ max_push_constant_size: limits.max_push_constants_size,
+ min_uniform_buffer_offset_alignment: limits.min_uniform_buffer_offset_alignment as u32,
+ min_storage_buffer_offset_alignment: limits.min_storage_buffer_offset_alignment as u32,
+ max_inter_stage_shader_components: limits
+ .max_vertex_output_components
+ .min(limits.max_fragment_input_components),
+ max_compute_workgroup_storage_size: limits.max_compute_shared_memory_size,
+ max_compute_invocations_per_workgroup: limits.max_compute_work_group_invocations,
+ max_compute_workgroup_size_x: max_compute_workgroup_sizes[0],
+ max_compute_workgroup_size_y: max_compute_workgroup_sizes[1],
+ max_compute_workgroup_size_z: max_compute_workgroup_sizes[2],
+ max_compute_workgroups_per_dimension,
+ max_buffer_size,
+ }
+ }
+
+ fn to_hal_alignments(&self) -> crate::Alignments {
+ let limits = &self.properties.limits;
+ crate::Alignments {
+ buffer_copy_offset: wgt::BufferSize::new(limits.optimal_buffer_copy_offset_alignment)
+ .unwrap(),
+ buffer_copy_pitch: wgt::BufferSize::new(limits.optimal_buffer_copy_row_pitch_alignment)
+ .unwrap(),
+ }
+ }
+}
+
+impl super::InstanceShared {
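+    /// Gather the capabilities and features of `phd`, chaining the relevant
+    /// extension structs through `p_next` when
+    /// `VK_KHR_get_physical_device_properties2` is available, and falling back
+    /// to the plain Vulkan 1.0 queries otherwise.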
+ #[allow(trivial_casts)] // false positives
+ fn inspect(
+ &self,
+ phd: vk::PhysicalDevice,
+ ) -> (PhysicalDeviceCapabilities, PhysicalDeviceFeatures) {
+ let capabilities = {
+ let mut capabilities = PhysicalDeviceCapabilities::default();
+ capabilities.supported_extensions =
+ unsafe { self.raw.enumerate_device_extension_properties(phd).unwrap() };
+ capabilities.properties = if let Some(ref get_device_properties) =
+ self.get_physical_device_properties
+ {
+ // Get these now to avoid borrowing conflicts later
+ let supports_descriptor_indexing = self.driver_api_version >= vk::API_VERSION_1_2
+ || capabilities.supports_extension(vk::ExtDescriptorIndexingFn::name());
+ let supports_driver_properties = self.driver_api_version >= vk::API_VERSION_1_2
+ || capabilities.supports_extension(vk::KhrDriverPropertiesFn::name());
+
+ let mut builder = vk::PhysicalDeviceProperties2KHR::builder();
+ if self.driver_api_version >= vk::API_VERSION_1_1
+ || capabilities.supports_extension(vk::KhrMaintenance3Fn::name())
+ {
+ capabilities.maintenance_3 =
+ Some(vk::PhysicalDeviceMaintenance3Properties::default());
+ builder = builder.push_next(capabilities.maintenance_3.as_mut().unwrap());
+ }
+
+ if supports_descriptor_indexing {
+ let next = capabilities
+ .descriptor_indexing
+ .insert(vk::PhysicalDeviceDescriptorIndexingPropertiesEXT::default());
+ builder = builder.push_next(next);
+ }
+
+ if supports_driver_properties {
+ let next = capabilities
+ .driver
+ .insert(vk::PhysicalDeviceDriverPropertiesKHR::default());
+ builder = builder.push_next(next);
+ }
+
+ let mut properties2 = builder.build();
+ unsafe {
+ get_device_properties.get_physical_device_properties2(phd, &mut properties2);
+ }
+ properties2.properties
+ } else {
+ unsafe { self.raw.get_physical_device_properties(phd) }
+ };
+
+ // Set the effective api version
+ capabilities.effective_api_version = self
+ .driver_api_version
+ .min(capabilities.properties.api_version);
+ capabilities
+ };
+
+ let mut features = PhysicalDeviceFeatures::default();
+ features.core = if let Some(ref get_device_properties) = self.get_physical_device_properties
+ {
+ let core = vk::PhysicalDeviceFeatures::default();
+ let mut builder = vk::PhysicalDeviceFeatures2KHR::builder().features(core);
+
+ // `VK_KHR_multiview` is promoted to 1.1
+ if capabilities.effective_api_version >= vk::API_VERSION_1_1
+ || capabilities.supports_extension(vk::KhrMultiviewFn::name())
+ {
+ let next = features
+ .multiview
+ .insert(vk::PhysicalDeviceMultiviewFeatures::default());
+ builder = builder.push_next(next);
+ }
+
+ if capabilities.supports_extension(vk::ExtDescriptorIndexingFn::name()) {
+ let next = features
+ .descriptor_indexing
+ .insert(vk::PhysicalDeviceDescriptorIndexingFeaturesEXT::default());
+ builder = builder.push_next(next);
+ }
+
+ // `VK_KHR_imageless_framebuffer` is promoted to 1.2, but has no changes, so we can keep using the extension unconditionally.
+ if capabilities.supports_extension(vk::KhrImagelessFramebufferFn::name()) {
+ let next = features
+ .imageless_framebuffer
+ .insert(vk::PhysicalDeviceImagelessFramebufferFeaturesKHR::default());
+ builder = builder.push_next(next);
+ }
+
+ // `VK_KHR_timeline_semaphore` is promoted to 1.2, but has no changes, so we can keep using the extension unconditionally.
+ if capabilities.supports_extension(vk::KhrTimelineSemaphoreFn::name()) {
+ let next = features
+ .timeline_semaphore
+ .insert(vk::PhysicalDeviceTimelineSemaphoreFeaturesKHR::default());
+ builder = builder.push_next(next);
+ }
+
+ if capabilities.supports_extension(vk::ExtImageRobustnessFn::name()) {
+ let next = features
+ .image_robustness
+ .insert(vk::PhysicalDeviceImageRobustnessFeaturesEXT::default());
+ builder = builder.push_next(next);
+ }
+ if capabilities.supports_extension(vk::ExtRobustness2Fn::name()) {
+ let next = features
+ .robustness2
+ .insert(vk::PhysicalDeviceRobustness2FeaturesEXT::default());
+ builder = builder.push_next(next);
+ }
+ if capabilities.supports_extension(vk::ExtDepthClipEnableFn::name()) {
+ let next = features
+ .depth_clip_enable
+ .insert(vk::PhysicalDeviceDepthClipEnableFeaturesEXT::default());
+ builder = builder.push_next(next);
+ }
+ if capabilities.supports_extension(vk::ExtTextureCompressionAstcHdrFn::name()) {
+ let next = features
+ .astc_hdr
+ .insert(vk::PhysicalDeviceTextureCompressionASTCHDRFeaturesEXT::default());
+ builder = builder.push_next(next);
+ }
+ if capabilities.supports_extension(vk::KhrShaderFloat16Int8Fn::name())
+ && capabilities.supports_extension(vk::Khr16bitStorageFn::name())
+ {
+ let next = features.shader_float16.insert((
+ vk::PhysicalDeviceShaderFloat16Int8FeaturesKHR::default(),
+ vk::PhysicalDevice16BitStorageFeaturesKHR::default(),
+ ));
+ builder = builder.push_next(&mut next.0);
+ builder = builder.push_next(&mut next.1);
+ }
+
+ // `VK_KHR_zero_initialize_workgroup_memory` is promoted to 1.3
+ if capabilities.effective_api_version >= vk::API_VERSION_1_3
+ || capabilities.supports_extension(vk::KhrZeroInitializeWorkgroupMemoryFn::name())
+ {
+ let next = features
+ .zero_initialize_workgroup_memory
+ .insert(vk::PhysicalDeviceZeroInitializeWorkgroupMemoryFeatures::default());
+ builder = builder.push_next(next);
+ }
+
+ let mut features2 = builder.build();
+ unsafe {
+ get_device_properties.get_physical_device_features2(phd, &mut features2);
+ }
+ features2.features
+ } else {
+ unsafe { self.raw.get_physical_device_features(phd) }
+ };
+
+ (capabilities, features)
+ }
+}
+
+impl super::Instance {
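+    /// Build an `ExposedAdapter` for `phd`, or return `None` if the device lacks
+    /// the baseline requirements (SPIR-V storage buffer class support, viewport
+    /// Y-flip, and a graphics-capable first queue family).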
+ pub fn expose_adapter(
+ &self,
+ phd: vk::PhysicalDevice,
+ ) -> Option<crate::ExposedAdapter<super::Api>> {
+ use crate::auxil::cstr_from_bytes_until_nul;
+ use crate::auxil::db;
+
+ let (phd_capabilities, phd_features) = self.shared.inspect(phd);
+
+ let info = wgt::AdapterInfo {
+ name: {
+ cstr_from_bytes_until_nul(&phd_capabilities.properties.device_name)
+ .and_then(|info| info.to_str().ok())
+ .unwrap_or("?")
+ .to_owned()
+ },
+ vendor: phd_capabilities.properties.vendor_id,
+ device: phd_capabilities.properties.device_id,
+ device_type: match phd_capabilities.properties.device_type {
+ ash::vk::PhysicalDeviceType::OTHER => wgt::DeviceType::Other,
+ ash::vk::PhysicalDeviceType::INTEGRATED_GPU => wgt::DeviceType::IntegratedGpu,
+ ash::vk::PhysicalDeviceType::DISCRETE_GPU => wgt::DeviceType::DiscreteGpu,
+ ash::vk::PhysicalDeviceType::VIRTUAL_GPU => wgt::DeviceType::VirtualGpu,
+ ash::vk::PhysicalDeviceType::CPU => wgt::DeviceType::Cpu,
+ _ => wgt::DeviceType::Other,
+ },
+ driver: {
+ phd_capabilities
+ .driver
+ .as_ref()
+ .and_then(|driver| cstr_from_bytes_until_nul(&driver.driver_name))
+ .and_then(|name| name.to_str().ok())
+ .unwrap_or("?")
+ .to_owned()
+ },
+ driver_info: {
+ phd_capabilities
+ .driver
+ .as_ref()
+ .and_then(|driver| cstr_from_bytes_until_nul(&driver.driver_info))
+ .and_then(|name| name.to_str().ok())
+ .unwrap_or("?")
+ .to_owned()
+ },
+ backend: wgt::Backend::Vulkan,
+ };
+
+ let (available_features, downlevel_flags) =
+ phd_features.to_wgpu(&self.shared.raw, phd, &phd_capabilities);
+ let mut workarounds = super::Workarounds::empty();
+ {
+ // see https://github.com/gfx-rs/gfx/issues/1930
+ let _is_windows_intel_dual_src_bug = cfg!(windows)
+ && phd_capabilities.properties.vendor_id == db::intel::VENDOR
+ && (phd_capabilities.properties.device_id & db::intel::DEVICE_KABY_LAKE_MASK
+ == db::intel::DEVICE_KABY_LAKE_MASK
+ || phd_capabilities.properties.device_id & db::intel::DEVICE_SKY_LAKE_MASK
+ == db::intel::DEVICE_SKY_LAKE_MASK);
+ // TODO: only enable for particular devices
+ workarounds |= super::Workarounds::SEPARATE_ENTRY_POINTS;
+ workarounds.set(
+ super::Workarounds::EMPTY_RESOLVE_ATTACHMENT_LISTS,
+ phd_capabilities.properties.vendor_id == db::qualcomm::VENDOR,
+ );
+ };
+
+ if phd_capabilities.effective_api_version == vk::API_VERSION_1_0
+ && !phd_capabilities.supports_extension(vk::KhrStorageBufferStorageClassFn::name())
+ {
+ log::warn!(
+ "SPIR-V storage buffer class is not supported, hiding adapter: {}",
+ info.name
+ );
+ return None;
+ }
+ if !phd_capabilities.supports_extension(vk::AmdNegativeViewportHeightFn::name())
+ && !phd_capabilities.supports_extension(vk::KhrMaintenance1Fn::name())
+ && phd_capabilities.effective_api_version < vk::API_VERSION_1_1
+ {
+ log::warn!(
+ "viewport Y-flip is not supported, hiding adapter: {}",
+ info.name
+ );
+ return None;
+ }
+
+ let queue_families = unsafe {
+ self.shared
+ .raw
+ .get_physical_device_queue_family_properties(phd)
+ };
+ let queue_flags = queue_families.first()?.queue_flags;
+ if !queue_flags.contains(vk::QueueFlags::GRAPHICS) {
+ log::warn!("The first queue only exposes {:?}", queue_flags);
+ return None;
+ }
+
+ let private_caps = super::PrivateCapabilities {
+ flip_y_requires_shift: phd_capabilities.effective_api_version >= vk::API_VERSION_1_1
+ || phd_capabilities.supports_extension(vk::KhrMaintenance1Fn::name()),
+            imageless_framebuffers: phd_features
+                .imageless_framebuffer
+                .map_or(false, |ext| ext.imageless_framebuffer == vk::TRUE),
+ image_view_usage: phd_capabilities.effective_api_version >= vk::API_VERSION_1_1
+ || phd_capabilities.supports_extension(vk::KhrMaintenance2Fn::name()),
+            timeline_semaphores: phd_features
+                .timeline_semaphore
+                .map_or(false, |ext| ext.timeline_semaphore == vk::TRUE),
+ texture_d24: supports_format(
+ &self.shared.raw,
+ phd,
+ vk::Format::X8_D24_UNORM_PACK32,
+ vk::ImageTiling::OPTIMAL,
+ depth_stencil_required_flags(),
+ ),
+ texture_d24_s8: supports_format(
+ &self.shared.raw,
+ phd,
+ vk::Format::D24_UNORM_S8_UINT,
+ vk::ImageTiling::OPTIMAL,
+ depth_stencil_required_flags(),
+ ),
+ texture_s8: supports_format(
+ &self.shared.raw,
+ phd,
+ vk::Format::S8_UINT,
+ vk::ImageTiling::OPTIMAL,
+ depth_stencil_required_flags(),
+ ),
+ non_coherent_map_mask: phd_capabilities.properties.limits.non_coherent_atom_size - 1,
+ can_present: true,
+ //TODO: make configurable
+ robust_buffer_access: phd_features.core.robust_buffer_access != 0,
+ robust_image_access: match phd_features.robustness2 {
+ Some(ref f) => f.robust_image_access2 != 0,
+ None => phd_features
+ .image_robustness
+ .map_or(false, |ext| ext.robust_image_access != 0),
+ },
+ zero_initialize_workgroup_memory: phd_features
+ .zero_initialize_workgroup_memory
+ .map_or(false, |ext| {
+ ext.shader_zero_initialize_workgroup_memory == vk::TRUE
+ }),
+ };
+ let capabilities = crate::Capabilities {
+ limits: phd_capabilities.to_wgpu_limits(),
+ alignments: phd_capabilities.to_hal_alignments(),
+ downlevel: wgt::DownlevelCapabilities {
+ flags: downlevel_flags,
+ limits: wgt::DownlevelLimits {},
+ shader_model: wgt::ShaderModel::Sm5, //TODO?
+ },
+ };
+
+ let adapter = super::Adapter {
+ raw: phd,
+ instance: Arc::clone(&self.shared),
+ //queue_families,
+ known_memory_flags: vk::MemoryPropertyFlags::DEVICE_LOCAL
+ | vk::MemoryPropertyFlags::HOST_VISIBLE
+ | vk::MemoryPropertyFlags::HOST_COHERENT
+ | vk::MemoryPropertyFlags::HOST_CACHED
+ | vk::MemoryPropertyFlags::LAZILY_ALLOCATED,
+ phd_capabilities,
+ //phd_features,
+ downlevel_flags,
+ private_caps,
+ workarounds,
+ };
+
+ Some(crate::ExposedAdapter {
+ adapter,
+ info,
+ features: available_features,
+ capabilities,
+ })
+ }
+}
+
+impl super::Adapter {
+ pub fn raw_physical_device(&self) -> ash::vk::PhysicalDevice {
+ self.raw
+ }
+
+ pub fn physical_device_capabilities(&self) -> &PhysicalDeviceCapabilities {
+ &self.phd_capabilities
+ }
+
+ pub fn shared_instance(&self) -> &super::InstanceShared {
+ &self.instance
+ }
+
+ pub fn required_device_extensions(&self, features: wgt::Features) -> Vec<&'static CStr> {
+ let (supported_extensions, unsupported_extensions) = self
+ .phd_capabilities
+ .get_required_extensions(features)
+ .iter()
+ .partition::<Vec<&CStr>, _>(|&&extension| {
+ self.phd_capabilities.supports_extension(extension)
+ });
+
+ if !unsupported_extensions.is_empty() {
+ log::warn!("Missing extensions: {:?}", unsupported_extensions);
+ }
+
+ log::debug!("Supported extensions: {:?}", supported_extensions);
+ supported_extensions
+ }
+
+ /// `features` must be the same features used to create `enabled_extensions`.
+ pub fn physical_device_features(
+ &self,
+ enabled_extensions: &[&'static CStr],
+ features: wgt::Features,
+ ) -> PhysicalDeviceFeatures {
+ PhysicalDeviceFeatures::from_extensions_and_requested_features(
+ self.phd_capabilities.effective_api_version,
+ enabled_extensions,
+ features,
+ self.downlevel_flags,
+ &self.private_caps,
+ )
+ }
+
+ /// # Safety
+ ///
+ /// - `raw_device` must be created from this adapter.
+ /// - `raw_device` must be created using `family_index`, `enabled_extensions` and `physical_device_features()`
+ /// - `enabled_extensions` must be a superset of `required_device_extensions()`.
+ #[allow(clippy::too_many_arguments)]
+ pub unsafe fn device_from_raw(
+ &self,
+ raw_device: ash::Device,
+ handle_is_owned: bool,
+ enabled_extensions: &[&'static CStr],
+ features: wgt::Features,
+ family_index: u32,
+ queue_index: u32,
+ ) -> Result<crate::OpenDevice<super::Api>, crate::DeviceError> {
+ let mem_properties = {
+ profiling::scope!("vkGetPhysicalDeviceMemoryProperties");
+ unsafe {
+ self.instance
+ .raw
+ .get_physical_device_memory_properties(self.raw)
+ }
+ };
+ let memory_types =
+ &mem_properties.memory_types[..mem_properties.memory_type_count as usize];
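+        // Build a bitmask of the memory-type indices whose property flags are
+        // fully covered by `known_memory_flags`; other types are left out.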
+ let valid_ash_memory_types = memory_types.iter().enumerate().fold(0, |u, (i, mem)| {
+ if self.known_memory_flags.contains(mem.property_flags) {
+ u | (1 << i)
+ } else {
+ u
+ }
+ });
+
+ let swapchain_fn = khr::Swapchain::new(&self.instance.raw, &raw_device);
+
+ let indirect_count_fn = if enabled_extensions.contains(&khr::DrawIndirectCount::name()) {
+ Some(khr::DrawIndirectCount::new(&self.instance.raw, &raw_device))
+ } else {
+ None
+ };
+ let timeline_semaphore_fn = if enabled_extensions.contains(&khr::TimelineSemaphore::name())
+ {
+ Some(super::ExtensionFn::Extension(khr::TimelineSemaphore::new(
+ &self.instance.raw,
+ &raw_device,
+ )))
+ } else if self.phd_capabilities.effective_api_version >= vk::API_VERSION_1_2 {
+ Some(super::ExtensionFn::Promoted)
+ } else {
+ None
+ };
+
+ let naga_options = {
+ use naga::back::spv;
+
+ let mut capabilities = vec![
+ spv::Capability::Shader,
+ spv::Capability::Matrix,
+ spv::Capability::Sampled1D,
+ spv::Capability::Image1D,
+ spv::Capability::ImageQuery,
+ spv::Capability::DerivativeControl,
+ spv::Capability::SampledCubeArray,
+ spv::Capability::SampleRateShading,
+ //Note: this is requested always, no matter what the actual
+ // adapter supports. It's not the responsibility of SPV-out
+ // translation to handle the storage support for formats.
+ spv::Capability::StorageImageExtendedFormats,
+ //TODO: fill out the rest
+ ];
+
+ if features.contains(wgt::Features::MULTIVIEW) {
+ capabilities.push(spv::Capability::MultiView);
+ }
+
+ if features.contains(wgt::Features::SHADER_PRIMITIVE_INDEX) {
+ capabilities.push(spv::Capability::Geometry);
+ }
+
+ if features.intersects(
+ wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING
+ | wgt::Features::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
+ ) {
+ capabilities.push(spv::Capability::ShaderNonUniform);
+ }
+
+ let mut flags = spv::WriterFlags::empty();
+ flags.set(
+ spv::WriterFlags::DEBUG,
+ self.instance.flags.contains(crate::InstanceFlags::DEBUG),
+ );
+ flags.set(
+ spv::WriterFlags::LABEL_VARYINGS,
+ self.phd_capabilities.properties.vendor_id != crate::auxil::db::qualcomm::VENDOR,
+ );
+ flags.set(
+ spv::WriterFlags::FORCE_POINT_SIZE,
+ //Note: we could technically disable this when we are compiling separate entry points,
+ // and we know exactly that the primitive topology is not `PointList`.
+ // But this requires cloning the `spv::Options` struct, which has heap allocations.
+ true, // could check `super::Workarounds::SEPARATE_ENTRY_POINTS`
+ );
+ spv::Options {
+ lang_version: (1, 0),
+ flags,
+ capabilities: Some(capabilities.iter().cloned().collect()),
+ bounds_check_policies: naga::proc::BoundsCheckPolicies {
+ index: naga::proc::BoundsCheckPolicy::Restrict,
+ buffer: if self.private_caps.robust_buffer_access {
+ naga::proc::BoundsCheckPolicy::Unchecked
+ } else {
+ naga::proc::BoundsCheckPolicy::Restrict
+ },
+ image: if self.private_caps.robust_image_access {
+ naga::proc::BoundsCheckPolicy::Unchecked
+ } else {
+ naga::proc::BoundsCheckPolicy::Restrict
+ },
+ // TODO: support bounds checks on binding arrays
+ binding_array: naga::proc::BoundsCheckPolicy::Unchecked,
+ },
+ zero_initialize_workgroup_memory: if self
+ .private_caps
+ .zero_initialize_workgroup_memory
+ {
+ spv::ZeroInitializeWorkgroupMemoryMode::Native
+ } else {
+ spv::ZeroInitializeWorkgroupMemoryMode::Polyfill
+ },
+ // We need to build this separately for each invocation, so just default it out here
+ binding_map: BTreeMap::default(),
+ }
+ };
+
+ let raw_queue = {
+ profiling::scope!("vkGetDeviceQueue");
+ unsafe { raw_device.get_device_queue(family_index, queue_index) }
+ };
+
+ let shared = Arc::new(super::DeviceShared {
+ raw: raw_device,
+ family_index,
+ queue_index,
+ raw_queue,
+ handle_is_owned,
+ instance: Arc::clone(&self.instance),
+ physical_device: self.raw,
+ enabled_extensions: enabled_extensions.into(),
+ extension_fns: super::DeviceExtensionFunctions {
+ draw_indirect_count: indirect_count_fn,
+ timeline_semaphore: timeline_semaphore_fn,
+ },
+ vendor_id: self.phd_capabilities.properties.vendor_id,
+ timestamp_period: self.phd_capabilities.properties.limits.timestamp_period,
+ private_caps: self.private_caps.clone(),
+ workarounds: self.workarounds,
+ render_passes: Mutex::new(Default::default()),
+ framebuffers: Mutex::new(Default::default()),
+ });
+ let mut relay_semaphores = [vk::Semaphore::null(); 2];
+ for sem in relay_semaphores.iter_mut() {
+ unsafe {
+ *sem = shared
+ .raw
+ .create_semaphore(&vk::SemaphoreCreateInfo::builder(), None)?
+ };
+ }
+ let queue = super::Queue {
+ raw: raw_queue,
+ swapchain_fn,
+ device: Arc::clone(&shared),
+ family_index,
+ relay_semaphores,
+ relay_index: None,
+ };
+
+ let mem_allocator = {
+ let limits = self.phd_capabilities.properties.limits;
+ let config = gpu_alloc::Config::i_am_prototyping(); //TODO
+ let max_memory_allocation_size =
+ if let Some(maintenance_3) = self.phd_capabilities.maintenance_3 {
+ maintenance_3.max_memory_allocation_size
+ } else {
+ u64::max_value()
+ };
+ let properties = gpu_alloc::DeviceProperties {
+ max_memory_allocation_count: limits.max_memory_allocation_count,
+ max_memory_allocation_size,
+ non_coherent_atom_size: limits.non_coherent_atom_size,
+ memory_types: memory_types
+ .iter()
+ .map(|memory_type| gpu_alloc::MemoryType {
+ props: gpu_alloc::MemoryPropertyFlags::from_bits_truncate(
+ memory_type.property_flags.as_raw() as u8,
+ ),
+ heap: memory_type.heap_index,
+ })
+ .collect(),
+ memory_heaps: mem_properties.memory_heaps
+ [..mem_properties.memory_heap_count as usize]
+ .iter()
+ .map(|&memory_heap| gpu_alloc::MemoryHeap {
+ size: memory_heap.size,
+ })
+ .collect(),
+ buffer_device_address: false,
+ };
+ gpu_alloc::GpuAllocator::new(config, properties)
+ };
+ let desc_allocator = gpu_descriptor::DescriptorAllocator::new(
+ if let Some(di) = self.phd_capabilities.descriptor_indexing {
+ di.max_update_after_bind_descriptors_in_all_pools
+ } else {
+ 0
+ },
+ );
+
+ let device = super::Device {
+ shared,
+ mem_allocator: Mutex::new(mem_allocator),
+ desc_allocator: Mutex::new(desc_allocator),
+ valid_ash_memory_types,
+ naga_options,
+ #[cfg(feature = "renderdoc")]
+ render_doc: Default::default(),
+ };
+
+ Ok(crate::OpenDevice { device, queue })
+ }
+}
+
+impl crate::Adapter<super::Api> for super::Adapter {
+ unsafe fn open(
+ &self,
+ features: wgt::Features,
+ _limits: &wgt::Limits,
+ ) -> Result<crate::OpenDevice<super::Api>, crate::DeviceError> {
+ let enabled_extensions = self.required_device_extensions(features);
+ let mut enabled_phd_features = self.physical_device_features(&enabled_extensions, features);
+
+ let family_index = 0; //TODO
+ let family_info = vk::DeviceQueueCreateInfo::builder()
+ .queue_family_index(family_index)
+ .queue_priorities(&[1.0])
+ .build();
+ let family_infos = [family_info];
+
+ let str_pointers = enabled_extensions
+ .iter()
+ .map(|&s| {
+ // Safe because `enabled_extensions` entries have static lifetime.
+ s.as_ptr()
+ })
+ .collect::<Vec<_>>();
+
+ let pre_info = vk::DeviceCreateInfo::builder()
+ .queue_create_infos(&family_infos)
+ .enabled_extension_names(&str_pointers);
+ let info = enabled_phd_features
+ .add_to_device_create_builder(pre_info)
+ .build();
+ let raw_device = {
+ profiling::scope!("vkCreateDevice");
+ unsafe { self.instance.raw.create_device(self.raw, &info, None)? }
+ };
+
+ unsafe {
+ self.device_from_raw(
+ raw_device,
+ true,
+ &enabled_extensions,
+ features,
+ family_info.queue_family_index,
+ 0,
+ )
+ }
+ }
+
+ unsafe fn texture_format_capabilities(
+ &self,
+ format: wgt::TextureFormat,
+ ) -> crate::TextureFormatCapabilities {
+ use crate::TextureFormatCapabilities as Tfc;
+
+ let vk_format = self.private_caps.map_texture_format(format);
+ let properties = unsafe {
+ self.instance
+ .raw
+ .get_physical_device_format_properties(self.raw, vk_format)
+ };
+ let features = properties.optimal_tiling_features;
+
+ let mut flags = Tfc::empty();
+ flags.set(
+ Tfc::SAMPLED,
+ features.contains(vk::FormatFeatureFlags::SAMPLED_IMAGE),
+ );
+ flags.set(
+ Tfc::SAMPLED_LINEAR,
+ features.contains(vk::FormatFeatureFlags::SAMPLED_IMAGE_FILTER_LINEAR),
+ );
+ // flags.set(
+ // Tfc::SAMPLED_MINMAX,
+ // features.contains(vk::FormatFeatureFlags::SAMPLED_IMAGE_FILTER_MINMAX),
+ // );
+ flags.set(
+ Tfc::STORAGE | Tfc::STORAGE_READ_WRITE,
+ features.contains(vk::FormatFeatureFlags::STORAGE_IMAGE),
+ );
+ flags.set(
+ Tfc::STORAGE_ATOMIC,
+ features.contains(vk::FormatFeatureFlags::STORAGE_IMAGE_ATOMIC),
+ );
+ flags.set(
+ Tfc::COLOR_ATTACHMENT,
+ features.contains(vk::FormatFeatureFlags::COLOR_ATTACHMENT),
+ );
+ flags.set(
+ Tfc::COLOR_ATTACHMENT_BLEND,
+ features.contains(vk::FormatFeatureFlags::COLOR_ATTACHMENT_BLEND),
+ );
+ flags.set(
+ Tfc::DEPTH_STENCIL_ATTACHMENT,
+ features.contains(vk::FormatFeatureFlags::DEPTH_STENCIL_ATTACHMENT),
+ );
+ flags.set(
+ Tfc::COPY_SRC,
+ features.intersects(vk::FormatFeatureFlags::TRANSFER_SRC),
+ );
+ flags.set(
+ Tfc::COPY_DST,
+ features.intersects(vk::FormatFeatureFlags::TRANSFER_DST),
+ );
+ // Vulkan is very permissive about MSAA
+ flags.set(Tfc::MULTISAMPLE_RESOLVE, !format.is_compressed());
+
+ // get the supported sample counts
+ let format_aspect = crate::FormatAspects::from(format);
+ let limits = self.phd_capabilities.properties.limits;
+
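+        // `Stencil8` can be backed by either `S8_UINT` or the stencil aspect of
+        // `D24_UNORM_S8_UINT`; `Depth24PlusStencil8` needs one of the packed
+        // depth-stencil formats.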
+ let sample_flags = if format_aspect.contains(crate::FormatAspects::DEPTH) {
+ limits
+ .framebuffer_depth_sample_counts
+ .min(limits.sampled_image_depth_sample_counts)
+ } else if format_aspect.contains(crate::FormatAspects::STENCIL) {
+ limits
+ .framebuffer_stencil_sample_counts
+ .min(limits.sampled_image_stencil_sample_counts)
+ } else {
+ match format.sample_type(None).unwrap() {
+ wgt::TextureSampleType::Float { filterable: _ } => limits
+ .framebuffer_color_sample_counts
+ .min(limits.sampled_image_color_sample_counts),
+ wgt::TextureSampleType::Sint | wgt::TextureSampleType::Uint => {
+ limits.sampled_image_integer_sample_counts
+ }
+ _ => unreachable!(),
+ }
+ };
+
+ flags.set(
+ Tfc::MULTISAMPLE_X2,
+ sample_flags.contains(vk::SampleCountFlags::TYPE_2),
+ );
+ flags.set(
+ Tfc::MULTISAMPLE_X4,
+ sample_flags.contains(vk::SampleCountFlags::TYPE_4),
+ );
+ flags.set(
+ Tfc::MULTISAMPLE_X8,
+ sample_flags.contains(vk::SampleCountFlags::TYPE_8),
+ );
+ flags.set(
+ Tfc::MULTISAMPLE_X16,
+ sample_flags.contains(vk::SampleCountFlags::TYPE_16),
+ );
+
+ flags
+ }
+
+ unsafe fn surface_capabilities(
+ &self,
+ surface: &super::Surface,
+ ) -> Option<crate::SurfaceCapabilities> {
+ if !self.private_caps.can_present {
+ return None;
+ }
+ let queue_family_index = 0; //TODO
+ {
+ profiling::scope!("vkGetPhysicalDeviceSurfaceSupportKHR");
+ match unsafe {
+ surface.functor.get_physical_device_surface_support(
+ self.raw,
+ queue_family_index,
+ surface.raw,
+ )
+ } {
+ Ok(true) => (),
+ Ok(false) => return None,
+ Err(e) => {
+ log::error!("get_physical_device_surface_support: {}", e);
+ return None;
+ }
+ }
+ }
+
+ let caps = {
+ profiling::scope!("vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
+ match unsafe {
+ surface
+ .functor
+ .get_physical_device_surface_capabilities(self.raw, surface.raw)
+ } {
+ Ok(caps) => caps,
+ Err(e) => {
+ log::error!("get_physical_device_surface_capabilities: {}", e);
+ return None;
+ }
+ }
+ };
+
+        // If the maximum image count is 0, the supported number of images is unlimited.
+ let max_image_count = if caps.max_image_count == 0 {
+ !0
+ } else {
+ caps.max_image_count
+ };
+
+ // `0xFFFFFFFF` indicates that the extent depends on the created swapchain.
+ let current_extent = if caps.current_extent.width != !0 && caps.current_extent.height != !0
+ {
+ Some(wgt::Extent3d {
+ width: caps.current_extent.width,
+ height: caps.current_extent.height,
+ depth_or_array_layers: 1,
+ })
+ } else {
+ None
+ };
+
+ let min_extent = wgt::Extent3d {
+ width: caps.min_image_extent.width,
+ height: caps.min_image_extent.height,
+ depth_or_array_layers: 1,
+ };
+
+ let max_extent = wgt::Extent3d {
+ width: caps.max_image_extent.width,
+ height: caps.max_image_extent.height,
+ depth_or_array_layers: caps.max_image_array_layers,
+ };
+
+ let raw_present_modes = {
+ profiling::scope!("vkGetPhysicalDeviceSurfacePresentModesKHR");
+ match unsafe {
+ surface
+ .functor
+ .get_physical_device_surface_present_modes(self.raw, surface.raw)
+ } {
+ Ok(present_modes) => present_modes,
+ Err(e) => {
+ log::error!("get_physical_device_surface_present_modes: {}", e);
+ Vec::new()
+ }
+ }
+ };
+
+ let raw_surface_formats = {
+ profiling::scope!("vkGetPhysicalDeviceSurfaceFormatsKHR");
+ match unsafe {
+ surface
+ .functor
+ .get_physical_device_surface_formats(self.raw, surface.raw)
+ } {
+ Ok(formats) => formats,
+ Err(e) => {
+ log::error!("get_physical_device_surface_formats: {}", e);
+ Vec::new()
+ }
+ }
+ };
+
+ let formats = raw_surface_formats
+ .into_iter()
+ .filter_map(conv::map_vk_surface_formats)
+ .collect();
+ Some(crate::SurfaceCapabilities {
+ formats,
+ swap_chain_sizes: caps.min_image_count..=max_image_count,
+ current_extent,
+ extents: min_extent..=max_extent,
+ usage: conv::map_vk_image_usage(caps.supported_usage_flags),
+ present_modes: raw_present_modes
+ .into_iter()
+ .flat_map(conv::map_vk_present_mode)
+ .collect(),
+ composite_alpha_modes: conv::map_vk_composite_alpha(caps.supported_composite_alpha),
+ })
+ }
+
+ unsafe fn get_presentation_timestamp(&self) -> wgt::PresentationTimestamp {
+        // VK_GOOGLE_display_timing is the only way to get presentation
+        // timestamps on Vulkan right now, and it is only ever available
+        // on Android and Linux. The `unix` cfg below also matches macOS,
+        // where there is no alternative, so falling back to CLOCK_MONOTONIC
+        // there is fine.
+ #[cfg(unix)]
+ {
+ let mut timespec = libc::timespec {
+ tv_sec: 0,
+ tv_nsec: 0,
+ };
+ unsafe {
+ libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut timespec);
+ }
+
+ wgt::PresentationTimestamp(
+ timespec.tv_sec as u128 * 1_000_000_000 + timespec.tv_nsec as u128,
+ )
+ }
+ #[cfg(not(unix))]
+ {
+ wgt::PresentationTimestamp::INVALID_TIMESTAMP
+ }
+ }
+}
+
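+/// Checks whether all of the 16-bit normalized formats (R16/RG16/RGBA16, unorm and snorm)
+/// support sampling, storage and transfers with optimal tiling on this physical device.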
+fn is_format_16bit_norm_supported(instance: &ash::Instance, phd: vk::PhysicalDevice) -> bool {
+ let tiling = vk::ImageTiling::OPTIMAL;
+ let features = vk::FormatFeatureFlags::SAMPLED_IMAGE
+ | vk::FormatFeatureFlags::STORAGE_IMAGE
+ | vk::FormatFeatureFlags::TRANSFER_SRC
+ | vk::FormatFeatureFlags::TRANSFER_DST;
+ let r16unorm = supports_format(instance, phd, vk::Format::R16_UNORM, tiling, features);
+ let r16snorm = supports_format(instance, phd, vk::Format::R16_SNORM, tiling, features);
+ let rg16unorm = supports_format(instance, phd, vk::Format::R16G16_UNORM, tiling, features);
+ let rg16snorm = supports_format(instance, phd, vk::Format::R16G16_SNORM, tiling, features);
+ let rgba16unorm = supports_format(
+ instance,
+ phd,
+ vk::Format::R16G16B16A16_UNORM,
+ tiling,
+ features,
+ );
+ let rgba16snorm = supports_format(
+ instance,
+ phd,
+ vk::Format::R16G16B16A16_SNORM,
+ tiling,
+ features,
+ );
+
+ r16unorm && r16snorm && rg16unorm && rg16snorm && rgba16unorm && rgba16snorm
+}
+
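+/// Returns whether `format` supports all of the requested feature flags under the given
+/// tiling mode on this physical device.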
+fn supports_format(
+ instance: &ash::Instance,
+ phd: vk::PhysicalDevice,
+ format: vk::Format,
+ tiling: vk::ImageTiling,
+ features: vk::FormatFeatureFlags,
+) -> bool {
+ let properties = unsafe { instance.get_physical_device_format_properties(phd, format) };
+ match tiling {
+ vk::ImageTiling::LINEAR => properties.linear_tiling_features.contains(features),
+ vk::ImageTiling::OPTIMAL => properties.optimal_tiling_features.contains(features),
+ _ => false,
+ }
+}
diff --git a/third_party/rust/wgpu-hal/src/vulkan/command.rs b/third_party/rust/wgpu-hal/src/vulkan/command.rs
new file mode 100644
index 0000000000..f6c871026c
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/vulkan/command.rs
@@ -0,0 +1,826 @@
+use super::conv;
+
+use arrayvec::ArrayVec;
+use ash::{extensions::ext, vk};
+
+use std::{mem, ops::Range, slice};
+
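+/// How many command buffers to allocate from the pool at a time whenever the free list
+/// runs out.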
+const ALLOCATION_GRANULARITY: u32 = 16;
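+/// The image layout that copy destinations are expected to be in; `check_dst_image_layout`
+/// below verifies that it matches `conv::derive_image_layout` for `COPY_DST`.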
+const DST_IMAGE_LAYOUT: vk::ImageLayout = vk::ImageLayout::TRANSFER_DST_OPTIMAL;
+
+impl super::Texture {
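+    /// Converts HAL buffer<->texture copy regions into `vk::BufferImageCopy` structs,
+    /// clamping each extent to the texture's copyable size and translating the buffer
+    /// layout's bytes-per-row into the texels-per-row value Vulkan expects.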
+ fn map_buffer_copies<T>(&self, regions: T) -> impl Iterator<Item = vk::BufferImageCopy>
+ where
+ T: Iterator<Item = crate::BufferTextureCopy>,
+ {
+ let (block_width, block_height) = self.format.block_dimensions();
+ let format = self.format;
+ let copy_size = self.copy_size;
+ regions.map(move |r| {
+ let extent = r.texture_base.max_copy_size(&copy_size).min(&r.size);
+ let (image_subresource, image_offset) = conv::map_subresource_layers(&r.texture_base);
+ vk::BufferImageCopy {
+ buffer_offset: r.buffer_layout.offset,
+ buffer_row_length: r.buffer_layout.bytes_per_row.map_or(0, |bpr| {
+ let block_size = format
+ .block_size(Some(r.texture_base.aspect.map()))
+ .unwrap();
+ block_width * (bpr / block_size)
+ }),
+ buffer_image_height: r
+ .buffer_layout
+ .rows_per_image
+ .map_or(0, |rpi| rpi * block_height),
+ image_subresource,
+ image_offset,
+ image_extent: conv::map_copy_extent(&extent),
+ }
+ })
+ }
+}
+
+impl super::DeviceShared {
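+    /// Returns the `VK_EXT_debug_utils` extension functions, if they are available on
+    /// the instance.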
+ fn debug_messenger(&self) -> Option<&ext::DebugUtils> {
+ Some(&self.instance.debug_utils.as_ref()?.extension)
+ }
+}
+
+impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
+ unsafe fn begin_encoding(&mut self, label: crate::Label) -> Result<(), crate::DeviceError> {
+ if self.free.is_empty() {
+ let vk_info = vk::CommandBufferAllocateInfo::builder()
+ .command_pool(self.raw)
+ .command_buffer_count(ALLOCATION_GRANULARITY)
+ .build();
+ let cmd_buf_vec = unsafe { self.device.raw.allocate_command_buffers(&vk_info)? };
+ self.free.extend(cmd_buf_vec);
+ }
+ let raw = self.free.pop().unwrap();
+
+        // Set the name unconditionally, since a previous name may still be
+        // assigned to this command buffer.
+ unsafe {
+ self.device.set_object_name(
+ vk::ObjectType::COMMAND_BUFFER,
+ raw,
+ label.unwrap_or_default(),
+ )
+ };
+
+ // Reset this in case the last renderpass was never ended.
+ self.rpass_debug_marker_active = false;
+
+ let vk_info = vk::CommandBufferBeginInfo::builder()
+ .flags(vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT)
+ .build();
+ unsafe { self.device.raw.begin_command_buffer(raw, &vk_info) }?;
+ self.active = raw;
+
+ Ok(())
+ }
+
+ unsafe fn end_encoding(&mut self) -> Result<super::CommandBuffer, crate::DeviceError> {
+ let raw = self.active;
+ self.active = vk::CommandBuffer::null();
+ unsafe { self.device.raw.end_command_buffer(raw) }?;
+ Ok(super::CommandBuffer { raw })
+ }
+
+ unsafe fn discard_encoding(&mut self) {
+ self.discarded.push(self.active);
+ self.active = vk::CommandBuffer::null();
+ }
+
+ unsafe fn reset_all<I>(&mut self, cmd_bufs: I)
+ where
+ I: Iterator<Item = super::CommandBuffer>,
+ {
+ self.temp.clear();
+ self.free
+ .extend(cmd_bufs.into_iter().map(|cmd_buf| cmd_buf.raw));
+ self.free.append(&mut self.discarded);
+ let _ = unsafe {
+ self.device
+ .raw
+ .reset_command_pool(self.raw, vk::CommandPoolResetFlags::default())
+ };
+ }
+
+ unsafe fn transition_buffers<'a, T>(&mut self, barriers: T)
+ where
+ T: Iterator<Item = crate::BufferBarrier<'a, super::Api>>,
+ {
+        // Note: the stages are seeded with TOP/BOTTOM_OF_PIPE so that we never
+        // end up passing empty stage flags to `vkCmdPipelineBarrier`.
+ let mut src_stages = vk::PipelineStageFlags::TOP_OF_PIPE;
+ let mut dst_stages = vk::PipelineStageFlags::BOTTOM_OF_PIPE;
+ let vk_barriers = &mut self.temp.buffer_barriers;
+ vk_barriers.clear();
+
+ for bar in barriers {
+ let (src_stage, src_access) = conv::map_buffer_usage_to_barrier(bar.usage.start);
+ src_stages |= src_stage;
+ let (dst_stage, dst_access) = conv::map_buffer_usage_to_barrier(bar.usage.end);
+ dst_stages |= dst_stage;
+
+ vk_barriers.push(
+ vk::BufferMemoryBarrier::builder()
+ .buffer(bar.buffer.raw)
+ .size(vk::WHOLE_SIZE)
+ .src_access_mask(src_access)
+ .dst_access_mask(dst_access)
+ .build(),
+ )
+ }
+
+ if !vk_barriers.is_empty() {
+ unsafe {
+ self.device.raw.cmd_pipeline_barrier(
+ self.active,
+ src_stages,
+ dst_stages,
+ vk::DependencyFlags::empty(),
+ &[],
+ vk_barriers,
+ &[],
+ )
+ };
+ }
+ }
+
+ unsafe fn transition_textures<'a, T>(&mut self, barriers: T)
+ where
+ T: Iterator<Item = crate::TextureBarrier<'a, super::Api>>,
+ {
+ let mut src_stages = vk::PipelineStageFlags::empty();
+ let mut dst_stages = vk::PipelineStageFlags::empty();
+ let vk_barriers = &mut self.temp.image_barriers;
+ vk_barriers.clear();
+
+ for bar in barriers {
+ let range = conv::map_subresource_range(&bar.range, bar.texture.format);
+ let (src_stage, src_access) = conv::map_texture_usage_to_barrier(bar.usage.start);
+ let src_layout = conv::derive_image_layout(bar.usage.start, bar.texture.format);
+ src_stages |= src_stage;
+ let (dst_stage, dst_access) = conv::map_texture_usage_to_barrier(bar.usage.end);
+ let dst_layout = conv::derive_image_layout(bar.usage.end, bar.texture.format);
+ dst_stages |= dst_stage;
+
+ vk_barriers.push(
+ vk::ImageMemoryBarrier::builder()
+ .image(bar.texture.raw)
+ .subresource_range(range)
+ .src_access_mask(src_access)
+ .dst_access_mask(dst_access)
+ .old_layout(src_layout)
+ .new_layout(dst_layout)
+ .build(),
+ );
+ }
+
+ if !vk_barriers.is_empty() {
+ unsafe {
+ self.device.raw.cmd_pipeline_barrier(
+ self.active,
+ src_stages,
+ dst_stages,
+ vk::DependencyFlags::empty(),
+ &[],
+ &[],
+ vk_barriers,
+ )
+ };
+ }
+ }
+
+ unsafe fn clear_buffer(&mut self, buffer: &super::Buffer, range: crate::MemoryRange) {
+ unsafe {
+ self.device.raw.cmd_fill_buffer(
+ self.active,
+ buffer.raw,
+ range.start,
+ range.end - range.start,
+ 0,
+ )
+ };
+ }
+
+ unsafe fn copy_buffer_to_buffer<T>(
+ &mut self,
+ src: &super::Buffer,
+ dst: &super::Buffer,
+ regions: T,
+ ) where
+ T: Iterator<Item = crate::BufferCopy>,
+ {
+ let vk_regions_iter = regions.map(|r| vk::BufferCopy {
+ src_offset: r.src_offset,
+ dst_offset: r.dst_offset,
+ size: r.size.get(),
+ });
+
+ unsafe {
+ self.device.raw.cmd_copy_buffer(
+ self.active,
+ src.raw,
+ dst.raw,
+ &smallvec::SmallVec::<[vk::BufferCopy; 32]>::from_iter(vk_regions_iter),
+ )
+ };
+ }
+
+ unsafe fn copy_texture_to_texture<T>(
+ &mut self,
+ src: &super::Texture,
+ src_usage: crate::TextureUses,
+ dst: &super::Texture,
+ regions: T,
+ ) where
+ T: Iterator<Item = crate::TextureCopy>,
+ {
+ let src_layout = conv::derive_image_layout(src_usage, src.format);
+
+ let vk_regions_iter = regions.map(|r| {
+ let (src_subresource, src_offset) = conv::map_subresource_layers(&r.src_base);
+ let (dst_subresource, dst_offset) = conv::map_subresource_layers(&r.dst_base);
+ let extent = r
+ .size
+ .min(&r.src_base.max_copy_size(&src.copy_size))
+ .min(&r.dst_base.max_copy_size(&dst.copy_size));
+ vk::ImageCopy {
+ src_subresource,
+ src_offset,
+ dst_subresource,
+ dst_offset,
+ extent: conv::map_copy_extent(&extent),
+ }
+ });
+
+ unsafe {
+ self.device.raw.cmd_copy_image(
+ self.active,
+ src.raw,
+ src_layout,
+ dst.raw,
+ DST_IMAGE_LAYOUT,
+ &smallvec::SmallVec::<[vk::ImageCopy; 32]>::from_iter(vk_regions_iter),
+ )
+ };
+ }
+
+ unsafe fn copy_buffer_to_texture<T>(
+ &mut self,
+ src: &super::Buffer,
+ dst: &super::Texture,
+ regions: T,
+ ) where
+ T: Iterator<Item = crate::BufferTextureCopy>,
+ {
+ let vk_regions_iter = dst.map_buffer_copies(regions);
+
+ unsafe {
+ self.device.raw.cmd_copy_buffer_to_image(
+ self.active,
+ src.raw,
+ dst.raw,
+ DST_IMAGE_LAYOUT,
+ &smallvec::SmallVec::<[vk::BufferImageCopy; 32]>::from_iter(vk_regions_iter),
+ )
+ };
+ }
+
+ unsafe fn copy_texture_to_buffer<T>(
+ &mut self,
+ src: &super::Texture,
+ src_usage: crate::TextureUses,
+ dst: &super::Buffer,
+ regions: T,
+ ) where
+ T: Iterator<Item = crate::BufferTextureCopy>,
+ {
+ let src_layout = conv::derive_image_layout(src_usage, src.format);
+ let vk_regions_iter = src.map_buffer_copies(regions);
+
+ unsafe {
+ self.device.raw.cmd_copy_image_to_buffer(
+ self.active,
+ src.raw,
+ src_layout,
+ dst.raw,
+ &smallvec::SmallVec::<[vk::BufferImageCopy; 32]>::from_iter(vk_regions_iter),
+ )
+ };
+ }
+
+ unsafe fn begin_query(&mut self, set: &super::QuerySet, index: u32) {
+ unsafe {
+ self.device.raw.cmd_begin_query(
+ self.active,
+ set.raw,
+ index,
+ vk::QueryControlFlags::empty(),
+ )
+ };
+ }
+ unsafe fn end_query(&mut self, set: &super::QuerySet, index: u32) {
+ unsafe { self.device.raw.cmd_end_query(self.active, set.raw, index) };
+ }
+ unsafe fn write_timestamp(&mut self, set: &super::QuerySet, index: u32) {
+ unsafe {
+ self.device.raw.cmd_write_timestamp(
+ self.active,
+ vk::PipelineStageFlags::BOTTOM_OF_PIPE,
+ set.raw,
+ index,
+ )
+ };
+ }
+ unsafe fn reset_queries(&mut self, set: &super::QuerySet, range: Range<u32>) {
+ unsafe {
+ self.device.raw.cmd_reset_query_pool(
+ self.active,
+ set.raw,
+ range.start,
+ range.end - range.start,
+ )
+ };
+ }
+ unsafe fn copy_query_results(
+ &mut self,
+ set: &super::QuerySet,
+ range: Range<u32>,
+ buffer: &super::Buffer,
+ offset: wgt::BufferAddress,
+ stride: wgt::BufferSize,
+ ) {
+ unsafe {
+ self.device.raw.cmd_copy_query_pool_results(
+ self.active,
+ set.raw,
+ range.start,
+ range.end - range.start,
+ buffer.raw,
+ offset,
+ stride.get(),
+ vk::QueryResultFlags::TYPE_64 | vk::QueryResultFlags::WAIT,
+ )
+ };
+ }
+
+ // render
+
+ unsafe fn begin_render_pass(&mut self, desc: &crate::RenderPassDescriptor<super::Api>) {
+ let mut vk_clear_values =
+ ArrayVec::<vk::ClearValue, { super::MAX_TOTAL_ATTACHMENTS }>::new();
+ let mut vk_image_views = ArrayVec::<vk::ImageView, { super::MAX_TOTAL_ATTACHMENTS }>::new();
+ let mut rp_key = super::RenderPassKey::default();
+ let mut fb_key = super::FramebufferKey {
+ attachments: ArrayVec::default(),
+ extent: desc.extent,
+ sample_count: desc.sample_count,
+ };
+ let caps = &self.device.private_caps;
+
+ for cat in desc.color_attachments {
+ if let Some(cat) = cat.as_ref() {
+ vk_clear_values.push(vk::ClearValue {
+ color: unsafe { cat.make_vk_clear_color() },
+ });
+ vk_image_views.push(cat.target.view.raw);
+ let color = super::ColorAttachmentKey {
+ base: cat.target.make_attachment_key(cat.ops, caps),
+ resolve: cat.resolve_target.as_ref().map(|target| {
+ target.make_attachment_key(crate::AttachmentOps::STORE, caps)
+ }),
+ };
+
+ rp_key.colors.push(Some(color));
+ fb_key.attachments.push(cat.target.view.attachment.clone());
+ if let Some(ref at) = cat.resolve_target {
+ vk_clear_values.push(unsafe { mem::zeroed() });
+ vk_image_views.push(at.view.raw);
+ fb_key.attachments.push(at.view.attachment.clone());
+ }
+
+                // Sanity-check that this attachment matches the requested multiview layer count;
+                // the driver crash this would otherwise trigger is really bad on AMD, so the check is worth it.
+ if let Some(multiview) = desc.multiview {
+ assert_eq!(cat.target.view.layers, multiview);
+ if let Some(ref resolve_target) = cat.resolve_target {
+ assert_eq!(resolve_target.view.layers, multiview);
+ }
+ }
+ } else {
+ rp_key.colors.push(None);
+ }
+ }
+ if let Some(ref ds) = desc.depth_stencil_attachment {
+ vk_clear_values.push(vk::ClearValue {
+ depth_stencil: vk::ClearDepthStencilValue {
+ depth: ds.clear_value.0,
+ stencil: ds.clear_value.1,
+ },
+ });
+ vk_image_views.push(ds.target.view.raw);
+ rp_key.depth_stencil = Some(super::DepthStencilAttachmentKey {
+ base: ds.target.make_attachment_key(ds.depth_ops, caps),
+ stencil_ops: ds.stencil_ops,
+ });
+ fb_key.attachments.push(ds.target.view.attachment.clone());
+
+            // Sanity-check that this attachment matches the requested multiview layer count;
+            // the driver crash this would otherwise trigger is really bad on AMD, so the check is worth it.
+ if let Some(multiview) = desc.multiview {
+ assert_eq!(ds.target.view.layers, multiview);
+ }
+ }
+ rp_key.sample_count = fb_key.sample_count;
+ rp_key.multiview = desc.multiview;
+
+ let render_area = vk::Rect2D {
+ offset: vk::Offset2D { x: 0, y: 0 },
+ extent: vk::Extent2D {
+ width: desc.extent.width,
+ height: desc.extent.height,
+ },
+ };
+ let vk_viewports = [vk::Viewport {
+ x: 0.0,
+ y: if self.device.private_caps.flip_y_requires_shift {
+ desc.extent.height as f32
+ } else {
+ 0.0
+ },
+ width: desc.extent.width as f32,
+ height: -(desc.extent.height as f32),
+ min_depth: 0.0,
+ max_depth: 1.0,
+ }];
+
+ let raw_pass = self.device.make_render_pass(rp_key).unwrap();
+ let raw_framebuffer = self
+ .device
+ .make_framebuffer(fb_key, raw_pass, desc.label)
+ .unwrap();
+
+ let mut vk_info = vk::RenderPassBeginInfo::builder()
+ .render_pass(raw_pass)
+ .render_area(render_area)
+ .clear_values(&vk_clear_values)
+ .framebuffer(raw_framebuffer);
+ let mut vk_attachment_info = if caps.imageless_framebuffers {
+ Some(
+ vk::RenderPassAttachmentBeginInfo::builder()
+ .attachments(&vk_image_views)
+ .build(),
+ )
+ } else {
+ None
+ };
+ if let Some(attachment_info) = vk_attachment_info.as_mut() {
+ vk_info = vk_info.push_next(attachment_info);
+ }
+
+ if let Some(label) = desc.label {
+ unsafe { self.begin_debug_marker(label) };
+ self.rpass_debug_marker_active = true;
+ }
+
+ unsafe {
+ self.device
+ .raw
+ .cmd_set_viewport(self.active, 0, &vk_viewports);
+ self.device
+ .raw
+ .cmd_set_scissor(self.active, 0, &[render_area]);
+ self.device.raw.cmd_begin_render_pass(
+ self.active,
+ &vk_info,
+ vk::SubpassContents::INLINE,
+ );
+ };
+
+ self.bind_point = vk::PipelineBindPoint::GRAPHICS;
+ }
+ unsafe fn end_render_pass(&mut self) {
+ unsafe {
+ self.device.raw.cmd_end_render_pass(self.active);
+ if self.rpass_debug_marker_active {
+ self.end_debug_marker();
+ self.rpass_debug_marker_active = false;
+ }
+ }
+ }
+
+ unsafe fn set_bind_group(
+ &mut self,
+ layout: &super::PipelineLayout,
+ index: u32,
+ group: &super::BindGroup,
+ dynamic_offsets: &[wgt::DynamicOffset],
+ ) {
+ let sets = [*group.set.raw()];
+ unsafe {
+ self.device.raw.cmd_bind_descriptor_sets(
+ self.active,
+ self.bind_point,
+ layout.raw,
+ index,
+ &sets,
+ dynamic_offsets,
+ )
+ };
+ }
+ unsafe fn set_push_constants(
+ &mut self,
+ layout: &super::PipelineLayout,
+ stages: wgt::ShaderStages,
+ offset: u32,
+ data: &[u32],
+ ) {
+ unsafe {
+ self.device.raw.cmd_push_constants(
+ self.active,
+ layout.raw,
+ conv::map_shader_stage(stages),
+ offset,
+ slice::from_raw_parts(data.as_ptr() as _, data.len() * 4),
+ )
+ };
+ }
+
+ unsafe fn insert_debug_marker(&mut self, label: &str) {
+ if let Some(ext) = self.device.debug_messenger() {
+ let cstr = self.temp.make_c_str(label);
+ let vk_label = vk::DebugUtilsLabelEXT::builder().label_name(cstr).build();
+ unsafe { ext.cmd_insert_debug_utils_label(self.active, &vk_label) };
+ }
+ }
+ unsafe fn begin_debug_marker(&mut self, group_label: &str) {
+ if let Some(ext) = self.device.debug_messenger() {
+ let cstr = self.temp.make_c_str(group_label);
+ let vk_label = vk::DebugUtilsLabelEXT::builder().label_name(cstr).build();
+ unsafe { ext.cmd_begin_debug_utils_label(self.active, &vk_label) };
+ }
+ }
+ unsafe fn end_debug_marker(&mut self) {
+ if let Some(ext) = self.device.debug_messenger() {
+ unsafe { ext.cmd_end_debug_utils_label(self.active) };
+ }
+ }
+
+ unsafe fn set_render_pipeline(&mut self, pipeline: &super::RenderPipeline) {
+ unsafe {
+ self.device.raw.cmd_bind_pipeline(
+ self.active,
+ vk::PipelineBindPoint::GRAPHICS,
+ pipeline.raw,
+ )
+ };
+ }
+
+ unsafe fn set_index_buffer<'a>(
+ &mut self,
+ binding: crate::BufferBinding<'a, super::Api>,
+ format: wgt::IndexFormat,
+ ) {
+ unsafe {
+ self.device.raw.cmd_bind_index_buffer(
+ self.active,
+ binding.buffer.raw,
+ binding.offset,
+ conv::map_index_format(format),
+ )
+ };
+ }
+ unsafe fn set_vertex_buffer<'a>(
+ &mut self,
+ index: u32,
+ binding: crate::BufferBinding<'a, super::Api>,
+ ) {
+ let vk_buffers = [binding.buffer.raw];
+ let vk_offsets = [binding.offset];
+ unsafe {
+ self.device
+ .raw
+ .cmd_bind_vertex_buffers(self.active, index, &vk_buffers, &vk_offsets)
+ };
+ }
+ unsafe fn set_viewport(&mut self, rect: &crate::Rect<f32>, depth_range: Range<f32>) {
+ let vk_viewports = [vk::Viewport {
+ x: rect.x,
+ y: if self.device.private_caps.flip_y_requires_shift {
+ rect.y + rect.h
+ } else {
+ rect.y
+ },
+ width: rect.w,
+ height: -rect.h, // flip Y
+ min_depth: depth_range.start,
+ max_depth: depth_range.end,
+ }];
+ unsafe {
+ self.device
+ .raw
+ .cmd_set_viewport(self.active, 0, &vk_viewports)
+ };
+ }
+ unsafe fn set_scissor_rect(&mut self, rect: &crate::Rect<u32>) {
+ let vk_scissors = [vk::Rect2D {
+ offset: vk::Offset2D {
+ x: rect.x as i32,
+ y: rect.y as i32,
+ },
+ extent: vk::Extent2D {
+ width: rect.w,
+ height: rect.h,
+ },
+ }];
+ unsafe {
+ self.device
+ .raw
+ .cmd_set_scissor(self.active, 0, &vk_scissors)
+ };
+ }
+ unsafe fn set_stencil_reference(&mut self, value: u32) {
+ unsafe {
+ self.device.raw.cmd_set_stencil_reference(
+ self.active,
+ vk::StencilFaceFlags::FRONT_AND_BACK,
+ value,
+ )
+ };
+ }
+ unsafe fn set_blend_constants(&mut self, color: &[f32; 4]) {
+ unsafe { self.device.raw.cmd_set_blend_constants(self.active, color) };
+ }
+
+ unsafe fn draw(
+ &mut self,
+ start_vertex: u32,
+ vertex_count: u32,
+ start_instance: u32,
+ instance_count: u32,
+ ) {
+ unsafe {
+ self.device.raw.cmd_draw(
+ self.active,
+ vertex_count,
+ instance_count,
+ start_vertex,
+ start_instance,
+ )
+ };
+ }
+ unsafe fn draw_indexed(
+ &mut self,
+ start_index: u32,
+ index_count: u32,
+ base_vertex: i32,
+ start_instance: u32,
+ instance_count: u32,
+ ) {
+ unsafe {
+ self.device.raw.cmd_draw_indexed(
+ self.active,
+ index_count,
+ instance_count,
+ start_index,
+ base_vertex,
+ start_instance,
+ )
+ };
+ }
+ unsafe fn draw_indirect(
+ &mut self,
+ buffer: &super::Buffer,
+ offset: wgt::BufferAddress,
+ draw_count: u32,
+ ) {
+ unsafe {
+ self.device.raw.cmd_draw_indirect(
+ self.active,
+ buffer.raw,
+ offset,
+ draw_count,
+ mem::size_of::<wgt::DrawIndirectArgs>() as u32,
+ )
+ };
+ }
+ unsafe fn draw_indexed_indirect(
+ &mut self,
+ buffer: &super::Buffer,
+ offset: wgt::BufferAddress,
+ draw_count: u32,
+ ) {
+ unsafe {
+ self.device.raw.cmd_draw_indexed_indirect(
+ self.active,
+ buffer.raw,
+ offset,
+ draw_count,
+ mem::size_of::<wgt::DrawIndexedIndirectArgs>() as u32,
+ )
+ };
+ }
+ unsafe fn draw_indirect_count(
+ &mut self,
+ buffer: &super::Buffer,
+ offset: wgt::BufferAddress,
+ count_buffer: &super::Buffer,
+ count_offset: wgt::BufferAddress,
+ max_count: u32,
+ ) {
+ let stride = mem::size_of::<wgt::DrawIndirectArgs>() as u32;
+ match self.device.extension_fns.draw_indirect_count {
+ Some(ref t) => {
+ unsafe {
+ t.cmd_draw_indirect_count(
+ self.active,
+ buffer.raw,
+ offset,
+ count_buffer.raw,
+ count_offset,
+ max_count,
+ stride,
+ )
+ };
+ }
+ None => panic!("Feature `DRAW_INDIRECT_COUNT` not enabled"),
+ }
+ }
+ unsafe fn draw_indexed_indirect_count(
+ &mut self,
+ buffer: &super::Buffer,
+ offset: wgt::BufferAddress,
+ count_buffer: &super::Buffer,
+ count_offset: wgt::BufferAddress,
+ max_count: u32,
+ ) {
+ let stride = mem::size_of::<wgt::DrawIndexedIndirectArgs>() as u32;
+ match self.device.extension_fns.draw_indirect_count {
+ Some(ref t) => {
+ unsafe {
+ t.cmd_draw_indexed_indirect_count(
+ self.active,
+ buffer.raw,
+ offset,
+ count_buffer.raw,
+ count_offset,
+ max_count,
+ stride,
+ )
+ };
+ }
+ None => panic!("Feature `DRAW_INDIRECT_COUNT` not enabled"),
+ }
+ }
+
+ // compute
+
+ unsafe fn begin_compute_pass(&mut self, desc: &crate::ComputePassDescriptor) {
+ self.bind_point = vk::PipelineBindPoint::COMPUTE;
+ if let Some(label) = desc.label {
+ unsafe { self.begin_debug_marker(label) };
+ self.rpass_debug_marker_active = true;
+ }
+ }
+ unsafe fn end_compute_pass(&mut self) {
+ if self.rpass_debug_marker_active {
+ unsafe { self.end_debug_marker() };
+ self.rpass_debug_marker_active = false
+ }
+ }
+
+ unsafe fn set_compute_pipeline(&mut self, pipeline: &super::ComputePipeline) {
+ unsafe {
+ self.device.raw.cmd_bind_pipeline(
+ self.active,
+ vk::PipelineBindPoint::COMPUTE,
+ pipeline.raw,
+ )
+ };
+ }
+
+ unsafe fn dispatch(&mut self, count: [u32; 3]) {
+ unsafe {
+ self.device
+ .raw
+ .cmd_dispatch(self.active, count[0], count[1], count[2])
+ };
+ }
+ unsafe fn dispatch_indirect(&mut self, buffer: &super::Buffer, offset: wgt::BufferAddress) {
+ unsafe {
+ self.device
+ .raw
+ .cmd_dispatch_indirect(self.active, buffer.raw, offset)
+ }
+ }
+}
+
+#[test]
+fn check_dst_image_layout() {
+ assert_eq!(
+ conv::derive_image_layout(crate::TextureUses::COPY_DST, wgt::TextureFormat::Rgba8Unorm),
+ DST_IMAGE_LAYOUT
+ );
+}
diff --git a/third_party/rust/wgpu-hal/src/vulkan/conv.rs b/third_party/rust/wgpu-hal/src/vulkan/conv.rs
new file mode 100644
index 0000000000..a26f3765b9
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/vulkan/conv.rs
@@ -0,0 +1,825 @@
+use ash::vk;
+
+impl super::PrivateCapabilities {
+ pub fn map_texture_format(&self, format: wgt::TextureFormat) -> vk::Format {
+ use ash::vk::Format as F;
+ use wgt::TextureFormat as Tf;
+ use wgt::{AstcBlock, AstcChannel};
+ match format {
+ Tf::R8Unorm => F::R8_UNORM,
+ Tf::R8Snorm => F::R8_SNORM,
+ Tf::R8Uint => F::R8_UINT,
+ Tf::R8Sint => F::R8_SINT,
+ Tf::R16Uint => F::R16_UINT,
+ Tf::R16Sint => F::R16_SINT,
+ Tf::R16Unorm => F::R16_UNORM,
+ Tf::R16Snorm => F::R16_SNORM,
+ Tf::R16Float => F::R16_SFLOAT,
+ Tf::Rg8Unorm => F::R8G8_UNORM,
+ Tf::Rg8Snorm => F::R8G8_SNORM,
+ Tf::Rg8Uint => F::R8G8_UINT,
+ Tf::Rg8Sint => F::R8G8_SINT,
+ Tf::Rg16Unorm => F::R16G16_UNORM,
+ Tf::Rg16Snorm => F::R16G16_SNORM,
+ Tf::R32Uint => F::R32_UINT,
+ Tf::R32Sint => F::R32_SINT,
+ Tf::R32Float => F::R32_SFLOAT,
+ Tf::Rg16Uint => F::R16G16_UINT,
+ Tf::Rg16Sint => F::R16G16_SINT,
+ Tf::Rg16Float => F::R16G16_SFLOAT,
+ Tf::Rgba8Unorm => F::R8G8B8A8_UNORM,
+ Tf::Rgba8UnormSrgb => F::R8G8B8A8_SRGB,
+ Tf::Bgra8UnormSrgb => F::B8G8R8A8_SRGB,
+ Tf::Rgba8Snorm => F::R8G8B8A8_SNORM,
+ Tf::Bgra8Unorm => F::B8G8R8A8_UNORM,
+ Tf::Rgba8Uint => F::R8G8B8A8_UINT,
+ Tf::Rgba8Sint => F::R8G8B8A8_SINT,
+ Tf::Rgb10a2Unorm => F::A2B10G10R10_UNORM_PACK32,
+ Tf::Rg11b10Float => F::B10G11R11_UFLOAT_PACK32,
+ Tf::Rg32Uint => F::R32G32_UINT,
+ Tf::Rg32Sint => F::R32G32_SINT,
+ Tf::Rg32Float => F::R32G32_SFLOAT,
+ Tf::Rgba16Uint => F::R16G16B16A16_UINT,
+ Tf::Rgba16Sint => F::R16G16B16A16_SINT,
+ Tf::Rgba16Unorm => F::R16G16B16A16_UNORM,
+ Tf::Rgba16Snorm => F::R16G16B16A16_SNORM,
+ Tf::Rgba16Float => F::R16G16B16A16_SFLOAT,
+ Tf::Rgba32Uint => F::R32G32B32A32_UINT,
+ Tf::Rgba32Sint => F::R32G32B32A32_SINT,
+ Tf::Rgba32Float => F::R32G32B32A32_SFLOAT,
+ Tf::Depth32Float => F::D32_SFLOAT,
+ Tf::Depth32FloatStencil8 => F::D32_SFLOAT_S8_UINT,
+ Tf::Depth24Plus => {
+ if self.texture_d24 {
+ F::X8_D24_UNORM_PACK32
+ } else {
+ F::D32_SFLOAT
+ }
+ }
+ Tf::Depth24PlusStencil8 => {
+ if self.texture_d24_s8 {
+ F::D24_UNORM_S8_UINT
+ } else {
+ F::D32_SFLOAT_S8_UINT
+ }
+ }
+ Tf::Stencil8 => {
+ if self.texture_s8 {
+ F::S8_UINT
+ } else if self.texture_d24_s8 {
+ F::D24_UNORM_S8_UINT
+ } else {
+ F::D32_SFLOAT_S8_UINT
+ }
+ }
+ Tf::Depth16Unorm => F::D16_UNORM,
+ Tf::Rgb9e5Ufloat => F::E5B9G9R9_UFLOAT_PACK32,
+ Tf::Bc1RgbaUnorm => F::BC1_RGBA_UNORM_BLOCK,
+ Tf::Bc1RgbaUnormSrgb => F::BC1_RGBA_SRGB_BLOCK,
+ Tf::Bc2RgbaUnorm => F::BC2_UNORM_BLOCK,
+ Tf::Bc2RgbaUnormSrgb => F::BC2_SRGB_BLOCK,
+ Tf::Bc3RgbaUnorm => F::BC3_UNORM_BLOCK,
+ Tf::Bc3RgbaUnormSrgb => F::BC3_SRGB_BLOCK,
+ Tf::Bc4RUnorm => F::BC4_UNORM_BLOCK,
+ Tf::Bc4RSnorm => F::BC4_SNORM_BLOCK,
+ Tf::Bc5RgUnorm => F::BC5_UNORM_BLOCK,
+ Tf::Bc5RgSnorm => F::BC5_SNORM_BLOCK,
+ Tf::Bc6hRgbUfloat => F::BC6H_UFLOAT_BLOCK,
+ Tf::Bc6hRgbFloat => F::BC6H_SFLOAT_BLOCK,
+ Tf::Bc7RgbaUnorm => F::BC7_UNORM_BLOCK,
+ Tf::Bc7RgbaUnormSrgb => F::BC7_SRGB_BLOCK,
+ Tf::Etc2Rgb8Unorm => F::ETC2_R8G8B8_UNORM_BLOCK,
+ Tf::Etc2Rgb8UnormSrgb => F::ETC2_R8G8B8_SRGB_BLOCK,
+ Tf::Etc2Rgb8A1Unorm => F::ETC2_R8G8B8A1_UNORM_BLOCK,
+ Tf::Etc2Rgb8A1UnormSrgb => F::ETC2_R8G8B8A1_SRGB_BLOCK,
+ Tf::Etc2Rgba8Unorm => F::ETC2_R8G8B8A8_UNORM_BLOCK,
+ Tf::Etc2Rgba8UnormSrgb => F::ETC2_R8G8B8A8_SRGB_BLOCK,
+ Tf::EacR11Unorm => F::EAC_R11_UNORM_BLOCK,
+ Tf::EacR11Snorm => F::EAC_R11_SNORM_BLOCK,
+ Tf::EacRg11Unorm => F::EAC_R11G11_UNORM_BLOCK,
+ Tf::EacRg11Snorm => F::EAC_R11G11_SNORM_BLOCK,
+ Tf::Astc { block, channel } => match channel {
+ AstcChannel::Unorm => match block {
+ AstcBlock::B4x4 => F::ASTC_4X4_UNORM_BLOCK,
+ AstcBlock::B5x4 => F::ASTC_5X4_UNORM_BLOCK,
+ AstcBlock::B5x5 => F::ASTC_5X5_UNORM_BLOCK,
+ AstcBlock::B6x5 => F::ASTC_6X5_UNORM_BLOCK,
+ AstcBlock::B6x6 => F::ASTC_6X6_UNORM_BLOCK,
+ AstcBlock::B8x5 => F::ASTC_8X5_UNORM_BLOCK,
+ AstcBlock::B8x6 => F::ASTC_8X6_UNORM_BLOCK,
+ AstcBlock::B8x8 => F::ASTC_8X8_UNORM_BLOCK,
+ AstcBlock::B10x5 => F::ASTC_10X5_UNORM_BLOCK,
+ AstcBlock::B10x6 => F::ASTC_10X6_UNORM_BLOCK,
+ AstcBlock::B10x8 => F::ASTC_10X8_UNORM_BLOCK,
+ AstcBlock::B10x10 => F::ASTC_10X10_UNORM_BLOCK,
+ AstcBlock::B12x10 => F::ASTC_12X10_UNORM_BLOCK,
+ AstcBlock::B12x12 => F::ASTC_12X12_UNORM_BLOCK,
+ },
+ AstcChannel::UnormSrgb => match block {
+ AstcBlock::B4x4 => F::ASTC_4X4_SRGB_BLOCK,
+ AstcBlock::B5x4 => F::ASTC_5X4_SRGB_BLOCK,
+ AstcBlock::B5x5 => F::ASTC_5X5_SRGB_BLOCK,
+ AstcBlock::B6x5 => F::ASTC_6X5_SRGB_BLOCK,
+ AstcBlock::B6x6 => F::ASTC_6X6_SRGB_BLOCK,
+ AstcBlock::B8x5 => F::ASTC_8X5_SRGB_BLOCK,
+ AstcBlock::B8x6 => F::ASTC_8X6_SRGB_BLOCK,
+ AstcBlock::B8x8 => F::ASTC_8X8_SRGB_BLOCK,
+ AstcBlock::B10x5 => F::ASTC_10X5_SRGB_BLOCK,
+ AstcBlock::B10x6 => F::ASTC_10X6_SRGB_BLOCK,
+ AstcBlock::B10x8 => F::ASTC_10X8_SRGB_BLOCK,
+ AstcBlock::B10x10 => F::ASTC_10X10_SRGB_BLOCK,
+ AstcBlock::B12x10 => F::ASTC_12X10_SRGB_BLOCK,
+ AstcBlock::B12x12 => F::ASTC_12X12_SRGB_BLOCK,
+ },
+ AstcChannel::Hdr => match block {
+ AstcBlock::B4x4 => F::ASTC_4X4_SFLOAT_BLOCK_EXT,
+ AstcBlock::B5x4 => F::ASTC_5X4_SFLOAT_BLOCK_EXT,
+ AstcBlock::B5x5 => F::ASTC_5X5_SFLOAT_BLOCK_EXT,
+ AstcBlock::B6x5 => F::ASTC_6X5_SFLOAT_BLOCK_EXT,
+ AstcBlock::B6x6 => F::ASTC_6X6_SFLOAT_BLOCK_EXT,
+ AstcBlock::B8x5 => F::ASTC_8X5_SFLOAT_BLOCK_EXT,
+ AstcBlock::B8x6 => F::ASTC_8X6_SFLOAT_BLOCK_EXT,
+ AstcBlock::B8x8 => F::ASTC_8X8_SFLOAT_BLOCK_EXT,
+ AstcBlock::B10x5 => F::ASTC_10X5_SFLOAT_BLOCK_EXT,
+ AstcBlock::B10x6 => F::ASTC_10X6_SFLOAT_BLOCK_EXT,
+ AstcBlock::B10x8 => F::ASTC_10X8_SFLOAT_BLOCK_EXT,
+ AstcBlock::B10x10 => F::ASTC_10X10_SFLOAT_BLOCK_EXT,
+ AstcBlock::B12x10 => F::ASTC_12X10_SFLOAT_BLOCK_EXT,
+ AstcBlock::B12x12 => F::ASTC_12X12_SFLOAT_BLOCK_EXT,
+ },
+ },
+ }
+ }
+}
+
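+/// Translates a Vulkan surface format/color-space pair into the matching wgpu texture
+/// format, returning `None` for combinations that wgpu does not expose.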
+pub fn map_vk_surface_formats(sf: vk::SurfaceFormatKHR) -> Option<wgt::TextureFormat> {
+ use ash::vk::Format as F;
+ use wgt::TextureFormat as Tf;
+    // The list of formats we care about is pulled from https://vulkan.gpuinfo.org/listsurfaceformats.php
+ Some(match sf.color_space {
+ vk::ColorSpaceKHR::SRGB_NONLINEAR => match sf.format {
+ F::B8G8R8A8_UNORM => Tf::Bgra8Unorm,
+ F::B8G8R8A8_SRGB => Tf::Bgra8UnormSrgb,
+ F::R8G8B8A8_SNORM => Tf::Rgba8Snorm,
+ F::R8G8B8A8_UNORM => Tf::Rgba8Unorm,
+ F::R8G8B8A8_SRGB => Tf::Rgba8UnormSrgb,
+ _ => return None,
+ },
+ vk::ColorSpaceKHR::EXTENDED_SRGB_LINEAR_EXT => match sf.format {
+ F::R16G16B16A16_SFLOAT => Tf::Rgba16Float,
+ F::R16G16B16A16_SNORM => Tf::Rgba16Snorm,
+ F::R16G16B16A16_UNORM => Tf::Rgba16Unorm,
+ F::A2B10G10R10_UNORM_PACK32 => Tf::Rgb10a2Unorm,
+ _ => return None,
+ },
+ _ => return None,
+ })
+}
+
+impl crate::Attachment<'_, super::Api> {
+ pub(super) fn make_attachment_key(
+ &self,
+ ops: crate::AttachmentOps,
+ caps: &super::PrivateCapabilities,
+ ) -> super::AttachmentKey {
+ super::AttachmentKey {
+ format: caps.map_texture_format(self.view.attachment.view_format),
+ layout: derive_image_layout(self.usage, self.view.attachment.view_format),
+ ops,
+ }
+ }
+}
+
+impl crate::ColorAttachment<'_, super::Api> {
+ pub(super) unsafe fn make_vk_clear_color(&self) -> vk::ClearColorValue {
+ let cv = &self.clear_value;
+ match self
+ .target
+ .view
+ .attachment
+ .view_format
+ .sample_type(None)
+ .unwrap()
+ {
+ wgt::TextureSampleType::Float { .. } => vk::ClearColorValue {
+ float32: [cv.r as f32, cv.g as f32, cv.b as f32, cv.a as f32],
+ },
+ wgt::TextureSampleType::Sint => vk::ClearColorValue {
+ int32: [cv.r as i32, cv.g as i32, cv.b as i32, cv.a as i32],
+ },
+ wgt::TextureSampleType::Uint => vk::ClearColorValue {
+ uint32: [cv.r as u32, cv.g as u32, cv.b as u32, cv.a as u32],
+ },
+ wgt::TextureSampleType::Depth => unreachable!(),
+ }
+ }
+}
+
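+/// Picks the Vulkan image layout for a texture in the given usage state. Single-usage
+/// states map to their dedicated optimal layouts; `PRESENT` maps to the presentation
+/// layout; combined usages fall back to `GENERAL` for color formats and the read-only
+/// depth-stencil layout otherwise.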
+pub fn derive_image_layout(
+ usage: crate::TextureUses,
+ format: wgt::TextureFormat,
+) -> vk::ImageLayout {
+    // Note: depth textures are always sampled with the read-only depth-stencil (RODS) layout
+ let is_color = crate::FormatAspects::from(format).contains(crate::FormatAspects::COLOR);
+ match usage {
+ crate::TextureUses::UNINITIALIZED => vk::ImageLayout::UNDEFINED,
+ crate::TextureUses::COPY_SRC => vk::ImageLayout::TRANSFER_SRC_OPTIMAL,
+ crate::TextureUses::COPY_DST => vk::ImageLayout::TRANSFER_DST_OPTIMAL,
+ crate::TextureUses::RESOURCE if is_color => vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
+ crate::TextureUses::COLOR_TARGET => vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL,
+ crate::TextureUses::DEPTH_STENCIL_WRITE => {
+ vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL
+ }
+ _ => {
+ if usage == crate::TextureUses::PRESENT {
+ vk::ImageLayout::PRESENT_SRC_KHR
+ } else if is_color {
+ vk::ImageLayout::GENERAL
+ } else {
+ vk::ImageLayout::DEPTH_STENCIL_READ_ONLY_OPTIMAL
+ }
+ }
+ }
+}
+
+pub fn map_texture_usage(usage: crate::TextureUses) -> vk::ImageUsageFlags {
+ let mut flags = vk::ImageUsageFlags::empty();
+ if usage.contains(crate::TextureUses::COPY_SRC) {
+ flags |= vk::ImageUsageFlags::TRANSFER_SRC;
+ }
+ if usage.contains(crate::TextureUses::COPY_DST) {
+ flags |= vk::ImageUsageFlags::TRANSFER_DST;
+ }
+ if usage.contains(crate::TextureUses::RESOURCE) {
+ flags |= vk::ImageUsageFlags::SAMPLED;
+ }
+ if usage.contains(crate::TextureUses::COLOR_TARGET) {
+ flags |= vk::ImageUsageFlags::COLOR_ATTACHMENT;
+ }
+ if usage.intersects(
+ crate::TextureUses::DEPTH_STENCIL_READ | crate::TextureUses::DEPTH_STENCIL_WRITE,
+ ) {
+ flags |= vk::ImageUsageFlags::DEPTH_STENCIL_ATTACHMENT;
+ }
+ if usage.intersects(crate::TextureUses::STORAGE_READ | crate::TextureUses::STORAGE_READ_WRITE) {
+ flags |= vk::ImageUsageFlags::STORAGE;
+ }
+ flags
+}
+
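+/// Derives the pipeline stages and access flags to use in a barrier for a texture in the
+/// given usage state; uninitialized and present-only states need no access at all.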
+pub fn map_texture_usage_to_barrier(
+ usage: crate::TextureUses,
+) -> (vk::PipelineStageFlags, vk::AccessFlags) {
+ let mut stages = vk::PipelineStageFlags::empty();
+ let mut access = vk::AccessFlags::empty();
+ let shader_stages = vk::PipelineStageFlags::VERTEX_SHADER
+ | vk::PipelineStageFlags::FRAGMENT_SHADER
+ | vk::PipelineStageFlags::COMPUTE_SHADER;
+
+ if usage.contains(crate::TextureUses::COPY_SRC) {
+ stages |= vk::PipelineStageFlags::TRANSFER;
+ access |= vk::AccessFlags::TRANSFER_READ;
+ }
+ if usage.contains(crate::TextureUses::COPY_DST) {
+ stages |= vk::PipelineStageFlags::TRANSFER;
+ access |= vk::AccessFlags::TRANSFER_WRITE;
+ }
+ if usage.contains(crate::TextureUses::RESOURCE) {
+ stages |= shader_stages;
+ access |= vk::AccessFlags::SHADER_READ;
+ }
+ if usage.contains(crate::TextureUses::COLOR_TARGET) {
+ stages |= vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT;
+ access |= vk::AccessFlags::COLOR_ATTACHMENT_READ | vk::AccessFlags::COLOR_ATTACHMENT_WRITE;
+ }
+ if usage.intersects(crate::TextureUses::DEPTH_STENCIL_READ) {
+ stages |= vk::PipelineStageFlags::EARLY_FRAGMENT_TESTS
+ | vk::PipelineStageFlags::LATE_FRAGMENT_TESTS;
+ access |= vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ;
+ }
+ if usage.intersects(crate::TextureUses::DEPTH_STENCIL_WRITE) {
+ stages |= vk::PipelineStageFlags::EARLY_FRAGMENT_TESTS
+ | vk::PipelineStageFlags::LATE_FRAGMENT_TESTS;
+ access |= vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ
+ | vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE;
+ }
+ if usage.contains(crate::TextureUses::STORAGE_READ) {
+ stages |= shader_stages;
+ access |= vk::AccessFlags::SHADER_READ;
+ }
+ if usage.contains(crate::TextureUses::STORAGE_READ_WRITE) {
+ stages |= shader_stages;
+ access |= vk::AccessFlags::SHADER_READ | vk::AccessFlags::SHADER_WRITE;
+ }
+
+ if usage == crate::TextureUses::UNINITIALIZED || usage == crate::TextureUses::PRESENT {
+ (
+ vk::PipelineStageFlags::TOP_OF_PIPE,
+ vk::AccessFlags::empty(),
+ )
+ } else {
+ (stages, access)
+ }
+}
+
+pub fn map_vk_image_usage(usage: vk::ImageUsageFlags) -> crate::TextureUses {
+ let mut bits = crate::TextureUses::empty();
+ if usage.contains(vk::ImageUsageFlags::TRANSFER_SRC) {
+ bits |= crate::TextureUses::COPY_SRC;
+ }
+ if usage.contains(vk::ImageUsageFlags::TRANSFER_DST) {
+ bits |= crate::TextureUses::COPY_DST;
+ }
+ if usage.contains(vk::ImageUsageFlags::SAMPLED) {
+ bits |= crate::TextureUses::RESOURCE;
+ }
+ if usage.contains(vk::ImageUsageFlags::COLOR_ATTACHMENT) {
+ bits |= crate::TextureUses::COLOR_TARGET;
+ }
+ if usage.contains(vk::ImageUsageFlags::DEPTH_STENCIL_ATTACHMENT) {
+ bits |= crate::TextureUses::DEPTH_STENCIL_READ | crate::TextureUses::DEPTH_STENCIL_WRITE;
+ }
+ if usage.contains(vk::ImageUsageFlags::STORAGE) {
+ bits |= crate::TextureUses::STORAGE_READ | crate::TextureUses::STORAGE_READ_WRITE;
+ }
+ bits
+}
+
+pub fn map_texture_dimension(dim: wgt::TextureDimension) -> vk::ImageType {
+ match dim {
+ wgt::TextureDimension::D1 => vk::ImageType::TYPE_1D,
+ wgt::TextureDimension::D2 => vk::ImageType::TYPE_2D,
+ wgt::TextureDimension::D3 => vk::ImageType::TYPE_3D,
+ }
+}
+
+pub fn map_index_format(index_format: wgt::IndexFormat) -> vk::IndexType {
+ match index_format {
+ wgt::IndexFormat::Uint16 => vk::IndexType::UINT16,
+ wgt::IndexFormat::Uint32 => vk::IndexType::UINT32,
+ }
+}
+
+pub fn map_vertex_format(vertex_format: wgt::VertexFormat) -> vk::Format {
+ use wgt::VertexFormat as Vf;
+ match vertex_format {
+ Vf::Uint8x2 => vk::Format::R8G8_UINT,
+ Vf::Uint8x4 => vk::Format::R8G8B8A8_UINT,
+ Vf::Sint8x2 => vk::Format::R8G8_SINT,
+ Vf::Sint8x4 => vk::Format::R8G8B8A8_SINT,
+ Vf::Unorm8x2 => vk::Format::R8G8_UNORM,
+ Vf::Unorm8x4 => vk::Format::R8G8B8A8_UNORM,
+ Vf::Snorm8x2 => vk::Format::R8G8_SNORM,
+ Vf::Snorm8x4 => vk::Format::R8G8B8A8_SNORM,
+ Vf::Uint16x2 => vk::Format::R16G16_UINT,
+ Vf::Uint16x4 => vk::Format::R16G16B16A16_UINT,
+ Vf::Sint16x2 => vk::Format::R16G16_SINT,
+ Vf::Sint16x4 => vk::Format::R16G16B16A16_SINT,
+ Vf::Unorm16x2 => vk::Format::R16G16_UNORM,
+ Vf::Unorm16x4 => vk::Format::R16G16B16A16_UNORM,
+ Vf::Snorm16x2 => vk::Format::R16G16_SNORM,
+ Vf::Snorm16x4 => vk::Format::R16G16B16A16_SNORM,
+ Vf::Float16x2 => vk::Format::R16G16_SFLOAT,
+ Vf::Float16x4 => vk::Format::R16G16B16A16_SFLOAT,
+ Vf::Float32 => vk::Format::R32_SFLOAT,
+ Vf::Float32x2 => vk::Format::R32G32_SFLOAT,
+ Vf::Float32x3 => vk::Format::R32G32B32_SFLOAT,
+ Vf::Float32x4 => vk::Format::R32G32B32A32_SFLOAT,
+ Vf::Uint32 => vk::Format::R32_UINT,
+ Vf::Uint32x2 => vk::Format::R32G32_UINT,
+ Vf::Uint32x3 => vk::Format::R32G32B32_UINT,
+ Vf::Uint32x4 => vk::Format::R32G32B32A32_UINT,
+ Vf::Sint32 => vk::Format::R32_SINT,
+ Vf::Sint32x2 => vk::Format::R32G32_SINT,
+ Vf::Sint32x3 => vk::Format::R32G32B32_SINT,
+ Vf::Sint32x4 => vk::Format::R32G32B32A32_SINT,
+ Vf::Float64 => vk::Format::R64_SFLOAT,
+ Vf::Float64x2 => vk::Format::R64G64_SFLOAT,
+ Vf::Float64x3 => vk::Format::R64G64B64_SFLOAT,
+ Vf::Float64x4 => vk::Format::R64G64B64A64_SFLOAT,
+ }
+}
+
+pub fn map_aspects(aspects: crate::FormatAspects) -> vk::ImageAspectFlags {
+ let mut flags = vk::ImageAspectFlags::empty();
+ if aspects.contains(crate::FormatAspects::COLOR) {
+ flags |= vk::ImageAspectFlags::COLOR;
+ }
+ if aspects.contains(crate::FormatAspects::DEPTH) {
+ flags |= vk::ImageAspectFlags::DEPTH;
+ }
+ if aspects.contains(crate::FormatAspects::STENCIL) {
+ flags |= vk::ImageAspectFlags::STENCIL;
+ }
+ flags
+}
+
+pub fn map_attachment_ops(
+ op: crate::AttachmentOps,
+) -> (vk::AttachmentLoadOp, vk::AttachmentStoreOp) {
+ let load_op = if op.contains(crate::AttachmentOps::LOAD) {
+ vk::AttachmentLoadOp::LOAD
+ } else {
+ vk::AttachmentLoadOp::CLEAR
+ };
+ let store_op = if op.contains(crate::AttachmentOps::STORE) {
+ vk::AttachmentStoreOp::STORE
+ } else {
+ vk::AttachmentStoreOp::DONT_CARE
+ };
+ (load_op, store_op)
+}
+
+pub fn map_present_mode(mode: wgt::PresentMode) -> vk::PresentModeKHR {
+ match mode {
+ wgt::PresentMode::Immediate => vk::PresentModeKHR::IMMEDIATE,
+ wgt::PresentMode::Mailbox => vk::PresentModeKHR::MAILBOX,
+ wgt::PresentMode::Fifo => vk::PresentModeKHR::FIFO,
+ wgt::PresentMode::FifoRelaxed => vk::PresentModeKHR::FIFO_RELAXED,
+ wgt::PresentMode::AutoNoVsync | wgt::PresentMode::AutoVsync => {
+ unreachable!("Cannot create swapchain with Auto PresentationMode")
+ }
+ }
+}
+
+pub fn map_vk_present_mode(mode: vk::PresentModeKHR) -> Option<wgt::PresentMode> {
+ if mode == vk::PresentModeKHR::IMMEDIATE {
+ Some(wgt::PresentMode::Immediate)
+ } else if mode == vk::PresentModeKHR::MAILBOX {
+ Some(wgt::PresentMode::Mailbox)
+ } else if mode == vk::PresentModeKHR::FIFO {
+ Some(wgt::PresentMode::Fifo)
+ } else if mode == vk::PresentModeKHR::FIFO_RELAXED {
+ //Some(wgt::PresentMode::Relaxed)
+ None
+ } else {
+ log::warn!("Unrecognized present mode {:?}", mode);
+ None
+ }
+}
+
+pub fn map_composite_alpha_mode(mode: wgt::CompositeAlphaMode) -> vk::CompositeAlphaFlagsKHR {
+ match mode {
+ wgt::CompositeAlphaMode::Opaque => vk::CompositeAlphaFlagsKHR::OPAQUE,
+ wgt::CompositeAlphaMode::PreMultiplied => vk::CompositeAlphaFlagsKHR::PRE_MULTIPLIED,
+ wgt::CompositeAlphaMode::PostMultiplied => vk::CompositeAlphaFlagsKHR::POST_MULTIPLIED,
+ wgt::CompositeAlphaMode::Inherit => vk::CompositeAlphaFlagsKHR::INHERIT,
+ wgt::CompositeAlphaMode::Auto => unreachable!(),
+ }
+}
+
+pub fn map_vk_composite_alpha(flags: vk::CompositeAlphaFlagsKHR) -> Vec<wgt::CompositeAlphaMode> {
+ let mut modes = Vec::new();
+ if flags.contains(vk::CompositeAlphaFlagsKHR::OPAQUE) {
+ modes.push(wgt::CompositeAlphaMode::Opaque);
+ }
+ if flags.contains(vk::CompositeAlphaFlagsKHR::PRE_MULTIPLIED) {
+ modes.push(wgt::CompositeAlphaMode::PreMultiplied);
+ }
+ if flags.contains(vk::CompositeAlphaFlagsKHR::POST_MULTIPLIED) {
+ modes.push(wgt::CompositeAlphaMode::PostMultiplied);
+ }
+ if flags.contains(vk::CompositeAlphaFlagsKHR::INHERIT) {
+ modes.push(wgt::CompositeAlphaMode::Inherit);
+ }
+ modes
+}
+
+pub fn map_buffer_usage(usage: crate::BufferUses) -> vk::BufferUsageFlags {
+ let mut flags = vk::BufferUsageFlags::empty();
+ if usage.contains(crate::BufferUses::COPY_SRC) {
+ flags |= vk::BufferUsageFlags::TRANSFER_SRC;
+ }
+ if usage.contains(crate::BufferUses::COPY_DST) {
+ flags |= vk::BufferUsageFlags::TRANSFER_DST;
+ }
+ if usage.contains(crate::BufferUses::UNIFORM) {
+ flags |= vk::BufferUsageFlags::UNIFORM_BUFFER;
+ }
+ if usage.intersects(crate::BufferUses::STORAGE_READ | crate::BufferUses::STORAGE_READ_WRITE) {
+ flags |= vk::BufferUsageFlags::STORAGE_BUFFER;
+ }
+ if usage.contains(crate::BufferUses::INDEX) {
+ flags |= vk::BufferUsageFlags::INDEX_BUFFER;
+ }
+ if usage.contains(crate::BufferUses::VERTEX) {
+ flags |= vk::BufferUsageFlags::VERTEX_BUFFER;
+ }
+ if usage.contains(crate::BufferUses::INDIRECT) {
+ flags |= vk::BufferUsageFlags::INDIRECT_BUFFER;
+ }
+ flags
+}
+
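+/// Derives the pipeline stages and access flags to use in a barrier for a buffer in the
+/// given usage state.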
+pub fn map_buffer_usage_to_barrier(
+ usage: crate::BufferUses,
+) -> (vk::PipelineStageFlags, vk::AccessFlags) {
+ let mut stages = vk::PipelineStageFlags::empty();
+ let mut access = vk::AccessFlags::empty();
+ let shader_stages = vk::PipelineStageFlags::VERTEX_SHADER
+ | vk::PipelineStageFlags::FRAGMENT_SHADER
+ | vk::PipelineStageFlags::COMPUTE_SHADER;
+
+ if usage.contains(crate::BufferUses::MAP_READ) {
+ stages |= vk::PipelineStageFlags::HOST;
+ access |= vk::AccessFlags::HOST_READ;
+ }
+ if usage.contains(crate::BufferUses::MAP_WRITE) {
+ stages |= vk::PipelineStageFlags::HOST;
+ access |= vk::AccessFlags::HOST_WRITE;
+ }
+ if usage.contains(crate::BufferUses::COPY_SRC) {
+ stages |= vk::PipelineStageFlags::TRANSFER;
+ access |= vk::AccessFlags::TRANSFER_READ;
+ }
+ if usage.contains(crate::BufferUses::COPY_DST) {
+ stages |= vk::PipelineStageFlags::TRANSFER;
+ access |= vk::AccessFlags::TRANSFER_WRITE;
+ }
+ if usage.contains(crate::BufferUses::UNIFORM) {
+ stages |= shader_stages;
+ access |= vk::AccessFlags::UNIFORM_READ;
+ }
+ if usage.intersects(crate::BufferUses::STORAGE_READ) {
+ stages |= shader_stages;
+ access |= vk::AccessFlags::SHADER_READ;
+ }
+ if usage.intersects(crate::BufferUses::STORAGE_READ_WRITE) {
+ stages |= shader_stages;
+ access |= vk::AccessFlags::SHADER_READ | vk::AccessFlags::SHADER_WRITE;
+ }
+ if usage.contains(crate::BufferUses::INDEX) {
+ stages |= vk::PipelineStageFlags::VERTEX_INPUT;
+ access |= vk::AccessFlags::INDEX_READ;
+ }
+ if usage.contains(crate::BufferUses::VERTEX) {
+ stages |= vk::PipelineStageFlags::VERTEX_INPUT;
+ access |= vk::AccessFlags::VERTEX_ATTRIBUTE_READ;
+ }
+ if usage.contains(crate::BufferUses::INDIRECT) {
+ stages |= vk::PipelineStageFlags::DRAW_INDIRECT;
+ access |= vk::AccessFlags::INDIRECT_COMMAND_READ;
+ }
+
+ (stages, access)
+}
+
+pub fn map_view_dimension(dim: wgt::TextureViewDimension) -> vk::ImageViewType {
+ match dim {
+ wgt::TextureViewDimension::D1 => vk::ImageViewType::TYPE_1D,
+ wgt::TextureViewDimension::D2 => vk::ImageViewType::TYPE_2D,
+ wgt::TextureViewDimension::D2Array => vk::ImageViewType::TYPE_2D_ARRAY,
+ wgt::TextureViewDimension::Cube => vk::ImageViewType::CUBE,
+ wgt::TextureViewDimension::CubeArray => vk::ImageViewType::CUBE_ARRAY,
+ wgt::TextureViewDimension::D3 => vk::ImageViewType::TYPE_3D,
+ }
+}
+
+pub fn map_copy_extent(extent: &crate::CopyExtent) -> vk::Extent3D {
+ vk::Extent3D {
+ width: extent.width,
+ height: extent.height,
+ depth: extent.depth,
+ }
+}
+
+pub fn map_subresource_range(
+ range: &wgt::ImageSubresourceRange,
+ format: wgt::TextureFormat,
+) -> vk::ImageSubresourceRange {
+ vk::ImageSubresourceRange {
+ aspect_mask: map_aspects(crate::FormatAspects::new(format, range.aspect)),
+ base_mip_level: range.base_mip_level,
+ level_count: range.mip_level_count.unwrap_or(vk::REMAINING_MIP_LEVELS),
+ base_array_layer: range.base_array_layer,
+ layer_count: range
+ .array_layer_count
+ .unwrap_or(vk::REMAINING_ARRAY_LAYERS),
+ }
+}
+
+pub fn map_subresource_layers(
+ base: &crate::TextureCopyBase,
+) -> (vk::ImageSubresourceLayers, vk::Offset3D) {
+ let offset = vk::Offset3D {
+ x: base.origin.x as i32,
+ y: base.origin.y as i32,
+ z: base.origin.z as i32,
+ };
+ let subresource = vk::ImageSubresourceLayers {
+ aspect_mask: map_aspects(base.aspect),
+ mip_level: base.mip_level,
+ base_array_layer: base.array_layer,
+ layer_count: 1,
+ };
+ (subresource, offset)
+}
+
+pub fn map_filter_mode(mode: wgt::FilterMode) -> vk::Filter {
+ match mode {
+ wgt::FilterMode::Nearest => vk::Filter::NEAREST,
+ wgt::FilterMode::Linear => vk::Filter::LINEAR,
+ }
+}
+
+pub fn map_mip_filter_mode(mode: wgt::FilterMode) -> vk::SamplerMipmapMode {
+ match mode {
+ wgt::FilterMode::Nearest => vk::SamplerMipmapMode::NEAREST,
+ wgt::FilterMode::Linear => vk::SamplerMipmapMode::LINEAR,
+ }
+}
+
+pub fn map_address_mode(mode: wgt::AddressMode) -> vk::SamplerAddressMode {
+ match mode {
+ wgt::AddressMode::ClampToEdge => vk::SamplerAddressMode::CLAMP_TO_EDGE,
+ wgt::AddressMode::Repeat => vk::SamplerAddressMode::REPEAT,
+ wgt::AddressMode::MirrorRepeat => vk::SamplerAddressMode::MIRRORED_REPEAT,
+ wgt::AddressMode::ClampToBorder => vk::SamplerAddressMode::CLAMP_TO_BORDER,
+ // wgt::AddressMode::MirrorClamp => vk::SamplerAddressMode::MIRROR_CLAMP_TO_EDGE,
+ }
+}
+
+pub fn map_border_color(border_color: wgt::SamplerBorderColor) -> vk::BorderColor {
+ match border_color {
+ wgt::SamplerBorderColor::TransparentBlack | wgt::SamplerBorderColor::Zero => {
+ vk::BorderColor::FLOAT_TRANSPARENT_BLACK
+ }
+ wgt::SamplerBorderColor::OpaqueBlack => vk::BorderColor::FLOAT_OPAQUE_BLACK,
+ wgt::SamplerBorderColor::OpaqueWhite => vk::BorderColor::FLOAT_OPAQUE_WHITE,
+ }
+}
+
+pub fn map_comparison(fun: wgt::CompareFunction) -> vk::CompareOp {
+ use wgt::CompareFunction as Cf;
+ match fun {
+ Cf::Never => vk::CompareOp::NEVER,
+ Cf::Less => vk::CompareOp::LESS,
+ Cf::LessEqual => vk::CompareOp::LESS_OR_EQUAL,
+ Cf::Equal => vk::CompareOp::EQUAL,
+ Cf::GreaterEqual => vk::CompareOp::GREATER_OR_EQUAL,
+ Cf::Greater => vk::CompareOp::GREATER,
+ Cf::NotEqual => vk::CompareOp::NOT_EQUAL,
+ Cf::Always => vk::CompareOp::ALWAYS,
+ }
+}
+
+pub fn map_shader_stage(stage: wgt::ShaderStages) -> vk::ShaderStageFlags {
+ let mut flags = vk::ShaderStageFlags::empty();
+ if stage.contains(wgt::ShaderStages::VERTEX) {
+ flags |= vk::ShaderStageFlags::VERTEX;
+ }
+ if stage.contains(wgt::ShaderStages::FRAGMENT) {
+ flags |= vk::ShaderStageFlags::FRAGMENT;
+ }
+ if stage.contains(wgt::ShaderStages::COMPUTE) {
+ flags |= vk::ShaderStageFlags::COMPUTE;
+ }
+ flags
+}
+
+pub fn map_binding_type(ty: wgt::BindingType) -> vk::DescriptorType {
+ match ty {
+ wgt::BindingType::Buffer {
+ ty,
+ has_dynamic_offset,
+ ..
+ } => match ty {
+ wgt::BufferBindingType::Storage { .. } => match has_dynamic_offset {
+ true => vk::DescriptorType::STORAGE_BUFFER_DYNAMIC,
+ false => vk::DescriptorType::STORAGE_BUFFER,
+ },
+ wgt::BufferBindingType::Uniform => match has_dynamic_offset {
+ true => vk::DescriptorType::UNIFORM_BUFFER_DYNAMIC,
+ false => vk::DescriptorType::UNIFORM_BUFFER,
+ },
+ },
+ wgt::BindingType::Sampler { .. } => vk::DescriptorType::SAMPLER,
+ wgt::BindingType::Texture { .. } => vk::DescriptorType::SAMPLED_IMAGE,
+ wgt::BindingType::StorageTexture { .. } => vk::DescriptorType::STORAGE_IMAGE,
+ }
+}
+
+pub fn map_topology(topology: wgt::PrimitiveTopology) -> vk::PrimitiveTopology {
+ use wgt::PrimitiveTopology as Pt;
+ match topology {
+ Pt::PointList => vk::PrimitiveTopology::POINT_LIST,
+ Pt::LineList => vk::PrimitiveTopology::LINE_LIST,
+ Pt::LineStrip => vk::PrimitiveTopology::LINE_STRIP,
+ Pt::TriangleList => vk::PrimitiveTopology::TRIANGLE_LIST,
+ Pt::TriangleStrip => vk::PrimitiveTopology::TRIANGLE_STRIP,
+ }
+}
+
+pub fn map_polygon_mode(mode: wgt::PolygonMode) -> vk::PolygonMode {
+ match mode {
+ wgt::PolygonMode::Fill => vk::PolygonMode::FILL,
+ wgt::PolygonMode::Line => vk::PolygonMode::LINE,
+ wgt::PolygonMode::Point => vk::PolygonMode::POINT,
+ }
+}
+
+pub fn map_front_face(front_face: wgt::FrontFace) -> vk::FrontFace {
+ match front_face {
+ wgt::FrontFace::Cw => vk::FrontFace::CLOCKWISE,
+ wgt::FrontFace::Ccw => vk::FrontFace::COUNTER_CLOCKWISE,
+ }
+}
+
+pub fn map_cull_face(face: wgt::Face) -> vk::CullModeFlags {
+ match face {
+ wgt::Face::Front => vk::CullModeFlags::FRONT,
+ wgt::Face::Back => vk::CullModeFlags::BACK,
+ }
+}
+
+pub fn map_stencil_op(op: wgt::StencilOperation) -> vk::StencilOp {
+ use wgt::StencilOperation as So;
+ match op {
+ So::Keep => vk::StencilOp::KEEP,
+ So::Zero => vk::StencilOp::ZERO,
+ So::Replace => vk::StencilOp::REPLACE,
+ So::Invert => vk::StencilOp::INVERT,
+ So::IncrementClamp => vk::StencilOp::INCREMENT_AND_CLAMP,
+ So::IncrementWrap => vk::StencilOp::INCREMENT_AND_WRAP,
+ So::DecrementClamp => vk::StencilOp::DECREMENT_AND_CLAMP,
+ So::DecrementWrap => vk::StencilOp::DECREMENT_AND_WRAP,
+ }
+}
+
+pub fn map_stencil_face(
+ face: &wgt::StencilFaceState,
+ compare_mask: u32,
+ write_mask: u32,
+) -> vk::StencilOpState {
+ vk::StencilOpState {
+ fail_op: map_stencil_op(face.fail_op),
+ pass_op: map_stencil_op(face.pass_op),
+ depth_fail_op: map_stencil_op(face.depth_fail_op),
+ compare_op: map_comparison(face.compare),
+ compare_mask,
+ write_mask,
+ reference: 0,
+ }
+}
+
+fn map_blend_factor(factor: wgt::BlendFactor) -> vk::BlendFactor {
+ use wgt::BlendFactor as Bf;
+ match factor {
+ Bf::Zero => vk::BlendFactor::ZERO,
+ Bf::One => vk::BlendFactor::ONE,
+ Bf::Src => vk::BlendFactor::SRC_COLOR,
+ Bf::OneMinusSrc => vk::BlendFactor::ONE_MINUS_SRC_COLOR,
+ Bf::SrcAlpha => vk::BlendFactor::SRC_ALPHA,
+ Bf::OneMinusSrcAlpha => vk::BlendFactor::ONE_MINUS_SRC_ALPHA,
+ Bf::Dst => vk::BlendFactor::DST_COLOR,
+ Bf::OneMinusDst => vk::BlendFactor::ONE_MINUS_DST_COLOR,
+ Bf::DstAlpha => vk::BlendFactor::DST_ALPHA,
+ Bf::OneMinusDstAlpha => vk::BlendFactor::ONE_MINUS_DST_ALPHA,
+ Bf::SrcAlphaSaturated => vk::BlendFactor::SRC_ALPHA_SATURATE,
+ Bf::Constant => vk::BlendFactor::CONSTANT_COLOR,
+ Bf::OneMinusConstant => vk::BlendFactor::ONE_MINUS_CONSTANT_COLOR,
+ }
+}
+
+fn map_blend_op(operation: wgt::BlendOperation) -> vk::BlendOp {
+ use wgt::BlendOperation as Bo;
+ match operation {
+ Bo::Add => vk::BlendOp::ADD,
+ Bo::Subtract => vk::BlendOp::SUBTRACT,
+ Bo::ReverseSubtract => vk::BlendOp::REVERSE_SUBTRACT,
+ Bo::Min => vk::BlendOp::MIN,
+ Bo::Max => vk::BlendOp::MAX,
+ }
+}
+
+pub fn map_blend_component(
+ component: &wgt::BlendComponent,
+) -> (vk::BlendOp, vk::BlendFactor, vk::BlendFactor) {
+ let op = map_blend_op(component.operation);
+ let src = map_blend_factor(component.src_factor);
+ let dst = map_blend_factor(component.dst_factor);
+ (op, src, dst)
+}
+
+pub fn map_pipeline_statistics(
+ types: wgt::PipelineStatisticsTypes,
+) -> vk::QueryPipelineStatisticFlags {
+ use wgt::PipelineStatisticsTypes as Pst;
+ let mut flags = vk::QueryPipelineStatisticFlags::empty();
+ if types.contains(Pst::VERTEX_SHADER_INVOCATIONS) {
+ flags |= vk::QueryPipelineStatisticFlags::VERTEX_SHADER_INVOCATIONS;
+ }
+ if types.contains(Pst::CLIPPER_INVOCATIONS) {
+ flags |= vk::QueryPipelineStatisticFlags::CLIPPING_INVOCATIONS;
+ }
+ if types.contains(Pst::CLIPPER_PRIMITIVES_OUT) {
+ flags |= vk::QueryPipelineStatisticFlags::CLIPPING_PRIMITIVES;
+ }
+ if types.contains(Pst::FRAGMENT_SHADER_INVOCATIONS) {
+ flags |= vk::QueryPipelineStatisticFlags::FRAGMENT_SHADER_INVOCATIONS;
+ }
+ if types.contains(Pst::COMPUTE_SHADER_INVOCATIONS) {
+ flags |= vk::QueryPipelineStatisticFlags::COMPUTE_SHADER_INVOCATIONS;
+ }
+ flags
+}
diff --git a/third_party/rust/wgpu-hal/src/vulkan/device.rs b/third_party/rust/wgpu-hal/src/vulkan/device.rs
new file mode 100644
index 0000000000..09b887772c
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/vulkan/device.rs
@@ -0,0 +1,2032 @@
+use super::conv;
+
+use arrayvec::ArrayVec;
+use ash::{extensions::khr, vk};
+use parking_lot::Mutex;
+
+use std::{
+ borrow::Cow,
+ collections::{hash_map::Entry, BTreeMap},
+ ffi::{CStr, CString},
+ num::NonZeroU32,
+ ptr,
+ sync::Arc,
+};
+
+impl super::DeviceShared {
+ pub(super) unsafe fn set_object_name(
+ &self,
+ object_type: vk::ObjectType,
+ object: impl vk::Handle,
+ name: &str,
+ ) {
+ let extension = match self.instance.debug_utils {
+ Some(ref debug_utils) => &debug_utils.extension,
+ None => return,
+ };
+
+ // Keep variables outside the if-else block to ensure they do not
+ // go out of scope while we hold a pointer to them
+ let mut buffer: [u8; 64] = [0u8; 64];
+ let buffer_vec: Vec<u8>;
+
+ // Append a null terminator to the string
+ let name_bytes = if name.len() < buffer.len() {
+ // Common case, string is very small. Allocate a copy on the stack.
+ buffer[..name.len()].copy_from_slice(name.as_bytes());
+ // Add null terminator
+ buffer[name.len()] = 0;
+ &buffer[..name.len() + 1]
+ } else {
+ // Less common case, the string is large.
+ // This requires a heap allocation.
+ buffer_vec = name
+ .as_bytes()
+ .iter()
+ .cloned()
+ .chain(std::iter::once(0))
+ .collect();
+ &buffer_vec
+ };
+
+ let name = unsafe { CStr::from_bytes_with_nul_unchecked(name_bytes) };
+
+ let _result = unsafe {
+ extension.set_debug_utils_object_name(
+ self.raw.handle(),
+ &vk::DebugUtilsObjectNameInfoEXT::builder()
+ .object_type(object_type)
+ .object_handle(object.as_raw())
+ .object_name(name),
+ )
+ };
+ }
+
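+    /// Looks up (or lazily creates and caches) a `vk::RenderPass` matching the given key,
+    /// building a single subpass from the key's color, resolve and depth-stencil
+    /// attachments and, when requested, a multiview view mask.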
+ pub fn make_render_pass(
+ &self,
+ key: super::RenderPassKey,
+ ) -> Result<vk::RenderPass, crate::DeviceError> {
+ Ok(match self.render_passes.lock().entry(key) {
+ Entry::Occupied(e) => *e.get(),
+ Entry::Vacant(e) => {
+ let mut vk_attachments = Vec::new();
+ let mut color_refs = Vec::with_capacity(e.key().colors.len());
+ let mut resolve_refs = Vec::with_capacity(color_refs.capacity());
+ let mut ds_ref = None;
+ let samples = vk::SampleCountFlags::from_raw(e.key().sample_count);
+ let unused = vk::AttachmentReference {
+ attachment: vk::ATTACHMENT_UNUSED,
+ layout: vk::ImageLayout::UNDEFINED,
+ };
+ for cat in e.key().colors.iter() {
+ let (color_ref, resolve_ref) = if let Some(cat) = cat.as_ref() {
+ let color_ref = vk::AttachmentReference {
+ attachment: vk_attachments.len() as u32,
+ layout: cat.base.layout,
+ };
+ vk_attachments.push({
+ let (load_op, store_op) = conv::map_attachment_ops(cat.base.ops);
+ vk::AttachmentDescription::builder()
+ .format(cat.base.format)
+ .samples(samples)
+ .load_op(load_op)
+ .store_op(store_op)
+ .initial_layout(cat.base.layout)
+ .final_layout(cat.base.layout)
+ .build()
+ });
+ let resolve_ref = if let Some(ref rat) = cat.resolve {
+ let (load_op, store_op) = conv::map_attachment_ops(rat.ops);
+ let vk_attachment = vk::AttachmentDescription::builder()
+ .format(rat.format)
+ .samples(vk::SampleCountFlags::TYPE_1)
+ .load_op(load_op)
+ .store_op(store_op)
+ .initial_layout(rat.layout)
+ .final_layout(rat.layout)
+ .build();
+ vk_attachments.push(vk_attachment);
+
+ vk::AttachmentReference {
+ attachment: vk_attachments.len() as u32 - 1,
+ layout: rat.layout,
+ }
+ } else {
+ unused
+ };
+
+ (color_ref, resolve_ref)
+ } else {
+ (unused, unused)
+ };
+
+ color_refs.push(color_ref);
+ resolve_refs.push(resolve_ref);
+ }
+
+ if let Some(ref ds) = e.key().depth_stencil {
+ ds_ref = Some(vk::AttachmentReference {
+ attachment: vk_attachments.len() as u32,
+ layout: ds.base.layout,
+ });
+ let (load_op, store_op) = conv::map_attachment_ops(ds.base.ops);
+ let (stencil_load_op, stencil_store_op) =
+ conv::map_attachment_ops(ds.stencil_ops);
+ let vk_attachment = vk::AttachmentDescription::builder()
+ .format(ds.base.format)
+ .samples(samples)
+ .load_op(load_op)
+ .store_op(store_op)
+ .stencil_load_op(stencil_load_op)
+ .stencil_store_op(stencil_store_op)
+ .initial_layout(ds.base.layout)
+ .final_layout(ds.base.layout)
+ .build();
+ vk_attachments.push(vk_attachment);
+ }
+
+ let vk_subpasses = [{
+ let mut vk_subpass = vk::SubpassDescription::builder()
+ .pipeline_bind_point(vk::PipelineBindPoint::GRAPHICS)
+ .color_attachments(&color_refs)
+ .resolve_attachments(&resolve_refs);
+
+ if self
+ .workarounds
+ .contains(super::Workarounds::EMPTY_RESOLVE_ATTACHMENT_LISTS)
+ && resolve_refs.is_empty()
+ {
+ vk_subpass.p_resolve_attachments = ptr::null();
+ }
+
+ if let Some(ref reference) = ds_ref {
+ vk_subpass = vk_subpass.depth_stencil_attachment(reference)
+ }
+ vk_subpass.build()
+ }];
+
+ let mut vk_info = vk::RenderPassCreateInfo::builder()
+ .attachments(&vk_attachments)
+ .subpasses(&vk_subpasses);
+
+ let mut multiview_info;
+ let mask;
+ if let Some(multiview) = e.key().multiview {
+ // Sanity checks, better to panic here than cause a driver crash
+ assert!(multiview.get() <= 8);
+ assert!(multiview.get() > 1);
+
+ // Right now we enable all bits on the view masks and correlation masks.
+ // This means we're rendering to all views in the subpass, and that all views
+ // can be rendered concurrently.
+ mask = [(1 << multiview.get()) - 1];
+
+ // On Vulkan 1.1 or later, this is an alias for core functionality
+ multiview_info = vk::RenderPassMultiviewCreateInfoKHR::builder()
+ .view_masks(&mask)
+ .correlation_masks(&mask)
+ .build();
+ vk_info = vk_info.push_next(&mut multiview_info);
+ }
+
+ let raw = unsafe { self.raw.create_render_pass(&vk_info, None)? };
+
+ *e.insert(raw)
+ }
+ })
+ }
+
+ pub fn make_framebuffer(
+ &self,
+ key: super::FramebufferKey,
+ raw_pass: vk::RenderPass,
+ pass_label: crate::Label,
+ ) -> Result<vk::Framebuffer, crate::DeviceError> {
+ Ok(match self.framebuffers.lock().entry(key) {
+ Entry::Occupied(e) => *e.get(),
+ Entry::Vacant(e) => {
+ let vk_views = e
+ .key()
+ .attachments
+ .iter()
+ .map(|at| at.raw)
+ .collect::<ArrayVec<_, { super::MAX_TOTAL_ATTACHMENTS }>>();
+ let vk_view_formats = e
+ .key()
+ .attachments
+ .iter()
+ .map(|at| self.private_caps.map_texture_format(at.view_format))
+ .collect::<ArrayVec<_, { super::MAX_TOTAL_ATTACHMENTS }>>();
+ let vk_view_formats_list = e
+ .key()
+ .attachments
+ .iter()
+ .map(|at| at.raw_view_formats.clone())
+ .collect::<ArrayVec<_, { super::MAX_TOTAL_ATTACHMENTS }>>();
+
+ let vk_image_infos = e
+ .key()
+ .attachments
+ .iter()
+ .enumerate()
+ .map(|(i, at)| {
+ let mut info = vk::FramebufferAttachmentImageInfo::builder()
+ .usage(conv::map_texture_usage(at.view_usage))
+ .flags(at.raw_image_flags)
+ .width(e.key().extent.width)
+ .height(e.key().extent.height)
+ .layer_count(e.key().extent.depth_or_array_layers);
+ // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkRenderPassBeginInfo.html#VUID-VkRenderPassBeginInfo-framebuffer-03214
+ if vk_view_formats_list[i].is_empty() {
+ info = info.view_formats(&vk_view_formats[i..i + 1]);
+ } else {
+ info = info.view_formats(&vk_view_formats_list[i]);
+ };
+ info.build()
+ })
+ .collect::<ArrayVec<_, { super::MAX_TOTAL_ATTACHMENTS }>>();
+
+ let mut vk_attachment_info = vk::FramebufferAttachmentsCreateInfo::builder()
+ .attachment_image_infos(&vk_image_infos)
+ .build();
+ let mut vk_info = vk::FramebufferCreateInfo::builder()
+ .render_pass(raw_pass)
+ .width(e.key().extent.width)
+ .height(e.key().extent.height)
+ .layers(e.key().extent.depth_or_array_layers);
+
+ if self.private_caps.imageless_framebuffers {
+ //TODO: https://github.com/MaikKlein/ash/issues/450
+ vk_info = vk_info
+ .flags(vk::FramebufferCreateFlags::IMAGELESS_KHR)
+ .push_next(&mut vk_attachment_info);
+ vk_info.attachment_count = e.key().attachments.len() as u32;
+ } else {
+ vk_info = vk_info.attachments(&vk_views);
+ }
+
+ *e.insert(unsafe {
+ let raw = self.raw.create_framebuffer(&vk_info, None).unwrap();
+ if let Some(label) = pass_label {
+ self.set_object_name(vk::ObjectType::FRAMEBUFFER, raw, label);
+ }
+ raw
+ })
+ }
+ })
+ }
+
+ fn make_memory_ranges<'a, I: 'a + Iterator<Item = crate::MemoryRange>>(
+ &self,
+ buffer: &'a super::Buffer,
+ ranges: I,
+ ) -> impl 'a + Iterator<Item = vk::MappedMemoryRange> {
+ let block = buffer.block.lock();
+ let mask = self.private_caps.non_coherent_map_mask;
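+ // Align each range to the non-coherent atom size: round the offset down and
+ // the size up so flushes/invalidations cover whole atoms.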
+ ranges.map(move |range| {
+ vk::MappedMemoryRange::builder()
+ .memory(*block.memory())
+ .offset((block.offset() + range.start) & !mask)
+ .size((range.end - range.start + mask) & !mask)
+ .build()
+ })
+ }
+
+ unsafe fn free_resources(&self) {
+ for &raw in self.render_passes.lock().values() {
+ unsafe { self.raw.destroy_render_pass(raw, None) };
+ }
+ for &raw in self.framebuffers.lock().values() {
+ unsafe { self.raw.destroy_framebuffer(raw, None) };
+ }
+ if self.handle_is_owned {
+ unsafe { self.raw.destroy_device(None) };
+ }
+ }
+}
+
+impl gpu_alloc::MemoryDevice<vk::DeviceMemory> for super::DeviceShared {
+ unsafe fn allocate_memory(
+ &self,
+ size: u64,
+ memory_type: u32,
+ flags: gpu_alloc::AllocationFlags,
+ ) -> Result<vk::DeviceMemory, gpu_alloc::OutOfMemory> {
+ let mut info = vk::MemoryAllocateInfo::builder()
+ .allocation_size(size)
+ .memory_type_index(memory_type);
+
+ let mut info_flags;
+
+ if flags.contains(gpu_alloc::AllocationFlags::DEVICE_ADDRESS) {
+ info_flags = vk::MemoryAllocateFlagsInfo::builder()
+ .flags(vk::MemoryAllocateFlags::DEVICE_ADDRESS);
+ info = info.push_next(&mut info_flags);
+ }
+
+ match unsafe { self.raw.allocate_memory(&info, None) } {
+ Ok(memory) => Ok(memory),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => {
+ Err(gpu_alloc::OutOfMemory::OutOfDeviceMemory)
+ }
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => {
+ Err(gpu_alloc::OutOfMemory::OutOfHostMemory)
+ }
+ Err(vk::Result::ERROR_TOO_MANY_OBJECTS) => panic!("Too many objects"),
+ Err(err) => panic!("Unexpected Vulkan error: `{err}`"),
+ }
+ }
+
+ unsafe fn deallocate_memory(&self, memory: vk::DeviceMemory) {
+ unsafe { self.raw.free_memory(memory, None) };
+ }
+
+ unsafe fn map_memory(
+ &self,
+ memory: &mut vk::DeviceMemory,
+ offset: u64,
+ size: u64,
+ ) -> Result<ptr::NonNull<u8>, gpu_alloc::DeviceMapError> {
+ match unsafe {
+ self.raw
+ .map_memory(*memory, offset, size, vk::MemoryMapFlags::empty())
+ } {
+ Ok(ptr) => Ok(ptr::NonNull::new(ptr as *mut u8)
+ .expect("Pointer to memory mapping must not be null")),
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => {
+ Err(gpu_alloc::DeviceMapError::OutOfDeviceMemory)
+ }
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => {
+ Err(gpu_alloc::DeviceMapError::OutOfHostMemory)
+ }
+ Err(vk::Result::ERROR_MEMORY_MAP_FAILED) => Err(gpu_alloc::DeviceMapError::MapFailed),
+ Err(err) => panic!("Unexpected Vulkan error: `{err}`"),
+ }
+ }
+
+ unsafe fn unmap_memory(&self, memory: &mut vk::DeviceMemory) {
+ unsafe { self.raw.unmap_memory(*memory) };
+ }
+
+ unsafe fn invalidate_memory_ranges(
+ &self,
+ _ranges: &[gpu_alloc::MappedMemoryRange<'_, vk::DeviceMemory>],
+ ) -> Result<(), gpu_alloc::OutOfMemory> {
+ // should never be called
+ unimplemented!()
+ }
+
+ unsafe fn flush_memory_ranges(
+ &self,
+ _ranges: &[gpu_alloc::MappedMemoryRange<'_, vk::DeviceMemory>],
+ ) -> Result<(), gpu_alloc::OutOfMemory> {
+ // should never be called
+ unimplemented!()
+ }
+}
+
+impl
+ gpu_descriptor::DescriptorDevice<vk::DescriptorSetLayout, vk::DescriptorPool, vk::DescriptorSet>
+ for super::DeviceShared
+{
+ unsafe fn create_descriptor_pool(
+ &self,
+ descriptor_count: &gpu_descriptor::DescriptorTotalCount,
+ max_sets: u32,
+ flags: gpu_descriptor::DescriptorPoolCreateFlags,
+ ) -> Result<vk::DescriptorPool, gpu_descriptor::CreatePoolError> {
+ //Note: ignoring other types, since they can't appear here
+ let unfiltered_counts = [
+ (vk::DescriptorType::SAMPLER, descriptor_count.sampler),
+ (
+ vk::DescriptorType::SAMPLED_IMAGE,
+ descriptor_count.sampled_image,
+ ),
+ (
+ vk::DescriptorType::STORAGE_IMAGE,
+ descriptor_count.storage_image,
+ ),
+ (
+ vk::DescriptorType::UNIFORM_BUFFER,
+ descriptor_count.uniform_buffer,
+ ),
+ (
+ vk::DescriptorType::UNIFORM_BUFFER_DYNAMIC,
+ descriptor_count.uniform_buffer_dynamic,
+ ),
+ (
+ vk::DescriptorType::STORAGE_BUFFER,
+ descriptor_count.storage_buffer,
+ ),
+ (
+ vk::DescriptorType::STORAGE_BUFFER_DYNAMIC,
+ descriptor_count.storage_buffer_dynamic,
+ ),
+ ];
+
+ let filtered_counts = unfiltered_counts
+ .iter()
+ .cloned()
+ .filter(|&(_, count)| count != 0)
+ .map(|(ty, count)| vk::DescriptorPoolSize {
+ ty,
+ descriptor_count: count,
+ })
+ .collect::<ArrayVec<_, 8>>();
+
+ let mut vk_flags =
+ if flags.contains(gpu_descriptor::DescriptorPoolCreateFlags::UPDATE_AFTER_BIND) {
+ vk::DescriptorPoolCreateFlags::UPDATE_AFTER_BIND
+ } else {
+ vk::DescriptorPoolCreateFlags::empty()
+ };
+ if flags.contains(gpu_descriptor::DescriptorPoolCreateFlags::FREE_DESCRIPTOR_SET) {
+ vk_flags |= vk::DescriptorPoolCreateFlags::FREE_DESCRIPTOR_SET;
+ }
+ let vk_info = vk::DescriptorPoolCreateInfo::builder()
+ .max_sets(max_sets)
+ .flags(vk_flags)
+ .pool_sizes(&filtered_counts)
+ .build();
+
+ match unsafe { self.raw.create_descriptor_pool(&vk_info, None) } {
+ Ok(pool) => Ok(pool),
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY) => {
+ Err(gpu_descriptor::CreatePoolError::OutOfHostMemory)
+ }
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => {
+ Err(gpu_descriptor::CreatePoolError::OutOfDeviceMemory)
+ }
+ Err(vk::Result::ERROR_FRAGMENTATION) => {
+ Err(gpu_descriptor::CreatePoolError::Fragmentation)
+ }
+ Err(other) => {
+ log::error!("create_descriptor_pool: {:?}", other);
+ Err(gpu_descriptor::CreatePoolError::OutOfHostMemory)
+ }
+ }
+ }
+
+ unsafe fn destroy_descriptor_pool(&self, pool: vk::DescriptorPool) {
+ unsafe { self.raw.destroy_descriptor_pool(pool, None) }
+ }
+
+ unsafe fn alloc_descriptor_sets<'a>(
+ &self,
+ pool: &mut vk::DescriptorPool,
+ layouts: impl ExactSizeIterator<Item = &'a vk::DescriptorSetLayout>,
+ sets: &mut impl Extend<vk::DescriptorSet>,
+ ) -> Result<(), gpu_descriptor::DeviceAllocationError> {
+ let result = unsafe {
+ self.raw.allocate_descriptor_sets(
+ &vk::DescriptorSetAllocateInfo::builder()
+ .descriptor_pool(*pool)
+ .set_layouts(
+ &smallvec::SmallVec::<[vk::DescriptorSetLayout; 32]>::from_iter(
+ layouts.cloned(),
+ ),
+ )
+ .build(),
+ )
+ };
+
+ match result {
+ Ok(vk_sets) => {
+ sets.extend(vk_sets);
+ Ok(())
+ }
+ Err(vk::Result::ERROR_OUT_OF_HOST_MEMORY)
+ | Err(vk::Result::ERROR_OUT_OF_POOL_MEMORY) => {
+ Err(gpu_descriptor::DeviceAllocationError::OutOfHostMemory)
+ }
+ Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => {
+ Err(gpu_descriptor::DeviceAllocationError::OutOfDeviceMemory)
+ }
+ Err(vk::Result::ERROR_FRAGMENTED_POOL) => {
+ Err(gpu_descriptor::DeviceAllocationError::FragmentedPool)
+ }
+ Err(other) => {
+ log::error!("allocate_descriptor_sets: {:?}", other);
+ Err(gpu_descriptor::DeviceAllocationError::OutOfHostMemory)
+ }
+ }
+ }
+
+ unsafe fn dealloc_descriptor_sets<'a>(
+ &self,
+ pool: &mut vk::DescriptorPool,
+ sets: impl Iterator<Item = vk::DescriptorSet>,
+ ) {
+ let result = unsafe {
+ self.raw.free_descriptor_sets(
+ *pool,
+ &smallvec::SmallVec::<[vk::DescriptorSet; 32]>::from_iter(sets),
+ )
+ };
+ match result {
+ Ok(()) => {}
+ Err(err) => log::error!("free_descriptor_sets: {:?}", err),
+ }
+ }
+}
+
+struct CompiledStage {
+ create_info: vk::PipelineShaderStageCreateInfo,
+ _entry_point: CString,
+ temp_raw_module: Option<vk::ShaderModule>,
+}
+
+impl super::Device {
+ pub(super) unsafe fn create_swapchain(
+ &self,
+ surface: &mut super::Surface,
+ config: &crate::SurfaceConfiguration,
+ provided_old_swapchain: Option<super::Swapchain>,
+ ) -> Result<super::Swapchain, crate::SurfaceError> {
+ profiling::scope!("Device::create_swapchain");
+ let functor = khr::Swapchain::new(&surface.instance.raw, &self.shared.raw);
+
+ let old_swapchain = match provided_old_swapchain {
+ Some(osc) => osc.raw,
+ None => vk::SwapchainKHR::null(),
+ };
+
+ let color_space = if config.format == wgt::TextureFormat::Rgba16Float {
+ // Enable wide color gamut mode
+ // Vulkan swapchain for Android only supports DISPLAY_P3_NONLINEAR_EXT and EXTENDED_SRGB_LINEAR_EXT
+ vk::ColorSpaceKHR::EXTENDED_SRGB_LINEAR_EXT
+ } else {
+ vk::ColorSpaceKHR::SRGB_NONLINEAR
+ };
+
+ let original_format = self.shared.private_caps.map_texture_format(config.format);
+ let mut raw_flags = vk::SwapchainCreateFlagsKHR::empty();
+ let mut raw_view_formats: Vec<vk::Format> = vec![];
+ let mut wgt_view_formats = vec![];
+ if !config.view_formats.is_empty() {
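+ // Reinterpreting swapchain images in other formats requires the MUTABLE_FORMAT
+ // flag, and the format list must also contain the swapchain's own format
+ // (pushed below).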
+ raw_flags |= vk::SwapchainCreateFlagsKHR::MUTABLE_FORMAT;
+ raw_view_formats = config
+ .view_formats
+ .iter()
+ .map(|f| self.shared.private_caps.map_texture_format(*f))
+ .collect();
+ raw_view_formats.push(original_format);
+
+ wgt_view_formats = config.view_formats.clone();
+ wgt_view_formats.push(config.format);
+ }
+
+ let mut info = vk::SwapchainCreateInfoKHR::builder()
+ .flags(raw_flags)
+ .surface(surface.raw)
+ .min_image_count(config.swap_chain_size)
+ .image_format(original_format)
+ .image_color_space(color_space)
+ .image_extent(vk::Extent2D {
+ width: config.extent.width,
+ height: config.extent.height,
+ })
+ .image_array_layers(config.extent.depth_or_array_layers)
+ .image_usage(conv::map_texture_usage(config.usage))
+ .image_sharing_mode(vk::SharingMode::EXCLUSIVE)
+ .pre_transform(vk::SurfaceTransformFlagsKHR::IDENTITY)
+ .composite_alpha(conv::map_composite_alpha_mode(config.composite_alpha_mode))
+ .present_mode(conv::map_present_mode(config.present_mode))
+ .clipped(true)
+ .old_swapchain(old_swapchain);
+
+ let mut format_list_info = vk::ImageFormatListCreateInfo::builder();
+ if !raw_view_formats.is_empty() {
+ format_list_info = format_list_info.view_formats(&raw_view_formats);
+ info = info.push_next(&mut format_list_info);
+ }
+
+ let result = {
+ profiling::scope!("vkCreateSwapchainKHR");
+ unsafe { functor.create_swapchain(&info, None) }
+ };
+
+ // Destroy the old (retired) swapchain before potentially bailing out with an error.
+ if old_swapchain != vk::SwapchainKHR::null() {
+ unsafe { functor.destroy_swapchain(old_swapchain, None) }
+ }
+
+ let raw = match result {
+ Ok(swapchain) => swapchain,
+ Err(error) => {
+ return Err(match error {
+ vk::Result::ERROR_SURFACE_LOST_KHR => crate::SurfaceError::Lost,
+ vk::Result::ERROR_NATIVE_WINDOW_IN_USE_KHR => {
+ crate::SurfaceError::Other("Native window is in use")
+ }
+ other => crate::DeviceError::from(other).into(),
+ })
+ }
+ };
+
+ let images =
+ unsafe { functor.get_swapchain_images(raw) }.map_err(crate::DeviceError::from)?;
+
+ let vk_info = vk::FenceCreateInfo::builder().build();
+ let fence = unsafe { self.shared.raw.create_fence(&vk_info, None) }
+ .map_err(crate::DeviceError::from)?;
+
+ Ok(super::Swapchain {
+ raw,
+ raw_flags,
+ functor,
+ device: Arc::clone(&self.shared),
+ fence,
+ images,
+ config: config.clone(),
+ view_formats: wgt_view_formats,
+ })
+ }
+
+ /// # Safety
+ ///
+ /// - `vk_image` must be created respecting `desc`
+ /// - If `drop_guard` is `Some`, the application must manually destroy the image handle. This
+ /// can be done inside the `Drop` impl of `drop_guard`.
+ /// - If the `ImageCreateFlags` does not contain `MUTABLE_FORMAT`, the `view_formats` of `desc` must be empty.
+ pub unsafe fn texture_from_raw(
+ vk_image: vk::Image,
+ desc: &crate::TextureDescriptor,
+ drop_guard: Option<crate::DropGuard>,
+ ) -> super::Texture {
+ let mut raw_flags = vk::ImageCreateFlags::empty();
+ let mut view_formats = vec![];
+ for tf in desc.view_formats.iter() {
+ if *tf == desc.format {
+ continue;
+ }
+ view_formats.push(*tf);
+ }
+ if !view_formats.is_empty() {
+ raw_flags |=
+ vk::ImageCreateFlags::MUTABLE_FORMAT | vk::ImageCreateFlags::EXTENDED_USAGE;
+ view_formats.push(desc.format)
+ }
+
+ super::Texture {
+ raw: vk_image,
+ drop_guard,
+ block: None,
+ usage: desc.usage,
+ format: desc.format,
+ raw_flags,
+ copy_size: desc.copy_extent(),
+ view_formats,
+ }
+ }
+
+ fn create_shader_module_impl(
+ &self,
+ spv: &[u32],
+ ) -> Result<vk::ShaderModule, crate::DeviceError> {
+ let vk_info = vk::ShaderModuleCreateInfo::builder()
+ .flags(vk::ShaderModuleCreateFlags::empty())
+ .code(spv);
+
+ let raw = unsafe {
+ profiling::scope!("vkCreateShaderModule");
+ self.shared.raw.create_shader_module(&vk_info, None)?
+ };
+ Ok(raw)
+ }
+
+ fn compile_stage(
+ &self,
+ stage: &crate::ProgrammableStage<super::Api>,
+ naga_stage: naga::ShaderStage,
+ binding_map: &naga::back::spv::BindingMap,
+ ) -> Result<CompiledStage, crate::PipelineError> {
+ let stage_flags = crate::auxil::map_naga_stage(naga_stage);
+ let vk_module = match *stage.module {
+ super::ShaderModule::Raw(raw) => raw,
+ super::ShaderModule::Intermediate {
+ ref naga_shader,
+ runtime_checks,
+ } => {
+ let pipeline_options = naga::back::spv::PipelineOptions {
+ entry_point: stage.entry_point.to_string(),
+ shader_stage: naga_stage,
+ };
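+ // Only clone the base Naga options when they need adjusting: bounds checks
+ // disabled or a non-empty binding map.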
+ let needs_temp_options = !runtime_checks || !binding_map.is_empty();
+ let mut temp_options;
+ let options = if needs_temp_options {
+ temp_options = self.naga_options.clone();
+ if !runtime_checks {
+ temp_options.bounds_check_policies = naga::proc::BoundsCheckPolicies {
+ index: naga::proc::BoundsCheckPolicy::Unchecked,
+ buffer: naga::proc::BoundsCheckPolicy::Unchecked,
+ image: naga::proc::BoundsCheckPolicy::Unchecked,
+ binding_array: naga::proc::BoundsCheckPolicy::Unchecked,
+ };
+ }
+ if !binding_map.is_empty() {
+ temp_options.binding_map = binding_map.clone();
+ }
+ &temp_options
+ } else {
+ &self.naga_options
+ };
+ let spv = {
+ profiling::scope!("naga::spv::write_vec");
+ naga::back::spv::write_vec(
+ &naga_shader.module,
+ &naga_shader.info,
+ options,
+ Some(&pipeline_options),
+ )
+ }
+ .map_err(|e| crate::PipelineError::Linkage(stage_flags, format!("{e}")))?;
+ self.create_shader_module_impl(&spv)?
+ }
+ };
+
+ let entry_point = CString::new(stage.entry_point).unwrap();
+ let create_info = vk::PipelineShaderStageCreateInfo::builder()
+ .stage(conv::map_shader_stage(stage_flags))
+ .module(vk_module)
+ .name(&entry_point)
+ .build();
+
+ Ok(CompiledStage {
+ create_info,
+ _entry_point: entry_point,
+ temp_raw_module: match *stage.module {
+ super::ShaderModule::Raw(_) => None,
+ super::ShaderModule::Intermediate { .. } => Some(vk_module),
+ },
+ })
+ }
+
+ /// Returns the queue family index of the device's internal queue.
+ ///
+ /// This is useful for constructing memory barriers needed for queue family ownership transfer when
+ /// external memory is involved (from/to `VK_QUEUE_FAMILY_EXTERNAL_KHR` and `VK_QUEUE_FAMILY_FOREIGN_EXT`
+ /// for example).
+ pub fn queue_family_index(&self) -> u32 {
+ self.shared.family_index
+ }
+
+ pub fn queue_index(&self) -> u32 {
+ self.shared.queue_index
+ }
+
+ pub fn raw_device(&self) -> &ash::Device {
+ &self.shared.raw
+ }
+
+ pub fn raw_physical_device(&self) -> ash::vk::PhysicalDevice {
+ self.shared.physical_device
+ }
+
+ pub fn raw_queue(&self) -> ash::vk::Queue {
+ self.shared.raw_queue
+ }
+
+ pub fn enabled_device_extensions(&self) -> &[&'static CStr] {
+ &self.shared.enabled_extensions
+ }
+
+ pub fn shared_instance(&self) -> &super::InstanceShared {
+ &self.shared.instance
+ }
+}
+
+impl crate::Device<super::Api> for super::Device {
+ unsafe fn exit(self, queue: super::Queue) {
+ unsafe { self.mem_allocator.into_inner().cleanup(&*self.shared) };
+ unsafe { self.desc_allocator.into_inner().cleanup(&*self.shared) };
+ for &sem in queue.relay_semaphores.iter() {
+ unsafe { self.shared.raw.destroy_semaphore(sem, None) };
+ }
+ unsafe { self.shared.free_resources() };
+ }
+
+ unsafe fn create_buffer(
+ &self,
+ desc: &crate::BufferDescriptor,
+ ) -> Result<super::Buffer, crate::DeviceError> {
+ let vk_info = vk::BufferCreateInfo::builder()
+ .size(desc.size)
+ .usage(conv::map_buffer_usage(desc.usage))
+ .sharing_mode(vk::SharingMode::EXCLUSIVE);
+
+ let raw = unsafe { self.shared.raw.create_buffer(&vk_info, None)? };
+ let req = unsafe { self.shared.raw.get_buffer_memory_requirements(raw) };
+
+ let mut alloc_usage = if desc
+ .usage
+ .intersects(crate::BufferUses::MAP_READ | crate::BufferUses::MAP_WRITE)
+ {
+ let mut flags = gpu_alloc::UsageFlags::HOST_ACCESS;
+ //TODO: find a way to use `crate::MemoryFlags::PREFER_COHERENT`
+ flags.set(
+ gpu_alloc::UsageFlags::DOWNLOAD,
+ desc.usage.contains(crate::BufferUses::MAP_READ),
+ );
+ flags.set(
+ gpu_alloc::UsageFlags::UPLOAD,
+ desc.usage.contains(crate::BufferUses::MAP_WRITE),
+ );
+ flags
+ } else {
+ gpu_alloc::UsageFlags::FAST_DEVICE_ACCESS
+ };
+ alloc_usage.set(
+ gpu_alloc::UsageFlags::TRANSIENT,
+ desc.memory_flags.contains(crate::MemoryFlags::TRANSIENT),
+ );
+
+ let block = unsafe {
+ self.mem_allocator.lock().alloc(
+ &*self.shared,
+ gpu_alloc::Request {
+ size: req.size,
+ align_mask: req.alignment - 1,
+ usage: alloc_usage,
+ memory_types: req.memory_type_bits & self.valid_ash_memory_types,
+ },
+ )?
+ };
+
+ unsafe {
+ self.shared
+ .raw
+ .bind_buffer_memory(raw, *block.memory(), block.offset())?
+ };
+
+ if let Some(label) = desc.label {
+ unsafe {
+ self.shared
+ .set_object_name(vk::ObjectType::BUFFER, raw, label)
+ };
+ }
+
+ Ok(super::Buffer {
+ raw,
+ block: Mutex::new(block),
+ })
+ }
+ unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
+ unsafe { self.shared.raw.destroy_buffer(buffer.raw, None) };
+ unsafe {
+ self.mem_allocator
+ .lock()
+ .dealloc(&*self.shared, buffer.block.into_inner())
+ };
+ }
+
+ unsafe fn map_buffer(
+ &self,
+ buffer: &super::Buffer,
+ range: crate::MemoryRange,
+ ) -> Result<crate::BufferMapping, crate::DeviceError> {
+ let size = range.end - range.start;
+ let mut block = buffer.block.lock();
+ let ptr = unsafe { block.map(&*self.shared, range.start, size as usize)? };
+ let is_coherent = block
+ .props()
+ .contains(gpu_alloc::MemoryPropertyFlags::HOST_COHERENT);
+ Ok(crate::BufferMapping { ptr, is_coherent })
+ }
+ unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), crate::DeviceError> {
+ unsafe { buffer.block.lock().unmap(&*self.shared) };
+ Ok(())
+ }
+
+ unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
+ where
+ I: Iterator<Item = crate::MemoryRange>,
+ {
+ let vk_ranges = self.shared.make_memory_ranges(buffer, ranges);
+
+ unsafe {
+ self.shared
+ .raw
+ .flush_mapped_memory_ranges(
+ &smallvec::SmallVec::<[vk::MappedMemoryRange; 32]>::from_iter(vk_ranges),
+ )
+ }
+ .unwrap();
+ }
+ unsafe fn invalidate_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
+ where
+ I: Iterator<Item = crate::MemoryRange>,
+ {
+ let vk_ranges = self.shared.make_memory_ranges(buffer, ranges);
+
+ unsafe {
+ self.shared
+ .raw
+ .invalidate_mapped_memory_ranges(
+ &smallvec::SmallVec::<[vk::MappedMemoryRange; 32]>::from_iter(vk_ranges),
+ )
+ }
+ .unwrap();
+ }
+
+ unsafe fn create_texture(
+ &self,
+ desc: &crate::TextureDescriptor,
+ ) -> Result<super::Texture, crate::DeviceError> {
+ let copy_size = desc.copy_extent();
+
+ let mut raw_flags = vk::ImageCreateFlags::empty();
+ if desc.is_cube_compatible() {
+ raw_flags |= vk::ImageCreateFlags::CUBE_COMPATIBLE;
+ }
+
+ let original_format = self.shared.private_caps.map_texture_format(desc.format);
+ let mut vk_view_formats = vec![];
+ let mut wgt_view_formats = vec![];
+ if !desc.view_formats.is_empty() {
+ raw_flags |= vk::ImageCreateFlags::MUTABLE_FORMAT;
+ wgt_view_formats = desc.view_formats.clone();
+ wgt_view_formats.push(desc.format);
+
+ if self.shared_instance().driver_api_version >= vk::API_VERSION_1_2
+ || self
+ .enabled_device_extensions()
+ .contains(&vk::KhrImageFormatListFn::name())
+ {
+ vk_view_formats = desc
+ .view_formats
+ .iter()
+ .map(|f| self.shared.private_caps.map_texture_format(*f))
+ .collect();
+ vk_view_formats.push(original_format)
+ }
+ }
+
+ let mut vk_info = vk::ImageCreateInfo::builder()
+ .flags(raw_flags)
+ .image_type(conv::map_texture_dimension(desc.dimension))
+ .format(original_format)
+ .extent(conv::map_copy_extent(&copy_size))
+ .mip_levels(desc.mip_level_count)
+ .array_layers(desc.array_layer_count())
+ .samples(vk::SampleCountFlags::from_raw(desc.sample_count))
+ .tiling(vk::ImageTiling::OPTIMAL)
+ .usage(conv::map_texture_usage(desc.usage))
+ .sharing_mode(vk::SharingMode::EXCLUSIVE)
+ .initial_layout(vk::ImageLayout::UNDEFINED);
+
+ let mut format_list_info = vk::ImageFormatListCreateInfo::builder();
+ if !vk_view_formats.is_empty() {
+ format_list_info = format_list_info.view_formats(&vk_view_formats);
+ vk_info = vk_info.push_next(&mut format_list_info);
+ }
+
+ let raw = unsafe { self.shared.raw.create_image(&vk_info, None)? };
+ let req = unsafe { self.shared.raw.get_image_memory_requirements(raw) };
+
+ let block = unsafe {
+ self.mem_allocator.lock().alloc(
+ &*self.shared,
+ gpu_alloc::Request {
+ size: req.size,
+ align_mask: req.alignment - 1,
+ usage: gpu_alloc::UsageFlags::FAST_DEVICE_ACCESS,
+ memory_types: req.memory_type_bits & self.valid_ash_memory_types,
+ },
+ )?
+ };
+
+ unsafe {
+ self.shared
+ .raw
+ .bind_image_memory(raw, *block.memory(), block.offset())?
+ };
+
+ if let Some(label) = desc.label {
+ unsafe {
+ self.shared
+ .set_object_name(vk::ObjectType::IMAGE, raw, label)
+ };
+ }
+
+ Ok(super::Texture {
+ raw,
+ drop_guard: None,
+ block: Some(block),
+ usage: desc.usage,
+ format: desc.format,
+ raw_flags,
+ copy_size,
+ view_formats: wgt_view_formats,
+ })
+ }
+ unsafe fn destroy_texture(&self, texture: super::Texture) {
+ if texture.drop_guard.is_none() {
+ unsafe { self.shared.raw.destroy_image(texture.raw, None) };
+ }
+ if let Some(block) = texture.block {
+ unsafe { self.mem_allocator.lock().dealloc(&*self.shared, block) };
+ }
+ }
+
+ unsafe fn create_texture_view(
+ &self,
+ texture: &super::Texture,
+ desc: &crate::TextureViewDescriptor,
+ ) -> Result<super::TextureView, crate::DeviceError> {
+ let subresource_range = conv::map_subresource_range(&desc.range, desc.format);
+ let mut vk_info = vk::ImageViewCreateInfo::builder()
+ .flags(vk::ImageViewCreateFlags::empty())
+ .image(texture.raw)
+ .view_type(conv::map_view_dimension(desc.dimension))
+ .format(self.shared.private_caps.map_texture_format(desc.format))
+ .subresource_range(subresource_range);
+ let layers =
+ NonZeroU32::new(subresource_range.layer_count).expect("Unexpected zero layer count");
+
+ let mut image_view_info;
+ let view_usage = if self.shared.private_caps.image_view_usage && !desc.usage.is_empty() {
+ image_view_info = vk::ImageViewUsageCreateInfo::builder()
+ .usage(conv::map_texture_usage(desc.usage))
+ .build();
+ vk_info = vk_info.push_next(&mut image_view_info);
+ desc.usage
+ } else {
+ texture.usage
+ };
+
+ let raw = unsafe { self.shared.raw.create_image_view(&vk_info, None) }?;
+
+ if let Some(label) = desc.label {
+ unsafe {
+ self.shared
+ .set_object_name(vk::ObjectType::IMAGE_VIEW, raw, label)
+ };
+ }
+
+ let attachment = super::FramebufferAttachment {
+ raw: if self.shared.private_caps.imageless_framebuffers {
+ vk::ImageView::null()
+ } else {
+ raw
+ },
+ raw_image_flags: texture.raw_flags,
+ view_usage,
+ view_format: desc.format,
+ raw_view_formats: texture
+ .view_formats
+ .iter()
+ .map(|tf| self.shared.private_caps.map_texture_format(*tf))
+ .collect(),
+ };
+
+ Ok(super::TextureView {
+ raw,
+ layers,
+ attachment,
+ })
+ }
+ unsafe fn destroy_texture_view(&self, view: super::TextureView) {
+ if !self.shared.private_caps.imageless_framebuffers {
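+ // Destroy any cached framebuffers that reference this view, then drop them from the cache.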
+ let mut fbuf_lock = self.shared.framebuffers.lock();
+ for (key, &raw_fbuf) in fbuf_lock.iter() {
+ if key.attachments.iter().any(|at| at.raw == view.raw) {
+ unsafe { self.shared.raw.destroy_framebuffer(raw_fbuf, None) };
+ }
+ }
+ fbuf_lock.retain(|key, _| !key.attachments.iter().any(|at| at.raw == view.raw));
+ }
+ unsafe { self.shared.raw.destroy_image_view(view.raw, None) };
+ }
+
+ unsafe fn create_sampler(
+ &self,
+ desc: &crate::SamplerDescriptor,
+ ) -> Result<super::Sampler, crate::DeviceError> {
+ let mut vk_info = vk::SamplerCreateInfo::builder()
+ .flags(vk::SamplerCreateFlags::empty())
+ .mag_filter(conv::map_filter_mode(desc.mag_filter))
+ .min_filter(conv::map_filter_mode(desc.min_filter))
+ .mipmap_mode(conv::map_mip_filter_mode(desc.mipmap_filter))
+ .address_mode_u(conv::map_address_mode(desc.address_modes[0]))
+ .address_mode_v(conv::map_address_mode(desc.address_modes[1]))
+ .address_mode_w(conv::map_address_mode(desc.address_modes[2]))
+ .min_lod(desc.lod_clamp.start)
+ .max_lod(desc.lod_clamp.end);
+
+ if let Some(fun) = desc.compare {
+ vk_info = vk_info
+ .compare_enable(true)
+ .compare_op(conv::map_comparison(fun));
+ }
+
+ if desc.anisotropy_clamp != 1 {
+ // We only enable anisotropy if it is supported, and the wgpu-hal interface guarantees
+ // the clamp is in the range [1, 16] which is always supported if anisotropy is.
+ vk_info = vk_info
+ .anisotropy_enable(true)
+ .max_anisotropy(desc.anisotropy_clamp as f32);
+ }
+
+ if let Some(color) = desc.border_color {
+ vk_info = vk_info.border_color(conv::map_border_color(color));
+ }
+
+ let raw = unsafe { self.shared.raw.create_sampler(&vk_info, None)? };
+
+ if let Some(label) = desc.label {
+ unsafe {
+ self.shared
+ .set_object_name(vk::ObjectType::SAMPLER, raw, label)
+ };
+ }
+
+ Ok(super::Sampler { raw })
+ }
+ unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
+ unsafe { self.shared.raw.destroy_sampler(sampler.raw, None) };
+ }
+
+ unsafe fn create_command_encoder(
+ &self,
+ desc: &crate::CommandEncoderDescriptor<super::Api>,
+ ) -> Result<super::CommandEncoder, crate::DeviceError> {
+ let vk_info = vk::CommandPoolCreateInfo::builder()
+ .queue_family_index(desc.queue.family_index)
+ .flags(vk::CommandPoolCreateFlags::TRANSIENT)
+ .build();
+ let raw = unsafe { self.shared.raw.create_command_pool(&vk_info, None)? };
+
+ Ok(super::CommandEncoder {
+ raw,
+ device: Arc::clone(&self.shared),
+ active: vk::CommandBuffer::null(),
+ bind_point: vk::PipelineBindPoint::default(),
+ temp: super::Temp::default(),
+ free: Vec::new(),
+ discarded: Vec::new(),
+ rpass_debug_marker_active: false,
+ })
+ }
+ unsafe fn destroy_command_encoder(&self, cmd_encoder: super::CommandEncoder) {
+ unsafe {
+ if !cmd_encoder.free.is_empty() {
+ self.shared
+ .raw
+ .free_command_buffers(cmd_encoder.raw, &cmd_encoder.free)
+ }
+ if !cmd_encoder.discarded.is_empty() {
+ self.shared
+ .raw
+ .free_command_buffers(cmd_encoder.raw, &cmd_encoder.discarded)
+ }
+ self.shared.raw.destroy_command_pool(cmd_encoder.raw, None);
+ }
+ }
+
+ unsafe fn create_bind_group_layout(
+ &self,
+ desc: &crate::BindGroupLayoutDescriptor,
+ ) -> Result<super::BindGroupLayout, crate::DeviceError> {
+ let mut desc_count = gpu_descriptor::DescriptorTotalCount::default();
+ let mut types = Vec::new();
+ for entry in desc.entries {
+ let count = entry.count.map_or(1, |c| c.get());
+ if entry.binding as usize >= types.len() {
+ types.resize(
+ entry.binding as usize + 1,
+ (vk::DescriptorType::INPUT_ATTACHMENT, 0),
+ );
+ }
+ types[entry.binding as usize] = (
+ conv::map_binding_type(entry.ty),
+ entry.count.map_or(1, |c| c.get()),
+ );
+
+ match entry.ty {
+ wgt::BindingType::Buffer {
+ ty,
+ has_dynamic_offset,
+ ..
+ } => match ty {
+ wgt::BufferBindingType::Uniform => {
+ if has_dynamic_offset {
+ desc_count.uniform_buffer_dynamic += count;
+ } else {
+ desc_count.uniform_buffer += count;
+ }
+ }
+ wgt::BufferBindingType::Storage { .. } => {
+ if has_dynamic_offset {
+ desc_count.storage_buffer_dynamic += count;
+ } else {
+ desc_count.storage_buffer += count;
+ }
+ }
+ },
+ wgt::BindingType::Sampler { .. } => {
+ desc_count.sampler += count;
+ }
+ wgt::BindingType::Texture { .. } => {
+ desc_count.sampled_image += count;
+ }
+ wgt::BindingType::StorageTexture { .. } => {
+ desc_count.storage_image += count;
+ }
+ }
+ }
+
+ //Note: not bothering with on stack array here as it's low frequency
+ let vk_bindings = desc
+ .entries
+ .iter()
+ .map(|entry| vk::DescriptorSetLayoutBinding {
+ binding: entry.binding,
+ descriptor_type: types[entry.binding as usize].0,
+ descriptor_count: types[entry.binding as usize].1,
+ stage_flags: conv::map_shader_stage(entry.visibility),
+ p_immutable_samplers: ptr::null(),
+ })
+ .collect::<Vec<_>>();
+
+ let vk_info = vk::DescriptorSetLayoutCreateInfo::builder().bindings(&vk_bindings);
+
+ let binding_arrays = desc
+ .entries
+ .iter()
+ .enumerate()
+ .filter_map(|(idx, entry)| entry.count.map(|count| (idx as u32, count)))
+ .collect();
+
+ let mut binding_flag_info;
+ let binding_flag_vec;
+
+ let partially_bound = desc
+ .flags
+ .contains(crate::BindGroupLayoutFlags::PARTIALLY_BOUND);
+
+ let vk_info = if partially_bound {
+ binding_flag_vec = desc
+ .entries
+ .iter()
+ .map(|entry| {
+ let mut flags = vk::DescriptorBindingFlags::empty();
+
+ if partially_bound && entry.count.is_some() {
+ flags |= vk::DescriptorBindingFlags::PARTIALLY_BOUND;
+ }
+
+ flags
+ })
+ .collect::<Vec<_>>();
+
+ binding_flag_info = vk::DescriptorSetLayoutBindingFlagsCreateInfo::builder()
+ .binding_flags(&binding_flag_vec);
+
+ vk_info.push_next(&mut binding_flag_info)
+ } else {
+ vk_info
+ };
+
+ let raw = unsafe {
+ self.shared
+ .raw
+ .create_descriptor_set_layout(&vk_info, None)?
+ };
+
+ if let Some(label) = desc.label {
+ unsafe {
+ self.shared
+ .set_object_name(vk::ObjectType::DESCRIPTOR_SET_LAYOUT, raw, label)
+ };
+ }
+
+ Ok(super::BindGroupLayout {
+ raw,
+ desc_count,
+ types: types.into_boxed_slice(),
+ binding_arrays,
+ })
+ }
+ unsafe fn destroy_bind_group_layout(&self, bg_layout: super::BindGroupLayout) {
+ unsafe {
+ self.shared
+ .raw
+ .destroy_descriptor_set_layout(bg_layout.raw, None)
+ };
+ }
+
+ unsafe fn create_pipeline_layout(
+ &self,
+ desc: &crate::PipelineLayoutDescriptor<super::Api>,
+ ) -> Result<super::PipelineLayout, crate::DeviceError> {
+ //Note: not bothering with on stack array here as it's low frequency
+ let vk_set_layouts = desc
+ .bind_group_layouts
+ .iter()
+ .map(|bgl| bgl.raw)
+ .collect::<Vec<_>>();
+ let vk_push_constant_ranges = desc
+ .push_constant_ranges
+ .iter()
+ .map(|pcr| vk::PushConstantRange {
+ stage_flags: conv::map_shader_stage(pcr.stages),
+ offset: pcr.range.start,
+ size: pcr.range.end - pcr.range.start,
+ })
+ .collect::<Vec<_>>();
+
+ let vk_info = vk::PipelineLayoutCreateInfo::builder()
+ .flags(vk::PipelineLayoutCreateFlags::empty())
+ .set_layouts(&vk_set_layouts)
+ .push_constant_ranges(&vk_push_constant_ranges);
+
+ let raw = {
+ profiling::scope!("vkCreatePipelineLayout");
+ unsafe { self.shared.raw.create_pipeline_layout(&vk_info, None)? }
+ };
+
+ if let Some(label) = desc.label {
+ unsafe {
+ self.shared
+ .set_object_name(vk::ObjectType::PIPELINE_LAYOUT, raw, label)
+ };
+ }
+
+ let mut binding_arrays = BTreeMap::new();
+ for (group, &layout) in desc.bind_group_layouts.iter().enumerate() {
+ for &(binding, binding_array_size) in &layout.binding_arrays {
+ binding_arrays.insert(
+ naga::ResourceBinding {
+ group: group as u32,
+ binding,
+ },
+ naga::back::spv::BindingInfo {
+ binding_array_size: Some(binding_array_size.get()),
+ },
+ );
+ }
+ }
+
+ Ok(super::PipelineLayout {
+ raw,
+ binding_arrays,
+ })
+ }
+ unsafe fn destroy_pipeline_layout(&self, pipeline_layout: super::PipelineLayout) {
+ unsafe {
+ self.shared
+ .raw
+ .destroy_pipeline_layout(pipeline_layout.raw, None)
+ };
+ }
+
+ unsafe fn create_bind_group(
+ &self,
+ desc: &crate::BindGroupDescriptor<super::Api>,
+ ) -> Result<super::BindGroup, crate::DeviceError> {
+ let mut vk_sets = unsafe {
+ self.desc_allocator.lock().allocate(
+ &*self.shared,
+ &desc.layout.raw,
+ gpu_descriptor::DescriptorSetLayoutCreateFlags::empty(),
+ &desc.layout.desc_count,
+ 1,
+ )?
+ };
+
+ let set = vk_sets.pop().unwrap();
+ if let Some(label) = desc.label {
+ unsafe {
+ self.shared
+ .set_object_name(vk::ObjectType::DESCRIPTOR_SET, *set.raw(), label)
+ };
+ }
+
+ let mut writes = Vec::with_capacity(desc.entries.len());
+ let mut buffer_infos = Vec::with_capacity(desc.buffers.len());
+ let mut sampler_infos = Vec::with_capacity(desc.samplers.len());
+ let mut image_infos = Vec::with_capacity(desc.textures.len());
+ for entry in desc.entries {
+ let (ty, size) = desc.layout.types[entry.binding as usize];
+ if size == 0 {
+ continue; // empty slot
+ }
+ let mut write = vk::WriteDescriptorSet::builder()
+ .dst_set(*set.raw())
+ .dst_binding(entry.binding)
+ .descriptor_type(ty);
+ write = match ty {
+ vk::DescriptorType::SAMPLER => {
+ let index = sampler_infos.len();
+ let start = entry.resource_index;
+ let end = start + entry.count;
+ sampler_infos.extend(desc.samplers[start as usize..end as usize].iter().map(
+ |binding| {
+ vk::DescriptorImageInfo::builder()
+ .sampler(binding.raw)
+ .build()
+ },
+ ));
+ write.image_info(&sampler_infos[index..])
+ }
+ vk::DescriptorType::SAMPLED_IMAGE | vk::DescriptorType::STORAGE_IMAGE => {
+ let index = image_infos.len();
+ let start = entry.resource_index;
+ let end = start + entry.count;
+ image_infos.extend(desc.textures[start as usize..end as usize].iter().map(
+ |binding| {
+ let layout = conv::derive_image_layout(
+ binding.usage,
+ binding.view.attachment.view_format,
+ );
+ vk::DescriptorImageInfo::builder()
+ .image_view(binding.view.raw)
+ .image_layout(layout)
+ .build()
+ },
+ ));
+ write.image_info(&image_infos[index..])
+ }
+ vk::DescriptorType::UNIFORM_BUFFER
+ | vk::DescriptorType::UNIFORM_BUFFER_DYNAMIC
+ | vk::DescriptorType::STORAGE_BUFFER
+ | vk::DescriptorType::STORAGE_BUFFER_DYNAMIC => {
+ let index = buffer_infos.len();
+ let start = entry.resource_index;
+ let end = start + entry.count;
+ buffer_infos.extend(desc.buffers[start as usize..end as usize].iter().map(
+ |binding| {
+ vk::DescriptorBufferInfo::builder()
+ .buffer(binding.buffer.raw)
+ .offset(binding.offset)
+ .range(binding.size.map_or(vk::WHOLE_SIZE, wgt::BufferSize::get))
+ .build()
+ },
+ ));
+ write.buffer_info(&buffer_infos[index..])
+ }
+ _ => unreachable!(),
+ };
+ writes.push(write.build());
+ }
+
+ unsafe { self.shared.raw.update_descriptor_sets(&writes, &[]) };
+ Ok(super::BindGroup { set })
+ }
+ unsafe fn destroy_bind_group(&self, group: super::BindGroup) {
+ unsafe {
+ self.desc_allocator
+ .lock()
+ .free(&*self.shared, Some(group.set))
+ };
+ }
+
+ unsafe fn create_shader_module(
+ &self,
+ desc: &crate::ShaderModuleDescriptor,
+ shader: crate::ShaderInput,
+ ) -> Result<super::ShaderModule, crate::ShaderError> {
+ let spv = match shader {
+ crate::ShaderInput::Naga(naga_shader) => {
+ if self
+ .shared
+ .workarounds
+ .contains(super::Workarounds::SEPARATE_ENTRY_POINTS)
+ {
+ return Ok(super::ShaderModule::Intermediate {
+ naga_shader,
+ runtime_checks: desc.runtime_checks,
+ });
+ }
+ let mut naga_options = self.naga_options.clone();
+ if !desc.runtime_checks {
+ naga_options.bounds_check_policies = naga::proc::BoundsCheckPolicies {
+ index: naga::proc::BoundsCheckPolicy::Unchecked,
+ buffer: naga::proc::BoundsCheckPolicy::Unchecked,
+ image: naga::proc::BoundsCheckPolicy::Unchecked,
+ binding_array: naga::proc::BoundsCheckPolicy::Unchecked,
+ };
+ }
+ Cow::Owned(
+ naga::back::spv::write_vec(
+ &naga_shader.module,
+ &naga_shader.info,
+ &naga_options,
+ None,
+ )
+ .map_err(|e| crate::ShaderError::Compilation(format!("{e}")))?,
+ )
+ }
+ crate::ShaderInput::SpirV(spv) => Cow::Borrowed(spv),
+ };
+
+ let raw = self.create_shader_module_impl(&spv)?;
+
+ if let Some(label) = desc.label {
+ unsafe {
+ self.shared
+ .set_object_name(vk::ObjectType::SHADER_MODULE, raw, label)
+ };
+ }
+
+ Ok(super::ShaderModule::Raw(raw))
+ }
+ unsafe fn destroy_shader_module(&self, module: super::ShaderModule) {
+ match module {
+ super::ShaderModule::Raw(raw) => {
+ unsafe { self.shared.raw.destroy_shader_module(raw, None) };
+ }
+ super::ShaderModule::Intermediate { .. } => {}
+ }
+ }
+
+ unsafe fn create_render_pipeline(
+ &self,
+ desc: &crate::RenderPipelineDescriptor<super::Api>,
+ ) -> Result<super::RenderPipeline, crate::PipelineError> {
+ let dynamic_states = [
+ vk::DynamicState::VIEWPORT,
+ vk::DynamicState::SCISSOR,
+ vk::DynamicState::BLEND_CONSTANTS,
+ vk::DynamicState::STENCIL_REFERENCE,
+ ];
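+ // Build a key describing a render pass compatible with this pipeline;
+ // make_render_pass caches and reuses passes per key.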
+ let mut compatible_rp_key = super::RenderPassKey {
+ sample_count: desc.multisample.count,
+ multiview: desc.multiview,
+ ..Default::default()
+ };
+ let mut stages = ArrayVec::<_, 2>::new();
+ let mut vertex_buffers = Vec::with_capacity(desc.vertex_buffers.len());
+ let mut vertex_attributes = Vec::new();
+
+ for (i, vb) in desc.vertex_buffers.iter().enumerate() {
+ vertex_buffers.push(vk::VertexInputBindingDescription {
+ binding: i as u32,
+ stride: vb.array_stride as u32,
+ input_rate: match vb.step_mode {
+ wgt::VertexStepMode::Vertex => vk::VertexInputRate::VERTEX,
+ wgt::VertexStepMode::Instance => vk::VertexInputRate::INSTANCE,
+ },
+ });
+ for at in vb.attributes {
+ vertex_attributes.push(vk::VertexInputAttributeDescription {
+ location: at.shader_location,
+ binding: i as u32,
+ format: conv::map_vertex_format(at.format),
+ offset: at.offset as u32,
+ });
+ }
+ }
+
+ let vk_vertex_input = vk::PipelineVertexInputStateCreateInfo::builder()
+ .vertex_binding_descriptions(&vertex_buffers)
+ .vertex_attribute_descriptions(&vertex_attributes)
+ .build();
+
+ let vk_input_assembly = vk::PipelineInputAssemblyStateCreateInfo::builder()
+ .topology(conv::map_topology(desc.primitive.topology))
+ .primitive_restart_enable(desc.primitive.strip_index_format.is_some())
+ .build();
+
+ let compiled_vs = self.compile_stage(
+ &desc.vertex_stage,
+ naga::ShaderStage::Vertex,
+ &desc.layout.binding_arrays,
+ )?;
+ stages.push(compiled_vs.create_info);
+ let compiled_fs = match desc.fragment_stage {
+ Some(ref stage) => {
+ let compiled = self.compile_stage(
+ stage,
+ naga::ShaderStage::Fragment,
+ &desc.layout.binding_arrays,
+ )?;
+ stages.push(compiled.create_info);
+ Some(compiled)
+ }
+ None => None,
+ };
+
+ let mut vk_rasterization = vk::PipelineRasterizationStateCreateInfo::builder()
+ .polygon_mode(conv::map_polygon_mode(desc.primitive.polygon_mode))
+ .front_face(conv::map_front_face(desc.primitive.front_face))
+ .line_width(1.0);
+ if let Some(face) = desc.primitive.cull_mode {
+ vk_rasterization = vk_rasterization.cull_mode(conv::map_cull_face(face))
+ }
+ let mut vk_rasterization_conservative_state =
+ vk::PipelineRasterizationConservativeStateCreateInfoEXT::builder()
+ .conservative_rasterization_mode(vk::ConservativeRasterizationModeEXT::OVERESTIMATE)
+ .build();
+ if desc.primitive.conservative {
+ vk_rasterization = vk_rasterization.push_next(&mut vk_rasterization_conservative_state);
+ }
+ let mut vk_depth_clip_state =
+ vk::PipelineRasterizationDepthClipStateCreateInfoEXT::builder()
+ .depth_clip_enable(false)
+ .build();
+ if desc.primitive.unclipped_depth {
+ vk_rasterization = vk_rasterization.push_next(&mut vk_depth_clip_state);
+ }
+
+ let mut vk_depth_stencil = vk::PipelineDepthStencilStateCreateInfo::builder();
+ if let Some(ref ds) = desc.depth_stencil {
+ let vk_format = self.shared.private_caps.map_texture_format(ds.format);
+ let vk_layout = if ds.is_read_only(desc.primitive.cull_mode) {
+ vk::ImageLayout::DEPTH_STENCIL_READ_ONLY_OPTIMAL
+ } else {
+ vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL
+ };
+ compatible_rp_key.depth_stencil = Some(super::DepthStencilAttachmentKey {
+ base: super::AttachmentKey::compatible(vk_format, vk_layout),
+ stencil_ops: crate::AttachmentOps::all(),
+ });
+
+ if ds.is_depth_enabled() {
+ vk_depth_stencil = vk_depth_stencil
+ .depth_test_enable(true)
+ .depth_write_enable(ds.depth_write_enabled)
+ .depth_compare_op(conv::map_comparison(ds.depth_compare));
+ }
+ if ds.stencil.is_enabled() {
+ let s = &ds.stencil;
+ let front = conv::map_stencil_face(&s.front, s.read_mask, s.write_mask);
+ let back = conv::map_stencil_face(&s.back, s.read_mask, s.write_mask);
+ vk_depth_stencil = vk_depth_stencil
+ .stencil_test_enable(true)
+ .front(front)
+ .back(back);
+ }
+
+ if ds.bias.is_enabled() {
+ vk_rasterization = vk_rasterization
+ .depth_bias_enable(true)
+ .depth_bias_constant_factor(ds.bias.constant as f32)
+ .depth_bias_clamp(ds.bias.clamp)
+ .depth_bias_slope_factor(ds.bias.slope_scale);
+ }
+ }
+
+ let vk_viewport = vk::PipelineViewportStateCreateInfo::builder()
+ .flags(vk::PipelineViewportStateCreateFlags::empty())
+ .scissor_count(1)
+ .viewport_count(1)
+ .build();
+
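+ // Vulkan takes the 64-bit sample mask as two 32-bit words.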
+ let vk_sample_mask = [
+ desc.multisample.mask as u32,
+ (desc.multisample.mask >> 32) as u32,
+ ];
+ let vk_multisample = vk::PipelineMultisampleStateCreateInfo::builder()
+ .rasterization_samples(vk::SampleCountFlags::from_raw(desc.multisample.count))
+ .alpha_to_coverage_enable(desc.multisample.alpha_to_coverage_enabled)
+ .sample_mask(&vk_sample_mask)
+ .build();
+
+ let mut vk_attachments = Vec::with_capacity(desc.color_targets.len());
+ for cat in desc.color_targets {
+ let (key, attachment) = if let Some(cat) = cat.as_ref() {
+ let mut vk_attachment = vk::PipelineColorBlendAttachmentState::builder()
+ .color_write_mask(vk::ColorComponentFlags::from_raw(cat.write_mask.bits()));
+ if let Some(ref blend) = cat.blend {
+ let (color_op, color_src, color_dst) = conv::map_blend_component(&blend.color);
+ let (alpha_op, alpha_src, alpha_dst) = conv::map_blend_component(&blend.alpha);
+ vk_attachment = vk_attachment
+ .blend_enable(true)
+ .color_blend_op(color_op)
+ .src_color_blend_factor(color_src)
+ .dst_color_blend_factor(color_dst)
+ .alpha_blend_op(alpha_op)
+ .src_alpha_blend_factor(alpha_src)
+ .dst_alpha_blend_factor(alpha_dst);
+ }
+
+ let vk_format = self.shared.private_caps.map_texture_format(cat.format);
+ (
+ Some(super::ColorAttachmentKey {
+ base: super::AttachmentKey::compatible(
+ vk_format,
+ vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL,
+ ),
+ resolve: None,
+ }),
+ vk_attachment.build(),
+ )
+ } else {
+ (None, vk::PipelineColorBlendAttachmentState::default())
+ };
+
+ compatible_rp_key.colors.push(key);
+ vk_attachments.push(attachment);
+ }
+
+ let vk_color_blend = vk::PipelineColorBlendStateCreateInfo::builder()
+ .attachments(&vk_attachments)
+ .build();
+
+ let vk_dynamic_state = vk::PipelineDynamicStateCreateInfo::builder()
+ .dynamic_states(&dynamic_states)
+ .build();
+
+ let raw_pass = self
+ .shared
+ .make_render_pass(compatible_rp_key)
+ .map_err(crate::DeviceError::from)?;
+
+ let vk_infos = [{
+ vk::GraphicsPipelineCreateInfo::builder()
+ .layout(desc.layout.raw)
+ .stages(&stages)
+ .vertex_input_state(&vk_vertex_input)
+ .input_assembly_state(&vk_input_assembly)
+ .rasterization_state(&vk_rasterization)
+ .viewport_state(&vk_viewport)
+ .multisample_state(&vk_multisample)
+ .depth_stencil_state(&vk_depth_stencil)
+ .color_blend_state(&vk_color_blend)
+ .dynamic_state(&vk_dynamic_state)
+ .render_pass(raw_pass)
+ .build()
+ }];
+
+ let mut raw_vec = {
+ profiling::scope!("vkCreateGraphicsPipelines");
+ unsafe {
+ self.shared
+ .raw
+ .create_graphics_pipelines(vk::PipelineCache::null(), &vk_infos, None)
+ .map_err(|(_, e)| crate::DeviceError::from(e))
+ }?
+ };
+
+ let raw = raw_vec.pop().unwrap();
+ if let Some(label) = desc.label {
+ unsafe {
+ self.shared
+ .set_object_name(vk::ObjectType::PIPELINE, raw, label)
+ };
+ }
+
+ if let Some(raw_module) = compiled_vs.temp_raw_module {
+ unsafe { self.shared.raw.destroy_shader_module(raw_module, None) };
+ }
+ if let Some(CompiledStage {
+ temp_raw_module: Some(raw_module),
+ ..
+ }) = compiled_fs
+ {
+ unsafe { self.shared.raw.destroy_shader_module(raw_module, None) };
+ }
+
+ Ok(super::RenderPipeline { raw })
+ }
+ unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
+ unsafe { self.shared.raw.destroy_pipeline(pipeline.raw, None) };
+ }
+
+ unsafe fn create_compute_pipeline(
+ &self,
+ desc: &crate::ComputePipelineDescriptor<super::Api>,
+ ) -> Result<super::ComputePipeline, crate::PipelineError> {
+ let compiled = self.compile_stage(
+ &desc.stage,
+ naga::ShaderStage::Compute,
+ &desc.layout.binding_arrays,
+ )?;
+
+ let vk_infos = [{
+ vk::ComputePipelineCreateInfo::builder()
+ .layout(desc.layout.raw)
+ .stage(compiled.create_info)
+ .build()
+ }];
+
+ let mut raw_vec = {
+ profiling::scope!("vkCreateComputePipelines");
+ unsafe {
+ self.shared
+ .raw
+ .create_compute_pipelines(vk::PipelineCache::null(), &vk_infos, None)
+ .map_err(|(_, e)| crate::DeviceError::from(e))
+ }?
+ };
+
+ let raw = raw_vec.pop().unwrap();
+ if let Some(label) = desc.label {
+ unsafe {
+ self.shared
+ .set_object_name(vk::ObjectType::PIPELINE, raw, label)
+ };
+ }
+
+ if let Some(raw_module) = compiled.temp_raw_module {
+ unsafe { self.shared.raw.destroy_shader_module(raw_module, None) };
+ }
+
+ Ok(super::ComputePipeline { raw })
+ }
+ unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
+ unsafe { self.shared.raw.destroy_pipeline(pipeline.raw, None) };
+ }
+
+ unsafe fn create_query_set(
+ &self,
+ desc: &wgt::QuerySetDescriptor<crate::Label>,
+ ) -> Result<super::QuerySet, crate::DeviceError> {
+ let (vk_type, pipeline_statistics) = match desc.ty {
+ wgt::QueryType::Occlusion => (
+ vk::QueryType::OCCLUSION,
+ vk::QueryPipelineStatisticFlags::empty(),
+ ),
+ wgt::QueryType::PipelineStatistics(statistics) => (
+ vk::QueryType::PIPELINE_STATISTICS,
+ conv::map_pipeline_statistics(statistics),
+ ),
+ wgt::QueryType::Timestamp => (
+ vk::QueryType::TIMESTAMP,
+ vk::QueryPipelineStatisticFlags::empty(),
+ ),
+ };
+
+ let vk_info = vk::QueryPoolCreateInfo::builder()
+ .query_type(vk_type)
+ .query_count(desc.count)
+ .pipeline_statistics(pipeline_statistics)
+ .build();
+
+ let raw = unsafe { self.shared.raw.create_query_pool(&vk_info, None) }?;
+ if let Some(label) = desc.label {
+ unsafe {
+ self.shared
+ .set_object_name(vk::ObjectType::QUERY_POOL, raw, label)
+ };
+ }
+
+ Ok(super::QuerySet { raw })
+ }
+ unsafe fn destroy_query_set(&self, set: super::QuerySet) {
+ unsafe { self.shared.raw.destroy_query_pool(set.raw, None) };
+ }
+
+ unsafe fn create_fence(&self) -> Result<super::Fence, crate::DeviceError> {
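+ // Prefer a timeline semaphore when supported; otherwise emulate the fence
+ // with a pool of binary VkFences tracked by signal value.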
+ Ok(if self.shared.private_caps.timeline_semaphores {
+ let mut sem_type_info =
+ vk::SemaphoreTypeCreateInfo::builder().semaphore_type(vk::SemaphoreType::TIMELINE);
+ let vk_info = vk::SemaphoreCreateInfo::builder().push_next(&mut sem_type_info);
+ let raw = unsafe { self.shared.raw.create_semaphore(&vk_info, None) }?;
+ super::Fence::TimelineSemaphore(raw)
+ } else {
+ super::Fence::FencePool {
+ last_completed: 0,
+ active: Vec::new(),
+ free: Vec::new(),
+ }
+ })
+ }
+ unsafe fn destroy_fence(&self, fence: super::Fence) {
+ match fence {
+ super::Fence::TimelineSemaphore(raw) => {
+ unsafe { self.shared.raw.destroy_semaphore(raw, None) };
+ }
+ super::Fence::FencePool {
+ active,
+ free,
+ last_completed: _,
+ } => {
+ for (_, raw) in active {
+ unsafe { self.shared.raw.destroy_fence(raw, None) };
+ }
+ for raw in free {
+ unsafe { self.shared.raw.destroy_fence(raw, None) };
+ }
+ }
+ }
+ }
+ unsafe fn get_fence_value(
+ &self,
+ fence: &super::Fence,
+ ) -> Result<crate::FenceValue, crate::DeviceError> {
+ fence.get_latest(
+ &self.shared.raw,
+ self.shared.extension_fns.timeline_semaphore.as_ref(),
+ )
+ }
+ unsafe fn wait(
+ &self,
+ fence: &super::Fence,
+ wait_value: crate::FenceValue,
+ timeout_ms: u32,
+ ) -> Result<bool, crate::DeviceError> {
+ let timeout_ns = timeout_ms as u64 * super::MILLIS_TO_NANOS;
+ match *fence {
+ super::Fence::TimelineSemaphore(raw) => {
+ let semaphores = [raw];
+ let values = [wait_value];
+ let vk_info = vk::SemaphoreWaitInfo::builder()
+ .semaphores(&semaphores)
+ .values(&values);
+ let result = match self.shared.extension_fns.timeline_semaphore {
+ Some(super::ExtensionFn::Extension(ref ext)) => unsafe {
+ ext.wait_semaphores(&vk_info, timeout_ns)
+ },
+ Some(super::ExtensionFn::Promoted) => unsafe {
+ self.shared.raw.wait_semaphores(&vk_info, timeout_ns)
+ },
+ None => unreachable!(),
+ };
+ match result {
+ Ok(()) => Ok(true),
+ Err(vk::Result::TIMEOUT) => Ok(false),
+ Err(other) => Err(other.into()),
+ }
+ }
+ super::Fence::FencePool {
+ last_completed,
+ ref active,
+ free: _,
+ } => {
+ if wait_value <= last_completed {
+ Ok(true)
+ } else {
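+ // Find an active fence whose signal value covers the requested one and wait on it.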
+ match active.iter().find(|&&(value, _)| value >= wait_value) {
+ Some(&(_, raw)) => {
+ match unsafe {
+ self.shared.raw.wait_for_fences(&[raw], true, timeout_ns)
+ } {
+ Ok(()) => Ok(true),
+ Err(vk::Result::TIMEOUT) => Ok(false),
+ Err(other) => Err(other.into()),
+ }
+ }
+ None => {
+ log::error!("No signals reached value {}", wait_value);
+ Err(crate::DeviceError::Lost)
+ }
+ }
+ }
+ }
+ }
+ }
+
+ unsafe fn start_capture(&self) -> bool {
+ #[cfg(feature = "renderdoc")]
+ {
+ // RenderDoc requires us to give it the pointer that vkInstance _points to_.
+ let raw_vk_instance =
+ ash::vk::Handle::as_raw(self.shared.instance.raw.handle()) as *mut *mut _;
+ let raw_vk_instance_dispatch_table = unsafe { *raw_vk_instance };
+ unsafe {
+ self.render_doc
+ .start_frame_capture(raw_vk_instance_dispatch_table, ptr::null_mut())
+ }
+ }
+ #[cfg(not(feature = "renderdoc"))]
+ false
+ }
+ unsafe fn stop_capture(&self) {
+ #[cfg(feature = "renderdoc")]
+ {
+ // RenderDoc requires us to give it the pointer that vkInstance _points to_.
+ let raw_vk_instance =
+ ash::vk::Handle::as_raw(self.shared.instance.raw.handle()) as *mut *mut _;
+ let raw_vk_instance_dispatch_table = unsafe { *raw_vk_instance };
+
+ unsafe {
+ self.render_doc
+ .end_frame_capture(raw_vk_instance_dispatch_table, ptr::null_mut())
+ }
+ }
+ }
+}
+
+impl From<gpu_alloc::AllocationError> for crate::DeviceError {
+ fn from(error: gpu_alloc::AllocationError) -> Self {
+ use gpu_alloc::AllocationError as Ae;
+ match error {
+ Ae::OutOfDeviceMemory | Ae::OutOfHostMemory => Self::OutOfMemory,
+ _ => {
+ log::error!("memory allocation: {:?}", error);
+ Self::Lost
+ }
+ }
+ }
+}
+impl From<gpu_alloc::MapError> for crate::DeviceError {
+ fn from(error: gpu_alloc::MapError) -> Self {
+ use gpu_alloc::MapError as Me;
+ match error {
+ Me::OutOfDeviceMemory | Me::OutOfHostMemory => Self::OutOfMemory,
+ _ => {
+ log::error!("memory mapping: {:?}", error);
+ Self::Lost
+ }
+ }
+ }
+}
+impl From<gpu_descriptor::AllocationError> for crate::DeviceError {
+ fn from(error: gpu_descriptor::AllocationError) -> Self {
+ log::error!("descriptor allocation: {:?}", error);
+ Self::OutOfMemory
+ }
+}
diff --git a/third_party/rust/wgpu-hal/src/vulkan/instance.rs b/third_party/rust/wgpu-hal/src/vulkan/instance.rs
new file mode 100644
index 0000000000..101f303c16
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/vulkan/instance.rs
@@ -0,0 +1,832 @@
+use std::{
+ ffi::{c_void, CStr, CString},
+ slice,
+ sync::Arc,
+ thread,
+};
+
+use ash::{
+ extensions::{ext, khr},
+ vk,
+};
+
+unsafe extern "system" fn debug_utils_messenger_callback(
+ message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,
+ message_type: vk::DebugUtilsMessageTypeFlagsEXT,
+ callback_data_ptr: *const vk::DebugUtilsMessengerCallbackDataEXT,
+ _user_data: *mut c_void,
+) -> vk::Bool32 {
+ const VUID_VKSWAPCHAINCREATEINFOKHR_IMAGEEXTENT_01274: i32 = 0x7cd0911d;
+ use std::borrow::Cow;
+
+ if thread::panicking() {
+ return vk::FALSE;
+ }
+
+ let level = match message_severity {
+ vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => log::Level::Debug,
+ vk::DebugUtilsMessageSeverityFlagsEXT::INFO => log::Level::Info,
+ vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => log::Level::Warn,
+ vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => log::Level::Error,
+ _ => log::Level::Warn,
+ };
+
+ let cd = unsafe { &*callback_data_ptr };
+
+ let message_id_name = if cd.p_message_id_name.is_null() {
+ Cow::from("")
+ } else {
+ unsafe { CStr::from_ptr(cd.p_message_id_name) }.to_string_lossy()
+ };
+ let message = if cd.p_message.is_null() {
+ Cow::from("")
+ } else {
+ unsafe { CStr::from_ptr(cd.p_message) }.to_string_lossy()
+ };
+
+ // Silence Vulkan Validation error "VUID-VkSwapchainCreateInfoKHR-imageExtent-01274"
+ // - it's a false positive due to the inherent racy-ness of surface resizing
+ if cd.message_id_number == VUID_VKSWAPCHAINCREATEINFOKHR_IMAGEEXTENT_01274 {
+ return vk::FALSE;
+ }
+
+ let _ = std::panic::catch_unwind(|| {
+ log::log!(
+ level,
+ "{:?} [{} (0x{:x})]\n\t{}",
+ message_type,
+ message_id_name,
+ cd.message_id_number,
+ message,
+ );
+ });
+
+ if cd.queue_label_count != 0 {
+ let labels =
+ unsafe { slice::from_raw_parts(cd.p_queue_labels, cd.queue_label_count as usize) };
+ let names = labels
+ .iter()
+ .flat_map(|dul_obj| {
+ unsafe { dul_obj.p_label_name.as_ref() }
+ .map(|lbl| unsafe { CStr::from_ptr(lbl) }.to_string_lossy())
+ })
+ .collect::<Vec<_>>();
+
+ let _ = std::panic::catch_unwind(|| {
+ log::log!(level, "\tqueues: {}", names.join(", "));
+ });
+ }
+
+ if cd.cmd_buf_label_count != 0 {
+ let labels =
+ unsafe { slice::from_raw_parts(cd.p_cmd_buf_labels, cd.cmd_buf_label_count as usize) };
+ let names = labels
+ .iter()
+ .flat_map(|dul_obj| {
+ unsafe { dul_obj.p_label_name.as_ref() }
+ .map(|lbl| unsafe { CStr::from_ptr(lbl) }.to_string_lossy())
+ })
+ .collect::<Vec<_>>();
+
+ let _ = std::panic::catch_unwind(|| {
+ log::log!(level, "\tcommand buffers: {}", names.join(", "));
+ });
+ }
+
+ if cd.object_count != 0 {
+ let labels = unsafe { slice::from_raw_parts(cd.p_objects, cd.object_count as usize) };
+ //TODO: use color fields of `vk::DebugUtilsLabelExt`?
+ let names = labels
+ .iter()
+ .map(|obj_info| {
+ let name = unsafe { obj_info.p_object_name.as_ref() }
+ .map(|name| unsafe { CStr::from_ptr(name) }.to_string_lossy())
+ .unwrap_or(Cow::Borrowed("?"));
+
+ format!(
+ "(type: {:?}, hndl: 0x{:x}, name: {})",
+ obj_info.object_type, obj_info.object_handle, name
+ )
+ })
+ .collect::<Vec<_>>();
+ let _ = std::panic::catch_unwind(|| {
+ log::log!(level, "\tobjects: {}", names.join(", "));
+ });
+ }
+
+ if cfg!(debug_assertions) && level == log::Level::Error {
+ // Set canary and continue
+ crate::VALIDATION_CANARY.set();
+ }
+
+ vk::FALSE
+}
+
+impl super::Swapchain {
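+ /// Waits for the device to go idle and destroys the per-swapchain fence,
+ /// returning `self` so the caller can reuse or destroy the raw swapchain handle.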
+ unsafe fn release_resources(self, device: &ash::Device) -> Self {
+ profiling::scope!("Swapchain::release_resources");
+ {
+ profiling::scope!("vkDeviceWaitIdle");
+ let _ = unsafe { device.device_wait_idle() };
+ };
+ unsafe { device.destroy_fence(self.fence, None) };
+ self
+ }
+}
+
+impl super::InstanceShared {
+ pub fn entry(&self) -> &ash::Entry {
+ &self.entry
+ }
+
+ pub fn raw_instance(&self) -> &ash::Instance {
+ &self.raw
+ }
+
+ pub fn driver_api_version(&self) -> u32 {
+ self.driver_api_version
+ }
+
+ pub fn extensions(&self) -> &[&'static CStr] {
+ &self.extensions[..]
+ }
+}
+
+impl super::Instance {
+ pub fn shared_instance(&self) -> &super::InstanceShared {
+ &self.shared
+ }
+
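+ /// Returns the instance extensions wgpu-hal would like to enable, filtered down
+ /// to those actually advertised by the driver.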
+ pub fn required_extensions(
+ entry: &ash::Entry,
+ _driver_api_version: u32,
+ flags: crate::InstanceFlags,
+ ) -> Result<Vec<&'static CStr>, crate::InstanceError> {
+ let instance_extensions = entry
+ .enumerate_instance_extension_properties(None)
+ .map_err(|e| {
+ log::info!("enumerate_instance_extension_properties: {:?}", e);
+ crate::InstanceError
+ })?;
+
+ // Check our extensions against the available extensions
+ let mut extensions: Vec<&'static CStr> = Vec::new();
+
+ // VK_KHR_surface
+ extensions.push(khr::Surface::name());
+
+ // Platform-specific WSI extensions
+ if cfg!(all(
+ unix,
+ not(target_os = "android"),
+ not(target_os = "macos")
+ )) {
+ // VK_KHR_xlib_surface
+ extensions.push(khr::XlibSurface::name());
+ // VK_KHR_xcb_surface
+ extensions.push(khr::XcbSurface::name());
+ // VK_KHR_wayland_surface
+ extensions.push(khr::WaylandSurface::name());
+ }
+ if cfg!(target_os = "android") {
+ // VK_KHR_android_surface
+ extensions.push(khr::AndroidSurface::name());
+ }
+ if cfg!(target_os = "windows") {
+ // VK_KHR_win32_surface
+ extensions.push(khr::Win32Surface::name());
+ }
+ if cfg!(target_os = "macos") {
+ // VK_EXT_metal_surface
+ extensions.push(ext::MetalSurface::name());
+ }
+
+ if flags.contains(crate::InstanceFlags::DEBUG) {
+ // VK_EXT_debug_utils
+ extensions.push(ext::DebugUtils::name());
+ }
+
+ // VK_EXT_swapchain_colorspace
+ // Provides wide color gamut support
+ extensions.push(vk::ExtSwapchainColorspaceFn::name());
+
+ // VK_KHR_get_physical_device_properties2
+ // Even though the extension was promoted to Vulkan 1.1, we still require the extension
+ // so that we don't have to conditionally use the functions provided by the 1.1 instance
+ extensions.push(vk::KhrGetPhysicalDeviceProperties2Fn::name());
+
+ // Only keep available extensions.
+ extensions.retain(|&ext| {
+ if instance_extensions.iter().any(|inst_ext| {
+ crate::auxil::cstr_from_bytes_until_nul(&inst_ext.extension_name) == Some(ext)
+ }) {
+ true
+ } else {
+ log::info!("Unable to find extension: {}", ext.to_string_lossy());
+ false
+ }
+ });
+ Ok(extensions)
+ }
+
+ /// # Safety
+ ///
+ /// - `raw_instance` must be created from `entry`
+ /// - `raw_instance` must be created respecting `driver_api_version`, `extensions` and `flags`
+ /// - `extensions` must be a superset of `required_extensions()` and must be created from the
+ /// same entry, driver_api_version and flags.
+ /// - `android_sdk_version` is ignored and can be `0` for all platforms besides Android
+ #[allow(clippy::too_many_arguments)]
+ pub unsafe fn from_raw(
+ entry: ash::Entry,
+ raw_instance: ash::Instance,
+ driver_api_version: u32,
+ android_sdk_version: u32,
+ extensions: Vec<&'static CStr>,
+ flags: crate::InstanceFlags,
+ has_nv_optimus: bool,
+ drop_guard: Option<crate::DropGuard>,
+ ) -> Result<Self, crate::InstanceError> {
+ log::info!("Instance version: 0x{:x}", driver_api_version);
+
+ let debug_utils = if extensions.contains(&ext::DebugUtils::name()) {
+ log::info!("Enabling debug utils");
+ let extension = ext::DebugUtils::new(&entry, &raw_instance);
+ // Always include ERROR, because Vulkan requires a non-empty severity mask
+ let mut severity = vk::DebugUtilsMessageSeverityFlagsEXT::ERROR;
+ if log::max_level() >= log::LevelFilter::Debug {
+ severity |= vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE;
+ }
+ if log::max_level() >= log::LevelFilter::Info {
+ severity |= vk::DebugUtilsMessageSeverityFlagsEXT::INFO;
+ }
+ if log::max_level() >= log::LevelFilter::Warn {
+ severity |= vk::DebugUtilsMessageSeverityFlagsEXT::WARNING;
+ }
+ let vk_info = vk::DebugUtilsMessengerCreateInfoEXT::builder()
+ .flags(vk::DebugUtilsMessengerCreateFlagsEXT::empty())
+ .message_severity(severity)
+ .message_type(
+ vk::DebugUtilsMessageTypeFlagsEXT::GENERAL
+ | vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION
+ | vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE,
+ )
+ .pfn_user_callback(Some(debug_utils_messenger_callback));
+ let messenger =
+ unsafe { extension.create_debug_utils_messenger(&vk_info, None) }.unwrap();
+ Some(super::DebugUtils {
+ extension,
+ messenger,
+ })
+ } else {
+ None
+ };
+
+ let get_physical_device_properties =
+ if extensions.contains(&khr::GetPhysicalDeviceProperties2::name()) {
+ log::info!("Enabling device properties2");
+ Some(khr::GetPhysicalDeviceProperties2::new(
+ &entry,
+ &raw_instance,
+ ))
+ } else {
+ None
+ };
+
+ Ok(Self {
+ shared: Arc::new(super::InstanceShared {
+ raw: raw_instance,
+ extensions,
+ drop_guard,
+ flags,
+ debug_utils,
+ get_physical_device_properties,
+ entry,
+ has_nv_optimus,
+ driver_api_version,
+ android_sdk_version,
+ }),
+ })
+ }
+
+ #[allow(dead_code)]
+ fn create_surface_from_xlib(
+ &self,
+ dpy: *mut vk::Display,
+ window: vk::Window,
+ ) -> Result<super::Surface, crate::InstanceError> {
+ if !self.shared.extensions.contains(&khr::XlibSurface::name()) {
+ log::warn!("Vulkan driver does not support VK_KHR_xlib_surface");
+ return Err(crate::InstanceError);
+ }
+
+ let surface = {
+ let xlib_loader = khr::XlibSurface::new(&self.shared.entry, &self.shared.raw);
+ let info = vk::XlibSurfaceCreateInfoKHR::builder()
+ .flags(vk::XlibSurfaceCreateFlagsKHR::empty())
+ .window(window)
+ .dpy(dpy);
+
+ unsafe { xlib_loader.create_xlib_surface(&info, None) }
+ .expect("XlibSurface::create_xlib_surface() failed")
+ };
+
+ Ok(self.create_surface_from_vk_surface_khr(surface))
+ }
+
+ #[allow(dead_code)]
+ fn create_surface_from_xcb(
+ &self,
+ connection: *mut vk::xcb_connection_t,
+ window: vk::xcb_window_t,
+ ) -> Result<super::Surface, crate::InstanceError> {
+ if !self.shared.extensions.contains(&khr::XcbSurface::name()) {
+ log::warn!("Vulkan driver does not support VK_KHR_xcb_surface");
+ return Err(crate::InstanceError);
+ }
+
+ let surface = {
+ let xcb_loader = khr::XcbSurface::new(&self.shared.entry, &self.shared.raw);
+ let info = vk::XcbSurfaceCreateInfoKHR::builder()
+ .flags(vk::XcbSurfaceCreateFlagsKHR::empty())
+ .window(window)
+ .connection(connection);
+
+ unsafe { xcb_loader.create_xcb_surface(&info, None) }
+ .expect("XcbSurface::create_xcb_surface() failed")
+ };
+
+ Ok(self.create_surface_from_vk_surface_khr(surface))
+ }
+
+ #[allow(dead_code)]
+ fn create_surface_from_wayland(
+ &self,
+ display: *mut c_void,
+ surface: *mut c_void,
+ ) -> Result<super::Surface, crate::InstanceError> {
+ if !self
+ .shared
+ .extensions
+ .contains(&khr::WaylandSurface::name())
+ {
+ log::debug!("Vulkan driver does not support VK_KHR_wayland_surface");
+ return Err(crate::InstanceError);
+ }
+
+ let surface = {
+ let w_loader = khr::WaylandSurface::new(&self.shared.entry, &self.shared.raw);
+ let info = vk::WaylandSurfaceCreateInfoKHR::builder()
+ .flags(vk::WaylandSurfaceCreateFlagsKHR::empty())
+ .display(display)
+ .surface(surface);
+
+ unsafe { w_loader.create_wayland_surface(&info, None) }.expect("WaylandSurface failed")
+ };
+
+ Ok(self.create_surface_from_vk_surface_khr(surface))
+ }
+
+ #[allow(dead_code)]
+ fn create_surface_android(
+ &self,
+ window: *const c_void,
+ ) -> Result<super::Surface, crate::InstanceError> {
+ if !self
+ .shared
+ .extensions
+ .contains(&khr::AndroidSurface::name())
+ {
+ log::warn!("Vulkan driver does not support VK_KHR_android_surface");
+ return Err(crate::InstanceError);
+ }
+
+ let surface = {
+ let a_loader = khr::AndroidSurface::new(&self.shared.entry, &self.shared.raw);
+ let info = vk::AndroidSurfaceCreateInfoKHR::builder()
+ .flags(vk::AndroidSurfaceCreateFlagsKHR::empty())
+ .window(window as *mut _);
+
+ unsafe { a_loader.create_android_surface(&info, None) }.expect("AndroidSurface failed")
+ };
+
+ Ok(self.create_surface_from_vk_surface_khr(surface))
+ }
+
+ #[allow(dead_code)]
+ fn create_surface_from_hwnd(
+ &self,
+ hinstance: *mut c_void,
+ hwnd: *mut c_void,
+ ) -> Result<super::Surface, crate::InstanceError> {
+ if !self.shared.extensions.contains(&khr::Win32Surface::name()) {
+ log::debug!("Vulkan driver does not support VK_KHR_win32_surface");
+ return Err(crate::InstanceError);
+ }
+
+ let surface = {
+ let info = vk::Win32SurfaceCreateInfoKHR::builder()
+ .flags(vk::Win32SurfaceCreateFlagsKHR::empty())
+ .hinstance(hinstance)
+ .hwnd(hwnd);
+ let win32_loader = khr::Win32Surface::new(&self.shared.entry, &self.shared.raw);
+ unsafe {
+ win32_loader
+ .create_win32_surface(&info, None)
+ .expect("Unable to create Win32 surface")
+ }
+ };
+
+ Ok(self.create_surface_from_vk_surface_khr(surface))
+ }
+
+ #[cfg(any(target_os = "macos", target_os = "ios"))]
+ fn create_surface_from_view(
+ &self,
+ view: *mut c_void,
+ ) -> Result<super::Surface, crate::InstanceError> {
+ if !self.shared.extensions.contains(&ext::MetalSurface::name()) {
+ log::warn!("Vulkan driver does not support VK_EXT_metal_surface");
+ return Err(crate::InstanceError);
+ }
+
+ let layer = unsafe {
+ crate::metal::Surface::get_metal_layer(view as *mut objc::runtime::Object, None)
+ };
+
+ let surface = {
+ let metal_loader = ext::MetalSurface::new(&self.shared.entry, &self.shared.raw);
+ let vk_info = vk::MetalSurfaceCreateInfoEXT::builder()
+ .flags(vk::MetalSurfaceCreateFlagsEXT::empty())
+ .layer(layer as *mut _)
+ .build();
+
+ unsafe { metal_loader.create_metal_surface(&vk_info, None).unwrap() }
+ };
+
+ Ok(self.create_surface_from_vk_surface_khr(surface))
+ }
+
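+ /// Wraps an already-created `vk::SurfaceKHR` into a `super::Surface`, loading
+ /// the `VK_KHR_surface` function table needed to operate on it.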
+ fn create_surface_from_vk_surface_khr(&self, surface: vk::SurfaceKHR) -> super::Surface {
+ let functor = khr::Surface::new(&self.shared.entry, &self.shared.raw);
+ super::Surface {
+ raw: surface,
+ functor,
+ instance: Arc::clone(&self.shared),
+ swapchain: None,
+ }
+ }
+}
+
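+// The debug messenger is destroyed first; the instance itself is only destroyed
+// when wgpu-hal owns it (i.e. a drop guard was supplied).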
+impl Drop for super::InstanceShared {
+ fn drop(&mut self) {
+ unsafe {
+ if let Some(du) = self.debug_utils.take() {
+ du.extension
+ .destroy_debug_utils_messenger(du.messenger, None);
+ }
+ if let Some(_drop_guard) = self.drop_guard.take() {
+ self.raw.destroy_instance(None);
+ }
+ }
+ }
+}
+
+impl crate::Instance<super::Api> for super::Instance {
+ unsafe fn init(desc: &crate::InstanceDescriptor) -> Result<Self, crate::InstanceError> {
+ use crate::auxil::cstr_from_bytes_until_nul;
+
+ let entry = match unsafe { ash::Entry::load() } {
+ Ok(entry) => entry,
+ Err(err) => {
+ log::info!("Missing Vulkan entry points: {:?}", err);
+ return Err(crate::InstanceError);
+ }
+ };
+ let driver_api_version = match entry.try_enumerate_instance_version() {
+ // Vulkan 1.1+
+ Ok(Some(version)) => version,
+ Ok(None) => vk::API_VERSION_1_0,
+ Err(err) => {
+ log::warn!("try_enumerate_instance_version: {:?}", err);
+ return Err(crate::InstanceError);
+ }
+ };
+
+ let app_name = CString::new(desc.name).unwrap();
+ let app_info = vk::ApplicationInfo::builder()
+ .application_name(app_name.as_c_str())
+ .application_version(1)
+ .engine_name(CStr::from_bytes_with_nul(b"wgpu-hal\0").unwrap())
+ .engine_version(2)
+ .api_version(
+ // Vulkan 1.0 doesn't like anything but 1.0 passed in here...
+ if driver_api_version < vk::API_VERSION_1_1 {
+ vk::API_VERSION_1_0
+ } else {
+ // This is the max Vulkan API version supported by `wgpu-hal`.
+ //
+ // If we want to increment this, there are some things that must be done first:
+ // - Audit the behavioral differences between the previous and new API versions.
+ // - Audit all extensions used by this backend:
+ // - If any were promoted in the new API version and the behavior has changed, we must handle the new behavior in addition to the old behavior.
+ // - If any were obsoleted in the new API version, we must implement a fallback for the new API version
+ // - If any are non-KHR-vendored, we must ensure the new behavior is still correct (since backwards-compatibility is not guaranteed).
+ vk::HEADER_VERSION_COMPLETE
+ },
+ );
+
+ let extensions = Self::required_extensions(&entry, driver_api_version, desc.flags)?;
+
+ let instance_layers = entry.enumerate_instance_layer_properties().map_err(|e| {
+ log::info!("enumerate_instance_layer_properties: {:?}", e);
+ crate::InstanceError
+ })?;
+
+ let nv_optimus_layer = CStr::from_bytes_with_nul(b"VK_LAYER_NV_optimus\0").unwrap();
+ let has_nv_optimus = instance_layers.iter().any(|inst_layer| {
+ cstr_from_bytes_until_nul(&inst_layer.layer_name) == Some(nv_optimus_layer)
+ });
+
+ // Check requested layers against the available layers
+ let layers = {
+ let mut layers: Vec<&'static CStr> = Vec::new();
+ if desc.flags.contains(crate::InstanceFlags::VALIDATION) {
+ layers.push(CStr::from_bytes_with_nul(b"VK_LAYER_KHRONOS_validation\0").unwrap());
+ }
+
+ // Only keep available layers.
+ layers.retain(|&layer| {
+ if instance_layers.iter().any(|inst_layer| {
+ cstr_from_bytes_until_nul(&inst_layer.layer_name) == Some(layer)
+ }) {
+ true
+ } else {
+ log::warn!("Unable to find layer: {}", layer.to_string_lossy());
+ false
+ }
+ });
+ layers
+ };
+
+ #[cfg(target_os = "android")]
+ let android_sdk_version = {
+ let properties = android_system_properties::AndroidSystemProperties::new();
+ // See: https://developer.android.com/reference/android/os/Build.VERSION_CODES
+ if let Some(val) = properties.get("ro.build.version.sdk") {
+ match val.parse::<u32>() {
+ Ok(sdk_ver) => sdk_ver,
+ Err(err) => {
+ log::error!(
+ "Couldn't parse Android's ro.build.version.sdk system property ({val}): {err}"
+ );
+ 0
+ }
+ }
+ } else {
+ log::error!("Couldn't read Android's ro.build.version.sdk system property");
+ 0
+ }
+ };
+ #[cfg(not(target_os = "android"))]
+ let android_sdk_version = 0;
+
+ let vk_instance = {
+ let str_pointers = layers
+ .iter()
+ .chain(extensions.iter())
+ .map(|&s| {
+ // Safe because `layers` and `extensions` entries have static lifetime.
+ s.as_ptr()
+ })
+ .collect::<Vec<_>>();
+
+ let create_info = vk::InstanceCreateInfo::builder()
+ .flags(vk::InstanceCreateFlags::empty())
+ .application_info(&app_info)
+ .enabled_layer_names(&str_pointers[..layers.len()])
+ .enabled_extension_names(&str_pointers[layers.len()..]);
+
+ unsafe { entry.create_instance(&create_info, None) }.map_err(|e| {
+ log::warn!("create_instance: {:?}", e);
+ crate::InstanceError
+ })?
+ };
+
+ unsafe {
+ Self::from_raw(
+ entry,
+ vk_instance,
+ driver_api_version,
+ android_sdk_version,
+ extensions,
+ desc.flags,
+ has_nv_optimus,
+ Some(Box::new(())), // `Some` signals that wgpu-hal is in charge of destroying vk_instance
+ )
+ }
+ }
+
+ unsafe fn create_surface(
+ &self,
+ display_handle: raw_window_handle::RawDisplayHandle,
+ window_handle: raw_window_handle::RawWindowHandle,
+ ) -> Result<super::Surface, crate::InstanceError> {
+ use raw_window_handle::{RawDisplayHandle as Rdh, RawWindowHandle as Rwh};
+
+ match (window_handle, display_handle) {
+ (Rwh::Wayland(handle), Rdh::Wayland(display)) => {
+ self.create_surface_from_wayland(display.display, handle.surface)
+ }
+ (Rwh::Xlib(handle), Rdh::Xlib(display)) => {
+ self.create_surface_from_xlib(display.display as *mut _, handle.window)
+ }
+ (Rwh::Xcb(handle), Rdh::Xcb(display)) => {
+ self.create_surface_from_xcb(display.connection, handle.window)
+ }
+ (Rwh::AndroidNdk(handle), _) => self.create_surface_android(handle.a_native_window),
+ #[cfg(windows)]
+ (Rwh::Win32(handle), _) => {
+ use winapi::um::libloaderapi::GetModuleHandleW;
+
+ let hinstance = unsafe { GetModuleHandleW(std::ptr::null()) };
+ self.create_surface_from_hwnd(hinstance as *mut _, handle.hwnd)
+ }
+ #[cfg(target_os = "macos")]
+ (Rwh::AppKit(handle), _)
+ if self.shared.extensions.contains(&ext::MetalSurface::name()) =>
+ {
+ self.create_surface_from_view(handle.ns_view)
+ }
+ #[cfg(target_os = "ios")]
+ (Rwh::UiKit(handle), _)
+ if self.shared.extensions.contains(&ext::MetalSurface::name()) =>
+ {
+ self.create_surface_from_view(handle.ui_view)
+ }
+ (_, _) => Err(crate::InstanceError),
+ }
+ }
+
+ unsafe fn destroy_surface(&self, surface: super::Surface) {
+ unsafe { surface.functor.destroy_surface(surface.raw, None) };
+ }
+
+ unsafe fn enumerate_adapters(&self) -> Vec<crate::ExposedAdapter<super::Api>> {
+ use crate::auxil::db;
+
+ let raw_devices = match unsafe { self.shared.raw.enumerate_physical_devices() } {
+ Ok(devices) => devices,
+ Err(err) => {
+ log::error!("enumerate_adapters: {}", err);
+ Vec::new()
+ }
+ };
+
+ let mut exposed_adapters = raw_devices
+ .into_iter()
+ .flat_map(|device| self.expose_adapter(device))
+ .collect::<Vec<_>>();
+
+ // Detect if it's an Intel + NVidia configuration with Optimus
+ let has_nvidia_dgpu = exposed_adapters.iter().any(|exposed| {
+ exposed.info.device_type == wgt::DeviceType::DiscreteGpu
+ && exposed.info.vendor == db::nvidia::VENDOR
+ });
+ if cfg!(target_os = "linux") && has_nvidia_dgpu && self.shared.has_nv_optimus {
+ for exposed in exposed_adapters.iter_mut() {
+ if exposed.info.device_type == wgt::DeviceType::IntegratedGpu
+ && exposed.info.vendor == db::intel::VENDOR
+ {
+ // See https://gitlab.freedesktop.org/mesa/mesa/-/issues/4688
+ log::warn!(
+ "Disabling presentation on '{}' (id {:?}) because of NV Optimus (on Linux)",
+ exposed.info.name,
+ exposed.adapter.raw
+ );
+ exposed.adapter.private_caps.can_present = false;
+ }
+ }
+ }
+
+ exposed_adapters
+ }
+}
+
+impl crate::Surface<super::Api> for super::Surface {
+ unsafe fn configure(
+ &mut self,
+ device: &super::Device,
+ config: &crate::SurfaceConfiguration,
+ ) -> Result<(), crate::SurfaceError> {
+ let old = self
+ .swapchain
+ .take()
+ .map(|sc| unsafe { sc.release_resources(&device.shared.raw) });
+
+ let swapchain = unsafe { device.create_swapchain(self, config, old)? };
+ self.swapchain = Some(swapchain);
+
+ Ok(())
+ }
+
+ unsafe fn unconfigure(&mut self, device: &super::Device) {
+ if let Some(sc) = self.swapchain.take() {
+ let swapchain = unsafe { sc.release_resources(&device.shared.raw) };
+ unsafe { swapchain.functor.destroy_swapchain(swapchain.raw, None) };
+ }
+ }
+
+ unsafe fn acquire_texture(
+ &mut self,
+ timeout: Option<std::time::Duration>,
+ ) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> {
+ let sc = self.swapchain.as_mut().unwrap();
+
+ let mut timeout_ns = match timeout {
+ Some(duration) => duration.as_nanos() as u64,
+ None => u64::MAX,
+ };
+
+ // AcquireNextImageKHR on Android (prior to Android 11) doesn't support timeouts
+ // and will also log verbose warnings if trying to use a timeout.
+ //
+ // Android 10 implementation for reference:
+ // https://android.googlesource.com/platform/frameworks/native/+/refs/tags/android-mainline-10.0.0_r13/vulkan/libvulkan/swapchain.cpp#1426
+ // Android 11 implementation for reference:
+ // https://android.googlesource.com/platform/frameworks/native/+/refs/tags/android-mainline-11.0.0_r45/vulkan/libvulkan/swapchain.cpp#1438
+ //
+ // Android 11 corresponds to an SDK_INT/ro.build.version.sdk of 30
+ if cfg!(target_os = "android") && self.instance.android_sdk_version < 30 {
+ timeout_ns = u64::MAX;
+ }
+
+ // will block if no image is available
+ let (index, suboptimal) = match unsafe {
+ sc.functor
+ .acquire_next_image(sc.raw, timeout_ns, vk::Semaphore::null(), sc.fence)
+ } {
+ // We treat `VK_SUBOPTIMAL_KHR` as `VK_SUCCESS` on Android.
+ // See the comment in `Queue::present`.
+ #[cfg(target_os = "android")]
+ Ok((index, _)) => (index, false),
+ #[cfg(not(target_os = "android"))]
+ Ok(pair) => pair,
+ Err(error) => {
+ return match error {
+ vk::Result::TIMEOUT => Ok(None),
+ vk::Result::NOT_READY | vk::Result::ERROR_OUT_OF_DATE_KHR => {
+ Err(crate::SurfaceError::Outdated)
+ }
+ vk::Result::ERROR_SURFACE_LOST_KHR => Err(crate::SurfaceError::Lost),
+ other => Err(crate::DeviceError::from(other).into()),
+ }
+ }
+ };
+
+ // special case for Intel Vulkan returning bizarre values (ugh)
+ if sc.device.vendor_id == crate::auxil::db::intel::VENDOR && index > 0x100 {
+ return Err(crate::SurfaceError::Outdated);
+ }
+
+ let fences = &[sc.fence];
+
+ unsafe { sc.device.raw.wait_for_fences(fences, true, !0) }
+ .map_err(crate::DeviceError::from)?;
+ unsafe { sc.device.raw.reset_fences(fences) }.map_err(crate::DeviceError::from)?;
+
+ // https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkRenderPassBeginInfo.html#VUID-VkRenderPassBeginInfo-framebuffer-03209
+ let raw_flags = if sc
+ .raw_flags
+ .contains(vk::SwapchainCreateFlagsKHR::MUTABLE_FORMAT)
+ {
+ vk::ImageCreateFlags::MUTABLE_FORMAT | vk::ImageCreateFlags::EXTENDED_USAGE
+ } else {
+ vk::ImageCreateFlags::empty()
+ };
+
+ let texture = super::SurfaceTexture {
+ index,
+ texture: super::Texture {
+ raw: sc.images[index as usize],
+ drop_guard: None,
+ block: None,
+ usage: sc.config.usage,
+ format: sc.config.format,
+ raw_flags,
+ copy_size: crate::CopyExtent {
+ width: sc.config.extent.width,
+ height: sc.config.extent.height,
+ depth: 1,
+ },
+ view_formats: sc.view_formats.clone(),
+ },
+ };
+ Ok(Some(crate::AcquiredSurfaceTexture {
+ texture,
+ suboptimal,
+ }))
+ }
+
+ unsafe fn discard_texture(&mut self, _texture: super::SurfaceTexture) {}
+}
diff --git a/third_party/rust/wgpu-hal/src/vulkan/mod.rs b/third_party/rust/wgpu-hal/src/vulkan/mod.rs
new file mode 100644
index 0000000000..27200dc4e0
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/vulkan/mod.rs
@@ -0,0 +1,626 @@
+/*!
+# Vulkan API internals.
+
+## Stack memory
+
+Ash expects slices, which we don't generally have available.
+We cope with this requirement through a combination of the following approaches:
+ - temporarily allocating `Vec` on the heap, where the overhead is permitted
+ - growing temporary local storage
+ - using `inplace_it` on iterators
+
+## Framebuffers and Render passes
+
+Render passes are cached on the device and kept forever.
+
+Framebuffers are also cached on the device, but they are removed when
+any of the image views they contain is removed.
+If Vulkan supports image-less framebuffers,
+then the actual views are excluded from the framebuffer key.
+
+## Fences
+
+If timeline semaphores are available, they are used 1:1 with wgpu-hal fences.
+Otherwise, we manage a pool of `VkFence` objects behind each `hal::Fence`.
+
+!*/
+
+mod adapter;
+mod command;
+mod conv;
+mod device;
+mod instance;
+
+use std::{borrow::Borrow, ffi::CStr, fmt, num::NonZeroU32, sync::Arc};
+
+use arrayvec::ArrayVec;
+use ash::{
+ extensions::{ext, khr},
+ vk,
+};
+use parking_lot::Mutex;
+
+const MILLIS_TO_NANOS: u64 = 1_000_000;
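+// Color attachments plus their resolve targets, plus one depth/stencil attachment.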
+const MAX_TOTAL_ATTACHMENTS: usize = crate::MAX_COLOR_ATTACHMENTS * 2 + 1;
+
+#[derive(Clone)]
+pub struct Api;
+
+impl crate::Api for Api {
+ type Instance = Instance;
+ type Surface = Surface;
+ type Adapter = Adapter;
+ type Device = Device;
+
+ type Queue = Queue;
+ type CommandEncoder = CommandEncoder;
+ type CommandBuffer = CommandBuffer;
+
+ type Buffer = Buffer;
+ type Texture = Texture;
+ type SurfaceTexture = SurfaceTexture;
+ type TextureView = TextureView;
+ type Sampler = Sampler;
+ type QuerySet = QuerySet;
+ type Fence = Fence;
+
+ type BindGroupLayout = BindGroupLayout;
+ type BindGroup = BindGroup;
+ type PipelineLayout = PipelineLayout;
+ type ShaderModule = ShaderModule;
+ type RenderPipeline = RenderPipeline;
+ type ComputePipeline = ComputePipeline;
+}
+
+struct DebugUtils {
+ extension: ext::DebugUtils,
+ messenger: vk::DebugUtilsMessengerEXT,
+}
+
+pub struct InstanceShared {
+ raw: ash::Instance,
+ extensions: Vec<&'static CStr>,
+ drop_guard: Option<crate::DropGuard>,
+ flags: crate::InstanceFlags,
+ debug_utils: Option<DebugUtils>,
+ get_physical_device_properties: Option<khr::GetPhysicalDeviceProperties2>,
+ entry: ash::Entry,
+ has_nv_optimus: bool,
+ android_sdk_version: u32,
+ driver_api_version: u32,
+}
+
+pub struct Instance {
+ shared: Arc<InstanceShared>,
+}
+
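+/// Per-surface swapchain state. `fence` is signaled by `vkAcquireNextImageKHR`
+/// and waited on before the acquired texture is handed back to the caller.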
+struct Swapchain {
+ raw: vk::SwapchainKHR,
+ raw_flags: vk::SwapchainCreateFlagsKHR,
+ functor: khr::Swapchain,
+ device: Arc<DeviceShared>,
+ fence: vk::Fence,
+ images: Vec<vk::Image>,
+ config: crate::SurfaceConfiguration,
+ view_formats: Vec<wgt::TextureFormat>,
+}
+
+pub struct Surface {
+ raw: vk::SurfaceKHR,
+ functor: khr::Surface,
+ instance: Arc<InstanceShared>,
+ swapchain: Option<Swapchain>,
+}
+
+#[derive(Debug)]
+pub struct SurfaceTexture {
+ index: u32,
+ texture: Texture,
+}
+
+impl Borrow<Texture> for SurfaceTexture {
+ fn borrow(&self) -> &Texture {
+ &self.texture
+ }
+}
+
+pub struct Adapter {
+ raw: vk::PhysicalDevice,
+ instance: Arc<InstanceShared>,
+ //queue_families: Vec<vk::QueueFamilyProperties>,
+ known_memory_flags: vk::MemoryPropertyFlags,
+ phd_capabilities: adapter::PhysicalDeviceCapabilities,
+ //phd_features: adapter::PhysicalDeviceFeatures,
+ downlevel_flags: wgt::DownlevelFlags,
+ private_caps: PrivateCapabilities,
+ workarounds: Workarounds,
+}
+
+// TODO: there's no reason why this can't be unified; the function pointers should all be the same, but it's not clear how to do this with `ash`.
+enum ExtensionFn<T> {
+ /// The loaded function pointer struct for an extension.
+ Extension(T),
+ /// The extension was promoted to a core version of Vulkan and the functions on `ash`'s `DeviceV1_x` traits should be used.
+ Promoted,
+}
+
+struct DeviceExtensionFunctions {
+ draw_indirect_count: Option<khr::DrawIndirectCount>,
+ timeline_semaphore: Option<ExtensionFn<khr::TimelineSemaphore>>,
+}
+
+/// Set of internal capabilities, which don't show up in the exposed
+/// device geometry, but affect the code paths taken internally.
+#[derive(Clone, Debug)]
+struct PrivateCapabilities {
+ /// Y-flipping is implemented with either `VK_AMD_negative_viewport_height` or `VK_KHR_maintenance1`/1.1+. The AMD extension for negative viewport height does not require a Y shift.
+ ///
+ /// This flag is `true` if the device has `VK_KHR_maintenance1`/1.1+ and `false` otherwise (i.e. in the case of `VK_AMD_negative_viewport_height`).
+ flip_y_requires_shift: bool,
+ imageless_framebuffers: bool,
+ image_view_usage: bool,
+ timeline_semaphores: bool,
+ texture_d24: bool,
+ texture_d24_s8: bool,
+ texture_s8: bool,
+ /// Ability to present contents to any screen. Only needed to work around broken platform configurations.
+ can_present: bool,
+ non_coherent_map_mask: wgt::BufferAddress,
+ robust_buffer_access: bool,
+ robust_image_access: bool,
+ zero_initialize_workgroup_memory: bool,
+}
+
+bitflags::bitflags!(
+ /// Workaround flags.
+ #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+ pub struct Workarounds: u32 {
+ /// Only generate SPIR-V for one entry point at a time.
+ const SEPARATE_ENTRY_POINTS = 0x1;
+ /// Qualcomm OOMs when there are zero color attachments but a non-null pointer
+ /// to a subpass resolve attachment array. This nulls out that pointer in that case.
+ const EMPTY_RESOLVE_ATTACHMENT_LISTS = 0x2;
+ }
+);
+
+#[derive(Clone, Debug, Eq, Hash, PartialEq)]
+struct AttachmentKey {
+ format: vk::Format,
+ layout: vk::ImageLayout,
+ ops: crate::AttachmentOps,
+}
+
+impl AttachmentKey {
+ /// Returns an attachment key for a compatible attachment.
+ fn compatible(format: vk::Format, layout: vk::ImageLayout) -> Self {
+ Self {
+ format,
+ layout,
+ ops: crate::AttachmentOps::all(),
+ }
+ }
+}
+
+#[derive(Clone, Eq, Hash, PartialEq)]
+struct ColorAttachmentKey {
+ base: AttachmentKey,
+ resolve: Option<AttachmentKey>,
+}
+
+#[derive(Clone, Eq, Hash, PartialEq)]
+struct DepthStencilAttachmentKey {
+ base: AttachmentKey,
+ stencil_ops: crate::AttachmentOps,
+}
+
+#[derive(Clone, Eq, Default, Hash, PartialEq)]
+struct RenderPassKey {
+ colors: ArrayVec<Option<ColorAttachmentKey>, { crate::MAX_COLOR_ATTACHMENTS }>,
+ depth_stencil: Option<DepthStencilAttachmentKey>,
+ sample_count: u32,
+ multiview: Option<NonZeroU32>,
+}
+
+#[derive(Clone, Debug, Eq, Hash, PartialEq)]
+struct FramebufferAttachment {
+ /// Can be NULL if the framebuffer is image-less
+ raw: vk::ImageView,
+ raw_image_flags: vk::ImageCreateFlags,
+ view_usage: crate::TextureUses,
+ view_format: wgt::TextureFormat,
+ raw_view_formats: Vec<vk::Format>,
+}
+
+#[derive(Clone, Eq, Hash, PartialEq)]
+struct FramebufferKey {
+ attachments: ArrayVec<FramebufferAttachment, { MAX_TOTAL_ATTACHMENTS }>,
+ extent: wgt::Extent3d,
+ sample_count: u32,
+}
+
+struct DeviceShared {
+ raw: ash::Device,
+ family_index: u32,
+ queue_index: u32,
+ raw_queue: ash::vk::Queue,
+ handle_is_owned: bool,
+ instance: Arc<InstanceShared>,
+ physical_device: ash::vk::PhysicalDevice,
+ enabled_extensions: Vec<&'static CStr>,
+ extension_fns: DeviceExtensionFunctions,
+ vendor_id: u32,
+ timestamp_period: f32,
+ private_caps: PrivateCapabilities,
+ workarounds: Workarounds,
+ render_passes: Mutex<rustc_hash::FxHashMap<RenderPassKey, vk::RenderPass>>,
+ framebuffers: Mutex<rustc_hash::FxHashMap<FramebufferKey, vk::Framebuffer>>,
+}
+
+pub struct Device {
+ shared: Arc<DeviceShared>,
+ mem_allocator: Mutex<gpu_alloc::GpuAllocator<vk::DeviceMemory>>,
+ desc_allocator:
+ Mutex<gpu_descriptor::DescriptorAllocator<vk::DescriptorPool, vk::DescriptorSet>>,
+ valid_ash_memory_types: u32,
+ naga_options: naga::back::spv::Options,
+ #[cfg(feature = "renderdoc")]
+ render_doc: crate::auxil::renderdoc::RenderDoc,
+}
+
+pub struct Queue {
+ raw: vk::Queue,
+ swapchain_fn: khr::Swapchain,
+ device: Arc<DeviceShared>,
+ family_index: u32,
+ /// We use a redundant chain of semaphores to pass on the signal
+ /// from submissions to the last present, since it's required by the
+ /// specification.
+ /// It would be correct to use a single semaphore there, but
+ /// [Intel hangs in `anv_queue_finish`](https://gitlab.freedesktop.org/mesa/mesa/-/issues/5508).
+ relay_semaphores: [vk::Semaphore; 2],
+ relay_index: Option<usize>,
+}
+
+#[derive(Debug)]
+pub struct Buffer {
+ raw: vk::Buffer,
+ block: Mutex<gpu_alloc::MemoryBlock<vk::DeviceMemory>>,
+}
+
+#[derive(Debug)]
+pub struct Texture {
+ raw: vk::Image,
+ drop_guard: Option<crate::DropGuard>,
+ block: Option<gpu_alloc::MemoryBlock<vk::DeviceMemory>>,
+ usage: crate::TextureUses,
+ format: wgt::TextureFormat,
+ raw_flags: vk::ImageCreateFlags,
+ copy_size: crate::CopyExtent,
+ view_formats: Vec<wgt::TextureFormat>,
+}
+
+impl Texture {
+ /// # Safety
+ ///
+ /// - The image handle must not be manually destroyed
+ pub unsafe fn raw_handle(&self) -> vk::Image {
+ self.raw
+ }
+}
+
+#[derive(Debug)]
+pub struct TextureView {
+ raw: vk::ImageView,
+ layers: NonZeroU32,
+ attachment: FramebufferAttachment,
+}
+
+#[derive(Debug)]
+pub struct Sampler {
+ raw: vk::Sampler,
+}
+
+#[derive(Debug)]
+pub struct BindGroupLayout {
+ raw: vk::DescriptorSetLayout,
+ desc_count: gpu_descriptor::DescriptorTotalCount,
+ types: Box<[(vk::DescriptorType, u32)]>,
+ /// Map of binding index to size.
+ binding_arrays: Vec<(u32, NonZeroU32)>,
+}
+
+#[derive(Debug)]
+pub struct PipelineLayout {
+ raw: vk::PipelineLayout,
+ binding_arrays: naga::back::spv::BindingMap,
+}
+
+#[derive(Debug)]
+pub struct BindGroup {
+ set: gpu_descriptor::DescriptorSet<vk::DescriptorSet>,
+}
+
+#[derive(Default)]
+struct Temp {
+ marker: Vec<u8>,
+ buffer_barriers: Vec<vk::BufferMemoryBarrier>,
+ image_barriers: Vec<vk::ImageMemoryBarrier>,
+}
+
+unsafe impl Send for Temp {}
+unsafe impl Sync for Temp {}
+
+impl Temp {
+ fn clear(&mut self) {
+ self.marker.clear();
+ self.buffer_barriers.clear();
+ self.image_barriers.clear();
+ // See also: https://github.com/NotIntMan/inplace_it/issues/8
+ }
+
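+ /// Builds a NUL-terminated copy of `name` in the reusable `marker` scratch
+ /// buffer and returns it as a `&CStr`.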
+ fn make_c_str(&mut self, name: &str) -> &CStr {
+ self.marker.clear();
+ self.marker.extend_from_slice(name.as_bytes());
+ self.marker.push(0);
+ unsafe { CStr::from_bytes_with_nul_unchecked(&self.marker) }
+ }
+}
+
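+/// Command recording state: `active` is the command buffer currently being recorded;
+/// `free` and `discarded` hold command buffers that are recycled rather than destroyed.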
+pub struct CommandEncoder {
+ raw: vk::CommandPool,
+ device: Arc<DeviceShared>,
+ active: vk::CommandBuffer,
+ bind_point: vk::PipelineBindPoint,
+ temp: Temp,
+ free: Vec<vk::CommandBuffer>,
+ discarded: Vec<vk::CommandBuffer>,
+ /// If this is true, the active renderpass enabled a debug span,
+ /// and needs to be disabled on renderpass close.
+ rpass_debug_marker_active: bool,
+}
+
+impl fmt::Debug for CommandEncoder {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("CommandEncoder")
+ .field("raw", &self.raw)
+ .finish()
+ }
+}
+
+#[derive(Debug)]
+pub struct CommandBuffer {
+ raw: vk::CommandBuffer,
+}
+
+#[derive(Debug)]
+#[allow(clippy::large_enum_variant)]
+pub enum ShaderModule {
+ Raw(vk::ShaderModule),
+ Intermediate {
+ naga_shader: crate::NagaShader,
+ runtime_checks: bool,
+ },
+}
+
+#[derive(Debug)]
+pub struct RenderPipeline {
+ raw: vk::Pipeline,
+}
+
+#[derive(Debug)]
+pub struct ComputePipeline {
+ raw: vk::Pipeline,
+}
+
+#[derive(Debug)]
+pub struct QuerySet {
+ raw: vk::QueryPool,
+}
+
+#[derive(Debug)]
+pub enum Fence {
+ TimelineSemaphore(vk::Semaphore),
+ FencePool {
+ last_completed: crate::FenceValue,
+ /// The pending fence values have to be ascending.
+ active: Vec<(crate::FenceValue, vk::Fence)>,
+ free: Vec<vk::Fence>,
+ },
+}
+
+impl Fence {
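+ /// Returns the highest fence value, starting from `max_value`, whose associated
+ /// `VkFence` in `active` has already signaled.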
+ fn check_active(
+ device: &ash::Device,
+ mut max_value: crate::FenceValue,
+ active: &[(crate::FenceValue, vk::Fence)],
+ ) -> Result<crate::FenceValue, crate::DeviceError> {
+ for &(value, raw) in active.iter() {
+ unsafe {
+ if value > max_value && device.get_fence_status(raw)? {
+ max_value = value;
+ }
+ }
+ }
+ Ok(max_value)
+ }
+
+ fn get_latest(
+ &self,
+ device: &ash::Device,
+ extension: Option<&ExtensionFn<khr::TimelineSemaphore>>,
+ ) -> Result<crate::FenceValue, crate::DeviceError> {
+ match *self {
+ Self::TimelineSemaphore(raw) => unsafe {
+ Ok(match *extension.unwrap() {
+ ExtensionFn::Extension(ref ext) => ext.get_semaphore_counter_value(raw)?,
+ ExtensionFn::Promoted => device.get_semaphore_counter_value(raw)?,
+ })
+ },
+ Self::FencePool {
+ last_completed,
+ ref active,
+ free: _,
+ } => Self::check_active(device, last_completed, active),
+ }
+ }
+
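+ /// For the fence-pool variant, recycles signaled fences from `active` back into
+ /// `free` (resetting them) and advances `last_completed`; timeline semaphores
+ /// need no maintenance.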
+ fn maintain(&mut self, device: &ash::Device) -> Result<(), crate::DeviceError> {
+ match *self {
+ Self::TimelineSemaphore(_) => {}
+ Self::FencePool {
+ ref mut last_completed,
+ ref mut active,
+ ref mut free,
+ } => {
+ let latest = Self::check_active(device, *last_completed, active)?;
+ let base_free = free.len();
+ for &(value, raw) in active.iter() {
+ if value <= latest {
+ free.push(raw);
+ }
+ }
+ if free.len() != base_free {
+ active.retain(|&(value, _)| value > latest);
+ unsafe {
+ device.reset_fences(&free[base_free..])?;
+ }
+ }
+ *last_completed = latest;
+ }
+ }
+ Ok(())
+ }
+}
+
+impl crate::Queue<Api> for Queue {
+ unsafe fn submit(
+ &mut self,
+ command_buffers: &[&CommandBuffer],
+ signal_fence: Option<(&mut Fence, crate::FenceValue)>,
+ ) -> Result<(), crate::DeviceError> {
+ let vk_cmd_buffers = command_buffers
+ .iter()
+ .map(|cmd| cmd.raw)
+ .collect::<Vec<_>>();
+
+ let mut vk_info = vk::SubmitInfo::builder().command_buffers(&vk_cmd_buffers);
+
+ let mut fence_raw = vk::Fence::null();
+ let mut vk_timeline_info;
+ let mut signal_semaphores = [vk::Semaphore::null(), vk::Semaphore::null()];
+ let signal_values;
+
+ if let Some((fence, value)) = signal_fence {
+ fence.maintain(&self.device.raw)?;
+ match *fence {
+ Fence::TimelineSemaphore(raw) => {
+ signal_values = [!0, value];
+ signal_semaphores[1] = raw;
+ vk_timeline_info = vk::TimelineSemaphoreSubmitInfo::builder()
+ .signal_semaphore_values(&signal_values);
+ vk_info = vk_info.push_next(&mut vk_timeline_info);
+ }
+ Fence::FencePool {
+ ref mut active,
+ ref mut free,
+ ..
+ } => {
+ fence_raw = match free.pop() {
+ Some(raw) => raw,
+ None => unsafe {
+ self.device
+ .raw
+ .create_fence(&vk::FenceCreateInfo::builder(), None)?
+ },
+ };
+ active.push((value, fence_raw));
+ }
+ }
+ }
+
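+ // Rotate the relay semaphores: wait on the one signaled by the previous
+ // submission (if any) and signal the other, so the following submit or present
+ // can wait on it.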
+ let wait_stage_mask = [vk::PipelineStageFlags::TOP_OF_PIPE];
+ let sem_index = match self.relay_index {
+ Some(old_index) => {
+ vk_info = vk_info
+ .wait_semaphores(&self.relay_semaphores[old_index..old_index + 1])
+ .wait_dst_stage_mask(&wait_stage_mask);
+ (old_index + 1) % self.relay_semaphores.len()
+ }
+ None => 0,
+ };
+ self.relay_index = Some(sem_index);
+ signal_semaphores[0] = self.relay_semaphores[sem_index];
+
+ let signal_count = if signal_semaphores[1] == vk::Semaphore::null() {
+ 1
+ } else {
+ 2
+ };
+ vk_info = vk_info.signal_semaphores(&signal_semaphores[..signal_count]);
+
+ profiling::scope!("vkQueueSubmit");
+ unsafe {
+ self.device
+ .raw
+ .queue_submit(self.raw, &[vk_info.build()], fence_raw)?
+ };
+ Ok(())
+ }
+
+ unsafe fn present(
+ &mut self,
+ surface: &mut Surface,
+ texture: SurfaceTexture,
+ ) -> Result<(), crate::SurfaceError> {
+ let ssc = surface.swapchain.as_ref().unwrap();
+
+ let swapchains = [ssc.raw];
+ let image_indices = [texture.index];
+ let mut vk_info = vk::PresentInfoKHR::builder()
+ .swapchains(&swapchains)
+ .image_indices(&image_indices);
+
+ if let Some(old_index) = self.relay_index.take() {
+ vk_info = vk_info.wait_semaphores(&self.relay_semaphores[old_index..old_index + 1]);
+ }
+
+ let suboptimal = {
+ profiling::scope!("vkQueuePresentKHR");
+ unsafe { self.swapchain_fn.queue_present(self.raw, &vk_info) }.map_err(|error| {
+ match error {
+ vk::Result::ERROR_OUT_OF_DATE_KHR => crate::SurfaceError::Outdated,
+ vk::Result::ERROR_SURFACE_LOST_KHR => crate::SurfaceError::Lost,
+ _ => crate::DeviceError::from(error).into(),
+ }
+ })?
+ };
+ if suboptimal {
+ // We treat `VK_SUBOPTIMAL_KHR` as `VK_SUCCESS` on Android.
+ // On Android 10+, libvulkan's `vkQueuePresentKHR` implementation returns `VK_SUBOPTIMAL_KHR` if not doing pre-rotation
+ // (i.e `VkSwapchainCreateInfoKHR::preTransform` not being equal to the current device orientation).
+ // This is always the case when the device orientation is anything other than the identity one, as we unconditionally use `VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR`.
+ #[cfg(not(target_os = "android"))]
+ log::warn!("Suboptimal present of frame {}", texture.index);
+ }
+ Ok(())
+ }
+
+ unsafe fn get_timestamp_period(&self) -> f32 {
+ self.device.timestamp_period
+ }
+}
+
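+// Map raw Vulkan results onto wgpu-hal device errors; anything unrecognized is
+// conservatively reported as a lost device.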
+impl From<vk::Result> for crate::DeviceError {
+ fn from(result: vk::Result) -> Self {
+ match result {
+ vk::Result::ERROR_OUT_OF_HOST_MEMORY | vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
+ Self::OutOfMemory
+ }
+ vk::Result::ERROR_DEVICE_LOST => Self::Lost,
+ _ => {
+ log::warn!("Unrecognized device error {:?}", result);
+ Self::Lost
+ }
+ }
+ }
+}