path: root/third_party/rust/wgpu-hal/src/vulkan/mod.rs
Diffstat (limited to 'third_party/rust/wgpu-hal/src/vulkan/mod.rs')
-rw-r--r--   third_party/rust/wgpu-hal/src/vulkan/mod.rs   109
1 file changed, 105 insertions, 4 deletions
diff --git a/third_party/rust/wgpu-hal/src/vulkan/mod.rs b/third_party/rust/wgpu-hal/src/vulkan/mod.rs
index 0cd385045c..d1ea82772e 100644
--- a/third_party/rust/wgpu-hal/src/vulkan/mod.rs
+++ b/third_party/rust/wgpu-hal/src/vulkan/mod.rs
@@ -238,6 +238,7 @@ struct PrivateCapabilities {
robust_image_access2: bool,
zero_initialize_workgroup_memory: bool,
image_format_list: bool,
+ subgroup_size_control: bool,
}
bitflags::bitflags!(
@@ -413,6 +414,15 @@ pub struct TextureView {
attachment: FramebufferAttachment,
}
+impl TextureView {
+ /// # Safety
+ ///
+ /// - The image view handle must not be manually destroyed
+ pub unsafe fn raw_handle(&self) -> vk::ImageView {
+ self.raw
+ }
+}
+
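A possible caller-side sketch of the new accessor (not part of the patch): the descriptor layout and the null sampler below are placeholder assumptions, and the call is only sound under the safety contract stated in the doc comment.

```rust
use ash::vk;

// Hypothetical interop helper: feed the raw view into an ash descriptor
// update. `SHADER_READ_ONLY_OPTIMAL` and the null sampler are assumptions.
fn describe_view(view: &TextureView) -> vk::DescriptorImageInfo {
    vk::DescriptorImageInfo {
        sampler: vk::Sampler::null(),
        // Sound only while `view` stays alive and the handle is never
        // destroyed manually, per the safety contract on `raw_handle`.
        image_view: unsafe { view.raw_handle() },
        image_layout: vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
    }
}
```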
#[derive(Debug)]
pub struct Sampler {
raw: vk::Sampler,
@@ -438,6 +448,7 @@ pub struct BindGroup {
set: gpu_descriptor::DescriptorSet<vk::DescriptorSet>,
}
+/// Miscellaneous allocation recycling pool for `CommandAllocator`.
#[derive(Default)]
struct Temp {
marker: Vec<u8>,
@@ -467,11 +478,31 @@ impl Temp {
pub struct CommandEncoder {
raw: vk::CommandPool,
device: Arc<DeviceShared>,
+
+ /// The current command buffer, if `self` is in the ["recording"]
+ /// state.
+ ///
+ /// ["recording"]: crate::CommandEncoder
+ ///
+ /// If non-`null`, the buffer is in the Vulkan "recording" state.
active: vk::CommandBuffer,
+
+ /// What kind of pass we are currently within: compute or render.
bind_point: vk::PipelineBindPoint,
+
+ /// Allocation recycling pool for this encoder.
temp: Temp,
+
+ /// A pool of available command buffers.
+ ///
+ /// These are all in the Vulkan "initial" state.
free: Vec<vk::CommandBuffer>,
+
+ /// A pool of discarded command buffers.
+ ///
+ /// These could be in any Vulkan state except "pending".
discarded: Vec<vk::CommandBuffer>,
+
/// If this is true, the active renderpass enabled a debug span,
/// and needs to be disabled on renderpass close.
rpass_debug_marker_active: bool,
@@ -481,6 +512,15 @@ pub struct CommandEncoder {
end_of_pass_timer_query: Option<(vk::QueryPool, u32)>,
}
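The field comments above describe a small state machine. Here is a device-free sketch of those transitions; the `PoolModel` type and its `begin`/`discard` methods are illustrative stand-ins, not wgpu-hal's actual encoder API.

```rust
use ash::vk;

// Illustrative model of the buffer lifecycle documented above.
struct PoolModel {
    active: vk::CommandBuffer,         // null unless recording
    free: Vec<vk::CommandBuffer>,      // Vulkan "initial" state
    discarded: Vec<vk::CommandBuffer>, // any state except "pending"
}

impl PoolModel {
    /// Take a pooled buffer and make it the active (recording) one.
    fn begin(&mut self) {
        assert_eq!(self.active, vk::CommandBuffer::null());
        if let Some(buf) = self.free.pop() {
            // The real encoder calls vkBeginCommandBuffer here, and
            // allocates from its vk::CommandPool when `free` is empty.
            self.active = buf;
        }
    }

    /// Abandon the current recording; the buffer waits in `discarded`
    /// until the pool is reset.
    fn discard(&mut self) {
        if self.active != vk::CommandBuffer::null() {
            let buf = std::mem::replace(&mut self.active, vk::CommandBuffer::null());
            self.discarded.push(buf);
        }
    }
}
```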
+impl CommandEncoder {
+ /// # Safety
+ ///
+ /// - The command buffer handle must not be manually destroyed
+ pub unsafe fn raw_handle(&self) -> vk::CommandBuffer {
+ self.active
+ }
+}
+
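A sketch of why the accessor is useful: recording a raw Vulkan command that wgpu-hal does not expose. `ash_device` is assumed to be the same `ash::Device` the encoder belongs to, and the encoder must currently be recording (non-null `raw_handle`).

```rust
use ash::vk;

// Hedged sketch: issues a full memory barrier directly through ash on the
// encoder's active command buffer.
unsafe fn full_memory_barrier(ash_device: &ash::Device, encoder: &CommandEncoder) {
    let barrier = vk::MemoryBarrier {
        src_access_mask: vk::AccessFlags::MEMORY_WRITE,
        dst_access_mask: vk::AccessFlags::MEMORY_READ,
        ..Default::default()
    };
    ash_device.cmd_pipeline_barrier(
        encoder.raw_handle(), // must be a buffer in the "recording" state
        vk::PipelineStageFlags::ALL_COMMANDS,
        vk::PipelineStageFlags::ALL_COMMANDS,
        vk::DependencyFlags::empty(),
        &[barrier],
        &[],
        &[],
    );
}
```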
impl fmt::Debug for CommandEncoder {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("CommandEncoder")
@@ -519,9 +559,47 @@ pub struct QuerySet {
raw: vk::QueryPool,
}
+/// The [`Api::Fence`] type for [`vulkan::Api`].
+///
+/// This is an `enum` because there are two possible implementations of
+/// `wgpu-hal` fences on Vulkan: Vulkan fences, which work on any version of
+/// Vulkan, and Vulkan timeline semaphores, which are easier and cheaper but
+/// require non-1.0 features.
+///
+/// [`Device::create_fence`] returns a [`TimelineSemaphore`] if
+/// [`VK_KHR_timeline_semaphore`] is available and enabled, and a [`FencePool`]
+/// otherwise.
+///
+/// [`Api::Fence`]: crate::Api::Fence
+/// [`vulkan::Api`]: Api
+/// [`Device::create_fence`]: crate::Device::create_fence
+/// [`TimelineSemaphore`]: Fence::TimelineSemaphore
+/// [`VK_KHR_timeline_semaphore`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#VK_KHR_timeline_semaphore
+/// [`FencePool`]: Fence::FencePool
#[derive(Debug)]
pub enum Fence {
+ /// A Vulkan [timeline semaphore].
+ ///
+ /// These are simpler to use than Vulkan fences, since timeline semaphores
+ /// work exactly the way [`wgpu_hal::Api::Fence`] is specified to work.
+ ///
+ /// [timeline semaphore]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#synchronization-semaphores
+ /// [`wgpu_hal::Api::Fence`]: crate::Api::Fence
TimelineSemaphore(vk::Semaphore),
+
+ /// A collection of Vulkan [fence]s, each associated with a [`FenceValue`].
+ ///
+ /// The effective [`FenceValue`] of this variant is the greater of
+ /// `last_completed` and the maximum value associated with a signalled fence
+ /// in `active`.
+ ///
+ /// Fences are available in all versions of Vulkan, but since they only have
+ /// two states, "signaled" and "unsignaled", we need to use a separate fence
+ /// for each queue submission we might want to wait for, and remember which
+ /// [`FenceValue`] each one represents.
+ ///
+ /// [fence]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#synchronization-fences
+ /// [`FenceValue`]: crate::FenceValue
FencePool {
last_completed: crate::FenceValue,
/// The pending fence values have to be ascending.
@@ -531,21 +609,32 @@ pub enum Fence {
}
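A simplified sketch of the selection the doc comment describes. The `timeline_semaphores_enabled` flag stands in for however the device actually tracks `VK_KHR_timeline_semaphore`, the error type is simplified, and the `active`/`free` fields of `FencePool` (elided from this hunk) are assumed.

```rust
use ash::vk;

// Not the real `Device::create_fence`; a sketch of the branch it takes.
unsafe fn create_fence_sketch(
    device: &ash::Device,
    timeline_semaphores_enabled: bool, // assumed capability flag
) -> Result<Fence, vk::Result> {
    if timeline_semaphores_enabled {
        // Timeline path: one semaphore whose 64-bit counter is the fence value.
        let ty = vk::SemaphoreTypeCreateInfo {
            semaphore_type: vk::SemaphoreType::TIMELINE,
            initial_value: 0,
            ..Default::default()
        };
        let info = vk::SemaphoreCreateInfo {
            p_next: &ty as *const _ as *const std::ffi::c_void,
            ..Default::default()
        };
        Ok(Fence::TimelineSemaphore(device.create_semaphore(&info, None)?))
    } else {
        // Fence-pool path: start empty; plain fences are allocated on demand
        // as submissions need them.
        Ok(Fence::FencePool {
            last_completed: 0,
            active: Vec::new(),
            free: Vec::new(),
        })
    }
}
```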
impl Fence {
+ /// Return the highest [`FenceValue`] among the signalled fences in `active`.
+ ///
+ /// As an optimization, assume that we already know that the fence has
+ /// reached `last_completed`, and don't bother checking fences whose values
+ /// are less than that: those fences remain in the `active` array only
+ /// because we haven't called `maintain` yet to clean them up.
+ ///
+ /// [`FenceValue`]: crate::FenceValue
fn check_active(
device: &ash::Device,
- mut max_value: crate::FenceValue,
+ mut last_completed: crate::FenceValue,
active: &[(crate::FenceValue, vk::Fence)],
) -> Result<crate::FenceValue, crate::DeviceError> {
for &(value, raw) in active.iter() {
unsafe {
- if value > max_value && device.get_fence_status(raw)? {
- max_value = value;
+ if value > last_completed && device.get_fence_status(raw)? {
+ last_completed = value;
}
}
}
- Ok(max_value)
+ Ok(last_completed)
}
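A device-free model of the computation above, useful for seeing the max logic in isolation. The `(u64, bool)` pairs stand in for `(FenceValue, vk::Fence)`, with the bool playing the role of `vkGetFenceStatus`; this is a model, not the real code.

```rust
// Model only: the real check_active queries the driver for each fence.
fn effective_value(last_completed: u64, active: &[(u64, bool)]) -> u64 {
    active
        .iter()
        .filter(|&&(_, signalled)| signalled)
        .map(|&(value, _)| value)
        .fold(last_completed, u64::max)
}

#[test]
fn latest_value_example() {
    // last_completed = 2; value 3 has signalled, value 4 is still pending.
    assert_eq!(effective_value(2, &[(3, true), (4, false)]), 3);
}
```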
+ /// Return the highest signalled [`FenceValue`] for `self`.
+ ///
+ /// [`FenceValue`]: crate::FenceValue
fn get_latest(
&self,
device: &ash::Device,
@@ -566,6 +655,18 @@ impl Fence {
}
}
+ /// Trim the internal state of this [`Fence`].
+ ///
+ /// This function has no externally visible effect, but you should call it
+ /// periodically to keep this fence's resource consumption under control.
+ ///
+ /// For fences using the [`FencePool`] implementation, this function
+ /// recycles fences that have been signaled. If you don't call this,
+ /// [`Queue::submit`] will just keep allocating a new Vulkan fence every
+ /// time it's called.
+ ///
+ /// [`FencePool`]: Fence::FencePool
+ /// [`Queue::submit`]: crate::Queue::submit
fn maintain(&mut self, device: &ash::Device) -> Result<(), crate::DeviceError> {
match *self {
Self::TimelineSemaphore(_) => {}
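The `FencePool` arm of `maintain` is not shown in this hunk; the following device-free sketch only illustrates the recycling the doc comment describes. Recycled fences also need a `vkResetFences` before reuse, which the sketch omits.

```rust
use ash::vk;

// Sketch, not the actual implementation: once `check_active` reports the
// latest signalled value, every fence at or below it can move from `active`
// back to `free` for reuse by later submissions.
fn recycle_signalled(
    latest: u64,
    active: &mut Vec<(u64, vk::Fence)>,
    free: &mut Vec<vk::Fence>,
) {
    active.retain(|&(value, raw)| {
        if value <= latest {
            free.push(raw); // reuse instead of allocating a fresh vk::Fence
            false
        } else {
            true
        }
    });
}
```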