Diffstat (limited to 'src/sync')
-rw-r--r--   src/sync/event.rs                                      311
-rw-r--r--   src/sync/fence.rs                                     1851
-rw-r--r--   src/sync/future/fence_signal.rs                        358
-rw-r--r--   src/sync/future/join.rs                                211
-rw-r--r--   src/sync/future/mod.rs                                 436
-rw-r--r--   src/sync/future/now.rs                                  55
-rw-r--r--   src/sync/future/semaphore_signal.rs                    204
-rw-r--r--   src/sync/mod.rs                                        151
-rw-r--r--   src/sync/pipeline.rs                                  2840
-rw-r--r--   src/sync/semaphore.rs                                 1667
-rw-r--r--   src/sync/semaphore/external_semaphore_handle_type.rs   101
-rw-r--r--   src/sync/semaphore/mod.rs                               15
-rw-r--r--   src/sync/semaphore/semaphore.rs                        355
13 files changed, 6916 insertions(+), 1639 deletions(-)
diff --git a/src/sync/event.rs b/src/sync/event.rs
index 2f02da6..8f8bc33 100644
--- a/src/sync/event.rs
+++ b/src/sync/event.rs
@@ -7,15 +7,36 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
-use crate::check_errors;
-use crate::device::Device;
-use crate::device::DeviceOwned;
-use crate::OomError;
-use crate::Success;
-use crate::VulkanObject;
-use std::mem::MaybeUninit;
-use std::ptr;
-use std::sync::Arc;
+//! An event provides fine-grained synchronization within a single queue, or from the host to a
+//! queue.
+//!
+//! When an event is signaled from a queue using the [`set_event`] command buffer command,
+//! it acts similarly to a [pipeline barrier], but the synchronization scopes are split:
+//! the source synchronization scope includes only commands before the `set_event` command,
+//! while the destination synchronization scope includes only commands after the
+//! [`wait_events`] command. Commands in between the two are not included.
+//!
+//! An event can also be signaled from the host, by calling the [`set`] method directly on the
+//! [`Event`].
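+//!
+//! # Examples
+//!
+//! A minimal sketch of host-side signaling (`device` is assumed to be an
+//! existing `Arc<Device>`):
+//!
+//! ```ignore
+//! use vulkano::sync::event::Event;
+//!
+//! let mut event = Event::new(device, Default::default()).unwrap();
+//! assert!(!event.signaled().unwrap());
+//! event.set(); // signal from the host
+//! assert!(event.signaled().unwrap());
+//! event.reset(); // back to the unsignaled state
+//! ```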
+//!
+//! [`set_event`]: crate::command_buffer::CommandBufferBuilder::set_event
+//! [pipeline barrier]: crate::command_buffer::CommandBufferBuilder::pipeline_barrier
+//! [`wait_events`]: crate::command_buffer::CommandBufferBuilder::wait_events
+//! [`set`]: Event::set
+
+use crate::{
+ device::{Device, DeviceOwned},
+ macros::impl_id_counter,
+ OomError, RequiresOneOf, VulkanError, VulkanObject,
+};
+use std::{
+ error::Error,
+ fmt::{Display, Error as FmtError, Formatter},
+ mem::MaybeUninit,
+ num::NonZeroU64,
+ ptr,
+ sync::Arc,
+};
/// Used to block the GPU execution until an event on the CPU occurs.
///
@@ -25,71 +46,114 @@ use std::sync::Arc;
/// device loss.
#[derive(Debug)]
pub struct Event {
- // The event.
- event: ash::vk::Event,
- // The device.
+ handle: ash::vk::Event,
device: Arc<Device>,
+ id: NonZeroU64,
must_put_in_pool: bool,
}
impl Event {
+ /// Creates a new `Event`.
+ ///
+ /// On [portability subset](crate::instance#portability-subset-devices-and-the-enumerate_portability-flag)
+ /// devices, the
+ /// [`events`](crate::device::Features::events)
+ /// feature must be enabled on the device.
+ #[inline]
+ pub fn new(device: Arc<Device>, _create_info: EventCreateInfo) -> Result<Event, EventError> {
+ // VUID-vkCreateEvent-events-04468
+ if device.enabled_extensions().khr_portability_subset && !device.enabled_features().events {
+ return Err(EventError::RequirementNotMet {
+ required_for: "this device is a portability subset device, and `Event::new` was \
+ called",
+ requires_one_of: RequiresOneOf {
+ features: &["events"],
+ ..Default::default()
+ },
+ });
+ }
+
+ let create_info = ash::vk::EventCreateInfo {
+ flags: ash::vk::EventCreateFlags::empty(),
+ ..Default::default()
+ };
+
+ let handle = unsafe {
+ let mut output = MaybeUninit::uninit();
+ let fns = device.fns();
+ (fns.v1_0.create_event)(
+ device.handle(),
+ &create_info,
+ ptr::null(),
+ output.as_mut_ptr(),
+ )
+ .result()
+ .map_err(VulkanError::from)?;
+ output.assume_init()
+ };
+
+ Ok(Event {
+ handle,
+ device,
+ id: Self::next_id(),
+ must_put_in_pool: false,
+ })
+ }
+
/// Takes an event from the vulkano-provided event pool.
/// If the pool is empty, a new event will be allocated.
/// Upon `drop`, the event is put back into the pool.
///
/// For most applications, using the event pool should be preferred,
/// in order to avoid creating new events every frame.
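+    ///
+    /// A minimal sketch (`device` is assumed to be an existing `Arc<Device>`):
+    ///
+    /// ```ignore
+    /// use vulkano::sync::event::Event;
+    ///
+    /// // Takes an event from the pool, or creates one if the pool is empty.
+    /// // On drop, the event is returned to the pool for reuse.
+    /// let event = Event::from_pool(device.clone()).unwrap();
+    /// ```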
- pub fn from_pool(device: Arc<Device>) -> Result<Event, OomError> {
- let maybe_raw_event = device.event_pool().lock().unwrap().pop();
- match maybe_raw_event {
- Some(raw_event) => {
+ #[inline]
+ pub fn from_pool(device: Arc<Device>) -> Result<Event, EventError> {
+ let handle = device.event_pool().lock().pop();
+ let event = match handle {
+ Some(handle) => {
unsafe {
// Make sure the event isn't signaled
let fns = device.fns();
- check_errors(fns.v1_0.reset_event(device.internal_object(), raw_event))?;
+ (fns.v1_0.reset_event)(device.handle(), handle)
+ .result()
+ .map_err(VulkanError::from)?;
}
- Ok(Event {
- event: raw_event,
- device: device,
+ Event {
+ handle,
+ device,
+ id: Self::next_id(),
must_put_in_pool: true,
- })
+ }
}
None => {
// Pool is empty, alloc new event
- Event::alloc_impl(device, true)
+ let mut event = Event::new(device, Default::default())?;
+ event.must_put_in_pool = true;
+ event
}
- }
- }
+ };
- /// Builds a new event.
- #[inline]
- pub fn alloc(device: Arc<Device>) -> Result<Event, OomError> {
- Event::alloc_impl(device, false)
+ Ok(event)
}
- fn alloc_impl(device: Arc<Device>, must_put_in_pool: bool) -> Result<Event, OomError> {
- let event = unsafe {
- let infos = ash::vk::EventCreateInfo {
- flags: ash::vk::EventCreateFlags::empty(),
- ..Default::default()
- };
-
- let mut output = MaybeUninit::uninit();
- let fns = device.fns();
- check_errors(fns.v1_0.create_event(
- device.internal_object(),
- &infos,
- ptr::null(),
- output.as_mut_ptr(),
- ))?;
- output.assume_init()
- };
-
- Ok(Event {
- device: device,
- event: event,
- must_put_in_pool: must_put_in_pool,
- })
+ /// Creates a new `Event` from a raw object handle.
+ ///
+ /// # Safety
+ ///
+ /// - `handle` must be a valid Vulkan object handle created from `device`.
+ /// - `create_info` must match the info used to create the object.
+ #[inline]
+ pub unsafe fn from_handle(
+ device: Arc<Device>,
+ handle: ash::vk::Event,
+ _create_info: EventCreateInfo,
+ ) -> Event {
+ Event {
+ handle,
+ device,
+ id: Self::next_id(),
+ must_put_in_pool: false,
+ }
}
/// Returns true if the event is signaled.
@@ -97,14 +161,11 @@ impl Event {
pub fn signaled(&self) -> Result<bool, OomError> {
unsafe {
let fns = self.device.fns();
- let result = check_errors(
- fns.v1_0
- .get_event_status(self.device.internal_object(), self.event),
- )?;
+ let result = (fns.v1_0.get_event_status)(self.device.handle(), self.handle);
match result {
- Success::EventSet => Ok(true),
- Success::EventReset => Ok(false),
- _ => unreachable!(),
+ ash::vk::Result::EVENT_SET => Ok(true),
+ ash::vk::Result::EVENT_RESET => Ok(false),
+ err => Err(VulkanError::from(err).into()),
}
}
}
@@ -114,10 +175,9 @@ impl Event {
pub fn set_raw(&mut self) -> Result<(), OomError> {
unsafe {
let fns = self.device.fns();
- check_errors(
- fns.v1_0
- .set_event(self.device.internal_object(), self.event),
- )?;
+ (fns.v1_0.set_event)(self.device.handle(), self.handle)
+ .result()
+ .map_err(VulkanError::from)?;
Ok(())
}
}
@@ -126,10 +186,9 @@ impl Event {
///
/// If a command buffer is waiting on this event, it is then unblocked.
///
- /// # Panic
+ /// # Panics
///
/// - Panics if the device or host ran out of memory.
- ///
#[inline]
pub fn set(&mut self) {
self.set_raw().unwrap();
@@ -140,26 +199,48 @@ impl Event {
pub fn reset_raw(&mut self) -> Result<(), OomError> {
unsafe {
let fns = self.device.fns();
- check_errors(
- fns.v1_0
- .reset_event(self.device.internal_object(), self.event),
- )?;
+ (fns.v1_0.reset_event)(self.device.handle(), self.handle)
+ .result()
+ .map_err(VulkanError::from)?;
Ok(())
}
}
/// Changes the `Event` to the unsignaled state.
///
- /// # Panic
+ /// # Panics
///
/// - Panics if the device or host ran out of memory.
- ///
#[inline]
pub fn reset(&mut self) {
self.reset_raw().unwrap();
}
}
+impl Drop for Event {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ if self.must_put_in_pool {
+ let raw_event = self.handle;
+ self.device.event_pool().lock().push(raw_event);
+ } else {
+ let fns = self.device.fns();
+ (fns.v1_0.destroy_event)(self.device.handle(), self.handle, ptr::null());
+ }
+ }
+ }
+}
+
+unsafe impl VulkanObject for Event {
+ type Handle = ash::vk::Event;
+
+ #[inline]
+ fn handle(&self) -> Self::Handle {
+ self.handle
+ }
+}
+
unsafe impl DeviceOwned for Event {
#[inline]
fn device(&self) -> &Arc<Device> {
@@ -167,47 +248,85 @@ unsafe impl DeviceOwned for Event {
}
}
-unsafe impl VulkanObject for Event {
- type Object = ash::vk::Event;
+impl_id_counter!(Event);
+
+/// Parameters to create a new `Event`.
+#[derive(Clone, Debug)]
+pub struct EventCreateInfo {
+ pub _ne: crate::NonExhaustive,
+}
+impl Default for EventCreateInfo {
#[inline]
- fn internal_object(&self) -> ash::vk::Event {
- self.event
+ fn default() -> Self {
+ Self {
+ _ne: crate::NonExhaustive(()),
+ }
}
}
-impl Drop for Event {
- #[inline]
- fn drop(&mut self) {
- unsafe {
- if self.must_put_in_pool {
- let raw_event = self.event;
- self.device.event_pool().lock().unwrap().push(raw_event);
- } else {
- let fns = self.device.fns();
- fns.v1_0
- .destroy_event(self.device.internal_object(), self.event, ptr::null());
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum EventError {
+ /// Not enough memory available.
+ OomError(OomError),
+
+ RequirementNotMet {
+ required_for: &'static str,
+ requires_one_of: RequiresOneOf,
+ },
+}
+
+impl Error for EventError {
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ match self {
+ Self::OomError(err) => Some(err),
+ _ => None,
+ }
+ }
+}
+
+impl Display for EventError {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
+ match self {
+ Self::OomError(_) => write!(f, "not enough memory available"),
+ Self::RequirementNotMet {
+ required_for,
+ requires_one_of,
+ } => write!(
+ f,
+ "a requirement was not met for: {}; requires one of: {}",
+ required_for, requires_one_of,
+ ),
+ }
+ }
+}
+
+impl From<VulkanError> for EventError {
+ fn from(err: VulkanError) -> Self {
+ match err {
+ e @ VulkanError::OutOfHostMemory | e @ VulkanError::OutOfDeviceMemory => {
+ Self::OomError(e.into())
}
+ _ => panic!("unexpected error: {:?}", err),
}
}
}
#[cfg(test)]
mod tests {
- use crate::sync::Event;
- use crate::VulkanObject;
+ use crate::{sync::event::Event, VulkanObject};
#[test]
fn event_create() {
let (device, _) = gfx_dev_and_queue!();
- let event = Event::alloc(device).unwrap();
+ let event = Event::new(device, Default::default()).unwrap();
assert!(!event.signaled().unwrap());
}
#[test]
fn event_set() {
let (device, _) = gfx_dev_and_queue!();
- let mut event = Event::alloc(device).unwrap();
+ let mut event = Event::new(device, Default::default()).unwrap();
assert!(!event.signaled().unwrap());
event.set();
@@ -218,7 +337,7 @@ mod tests {
fn event_reset() {
let (device, _) = gfx_dev_and_queue!();
- let mut event = Event::alloc(device).unwrap();
+ let mut event = Event::new(device, Default::default()).unwrap();
event.set();
assert!(event.signaled().unwrap());
@@ -230,16 +349,16 @@ mod tests {
fn event_pool() {
let (device, _) = gfx_dev_and_queue!();
- assert_eq!(device.event_pool().lock().unwrap().len(), 0);
+ assert_eq!(device.event_pool().lock().len(), 0);
let event1_internal_obj = {
let event = Event::from_pool(device.clone()).unwrap();
- assert_eq!(device.event_pool().lock().unwrap().len(), 0);
- event.internal_object()
+ assert_eq!(device.event_pool().lock().len(), 0);
+ event.handle()
};
- assert_eq!(device.event_pool().lock().unwrap().len(), 1);
+ assert_eq!(device.event_pool().lock().len(), 1);
let event2 = Event::from_pool(device.clone()).unwrap();
- assert_eq!(device.event_pool().lock().unwrap().len(), 0);
- assert_eq!(event2.internal_object(), event1_internal_obj);
+ assert_eq!(device.event_pool().lock().len(), 0);
+ assert_eq!(event2.handle(), event1_internal_obj);
}
}
diff --git a/src/sync/fence.rs b/src/sync/fence.rs
index 208573a..05968f2 100644
--- a/src/sync/fence.rs
+++ b/src/sync/fence.rs
@@ -7,148 +7,293 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
-use crate::check_errors;
-use crate::device::Device;
-use crate::device::DeviceOwned;
-use crate::Error;
-use crate::OomError;
-use crate::SafeDeref;
-use crate::Success;
-use crate::VulkanObject;
+//! A fence provides synchronization between the device and the host, or between an external source
+//! and the host.
+
+use crate::{
+ device::{Device, DeviceOwned, Queue},
+ macros::{impl_id_counter, vulkan_bitflags, vulkan_bitflags_enum},
+ OomError, RequirementNotMet, RequiresOneOf, Version, VulkanError, VulkanObject,
+};
+use parking_lot::{Mutex, MutexGuard};
use smallvec::SmallVec;
-use std::error;
-use std::fmt;
-use std::mem::MaybeUninit;
-use std::ptr;
-use std::sync::atomic::AtomicBool;
-use std::sync::atomic::Ordering;
-use std::sync::Arc;
-use std::time::Duration;
-
-/// A fence is used to know when a command buffer submission has finished its execution.
+#[cfg(unix)]
+use std::fs::File;
+use std::{
+ error::Error,
+ fmt::{Display, Error as FmtError, Formatter},
+ future::Future,
+ mem::MaybeUninit,
+ num::NonZeroU64,
+ pin::Pin,
+ ptr,
+ sync::{Arc, Weak},
+ task::{Context, Poll},
+ time::Duration,
+};
+
+/// A two-state synchronization primitive that is signaled by the device and waited on by the host.
+///
+/// # Queue-to-host synchronization
+///
+/// The primary use of a fence is to know when execution of a queue has reached a particular point.
+/// When adding a command to a queue, a fence can be provided with the command, to be signaled
+/// when the operation finishes. You can check for a fence's current status by calling
+/// `is_signaled`, `wait` or `await` on it. If the fence is found to be signaled, that means that
+/// the queue has completed the operation that is associated with the fence, and all operations that
+/// were submitted before it have been completed as well.
+///
+/// When a queue command accesses a resource, the resource must be kept alive until the queue
+/// command has finished executing, and you may not be allowed to perform certain other
+/// operations (or even any) while the resource is in use. Calling `is_signaled`, `wait` or
+/// `await` notifies the queue when the fence is signaled, so that all resources of the
+/// associated queue operation and preceding operations can be released.
///
-/// When a command buffer accesses a resource, you have to ensure that the CPU doesn't access
-/// the same resource simultaneously (except for concurrent reads). Therefore in order to know
-/// when the CPU can access a resource again, a fence has to be used.
+/// Because of this, it is highly recommended to call `is_signaled`, `wait` or `await` on your fences.
+/// Otherwise, the queue will hold onto resources indefinitely (using up memory)
+/// and resource locks will not be released, which may cause errors when submitting future
+/// queue operations. It is not strictly necessary to wait for *every* fence, as a fence
+/// that was signaled later in the queue will automatically clean up resources associated with
+/// earlier fences too.
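+///
+/// # Examples
+///
+/// A minimal sketch of creating a fence and waiting on it from the host
+/// (`device` is assumed to be an existing `Arc<Device>`; in real code the
+/// fence would be passed along with a queue submission):
+///
+/// ```ignore
+/// use vulkano::sync::fence::{Fence, FenceCreateInfo};
+/// use std::time::Duration;
+///
+/// // A fence can also start out signaled, with
+/// // `FenceCreateInfo { signaled: true, ..Default::default() }`.
+/// let fence = Fence::new(device, FenceCreateInfo::default()).unwrap();
+///
+/// // Blocks until the fence is signaled, or returns `FenceError::Timeout`
+/// // after one second.
+/// match fence.wait(Some(Duration::from_secs(1))) {
+///     Ok(()) => println!("signaled"),
+///     Err(err) => println!("not signaled: {}", err),
+/// }
+/// ```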
#[derive(Debug)]
-pub struct Fence<D = Arc<Device>>
-where
- D: SafeDeref<Target = Device>,
-{
- fence: ash::vk::Fence,
-
- device: D,
+pub struct Fence {
+ handle: ash::vk::Fence,
+ device: Arc<Device>,
+ id: NonZeroU64,
+ must_put_in_pool: bool,
- // If true, we know that the `Fence` is signaled. If false, we don't know.
- // This variable exists so that we don't need to call `vkGetFenceStatus` or `vkWaitForFences`
- // multiple times.
- signaled: AtomicBool,
+ export_handle_types: ExternalFenceHandleTypes,
- // Indicates whether this fence was taken from the fence pool.
- // If true, will be put back into fence pool on drop.
- must_put_in_pool: bool,
+ state: Mutex<FenceState>,
}
-impl<D> Fence<D>
-where
- D: SafeDeref<Target = Device>,
-{
+impl Fence {
+ /// Creates a new `Fence`.
+ #[inline]
+ pub fn new(device: Arc<Device>, create_info: FenceCreateInfo) -> Result<Fence, FenceError> {
+ Self::validate_new(&device, &create_info)?;
+
+ unsafe { Ok(Self::new_unchecked(device, create_info)?) }
+ }
+
+ fn validate_new(device: &Device, create_info: &FenceCreateInfo) -> Result<(), FenceError> {
+ let &FenceCreateInfo {
+ signaled: _,
+ export_handle_types,
+ _ne: _,
+ } = create_info;
+
+ if !export_handle_types.is_empty() {
+ if !(device.api_version() >= Version::V1_1
+ || device.enabled_extensions().khr_external_fence)
+ {
+ return Err(FenceError::RequirementNotMet {
+ required_for: "`create_info.export_handle_types` is not empty",
+ requires_one_of: RequiresOneOf {
+ api_version: Some(Version::V1_1),
+ device_extensions: &["khr_external_fence"],
+ ..Default::default()
+ },
+ });
+ }
+
+ // VUID-VkExportFenceCreateInfo-handleTypes-01446
+ export_handle_types.validate_device(device)?;
+
+ // VUID-VkExportFenceCreateInfo-handleTypes-01446
+ for handle_type in export_handle_types.into_iter() {
+ let external_fence_properties = unsafe {
+ device
+ .physical_device()
+ .external_fence_properties_unchecked(ExternalFenceInfo::handle_type(
+ handle_type,
+ ))
+ };
+
+ if !external_fence_properties.exportable {
+ return Err(FenceError::HandleTypeNotExportable { handle_type });
+ }
+
+ if !external_fence_properties
+ .compatible_handle_types
+ .contains(export_handle_types)
+ {
+ return Err(FenceError::ExportHandleTypesNotCompatible);
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+ #[inline]
+ pub unsafe fn new_unchecked(
+ device: Arc<Device>,
+ create_info: FenceCreateInfo,
+ ) -> Result<Fence, VulkanError> {
+ let FenceCreateInfo {
+ signaled,
+ export_handle_types,
+ _ne: _,
+ } = create_info;
+
+ let mut flags = ash::vk::FenceCreateFlags::empty();
+
+ if signaled {
+ flags |= ash::vk::FenceCreateFlags::SIGNALED;
+ }
+
+ let mut create_info_vk = ash::vk::FenceCreateInfo {
+ flags,
+ ..Default::default()
+ };
+ let mut export_fence_create_info_vk = None;
+
+ if !export_handle_types.is_empty() {
+ let _ = export_fence_create_info_vk.insert(ash::vk::ExportFenceCreateInfo {
+ handle_types: export_handle_types.into(),
+ ..Default::default()
+ });
+ }
+
+ if let Some(info) = export_fence_create_info_vk.as_mut() {
+ info.p_next = create_info_vk.p_next;
+ create_info_vk.p_next = info as *const _ as *const _;
+ }
+
+ let handle = {
+ let fns = device.fns();
+ let mut output = MaybeUninit::uninit();
+ (fns.v1_0.create_fence)(
+ device.handle(),
+ &create_info_vk,
+ ptr::null(),
+ output.as_mut_ptr(),
+ )
+ .result()
+ .map_err(VulkanError::from)?;
+
+ output.assume_init()
+ };
+
+ Ok(Fence {
+ handle,
+ device,
+ id: Self::next_id(),
+ must_put_in_pool: false,
+ export_handle_types,
+ state: Mutex::new(FenceState {
+ is_signaled: signaled,
+ ..Default::default()
+ }),
+ })
+ }
+
/// Takes a fence from the vulkano-provided fence pool.
- /// If the pool is empty, a new fence will be allocated.
+ /// If the pool is empty, a new fence will be created.
/// Upon `drop`, the fence is put back into the pool.
///
/// For most applications, using the fence pool should be preferred,
/// in order to avoid creating new fences every frame.
- pub fn from_pool(device: D) -> Result<Fence<D>, OomError> {
- let maybe_raw_fence = device.fence_pool().lock().unwrap().pop();
- match maybe_raw_fence {
- Some(raw_fence) => {
+ #[inline]
+ pub fn from_pool(device: Arc<Device>) -> Result<Fence, FenceError> {
+ let handle = device.fence_pool().lock().pop();
+ let fence = match handle {
+ Some(handle) => {
unsafe {
// Make sure the fence isn't signaled
let fns = device.fns();
- check_errors(
- fns.v1_0
- .reset_fences(device.internal_object(), 1, &raw_fence),
- )?;
+ (fns.v1_0.reset_fences)(device.handle(), 1, &handle)
+ .result()
+ .map_err(VulkanError::from)?;
}
- Ok(Fence {
- fence: raw_fence,
- device: device,
- signaled: AtomicBool::new(false),
+
+ Fence {
+ handle,
+ device,
+ id: Self::next_id(),
must_put_in_pool: true,
- })
+ export_handle_types: ExternalFenceHandleTypes::empty(),
+ state: Mutex::new(Default::default()),
+ }
}
None => {
// Pool is empty, alloc new fence
- Fence::alloc_impl(device, false, true)
+ let mut fence = Fence::new(device, FenceCreateInfo::default())?;
+ fence.must_put_in_pool = true;
+ fence
}
- }
- }
+ };
- /// Builds a new fence.
- #[inline]
- pub fn alloc(device: D) -> Result<Fence<D>, OomError> {
- Fence::alloc_impl(device, false, false)
+ Ok(fence)
}
- /// Builds a new fence in signaled state.
+ /// Creates a new `Fence` from a raw object handle.
+ ///
+ /// # Safety
+ ///
+ /// - `handle` must be a valid Vulkan object handle created from `device`.
+ /// - `create_info` must match the info used to create the object.
#[inline]
- pub fn alloc_signaled(device: D) -> Result<Fence<D>, OomError> {
- Fence::alloc_impl(device, true, false)
- }
+ pub unsafe fn from_handle(
+ device: Arc<Device>,
+ handle: ash::vk::Fence,
+ create_info: FenceCreateInfo,
+ ) -> Fence {
+ let FenceCreateInfo {
+ signaled,
+ export_handle_types,
+ _ne: _,
+ } = create_info;
- fn alloc_impl(device: D, signaled: bool, must_put_in_pool: bool) -> Result<Fence<D>, OomError> {
- let fence = unsafe {
- let infos = ash::vk::FenceCreateInfo {
- flags: if signaled {
- ash::vk::FenceCreateFlags::SIGNALED
- } else {
- ash::vk::FenceCreateFlags::empty()
- },
+ Fence {
+ handle,
+ device,
+ id: Self::next_id(),
+ must_put_in_pool: false,
+ export_handle_types,
+ state: Mutex::new(FenceState {
+ is_signaled: signaled,
..Default::default()
- };
-
- let fns = device.fns();
- let mut output = MaybeUninit::uninit();
- check_errors(fns.v1_0.create_fence(
- device.internal_object(),
- &infos,
- ptr::null(),
- output.as_mut_ptr(),
- ))?;
- output.assume_init()
- };
-
- Ok(Fence {
- fence: fence,
- device: device,
- signaled: AtomicBool::new(signaled),
- must_put_in_pool: must_put_in_pool,
- })
+ }),
+ }
}
/// Returns true if the fence is signaled.
#[inline]
- pub fn ready(&self) -> Result<bool, OomError> {
- unsafe {
- if self.signaled.load(Ordering::Relaxed) {
- return Ok(true);
+ pub fn is_signaled(&self) -> Result<bool, OomError> {
+ let queue_to_signal = {
+ let mut state = self.state();
+
+ // If the fence is already signaled, or it's unsignaled but there's no queue that
+ // could signal it, return the currently known value.
+ if let Some(is_signaled) = state.is_signaled() {
+ return Ok(is_signaled);
}
- let fns = self.device.fns();
- let result = check_errors(
- fns.v1_0
- .get_fence_status(self.device.internal_object(), self.fence),
- )?;
+ // We must ask Vulkan for the state.
+ let result = unsafe {
+ let fns = self.device.fns();
+ (fns.v1_0.get_fence_status)(self.device.handle(), self.handle)
+ };
+
match result {
- Success::Success => {
- self.signaled.store(true, Ordering::Relaxed);
- Ok(true)
- }
- Success::NotReady => Ok(false),
- _ => unreachable!(),
+ ash::vk::Result::SUCCESS => unsafe { state.set_signaled() },
+ ash::vk::Result::NOT_READY => return Ok(false),
+ err => return Err(VulkanError::from(err).into()),
+ }
+ };
+
+ // If we have a queue that we need to signal our status to,
+ // do so now after the state lock is dropped, to avoid deadlocks.
+ if let Some(queue) = queue_to_signal {
+ unsafe {
+ queue.with(|mut q| q.fence_signaled(self));
}
}
+
+ Ok(true)
}
/// Waits until the fence is signaled, or at least until the timeout duration has elapsed.
@@ -156,281 +301,1356 @@ where
/// Returns `Ok` if the fence is now signaled. Returns `Err` if the timeout was reached instead.
///
/// If you pass a duration of 0, then the function will return without blocking.
- pub fn wait(&self, timeout: Option<Duration>) -> Result<(), FenceWaitError> {
- unsafe {
- if self.signaled.load(Ordering::Relaxed) {
+ pub fn wait(&self, timeout: Option<Duration>) -> Result<(), FenceError> {
+ let queue_to_signal = {
+ let mut state = self.state.lock();
+
+ // If the fence is already signaled, we don't need to wait.
+ if state.is_signaled().unwrap_or(false) {
return Ok(());
}
- let timeout_ns = if let Some(timeout) = timeout {
+ let timeout_ns = timeout.map_or(u64::MAX, |timeout| {
timeout
.as_secs()
.saturating_mul(1_000_000_000)
.saturating_add(timeout.subsec_nanos() as u64)
- } else {
- u64::MAX
+ });
+
+ let result = unsafe {
+ let fns = self.device.fns();
+ (fns.v1_0.wait_for_fences)(
+ self.device.handle(),
+ 1,
+ &self.handle,
+ ash::vk::TRUE,
+ timeout_ns,
+ )
};
- let fns = self.device.fns();
- let r = check_errors(fns.v1_0.wait_for_fences(
- self.device.internal_object(),
- 1,
- &self.fence,
- ash::vk::TRUE,
- timeout_ns,
- ))?;
-
- match r {
- Success::Success => {
- self.signaled.store(true, Ordering::Relaxed);
- Ok(())
- }
- Success::Timeout => Err(FenceWaitError::Timeout),
- _ => unreachable!(),
+ match result {
+ ash::vk::Result::SUCCESS => unsafe { state.set_signaled() },
+ ash::vk::Result::TIMEOUT => return Err(FenceError::Timeout),
+ err => return Err(VulkanError::from(err).into()),
+ }
+ };
+
+ // If we have a queue that we need to signal our status to,
+ // do so now after the state lock is dropped, to avoid deadlocks.
+ if let Some(queue) = queue_to_signal {
+ unsafe {
+ queue.with(|mut q| q.fence_signaled(self));
}
}
+
+ Ok(())
}
/// Waits for multiple fences at once.
///
- /// # Panic
+ /// # Panics
///
- /// Panics if not all fences belong to the same device.
- pub fn multi_wait<'a, I>(iter: I, timeout: Option<Duration>) -> Result<(), FenceWaitError>
- where
- I: IntoIterator<Item = &'a Fence<D>>,
- D: 'a,
- {
- let mut device: Option<&Device> = None;
+ /// - Panics if not all fences belong to the same device.
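+    ///
+    /// A minimal sketch (`fence_a` and `fence_b` are assumed to be existing
+    /// `Fence`s created from the same device):
+    ///
+    /// ```ignore
+    /// use vulkano::sync::fence::Fence;
+    /// use std::time::Duration;
+    ///
+    /// // Waits until *all* of the given fences are signaled, or fails with
+    /// // `FenceError::Timeout` once the timeout elapses.
+    /// Fence::multi_wait([&fence_a, &fence_b], Some(Duration::from_secs(1))).unwrap();
+    /// ```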
+ pub fn multi_wait<'a>(
+ fences: impl IntoIterator<Item = &'a Fence>,
+ timeout: Option<Duration>,
+ ) -> Result<(), FenceError> {
+ let fences: SmallVec<[_; 8]> = fences.into_iter().collect();
+ Self::validate_multi_wait(&fences, timeout)?;
- let fences: SmallVec<[ash::vk::Fence; 8]> = iter
- .into_iter()
- .filter_map(|fence| {
- match &mut device {
- dev @ &mut None => *dev = Some(&*fence.device),
- &mut Some(ref dev)
- if &**dev as *const Device == &*fence.device as *const Device => {}
- _ => panic!(
- "Tried to wait for multiple fences that didn't belong to the \
- same device"
- ),
- };
+ unsafe { Self::multi_wait_unchecked(fences, timeout) }
+ }
+
+ fn validate_multi_wait(
+ fences: &[&Fence],
+ _timeout: Option<Duration>,
+ ) -> Result<(), FenceError> {
+ if fences.is_empty() {
+ return Ok(());
+ }
+
+ let device = &fences[0].device;
+
+ for fence in fences {
+ // VUID-vkWaitForFences-pFences-parent
+ assert_eq!(device, &fence.device);
+ }
+
+ Ok(())
+ }
+
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+ pub unsafe fn multi_wait_unchecked<'a>(
+ fences: impl IntoIterator<Item = &'a Fence>,
+ timeout: Option<Duration>,
+ ) -> Result<(), FenceError> {
+ let queues_to_signal: SmallVec<[_; 8]> = {
+ let iter = fences.into_iter();
+ let mut fences_vk: SmallVec<[_; 8]> = SmallVec::new();
+ let mut fences: SmallVec<[_; 8]> = SmallVec::new();
+ let mut states: SmallVec<[_; 8]> = SmallVec::new();
+
+ for fence in iter {
+ let state = fence.state.lock();
- if fence.signaled.load(Ordering::Relaxed) {
- None
- } else {
- Some(fence.fence)
+ // Skip the fences that are already signaled.
+ if !state.is_signaled().unwrap_or(false) {
+ fences_vk.push(fence.handle);
+ fences.push(fence);
+ states.push(state);
}
- })
- .collect();
+ }
- let timeout_ns = if let Some(timeout) = timeout {
- timeout
- .as_secs()
- .saturating_mul(1_000_000_000)
- .saturating_add(timeout.subsec_nanos() as u64)
- } else {
- u64::MAX
- };
+ // VUID-vkWaitForFences-fenceCount-arraylength
+ // If there are no fences, or all the fences are signaled, we don't need to wait.
+ if fences_vk.is_empty() {
+ return Ok(());
+ }
- let r = if let Some(device) = device {
- unsafe {
+ let device = &fences[0].device;
+ let timeout_ns = timeout.map_or(u64::MAX, |timeout| {
+ timeout
+ .as_secs()
+ .saturating_mul(1_000_000_000)
+ .saturating_add(timeout.subsec_nanos() as u64)
+ });
+
+ let result = {
let fns = device.fns();
- check_errors(fns.v1_0.wait_for_fences(
- device.internal_object(),
- fences.len() as u32,
- fences.as_ptr(),
- ash::vk::TRUE,
+ (fns.v1_0.wait_for_fences)(
+ device.handle(),
+ fences_vk.len() as u32,
+ fences_vk.as_ptr(),
+ ash::vk::TRUE, // TODO: let the user choose false here?
timeout_ns,
- ))?
+ )
+ };
+
+ match result {
+ ash::vk::Result::SUCCESS => fences
+ .into_iter()
+ .zip(&mut states)
+ .filter_map(|(fence, state)| state.set_signaled().map(|state| (state, fence)))
+ .collect(),
+ ash::vk::Result::TIMEOUT => return Err(FenceError::Timeout),
+ err => return Err(VulkanError::from(err).into()),
}
- } else {
- return Ok(());
};
- match r {
- Success::Success => Ok(()),
- Success::Timeout => Err(FenceWaitError::Timeout),
- _ => unreachable!(),
+ // If we have queues that we need to signal our status to,
+ // do so now after the state locks are dropped, to avoid deadlocks.
+ for (queue, fence) in queues_to_signal {
+ queue.with(|mut q| q.fence_signaled(fence));
}
+
+ Ok(())
}
/// Resets the fence.
- // This function takes a `&mut self` because the Vulkan API requires that the fence be
- // externally synchronized.
+ ///
+ /// The fence must not be in use by a queue operation.
#[inline]
- pub fn reset(&mut self) -> Result<(), OomError> {
- unsafe {
- let fns = self.device.fns();
- check_errors(
- fns.v1_0
- .reset_fences(self.device.internal_object(), 1, &self.fence),
- )?;
- self.signaled.store(false, Ordering::Relaxed);
- Ok(())
+ pub fn reset(&self) -> Result<(), FenceError> {
+ let mut state = self.state.lock();
+ self.validate_reset(&state)?;
+
+ unsafe { Ok(self.reset_unchecked_locked(&mut state)?) }
+ }
+
+ fn validate_reset(&self, state: &FenceState) -> Result<(), FenceError> {
+ // VUID-vkResetFences-pFences-01123
+ if state.is_in_queue() {
+ return Err(FenceError::InQueue);
}
+
+ Ok(())
+ }
+
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+ #[inline]
+ pub unsafe fn reset_unchecked(&self) -> Result<(), VulkanError> {
+ let mut state = self.state.lock();
+
+ self.reset_unchecked_locked(&mut state)
+ }
+
+ unsafe fn reset_unchecked_locked(&self, state: &mut FenceState) -> Result<(), VulkanError> {
+ let fns = self.device.fns();
+ (fns.v1_0.reset_fences)(self.device.handle(), 1, &self.handle)
+ .result()
+ .map_err(VulkanError::from)?;
+
+ state.reset();
+
+ Ok(())
}
/// Resets multiple fences at once.
///
- /// # Panic
+ /// The fences must not be in use by a queue operation.
///
- /// - Panics if not all fences belong to the same device.
+ /// # Panics
///
- pub fn multi_reset<'a, I>(iter: I) -> Result<(), OomError>
- where
- I: IntoIterator<Item = &'a mut Fence<D>>,
- D: 'a,
- {
- let mut device: Option<&Device> = None;
-
- let fences: SmallVec<[ash::vk::Fence; 8]> = iter
+ /// - Panics if not all fences belong to the same device.
+ pub fn multi_reset<'a>(fences: impl IntoIterator<Item = &'a Fence>) -> Result<(), FenceError> {
+ let (fences, mut states): (SmallVec<[_; 8]>, SmallVec<[_; 8]>) = fences
.into_iter()
.map(|fence| {
- match &mut device {
- dev @ &mut None => *dev = Some(&*fence.device),
- &mut Some(ref dev)
- if &**dev as *const Device == &*fence.device as *const Device => {}
- _ => panic!(
- "Tried to reset multiple fences that didn't belong to the same \
- device"
- ),
- };
+ let state = fence.state.lock();
+ (fence, state)
+ })
+ .unzip();
+ Self::validate_multi_reset(&fences, &states)?;
+
+ unsafe { Ok(Self::multi_reset_unchecked_locked(&fences, &mut states)?) }
+ }
+
+ fn validate_multi_reset(
+ fences: &[&Fence],
+ states: &[MutexGuard<'_, FenceState>],
+ ) -> Result<(), FenceError> {
+ if fences.is_empty() {
+ return Ok(());
+ }
+
+ let device = &fences[0].device;
+
+ for (fence, state) in fences.iter().zip(states) {
+ // VUID-vkResetFences-pFences-parent
+ assert_eq!(device, &fence.device);
+
+ // VUID-vkResetFences-pFences-01123
+ if state.is_in_queue() {
+ return Err(FenceError::InQueue);
+ }
+ }
+
+ Ok(())
+ }
- fence.signaled.store(false, Ordering::Relaxed);
- fence.fence
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+ pub unsafe fn multi_reset_unchecked<'a>(
+ fences: impl IntoIterator<Item = &'a Fence>,
+ ) -> Result<(), VulkanError> {
+ let (fences, mut states): (SmallVec<[_; 8]>, SmallVec<[_; 8]>) = fences
+ .into_iter()
+ .map(|fence| {
+ let state = fence.state.lock();
+ (fence, state)
})
- .collect();
+ .unzip();
- if let Some(device) = device {
- unsafe {
- let fns = device.fns();
- check_errors(fns.v1_0.reset_fences(
- device.internal_object(),
- fences.len() as u32,
- fences.as_ptr(),
- ))?;
+ Self::multi_reset_unchecked_locked(&fences, &mut states)
+ }
+
+ unsafe fn multi_reset_unchecked_locked(
+ fences: &[&Fence],
+ states: &mut [MutexGuard<'_, FenceState>],
+ ) -> Result<(), VulkanError> {
+ if fences.is_empty() {
+ return Ok(());
+ }
+
+ let device = &fences[0].device;
+ let fences_vk: SmallVec<[_; 8]> = fences.iter().map(|fence| fence.handle).collect();
+
+ let fns = device.fns();
+ (fns.v1_0.reset_fences)(device.handle(), fences_vk.len() as u32, fences_vk.as_ptr())
+ .result()
+ .map_err(VulkanError::from)?;
+
+ for state in states {
+ state.reset();
+ }
+
+ Ok(())
+ }
+
+ /// Exports the fence into a POSIX file descriptor. The caller owns the returned `File`.
+ ///
+ /// The [`khr_external_fence_fd`](crate::device::DeviceExtensions::khr_external_fence_fd)
+ /// extension must be enabled on the device.
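+    ///
+    /// A minimal sketch (`fence` is assumed to have been created with
+    /// `export_handle_types` including `SYNC_FD`, and to be signaled or have a
+    /// pending signal operation, as required for copy-transference handles):
+    ///
+    /// ```ignore
+    /// use vulkano::sync::fence::ExternalFenceHandleType;
+    ///
+    /// let file = fence.export_fd(ExternalFenceHandleType::SyncFd).unwrap();
+    /// ```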
+ #[cfg(unix)]
+ #[inline]
+ pub fn export_fd(&self, handle_type: ExternalFenceHandleType) -> Result<File, FenceError> {
+ let mut state = self.state.lock();
+ self.validate_export_fd(handle_type, &state)?;
+
+ unsafe { Ok(self.export_fd_unchecked_locked(handle_type, &mut state)?) }
+ }
+
+ #[cfg(unix)]
+ fn validate_export_fd(
+ &self,
+ handle_type: ExternalFenceHandleType,
+ state: &FenceState,
+ ) -> Result<(), FenceError> {
+ if !self.device.enabled_extensions().khr_external_fence_fd {
+ return Err(FenceError::RequirementNotMet {
+ required_for: "`Fence::export_fd`",
+ requires_one_of: RequiresOneOf {
+ device_extensions: &["khr_external_fence_fd"],
+ ..Default::default()
+ },
+ });
+ }
+
+ // VUID-VkFenceGetFdInfoKHR-handleType-parameter
+ handle_type.validate_device(&self.device)?;
+
+ // VUID-VkFenceGetFdInfoKHR-handleType-01453
+ if !self.export_handle_types.intersects(handle_type.into()) {
+ return Err(FenceError::HandleTypeNotEnabled);
+ }
+
+ // VUID-VkFenceGetFdInfoKHR-handleType-01454
+ if handle_type.has_copy_transference()
+ && !(state.is_signaled().unwrap_or(false) || state.is_in_queue())
+ {
+ return Err(FenceError::HandleTypeCopyNotSignaled);
+ }
+
+ // VUID-VkFenceGetFdInfoKHR-fence-01455
+ if let Some(imported_handle_type) = state.current_import {
+ match imported_handle_type {
+ ImportType::SwapchainAcquire => {
+ return Err(FenceError::ImportedForSwapchainAcquire)
+ }
+ ImportType::ExternalFence(imported_handle_type) => {
+ let external_fence_properties = unsafe {
+ self.device
+ .physical_device()
+ .external_fence_properties_unchecked(ExternalFenceInfo::handle_type(
+ handle_type,
+ ))
+ };
+
+ if !external_fence_properties
+ .export_from_imported_handle_types
+ .intersects(imported_handle_type.into())
+ {
+ return Err(FenceError::ExportFromImportedNotSupported {
+ imported_handle_type,
+ });
+ }
+ }
}
}
+
+ // VUID-VkFenceGetFdInfoKHR-handleType-01456
+ if !matches!(
+ handle_type,
+ ExternalFenceHandleType::OpaqueFd | ExternalFenceHandleType::SyncFd
+ ) {
+ return Err(FenceError::HandleTypeNotFd);
+ }
+
Ok(())
}
-}
-unsafe impl DeviceOwned for Fence {
+ #[cfg(unix)]
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
#[inline]
- fn device(&self) -> &Arc<Device> {
- &self.device
+ pub unsafe fn export_fd_unchecked(
+ &self,
+ handle_type: ExternalFenceHandleType,
+ ) -> Result<File, VulkanError> {
+ let mut state = self.state.lock();
+ self.export_fd_unchecked_locked(handle_type, &mut state)
+ }
+
+ #[cfg(unix)]
+ unsafe fn export_fd_unchecked_locked(
+ &self,
+ handle_type: ExternalFenceHandleType,
+ state: &mut FenceState,
+ ) -> Result<File, VulkanError> {
+ use std::os::unix::io::FromRawFd;
+
+ let info_vk = ash::vk::FenceGetFdInfoKHR {
+ fence: self.handle,
+ handle_type: handle_type.into(),
+ ..Default::default()
+ };
+
+ let mut output = MaybeUninit::uninit();
+ let fns = self.device.fns();
+ (fns.khr_external_fence_fd.get_fence_fd_khr)(
+ self.device.handle(),
+ &info_vk,
+ output.as_mut_ptr(),
+ )
+ .result()
+ .map_err(VulkanError::from)?;
+
+ state.export(handle_type);
+
+ Ok(File::from_raw_fd(output.assume_init()))
+ }
+
+ /// Exports the fence into a Win32 handle.
+ ///
+ /// The [`khr_external_fence_win32`](crate::device::DeviceExtensions::khr_external_fence_win32)
+ /// extension must be enabled on the device.
+ #[cfg(windows)]
+ #[inline]
+ pub fn export_win32_handle(
+ &self,
+ handle_type: ExternalFenceHandleType,
+ ) -> Result<*mut std::ffi::c_void, FenceError> {
+ let mut state = self.state.lock();
+ self.validate_export_win32_handle(handle_type, &state)?;
+
+ unsafe { Ok(self.export_win32_handle_unchecked_locked(handle_type, &mut state)?) }
+ }
+
+ #[cfg(windows)]
+ fn validate_export_win32_handle(
+ &self,
+ handle_type: ExternalFenceHandleType,
+ state: &FenceState,
+ ) -> Result<(), FenceError> {
+ if !self.device.enabled_extensions().khr_external_fence_win32 {
+ return Err(FenceError::RequirementNotMet {
+ required_for: "`Fence::export_win32_handle`",
+ requires_one_of: RequiresOneOf {
+ device_extensions: &["khr_external_fence_win32"],
+ ..Default::default()
+ },
+ });
+ }
+
+ // VUID-VkFenceGetWin32HandleInfoKHR-handleType-parameter
+ handle_type.validate_device(&self.device)?;
+
+ // VUID-VkFenceGetWin32HandleInfoKHR-handleType-01448
+ if !self.export_handle_types.intersects(handle_type.into()) {
+ return Err(FenceError::HandleTypeNotEnabled);
+ }
+
+ // VUID-VkFenceGetWin32HandleInfoKHR-handleType-01449
+ if matches!(handle_type, ExternalFenceHandleType::OpaqueWin32)
+ && state.is_exported(handle_type)
+ {
+ return Err(FenceError::AlreadyExported);
+ }
+
+ // VUID-VkFenceGetWin32HandleInfoKHR-handleType-01451
+ if handle_type.has_copy_transference()
+ && !(state.is_signaled().unwrap_or(false) || state.is_in_queue())
+ {
+ return Err(FenceError::HandleTypeCopyNotSignaled);
+ }
+
+ // VUID-VkFenceGetWin32HandleInfoKHR-fence-01450
+ if let Some(imported_handle_type) = state.current_import {
+ match imported_handle_type {
+ ImportType::SwapchainAcquire => {
+ return Err(FenceError::ImportedForSwapchainAcquire)
+ }
+ ImportType::ExternalFence(imported_handle_type) => {
+ let external_fence_properties = unsafe {
+ self.device
+ .physical_device()
+ .external_fence_properties_unchecked(ExternalFenceInfo::handle_type(
+ handle_type,
+ ))
+ };
+
+ if !external_fence_properties
+ .export_from_imported_handle_types
+ .intersects(imported_handle_type.into())
+ {
+ return Err(FenceError::ExportFromImportedNotSupported {
+ imported_handle_type,
+ });
+ }
+ }
+ }
+ }
+
+ // VUID-VkFenceGetWin32HandleInfoKHR-handleType-01452
+ if !matches!(
+ handle_type,
+ ExternalFenceHandleType::OpaqueWin32 | ExternalFenceHandleType::OpaqueWin32Kmt
+ ) {
+ return Err(FenceError::HandleTypeNotWin32);
+ }
+
+ Ok(())
+ }
+
+ #[cfg(windows)]
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+ #[inline]
+ pub unsafe fn export_win32_handle_unchecked(
+ &self,
+ handle_type: ExternalFenceHandleType,
+ ) -> Result<*mut std::ffi::c_void, VulkanError> {
+ let mut state = self.state.lock();
+ self.export_win32_handle_unchecked_locked(handle_type, &mut state)
+ }
+
+ #[cfg(windows)]
+ unsafe fn export_win32_handle_unchecked_locked(
+ &self,
+ handle_type: ExternalFenceHandleType,
+ state: &mut FenceState,
+ ) -> Result<*mut std::ffi::c_void, VulkanError> {
+ let info_vk = ash::vk::FenceGetWin32HandleInfoKHR {
+ fence: self.handle,
+ handle_type: handle_type.into(),
+ ..Default::default()
+ };
+
+ let mut output = MaybeUninit::uninit();
+ let fns = self.device.fns();
+ (fns.khr_external_fence_win32.get_fence_win32_handle_khr)(
+ self.device.handle(),
+ &info_vk,
+ output.as_mut_ptr(),
+ )
+ .result()
+ .map_err(VulkanError::from)?;
+
+ state.export(handle_type);
+
+ Ok(output.assume_init())
+ }
+
+ /// Imports a fence from a POSIX file descriptor.
+ ///
+ /// The [`khr_external_fence_fd`](crate::device::DeviceExtensions::khr_external_fence_fd)
+ /// extension must be enabled on the device.
+ ///
+ /// # Safety
+ ///
+    /// - If `handle_type` in `import_fence_fd_info` is `ExternalFenceHandleType::OpaqueFd`,
+ /// then `file` must represent a fence that was exported from Vulkan or a compatible API,
+ /// with a driver and device UUID equal to those of the device that owns `self`.
+ #[cfg(unix)]
+ #[inline]
+ pub unsafe fn import_fd(
+ &self,
+ import_fence_fd_info: ImportFenceFdInfo,
+ ) -> Result<(), FenceError> {
+ let mut state = self.state.lock();
+ self.validate_import_fd(&import_fence_fd_info, &state)?;
+
+ Ok(self.import_fd_unchecked_locked(import_fence_fd_info, &mut state)?)
}
-}
-unsafe impl<D> VulkanObject for Fence<D>
-where
- D: SafeDeref<Target = Device>,
-{
- type Object = ash::vk::Fence;
+ #[cfg(unix)]
+ fn validate_import_fd(
+ &self,
+ import_fence_fd_info: &ImportFenceFdInfo,
+ state: &FenceState,
+ ) -> Result<(), FenceError> {
+ if !self.device.enabled_extensions().khr_external_fence_fd {
+ return Err(FenceError::RequirementNotMet {
+ required_for: "`Fence::import_fd`",
+ requires_one_of: RequiresOneOf {
+ device_extensions: &["khr_external_fence_fd"],
+ ..Default::default()
+ },
+ });
+ }
+
+ // VUID-vkImportFenceFdKHR-fence-01463
+ if state.is_in_queue() {
+ return Err(FenceError::InQueue);
+ }
+
+ let &ImportFenceFdInfo {
+ flags,
+ handle_type,
+ file: _,
+ _ne: _,
+ } = import_fence_fd_info;
+
+ // VUID-VkImportFenceFdInfoKHR-flags-parameter
+ flags.validate_device(&self.device)?;
+
+ // VUID-VkImportFenceFdInfoKHR-handleType-parameter
+ handle_type.validate_device(&self.device)?;
+
+ // VUID-VkImportFenceFdInfoKHR-handleType-01464
+ if !matches!(
+ handle_type,
+ ExternalFenceHandleType::OpaqueFd | ExternalFenceHandleType::SyncFd
+ ) {
+ return Err(FenceError::HandleTypeNotFd);
+ }
+
+ // VUID-VkImportFenceFdInfoKHR-fd-01541
+ // Can't validate, therefore unsafe
+
+ // VUID-VkImportFenceFdInfoKHR-handleType-07306
+ if handle_type.has_copy_transference() && !flags.intersects(FenceImportFlags::TEMPORARY) {
+ return Err(FenceError::HandletypeCopyNotTemporary);
+ }
+ Ok(())
+ }
+
+ #[cfg(unix)]
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
#[inline]
- fn internal_object(&self) -> ash::vk::Fence {
- self.fence
+ pub unsafe fn import_fd_unchecked(
+ &self,
+ import_fence_fd_info: ImportFenceFdInfo,
+ ) -> Result<(), VulkanError> {
+ let mut state = self.state.lock();
+ self.import_fd_unchecked_locked(import_fence_fd_info, &mut state)
+ }
+
+ #[cfg(unix)]
+ unsafe fn import_fd_unchecked_locked(
+ &self,
+ import_fence_fd_info: ImportFenceFdInfo,
+ state: &mut FenceState,
+ ) -> Result<(), VulkanError> {
+ use std::os::unix::io::IntoRawFd;
+
+ let ImportFenceFdInfo {
+ flags,
+ handle_type,
+ file,
+ _ne: _,
+ } = import_fence_fd_info;
+
+ let info_vk = ash::vk::ImportFenceFdInfoKHR {
+ fence: self.handle,
+ flags: flags.into(),
+ handle_type: handle_type.into(),
+ fd: file.map_or(-1, |file| file.into_raw_fd()),
+ ..Default::default()
+ };
+
+ let fns = self.device.fns();
+ (fns.khr_external_fence_fd.import_fence_fd_khr)(self.device.handle(), &info_vk)
+ .result()
+ .map_err(VulkanError::from)?;
+
+ state.import(handle_type, flags.intersects(FenceImportFlags::TEMPORARY));
+
+ Ok(())
+ }
+
+ /// Imports a fence from a Win32 handle.
+ ///
+ /// The [`khr_external_fence_win32`](crate::device::DeviceExtensions::khr_external_fence_win32)
+ /// extension must be enabled on the device.
+ ///
+ /// # Safety
+ ///
+ /// - In `import_fence_win32_handle_info`, `handle` must represent a fence that was exported
+ /// from Vulkan or a compatible API, with a driver and device UUID equal to those of the
+ /// device that owns `self`.
+ #[cfg(windows)]
+ #[inline]
+ pub unsafe fn import_win32_handle(
+ &self,
+ import_fence_win32_handle_info: ImportFenceWin32HandleInfo,
+ ) -> Result<(), FenceError> {
+ let mut state = self.state.lock();
+ self.validate_import_win32_handle(&import_fence_win32_handle_info, &state)?;
+
+ Ok(self.import_win32_handle_unchecked_locked(import_fence_win32_handle_info, &mut state)?)
+ }
+
+ #[cfg(windows)]
+ fn validate_import_win32_handle(
+ &self,
+ import_fence_win32_handle_info: &ImportFenceWin32HandleInfo,
+ state: &FenceState,
+ ) -> Result<(), FenceError> {
+ if !self.device.enabled_extensions().khr_external_fence_win32 {
+ return Err(FenceError::RequirementNotMet {
+ required_for: "`Fence::import_win32_handle`",
+ requires_one_of: RequiresOneOf {
+ device_extensions: &["khr_external_fence_win32"],
+ ..Default::default()
+ },
+ });
+ }
+
+ // VUID-vkImportFenceWin32HandleKHR-fence-04448
+ if state.is_in_queue() {
+ return Err(FenceError::InQueue);
+ }
+
+ let &ImportFenceWin32HandleInfo {
+ flags,
+ handle_type,
+ handle: _,
+ _ne: _,
+ } = import_fence_win32_handle_info;
+
+ // VUID-VkImportFenceWin32HandleInfoKHR-flags-parameter
+ flags.validate_device(&self.device)?;
+
+ // VUID-VkImportFenceWin32HandleInfoKHR-handleType-01457
+ handle_type.validate_device(&self.device)?;
+
+ // VUID-VkImportFenceWin32HandleInfoKHR-handleType-01457
+ if !matches!(
+ handle_type,
+ ExternalFenceHandleType::OpaqueWin32 | ExternalFenceHandleType::OpaqueWin32Kmt
+ ) {
+ return Err(FenceError::HandleTypeNotWin32);
+ }
+
+ // VUID-VkImportFenceWin32HandleInfoKHR-handle-01539
+ // Can't validate, therefore unsafe
+
+ // VUID?
+ if handle_type.has_copy_transference() && !flags.intersects(FenceImportFlags::TEMPORARY) {
+ return Err(FenceError::HandletypeCopyNotTemporary);
+ }
+
+ Ok(())
+ }
+
+ #[cfg(windows)]
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+ #[inline]
+ pub unsafe fn import_win32_handle_unchecked(
+ &self,
+ import_fence_win32_handle_info: ImportFenceWin32HandleInfo,
+ ) -> Result<(), VulkanError> {
+ let mut state = self.state.lock();
+ self.import_win32_handle_unchecked_locked(import_fence_win32_handle_info, &mut state)
+ }
+
+ #[cfg(windows)]
+ unsafe fn import_win32_handle_unchecked_locked(
+ &self,
+ import_fence_win32_handle_info: ImportFenceWin32HandleInfo,
+ state: &mut FenceState,
+ ) -> Result<(), VulkanError> {
+ let ImportFenceWin32HandleInfo {
+ flags,
+ handle_type,
+ handle,
+ _ne: _,
+ } = import_fence_win32_handle_info;
+
+ let info_vk = ash::vk::ImportFenceWin32HandleInfoKHR {
+ fence: self.handle,
+ flags: flags.into(),
+ handle_type: handle_type.into(),
+ handle,
+ name: ptr::null(), // TODO: support?
+ ..Default::default()
+ };
+
+ let fns = self.device.fns();
+ (fns.khr_external_fence_win32.import_fence_win32_handle_khr)(
+ self.device.handle(),
+ &info_vk,
+ )
+ .result()
+ .map_err(VulkanError::from)?;
+
+ state.import(handle_type, flags.intersects(FenceImportFlags::TEMPORARY));
+
+ Ok(())
+ }
+
+ pub(crate) fn state(&self) -> MutexGuard<'_, FenceState> {
+ self.state.lock()
+ }
+
+ // Shared by Fence and FenceSignalFuture
+ pub(crate) fn poll_impl(&self, cx: &mut Context<'_>) -> Poll<Result<(), OomError>> {
+ // Vulkan only allows polling of the fence status, so we have to use a spin future.
+        // This is still better than blocking in async applications, since a smart-enough async
+        // engine can choose to run other tasks between probes of this one.
+
+ // Check if we are done without blocking
+ match self.is_signaled() {
+ Err(e) => return Poll::Ready(Err(e)),
+ Ok(signalled) => {
+ if signalled {
+ return Poll::Ready(Ok(()));
+ }
+ }
+ }
+
+ // Otherwise spin
+ cx.waker().wake_by_ref();
+ Poll::Pending
}
}
-impl<D> Drop for Fence<D>
-where
- D: SafeDeref<Target = Device>,
-{
+impl Drop for Fence {
#[inline]
fn drop(&mut self) {
unsafe {
if self.must_put_in_pool {
- let raw_fence = self.fence;
- self.device.fence_pool().lock().unwrap().push(raw_fence);
+ let raw_fence = self.handle;
+ self.device.fence_pool().lock().push(raw_fence);
} else {
let fns = self.device.fns();
- fns.v1_0
- .destroy_fence(self.device.internal_object(), self.fence, ptr::null());
+ (fns.v1_0.destroy_fence)(self.device.handle(), self.handle, ptr::null());
}
}
}
}
-/// Error that can be returned when waiting on a fence.
+impl Future for Fence {
+ type Output = Result<(), OomError>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.poll_impl(cx)
+ }
+}
+
+unsafe impl VulkanObject for Fence {
+ type Handle = ash::vk::Fence;
+
+ #[inline]
+ fn handle(&self) -> Self::Handle {
+ self.handle
+ }
+}
+
+unsafe impl DeviceOwned for Fence {
+ #[inline]
+ fn device(&self) -> &Arc<Device> {
+ &self.device
+ }
+}
+
+impl_id_counter!(Fence);
+
+#[derive(Debug, Default)]
+pub(crate) struct FenceState {
+ is_signaled: bool,
+ pending_signal: Option<Weak<Queue>>,
+
+ reference_exported: bool,
+ exported_handle_types: ExternalFenceHandleTypes,
+ current_import: Option<ImportType>,
+ permanent_import: Option<ExternalFenceHandleType>,
+}
+
+impl FenceState {
+ /// If the fence is not in a queue and has no external references, returns the current status.
+ #[inline]
+ fn is_signaled(&self) -> Option<bool> {
+ // If either of these is true, we can't be certain of the status.
+ if self.is_in_queue() || self.has_external_reference() {
+ None
+ } else {
+ Some(self.is_signaled)
+ }
+ }
+
+ #[inline]
+ fn is_in_queue(&self) -> bool {
+ self.pending_signal.is_some()
+ }
+
+ /// Returns whether there are any potential external references to the fence payload.
+ /// That is, the fence has been exported by reference transference, or imported.
+ #[inline]
+ fn has_external_reference(&self) -> bool {
+ self.reference_exported || self.current_import.is_some()
+ }
+
+ #[allow(dead_code)]
+ #[inline]
+ fn is_exported(&self, handle_type: ExternalFenceHandleType) -> bool {
+ self.exported_handle_types.intersects(handle_type.into())
+ }
+
+ #[inline]
+ pub(crate) unsafe fn add_queue_signal(&mut self, queue: &Arc<Queue>) {
+ self.pending_signal = Some(Arc::downgrade(queue));
+ }
+
+ /// Called when a fence first discovers that it is signaled.
+ /// Returns the queue that should be informed about it.
+ #[inline]
+ unsafe fn set_signaled(&mut self) -> Option<Arc<Queue>> {
+ self.is_signaled = true;
+
+ // Fences with external references can't be used to determine queue completion.
+ if self.has_external_reference() {
+ self.pending_signal = None;
+ None
+ } else {
+ self.pending_signal.take().and_then(|queue| queue.upgrade())
+ }
+ }
+
+ /// Called when a queue is unlocking resources.
+ #[inline]
+ pub(crate) unsafe fn set_signal_finished(&mut self) {
+ self.is_signaled = true;
+ self.pending_signal = None;
+ }
+
+ #[inline]
+ unsafe fn reset(&mut self) {
+ debug_assert!(!self.is_in_queue());
+ self.current_import = self.permanent_import.map(Into::into);
+ self.is_signaled = false;
+ }
+
+ #[allow(dead_code)]
+ #[inline]
+ unsafe fn export(&mut self, handle_type: ExternalFenceHandleType) {
+ self.exported_handle_types |= handle_type.into();
+
+ if handle_type.has_copy_transference() {
+ self.reset();
+ } else {
+ self.reference_exported = true;
+ }
+ }
+
+ #[allow(dead_code)]
+ #[inline]
+ unsafe fn import(&mut self, handle_type: ExternalFenceHandleType, temporary: bool) {
+ debug_assert!(!self.is_in_queue());
+ self.current_import = Some(handle_type.into());
+
+ if !temporary {
+ self.permanent_import = Some(handle_type);
+ }
+ }
+
+ #[inline]
+ pub(crate) unsafe fn import_swapchain_acquire(&mut self) {
+ debug_assert!(!self.is_in_queue());
+ self.current_import = Some(ImportType::SwapchainAcquire);
+ }
+}
+
+#[derive(Clone, Copy, Debug)]
+enum ImportType {
+ SwapchainAcquire,
+ ExternalFence(ExternalFenceHandleType),
+}
+
+impl From<ExternalFenceHandleType> for ImportType {
+ #[inline]
+ fn from(handle_type: ExternalFenceHandleType) -> Self {
+ Self::ExternalFence(handle_type)
+ }
+}
+
+/// Parameters to create a new `Fence`.
+#[derive(Clone, Debug)]
+pub struct FenceCreateInfo {
+ /// Whether the fence should be created in the signaled state.
+ ///
+ /// The default value is `false`.
+ pub signaled: bool,
+
+ /// The handle types that can be exported from the fence.
+ pub export_handle_types: ExternalFenceHandleTypes,
+
+ pub _ne: crate::NonExhaustive,
+}
+
+impl Default for FenceCreateInfo {
+ #[inline]
+ fn default() -> Self {
+ Self {
+ signaled: false,
+ export_handle_types: ExternalFenceHandleTypes::empty(),
+ _ne: crate::NonExhaustive(()),
+ }
+ }
+}
+
+vulkan_bitflags_enum! {
+ #[non_exhaustive]
+ /// A set of [`ExternalFenceHandleType`] values.
+ ExternalFenceHandleTypes,
+
+ /// The handle type used to export or import fences to/from an external source.
+ ExternalFenceHandleType impl {
+ /// Returns whether the given handle type has *copy transference* rather than *reference
+ /// transference*.
+ ///
+ /// Imports of handles with copy transference must always be temporary. Exports of such
+ /// handles must only occur if the fence is already signaled, or if there is a fence signal
+ /// operation pending in a queue.
+ #[inline]
+ pub fn has_copy_transference(self) -> bool {
+ // As defined by
+ // https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-fence-handletypes-win32
+ // https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-fence-handletypes-fd
+ matches!(self, Self::SyncFd)
+ }
+ },
+
+ = ExternalFenceHandleTypeFlags(u32);
+
+ /// A POSIX file descriptor handle that is only usable with Vulkan and compatible APIs.
+ ///
+ /// This handle type has *reference transference*.
+ OPAQUE_FD, OpaqueFd = OPAQUE_FD,
+
+ /// A Windows NT handle that is only usable with Vulkan and compatible APIs.
+ ///
+ /// This handle type has *reference transference*.
+ OPAQUE_WIN32, OpaqueWin32 = OPAQUE_WIN32,
+
+ /// A Windows global share handle that is only usable with Vulkan and compatible APIs.
+ ///
+ /// This handle type has *reference transference*.
+ OPAQUE_WIN32_KMT, OpaqueWin32Kmt = OPAQUE_WIN32_KMT,
+
+ /// A POSIX file descriptor handle to a Linux Sync File or Android Fence object.
+ ///
+ /// This handle type has *copy transference*.
+ SYNC_FD, SyncFd = SYNC_FD,
+}
+
+vulkan_bitflags! {
+ #[non_exhaustive]
+
+ /// Additional parameters for a fence payload import.
+ FenceImportFlags = FenceImportFlags(u32);
+
+ /// The fence payload will be imported only temporarily, regardless of the permanence of the
+ /// imported handle type.
+ TEMPORARY = TEMPORARY,
+}
+
+#[cfg(unix)]
+#[derive(Debug)]
+pub struct ImportFenceFdInfo {
+ /// Additional parameters for the import operation.
+ ///
+ /// If `handle_type` has *copy transference*, this must include the `temporary` flag.
+ ///
+ /// The default value is [`FenceImportFlags::empty()`].
+ pub flags: FenceImportFlags,
+
+ /// The handle type of `file`.
+ ///
+ /// There is no default value.
+ pub handle_type: ExternalFenceHandleType,
+
+ /// The file to import the fence from.
+ ///
+ /// If `handle_type` is `ExternalFenceHandleType::SyncFd`, then `file` can be `None`.
+ /// Instead of an imported file descriptor, a dummy file descriptor `-1` is used,
+ /// which represents a fence that is always signaled.
+ ///
+ /// The default value is `None`, which must be overridden if `handle_type` is not
+ /// `ExternalFenceHandleType::SyncFd`.
+ pub file: Option<File>,
+
+ pub _ne: crate::NonExhaustive,
+}
+
+#[cfg(unix)]
+impl ImportFenceFdInfo {
+ /// Returns an `ImportFenceFdInfo` with the specified `handle_type`.
+ #[inline]
+ pub fn handle_type(handle_type: ExternalFenceHandleType) -> Self {
+ Self {
+ flags: FenceImportFlags::empty(),
+ handle_type,
+ file: None,
+ _ne: crate::NonExhaustive(()),
+ }
+ }
+}
+
+#[cfg(windows)]
+#[derive(Debug)]
+pub struct ImportFenceWin32HandleInfo {
+ /// Additional parameters for the import operation.
+ ///
+ /// If `handle_type` has *copy transference*, this must include the `temporary` flag.
+ ///
+ /// The default value is [`FenceImportFlags::empty()`].
+ pub flags: FenceImportFlags,
+
+ /// The handle type of `handle`.
+ ///
+ /// There is no default value.
+ pub handle_type: ExternalFenceHandleType,
+
+    /// The handle to import the fence from.
+ ///
+ /// The default value is `null`, which must be overridden.
+ pub handle: *mut std::ffi::c_void,
+
+ pub _ne: crate::NonExhaustive,
+}
+
+#[cfg(windows)]
+impl ImportFenceWin32HandleInfo {
+ /// Returns an `ImportFenceWin32HandleInfo` with the specified `handle_type`.
+ #[inline]
+ pub fn handle_type(handle_type: ExternalFenceHandleType) -> Self {
+ Self {
+ flags: FenceImportFlags::empty(),
+ handle_type,
+ handle: ptr::null_mut(),
+ _ne: crate::NonExhaustive(()),
+ }
+ }
+}
+
+/// The fence configuration to query in
+/// [`PhysicalDevice::external_fence_properties`](crate::device::physical::PhysicalDevice::external_fence_properties).
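+///
+/// A minimal query sketch (assuming `physical_device` is in scope; error handling is elided and
+/// the exact return type is left to the documentation of that method):
+///
+/// ```ignore
+/// let info = ExternalFenceInfo::handle_type(ExternalFenceHandleType::OpaqueFd);
+/// let props = physical_device.external_fence_properties(info).unwrap();
+/// if props.exportable {
+///     // The driver can export fences of this handle type.
+/// }
+/// ```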
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct ExternalFenceInfo {
+ /// The external handle type that will be used with the fence.
+ pub handle_type: ExternalFenceHandleType,
+
+ pub _ne: crate::NonExhaustive,
+}
+
+impl ExternalFenceInfo {
+ /// Returns an `ExternalFenceInfo` with the specified `handle_type`.
+ #[inline]
+ pub fn handle_type(handle_type: ExternalFenceHandleType) -> Self {
+ Self {
+ handle_type,
+ _ne: crate::NonExhaustive(()),
+ }
+ }
+}
+
+/// The properties for exporting or importing external handles, when a fence is created
+/// with a specific configuration.
+#[derive(Clone, Debug)]
+#[non_exhaustive]
+pub struct ExternalFenceProperties {
+ /// Whether a handle can be exported to an external source with the queried
+ /// external handle type.
+ pub exportable: bool,
+
+ /// Whether a handle can be imported from an external source with the queried
+ /// external handle type.
+ pub importable: bool,
+
+ /// Which external handle types can be re-exported after the queried external handle type has
+ /// been imported.
+ pub export_from_imported_handle_types: ExternalFenceHandleTypes,
+
+ /// Which external handle types can be enabled along with the queried external handle type
+ /// when creating the fence.
+ pub compatible_handle_types: ExternalFenceHandleTypes,
+}
+
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum FenceWaitError {
- /// Not enough memory to complete the wait.
+pub enum FenceError {
+ /// Not enough memory available.
OomError(OomError),
+ /// The device has been lost.
+ DeviceLost,
+
/// The specified timeout wasn't long enough.
Timeout,
- /// The device has been lost.
- DeviceLostError,
+ /// A requirement for the operation was not met.
+ RequirementNotMet {
+ required_for: &'static str,
+ requires_one_of: RequiresOneOf,
+ },
+
+ /// The provided handle type does not permit more than one export,
+ /// and a handle of this type was already exported previously.
+ AlreadyExported,
+
+ /// The provided handle type cannot be exported from the current import handle type.
+ ExportFromImportedNotSupported {
+ imported_handle_type: ExternalFenceHandleType,
+ },
+
+ /// One of the export handle types is not compatible with the other provided handles.
+ ExportHandleTypesNotCompatible,
+
+ /// A handle type with copy transference was provided, but the fence is not signaled and there
+ /// is no pending queue operation that will signal it.
+ HandleTypeCopyNotSignaled,
+
+ /// A handle type with copy transference was provided,
+ /// but the `temporary` import flag was not set.
+ HandleTypeCopyNotTemporary,
+
+ /// The provided export handle type was not set in `export_handle_types` when creating the
+ /// fence.
+ HandleTypeNotEnabled,
+
+ /// Exporting is not supported for the provided handle type.
+ HandleTypeNotExportable {
+ handle_type: ExternalFenceHandleType,
+ },
+
+ /// The provided handle type is not a POSIX file descriptor handle.
+ HandleTypeNotFd,
+
+ /// The provided handle type is not a Win32 handle.
+ HandleTypeNotWin32,
+
+ /// The fence currently has a temporary import for a swapchain acquire operation.
+ ImportedForSwapchainAcquire,
+
+ /// The fence is currently in use by a queue.
+ InQueue,
}
-impl error::Error for FenceWaitError {
- #[inline]
- fn source(&self) -> Option<&(dyn error::Error + 'static)> {
- match *self {
- FenceWaitError::OomError(ref err) => Some(err),
+impl Error for FenceError {
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ match self {
+ Self::OomError(err) => Some(err),
_ => None,
}
}
}
-impl fmt::Display for FenceWaitError {
- #[inline]
- fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
- write!(
- fmt,
- "{}",
- match *self {
- FenceWaitError::OomError(_) => "no memory available",
- FenceWaitError::Timeout => "the timeout has been reached",
- FenceWaitError::DeviceLostError => "the device was lost",
+impl Display for FenceError {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
+ match self {
+ Self::OomError(_) => write!(f, "not enough memory available"),
+ Self::DeviceLost => write!(f, "the device was lost"),
+ Self::Timeout => write!(f, "the timeout has been reached"),
+ Self::RequirementNotMet {
+ required_for,
+ requires_one_of,
+ } => write!(
+ f,
+ "a requirement was not met for: {}; requires one of: {}",
+ required_for, requires_one_of,
+ ),
+
+ Self::AlreadyExported => write!(
+ f,
+ "the provided handle type does not permit more than one export, and a handle of \
+ this type was already exported previously",
+ ),
+ Self::ExportFromImportedNotSupported {
+ imported_handle_type,
+ } => write!(
+ f,
+ "the provided handle type cannot be exported from the current imported handle type \
+ {:?}",
+ imported_handle_type,
+ ),
+ Self::ExportHandleTypesNotCompatible => write!(
+ f,
+ "one of the export handle types is not compatible with the other provided handles",
+ ),
+ Self::HandleTypeCopyNotSignaled => write!(
+ f,
+ "a handle type with copy transference was provided, but the fence is not signaled \
+ and there is no pending queue operation that will signal it",
+ ),
+ Self::HandleTypeCopyNotTemporary => write!(
+ f,
+ "a handle type with copy transference was provided, but the `temporary` \
+ import flag was not set",
+ ),
+ Self::HandleTypeNotEnabled => write!(
+ f,
+ "the provided export handle type was not set in `export_handle_types` when \
+ creating the fence",
+ ),
+ Self::HandleTypeNotExportable { handle_type } => write!(
+ f,
+ "exporting is not supported for handles of type {:?}",
+ handle_type,
+ ),
+ Self::HandleTypeNotFd => write!(
+ f,
+ "the provided handle type is not a POSIX file descriptor handle",
+ ),
+ Self::HandleTypeNotWin32 => {
+ write!(f, "the provided handle type is not a Win32 handle")
}
- )
+ Self::ImportedForSwapchainAcquire => write!(
+ f,
+ "the fence currently has a temporary import for a swapchain acquire operation",
+ ),
+ Self::InQueue => write!(f, "the fence is currently in use by a queue"),
+ }
}
}
-impl From<Error> for FenceWaitError {
- #[inline]
- fn from(err: Error) -> FenceWaitError {
+impl From<VulkanError> for FenceError {
+ fn from(err: VulkanError) -> Self {
match err {
- Error::OutOfHostMemory => FenceWaitError::OomError(From::from(err)),
- Error::OutOfDeviceMemory => FenceWaitError::OomError(From::from(err)),
- Error::DeviceLost => FenceWaitError::DeviceLostError,
- _ => panic!("Unexpected error value: {}", err as i32),
+ e @ VulkanError::OutOfHostMemory | e @ VulkanError::OutOfDeviceMemory => {
+ Self::OomError(e.into())
+ }
+ VulkanError::DeviceLost => Self::DeviceLost,
+ _ => panic!("unexpected error: {:?}", err),
+ }
+ }
+}
+
+impl From<OomError> for FenceError {
+ fn from(err: OomError) -> Self {
+ Self::OomError(err)
+ }
+}
+
+impl From<RequirementNotMet> for FenceError {
+ fn from(err: RequirementNotMet) -> Self {
+ Self::RequirementNotMet {
+ required_for: err.required_for,
+ requires_one_of: err.requires_one_of,
}
}
}
#[cfg(test)]
mod tests {
- use crate::sync::Fence;
- use crate::VulkanObject;
+ use crate::{
+ sync::fence::{Fence, FenceCreateInfo},
+ VulkanObject,
+ };
use std::time::Duration;
#[test]
fn fence_create() {
let (device, _) = gfx_dev_and_queue!();
- let fence = Fence::alloc(device.clone()).unwrap();
- assert!(!fence.ready().unwrap());
+ let fence = Fence::new(device, Default::default()).unwrap();
+ assert!(!fence.is_signaled().unwrap());
}
#[test]
fn fence_create_signaled() {
let (device, _) = gfx_dev_and_queue!();
- let fence = Fence::alloc_signaled(device.clone()).unwrap();
- assert!(fence.ready().unwrap());
+ let fence = Fence::new(
+ device,
+ FenceCreateInfo {
+ signaled: true,
+ ..Default::default()
+ },
+ )
+ .unwrap();
+ assert!(fence.is_signaled().unwrap());
}
#[test]
fn fence_signaled_wait() {
let (device, _) = gfx_dev_and_queue!();
- let fence = Fence::alloc_signaled(device.clone()).unwrap();
+ let fence = Fence::new(
+ device,
+ FenceCreateInfo {
+ signaled: true,
+ ..Default::default()
+ },
+ )
+ .unwrap();
fence.wait(Some(Duration::new(0, 10))).unwrap();
}
@@ -438,9 +1658,16 @@ mod tests {
fn fence_reset() {
let (device, _) = gfx_dev_and_queue!();
- let mut fence = Fence::alloc_signaled(device.clone()).unwrap();
+ let fence = Fence::new(
+ device,
+ FenceCreateInfo {
+ signaled: true,
+ ..Default::default()
+ },
+ )
+ .unwrap();
fence.reset().unwrap();
- assert!(!fence.ready().unwrap());
+ assert!(!fence.is_signaled().unwrap());
}
#[test]
@@ -448,54 +1675,72 @@ mod tests {
let (device1, _) = gfx_dev_and_queue!();
let (device2, _) = gfx_dev_and_queue!();
- assert_should_panic!(
- "Tried to wait for multiple fences that didn't belong \
- to the same device",
- {
- let fence1 = Fence::alloc_signaled(device1.clone()).unwrap();
- let fence2 = Fence::alloc_signaled(device2.clone()).unwrap();
+ assert_should_panic!({
+ let fence1 = Fence::new(
+ device1.clone(),
+ FenceCreateInfo {
+ signaled: true,
+ ..Default::default()
+ },
+ )
+ .unwrap();
+ let fence2 = Fence::new(
+ device2.clone(),
+ FenceCreateInfo {
+ signaled: true,
+ ..Default::default()
+ },
+ )
+ .unwrap();
- let _ = Fence::multi_wait(
- [&fence1, &fence2].iter().cloned(),
- Some(Duration::new(0, 10)),
- );
- }
- );
+ let _ = Fence::multi_wait(
+ [&fence1, &fence2].iter().cloned(),
+ Some(Duration::new(0, 10)),
+ );
+ });
}
#[test]
fn multireset_different_devices() {
- use std::iter::once;
-
let (device1, _) = gfx_dev_and_queue!();
let (device2, _) = gfx_dev_and_queue!();
- assert_should_panic!(
- "Tried to reset multiple fences that didn't belong \
- to the same device",
- {
- let mut fence1 = Fence::alloc_signaled(device1.clone()).unwrap();
- let mut fence2 = Fence::alloc_signaled(device2.clone()).unwrap();
+ assert_should_panic!({
+ let fence1 = Fence::new(
+ device1.clone(),
+ FenceCreateInfo {
+ signaled: true,
+ ..Default::default()
+ },
+ )
+ .unwrap();
+ let fence2 = Fence::new(
+ device2.clone(),
+ FenceCreateInfo {
+ signaled: true,
+ ..Default::default()
+ },
+ )
+ .unwrap();
- let _ = Fence::multi_reset(once(&mut fence1).chain(once(&mut fence2)));
- }
- );
+ let _ = Fence::multi_reset([&fence1, &fence2]);
+ });
}
#[test]
fn fence_pool() {
let (device, _) = gfx_dev_and_queue!();
- assert_eq!(device.fence_pool().lock().unwrap().len(), 0);
+ assert_eq!(device.fence_pool().lock().len(), 0);
let fence1_internal_obj = {
let fence = Fence::from_pool(device.clone()).unwrap();
- assert_eq!(device.fence_pool().lock().unwrap().len(), 0);
- fence.internal_object()
+ assert_eq!(device.fence_pool().lock().len(), 0);
+ fence.handle()
};
- assert_eq!(device.fence_pool().lock().unwrap().len(), 1);
+ assert_eq!(device.fence_pool().lock().len(), 1);
let fence2 = Fence::from_pool(device.clone()).unwrap();
- assert_eq!(device.fence_pool().lock().unwrap().len(), 0);
- assert_eq!(fence2.internal_object(), fence1_internal_obj);
+ assert_eq!(device.fence_pool().lock().len(), 0);
+ assert_eq!(fence2.handle(), fence1_internal_obj);
}
}
diff --git a/src/sync/future/fence_signal.rs b/src/sync/future/fence_signal.rs
index c2f5338..7f71d88 100644
--- a/src/sync/future/fence_signal.rs
+++ b/src/sync/future/fence_signal.rs
@@ -7,29 +7,33 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
-use std::mem;
-use std::sync::Arc;
-use std::sync::Mutex;
-use std::sync::MutexGuard;
-use std::time::Duration;
-
-use crate::buffer::BufferAccess;
-use crate::command_buffer::submit::SubmitAnyBuilder;
-use crate::command_buffer::submit::SubmitCommandBufferBuilder;
-use crate::device::Device;
-use crate::device::DeviceOwned;
-use crate::device::Queue;
-use crate::image::ImageAccess;
-use crate::image::ImageLayout;
-use crate::sync::AccessCheckError;
-use crate::sync::AccessFlags;
-use crate::sync::Fence;
-use crate::sync::FlushError;
-use crate::sync::GpuFuture;
-use crate::sync::PipelineStages;
+use super::{AccessCheckError, FlushError, GpuFuture};
+use crate::{
+ buffer::Buffer,
+ command_buffer::{SemaphoreSubmitInfo, SubmitInfo},
+ device::{Device, DeviceOwned, Queue, QueueFlags},
+ image::{sys::Image, ImageLayout},
+ swapchain::Swapchain,
+ sync::{
+ fence::Fence,
+ future::{AccessError, SubmitAnyBuilder},
+ PipelineStages,
+ },
+ DeviceSize, OomError,
+};
+use parking_lot::{Mutex, MutexGuard};
+use std::{
+ future::Future,
+ mem::replace,
+ ops::Range,
+ pin::Pin,
+ sync::Arc,
+ task::{Context, Poll},
+ thread,
+ time::Duration,
+};
/// Builds a new fence signal future.
-#[inline]
pub fn then_signal_fence<F>(future: F, behavior: FenceSignalFutureBehavior) -> FenceSignalFuture<F>
where
F: GpuFuture,
@@ -38,11 +42,11 @@ where
assert!(future.queue().is_some()); // TODO: document
- let fence = Fence::from_pool(device.clone()).unwrap();
+ let fence = Arc::new(Fence::from_pool(device.clone()).unwrap());
FenceSignalFuture {
- device: device,
+ device,
state: Mutex::new(FenceSignalFutureState::Pending(future, fence)),
- behavior: behavior,
+ behavior,
}
}
@@ -52,6 +56,7 @@ pub enum FenceSignalFutureBehavior {
/// Continue execution on the same queue.
Continue,
/// Wait for the fence to be signalled before submitting any further operation.
+ #[allow(dead_code)] // TODO: why is this never constructed?
Block {
/// How long to block the current thread.
timeout: Option<Duration>,
@@ -63,6 +68,11 @@ pub enum FenceSignalFutureBehavior {
/// Contrary to most other future types, it is possible to block the current thread until the event
/// happens. This is done by calling the `wait()` function.
///
+/// This can also be done through Rust's async system by simply `.await`ing this object. Note
+/// though that (due to the design of the Vulkan fence API) awaiting will spin to check the fence,
+/// rather than blocking in the driver. Therefore, for long-running work, blocking with `wait()`
+/// may be less CPU-intensive (depending on the driver's implementation).
+///
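+/// A minimal sketch of the async usage (assuming an async runtime is driving the task and the
+/// future has already been flushed; illustrative only):
+///
+/// ```ignore
+/// async fn wait_for_gpu(future: FenceSignalFuture<Box<dyn GpuFuture>>) {
+///     // Polling checks the fence until the GPU signals it.
+///     future.await.expect("failed to wait for the fence");
+/// }
+/// ```
+///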
/// Also note that the `GpuFuture` trait is implemented on `Arc<FenceSignalFuture<_>>`.
/// This means that you can put this future in an `Arc` and keep a copy of it somewhere in order
/// to know when the execution reached that point.
@@ -71,7 +81,7 @@ pub enum FenceSignalFutureBehavior {
/// use std::sync::Arc;
/// use vulkano::sync::GpuFuture;
///
-/// # let future: Box<GpuFuture> = return;
+/// # let future: Box<dyn GpuFuture> = return;
/// // Assuming you have a chain of operations, like this:
/// // let future = ...
/// // .then_execute(foo)
@@ -106,17 +116,17 @@ where
// been dropped).
enum FenceSignalFutureState<F> {
// Newly-created. Not submitted yet.
- Pending(F, Fence),
+ Pending(F, Arc<Fence>),
// Partially submitted to the queue. Only happens in situations where submitting requires two
// steps, and when the first step succeeded while the second step failed.
//
// Note that if there's ever a submit operation that needs three steps we will need to rework
// this code, as it was designed for two-step operations only.
- PartiallyFlushed(F, Fence),
+ PartiallyFlushed(F, Arc<Fence>),
// Submitted to the queue.
- Flushed(F, Fence),
+ Flushed(F, Arc<Fence>),
// The submission is finished. The previous future and the fence have been cleaned.
Cleaned,
@@ -129,6 +139,19 @@ impl<F> FenceSignalFuture<F>
where
F: GpuFuture,
{
+ /// Returns true if the fence is signaled by the GPU.
+ pub fn is_signaled(&self) -> Result<bool, OomError> {
+ let state = self.state.lock();
+
+ match &*state {
+ FenceSignalFutureState::Pending(_, fence)
+ | FenceSignalFutureState::PartiallyFlushed(_, fence)
+ | FenceSignalFutureState::Flushed(_, fence) => fence.is_signaled(),
+ FenceSignalFutureState::Cleaned => Ok(true),
+ FenceSignalFutureState::Poisoned => unreachable!(),
+ }
+ }
+
/// Blocks the current thread until the fence is signaled by the GPU. Performs a flush if
/// necessary.
///
@@ -138,11 +161,11 @@ where
/// If the wait is successful, this function also cleans any resource locked by previous
/// submissions.
pub fn wait(&self, timeout: Option<Duration>) -> Result<(), FlushError> {
- let mut state = self.state.lock().unwrap();
+ let mut state = self.state.lock();
self.flush_impl(&mut state)?;
- match mem::replace(&mut *state, FenceSignalFutureState::Cleaned) {
+ match replace(&mut *state, FenceSignalFutureState::Cleaned) {
FenceSignalFutureState::Flushed(previous, fence) => {
fence.wait(timeout)?;
unsafe {
@@ -162,48 +185,43 @@ where
{
// Implementation of `cleanup_finished`, but takes a `&self` instead of a `&mut self`.
// This is an external function so that we can also call it from an `Arc<FenceSignalFuture>`.
- #[inline]
fn cleanup_finished_impl(&self) {
- let mut state = self.state.lock().unwrap();
+ let mut state = self.state.lock();
match *state {
FenceSignalFutureState::Flushed(ref mut prev, ref fence) => {
match fence.wait(Some(Duration::from_secs(0))) {
- Ok(()) => unsafe { prev.signal_finished() },
+ Ok(()) => {
+ unsafe { prev.signal_finished() }
+ *state = FenceSignalFutureState::Cleaned;
+ }
Err(_) => {
prev.cleanup_finished();
- return;
}
}
}
FenceSignalFutureState::Pending(ref mut prev, _) => {
prev.cleanup_finished();
- return;
}
FenceSignalFutureState::PartiallyFlushed(ref mut prev, _) => {
prev.cleanup_finished();
- return;
}
- _ => return,
- };
-
- // This code can only be reached if we're already flushed and waiting on the fence
- // succeeded.
- *state = FenceSignalFutureState::Cleaned;
+ _ => (),
+ }
}
// Implementation of `flush`. You must lock the state and pass the mutex guard here.
fn flush_impl(
&self,
- state: &mut MutexGuard<FenceSignalFutureState<F>>,
+ state: &mut MutexGuard<'_, FenceSignalFutureState<F>>,
) -> Result<(), FlushError> {
unsafe {
// In this function we temporarily replace the current state with `Poisoned` at the
// beginning, and we take care to always put back a value into `state` before
// returning (even in case of error).
- let old_state = mem::replace(&mut **state, FenceSignalFutureState::Poisoned);
+ let old_state = replace(&mut **state, FenceSignalFutureState::Poisoned);
- let (previous, fence, partially_flushed) = match old_state {
+ let (previous, new_fence, partially_flushed) = match old_state {
FenceSignalFutureState::Pending(prev, fence) => (prev, fence, false),
FenceSignalFutureState::PartiallyFlushed(prev, fence) => (prev, fence, true),
other => {
@@ -215,7 +233,7 @@ where
};
// TODO: meh for unwrap
- let queue = previous.queue().unwrap().clone();
+ let queue = previous.queue().unwrap();
// There are three possible outcomes for the flush operation: success, partial success
// in which case `result` will contain `Err(OutcomeErr::Partial)`, or total failure
@@ -227,51 +245,107 @@ where
let result = match previous.build_submission()? {
SubmitAnyBuilder::Empty => {
debug_assert!(!partially_flushed);
- let mut b = SubmitCommandBufferBuilder::new();
- b.set_fence_signal(&fence);
- b.submit(&queue).map_err(|err| OutcomeErr::Full(err.into()))
+
+ queue
+ .with(|mut q| {
+ q.submit_unchecked([Default::default()], Some(new_fence.clone()))
+ })
+ .map_err(|err| OutcomeErr::Full(err.into()))
}
- SubmitAnyBuilder::SemaphoresWait(sem) => {
+ SubmitAnyBuilder::SemaphoresWait(semaphores) => {
debug_assert!(!partially_flushed);
- let b: SubmitCommandBufferBuilder = sem.into();
- debug_assert!(!b.has_fence());
- b.submit(&queue).map_err(|err| OutcomeErr::Full(err.into()))
+
+ queue
+ .with(|mut q| {
+ q.submit_unchecked(
+ [SubmitInfo {
+ wait_semaphores: semaphores
+ .into_iter()
+ .map(|semaphore| {
+ SemaphoreSubmitInfo {
+ // TODO: correct stages ; hard
+ stages: PipelineStages::ALL_COMMANDS,
+ ..SemaphoreSubmitInfo::semaphore(semaphore)
+ }
+ })
+ .collect(),
+ ..Default::default()
+ }],
+ None,
+ )
+ })
+ .map_err(|err| OutcomeErr::Full(err.into()))
}
- SubmitAnyBuilder::CommandBuffer(mut cb_builder) => {
+ SubmitAnyBuilder::CommandBuffer(submit_info, fence) => {
debug_assert!(!partially_flushed);
// The assert below could technically be a debug assertion as it is part of the
// safety contract of the trait. However it is easy to get this wrong if you
// write a custom implementation, and if so the consequences would be
// disastrous and hard to debug. Therefore we prefer to just use a regular
// assertion.
- assert!(!cb_builder.has_fence());
- cb_builder.set_fence_signal(&fence);
- cb_builder
- .submit(&queue)
- .map_err(|err| OutcomeErr::Full(err.into()))
+ assert!(fence.is_none());
+
+ queue
+ .with(|mut q| {
+ q.submit_with_future(
+ submit_info,
+ Some(new_fence.clone()),
+ &previous,
+ &queue,
+ )
+ })
+ .map_err(OutcomeErr::Full)
}
- SubmitAnyBuilder::BindSparse(mut sparse) => {
+ SubmitAnyBuilder::BindSparse(bind_infos, fence) => {
debug_assert!(!partially_flushed);
// Same remark as `CommandBuffer`.
- assert!(!sparse.has_fence());
- sparse.set_fence_signal(&fence);
- sparse
- .submit(&queue)
+ assert!(fence.is_none());
+ debug_assert!(queue.device().physical_device().queue_family_properties()
+ [queue.queue_family_index() as usize]
+ .queue_flags
+ .intersects(QueueFlags::SPARSE_BINDING));
+
+ queue
+ .with(|mut q| q.bind_sparse_unchecked(bind_infos, Some(new_fence.clone())))
.map_err(|err| OutcomeErr::Full(err.into()))
}
- SubmitAnyBuilder::QueuePresent(present) => {
+ SubmitAnyBuilder::QueuePresent(present_info) => {
let intermediary_result = if partially_flushed {
Ok(())
} else {
- present.submit(&queue)
+ // VUID-VkPresentIdKHR-presentIds-04999
+ for swapchain_info in &present_info.swapchain_infos {
+ if swapchain_info.present_id.map_or(false, |present_id| {
+ !swapchain_info.swapchain.try_claim_present_id(present_id)
+ }) {
+ return Err(FlushError::PresentIdLessThanOrEqual);
+ }
+
+ match previous.check_swapchain_image_acquired(
+ &swapchain_info.swapchain,
+ swapchain_info.image_index,
+ true,
+ ) {
+ Ok(_) => (),
+ Err(AccessCheckError::Unknown) => {
+ return Err(AccessError::SwapchainImageNotAcquired.into())
+ }
+ Err(AccessCheckError::Denied(e)) => return Err(e.into()),
+ }
+ }
+
+ queue
+ .with(|mut q| q.present_unchecked(present_info))?
+ .map(|r| r.map(|_| ()))
+ .fold(Ok(()), Result::and)
};
+
match intermediary_result {
- Ok(()) => {
- let mut b = SubmitCommandBufferBuilder::new();
- b.set_fence_signal(&fence);
- b.submit(&queue)
- .map_err(|err| OutcomeErr::Partial(err.into()))
- }
+ Ok(()) => queue
+ .with(|mut q| {
+ q.submit_unchecked([Default::default()], Some(new_fence.clone()))
+ })
+ .map_err(|err| OutcomeErr::Partial(err.into())),
Err(err) => Err(OutcomeErr::Full(err.into())),
}
}
@@ -280,15 +354,15 @@ where
// Restore the state before returning.
match result {
Ok(()) => {
- **state = FenceSignalFutureState::Flushed(previous, fence);
+ **state = FenceSignalFutureState::Flushed(previous, new_fence);
Ok(())
}
Err(OutcomeErr::Partial(err)) => {
- **state = FenceSignalFutureState::PartiallyFlushed(previous, fence);
+ **state = FenceSignalFutureState::PartiallyFlushed(previous, new_fence);
Err(err)
}
Err(OutcomeErr::Full(err)) => {
- **state = FenceSignalFutureState::Pending(previous, fence);
+ **state = FenceSignalFutureState::Pending(previous, new_fence);
Err(err)
}
}
@@ -296,13 +370,32 @@ where
}
}
+impl<F> Future for FenceSignalFuture<F>
+where
+ F: GpuFuture,
+{
+ type Output = Result<(), OomError>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ // Implement through fence
+ let state = self.state.lock();
+
+ match &*state {
+ FenceSignalFutureState::Pending(_, fence)
+ | FenceSignalFutureState::PartiallyFlushed(_, fence)
+ | FenceSignalFutureState::Flushed(_, fence) => fence.poll_impl(cx),
+ FenceSignalFutureState::Cleaned => Poll::Ready(Ok(())),
+ FenceSignalFutureState::Poisoned => unreachable!(),
+ }
+ }
+}
+
impl<F> FenceSignalFutureState<F> {
- #[inline]
fn get_prev(&self) -> Option<&F> {
- match *self {
- FenceSignalFutureState::Pending(ref prev, _) => Some(prev),
- FenceSignalFutureState::PartiallyFlushed(ref prev, _) => Some(prev),
- FenceSignalFutureState::Flushed(ref prev, _) => Some(prev),
+ match self {
+ FenceSignalFutureState::Pending(prev, _) => Some(prev),
+ FenceSignalFutureState::PartiallyFlushed(prev, _) => Some(prev),
+ FenceSignalFutureState::Flushed(prev, _) => Some(prev),
FenceSignalFutureState::Cleaned => None,
FenceSignalFutureState::Poisoned => None,
}
@@ -313,18 +406,16 @@ unsafe impl<F> GpuFuture for FenceSignalFuture<F>
where
F: GpuFuture,
{
- #[inline]
fn cleanup_finished(&mut self) {
self.cleanup_finished_impl()
}
- #[inline]
unsafe fn build_submission(&self) -> Result<SubmitAnyBuilder, FlushError> {
- let mut state = self.state.lock().unwrap();
+ let mut state = self.state.lock();
self.flush_impl(&mut state)?;
- match *state {
- FenceSignalFutureState::Flushed(_, ref fence) => match self.behavior {
+ match &*state {
+ FenceSignalFutureState::Flushed(_, fence) => match self.behavior {
FenceSignalFutureBehavior::Block { timeout } => {
fence.wait(timeout)?;
}
@@ -338,15 +429,13 @@ where
Ok(SubmitAnyBuilder::Empty)
}
- #[inline]
fn flush(&self) -> Result<(), FlushError> {
- let mut state = self.state.lock().unwrap();
+ let mut state = self.state.lock();
self.flush_impl(&mut state)
}
- #[inline]
unsafe fn signal_finished(&self) {
- let state = self.state.lock().unwrap();
+ let state = self.state.lock();
match *state {
FenceSignalFutureState::Flushed(ref prev, _) => {
prev.signal_finished();
@@ -356,24 +445,18 @@ where
}
}
- #[inline]
fn queue_change_allowed(&self) -> bool {
match self.behavior {
FenceSignalFutureBehavior::Continue => {
- let state = self.state.lock().unwrap();
- if state.get_prev().is_some() {
- false
- } else {
- true
- }
+ let state = self.state.lock();
+ state.get_prev().is_none()
}
FenceSignalFutureBehavior::Block { .. } => true,
}
}
- #[inline]
fn queue(&self) -> Option<Arc<Queue>> {
- let state = self.state.lock().unwrap();
+ let state = self.state.lock();
if let Some(prev) = state.get_prev() {
prev.queue()
} else {
@@ -381,32 +464,46 @@ where
}
}
- #[inline]
fn check_buffer_access(
&self,
- buffer: &dyn BufferAccess,
+ buffer: &Buffer,
+ range: Range<DeviceSize>,
exclusive: bool,
queue: &Queue,
- ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError> {
- let state = self.state.lock().unwrap();
+ ) -> Result<(), AccessCheckError> {
+ let state = self.state.lock();
if let Some(previous) = state.get_prev() {
- previous.check_buffer_access(buffer, exclusive, queue)
+ previous.check_buffer_access(buffer, range, exclusive, queue)
} else {
Err(AccessCheckError::Unknown)
}
}
- #[inline]
fn check_image_access(
&self,
- image: &dyn ImageAccess,
- layout: ImageLayout,
+ image: &Image,
+ range: Range<DeviceSize>,
exclusive: bool,
+ expected_layout: ImageLayout,
queue: &Queue,
- ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError> {
- let state = self.state.lock().unwrap();
+ ) -> Result<(), AccessCheckError> {
+ let state = self.state.lock();
if let Some(previous) = state.get_prev() {
- previous.check_image_access(image, layout, exclusive, queue)
+ previous.check_image_access(image, range, exclusive, expected_layout, queue)
+ } else {
+ Err(AccessCheckError::Unknown)
+ }
+ }
+
+ #[inline]
+ fn check_swapchain_image_acquired(
+ &self,
+ swapchain: &Swapchain,
+ image_index: u32,
+ _before: bool,
+ ) -> Result<(), AccessCheckError> {
+ if let Some(previous) = self.state.lock().get_prev() {
+ previous.check_swapchain_image_acquired(swapchain, image_index, false)
} else {
Err(AccessCheckError::Unknown)
}
@@ -417,7 +514,6 @@ unsafe impl<F> DeviceOwned for FenceSignalFuture<F>
where
F: GpuFuture,
{
- #[inline]
fn device(&self) -> &Arc<Device> {
&self.device
}
@@ -428,12 +524,16 @@ where
F: GpuFuture,
{
fn drop(&mut self) {
- let mut state = self.state.lock().unwrap();
+ if thread::panicking() {
+ return;
+ }
+
+ let mut state = self.state.lock();
// We ignore any possible error while submitting for now. Problems are handled below.
let _ = self.flush_impl(&mut state);
- match mem::replace(&mut *state, FenceSignalFutureState::Cleaned) {
+ match replace(&mut *state, FenceSignalFutureState::Cleaned) {
FenceSignalFutureState::Flushed(previous, fence) => {
// This is a normal situation. Submitting worked.
// TODO: handle errors?
@@ -461,56 +561,60 @@ unsafe impl<F> GpuFuture for Arc<FenceSignalFuture<F>>
where
F: GpuFuture,
{
- #[inline]
fn cleanup_finished(&mut self) {
self.cleanup_finished_impl()
}
- #[inline]
unsafe fn build_submission(&self) -> Result<SubmitAnyBuilder, FlushError> {
// Note that this is sound because we always return `SubmitAnyBuilder::Empty`. See the
// documentation of `build_submission`.
(**self).build_submission()
}
- #[inline]
fn flush(&self) -> Result<(), FlushError> {
(**self).flush()
}
- #[inline]
unsafe fn signal_finished(&self) {
(**self).signal_finished()
}
- #[inline]
fn queue_change_allowed(&self) -> bool {
(**self).queue_change_allowed()
}
- #[inline]
fn queue(&self) -> Option<Arc<Queue>> {
(**self).queue()
}
- #[inline]
fn check_buffer_access(
&self,
- buffer: &dyn BufferAccess,
+ buffer: &Buffer,
+ range: Range<DeviceSize>,
exclusive: bool,
queue: &Queue,
- ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError> {
- (**self).check_buffer_access(buffer, exclusive, queue)
+ ) -> Result<(), AccessCheckError> {
+ (**self).check_buffer_access(buffer, range, exclusive, queue)
}
- #[inline]
fn check_image_access(
&self,
- image: &dyn ImageAccess,
- layout: ImageLayout,
+ image: &Image,
+ range: Range<DeviceSize>,
exclusive: bool,
+ expected_layout: ImageLayout,
queue: &Queue,
- ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError> {
- (**self).check_image_access(image, layout, exclusive, queue)
+ ) -> Result<(), AccessCheckError> {
+ (**self).check_image_access(image, range, exclusive, expected_layout, queue)
+ }
+
+ #[inline]
+ fn check_swapchain_image_acquired(
+ &self,
+ swapchain: &Swapchain,
+ image_index: u32,
+ before: bool,
+ ) -> Result<(), AccessCheckError> {
+ (**self).check_swapchain_image_acquired(swapchain, image_index, before)
}
}
diff --git a/src/sync/future/join.rs b/src/sync/future/join.rs
index c45a764..6ead696 100644
--- a/src/sync/future/join.rs
+++ b/src/sync/future/join.rs
@@ -7,44 +7,30 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
-use std::sync::Arc;
-
-use crate::buffer::BufferAccess;
-use crate::command_buffer::submit::SubmitAnyBuilder;
-use crate::device::Device;
-use crate::device::DeviceOwned;
-use crate::device::Queue;
-use crate::image::ImageAccess;
-use crate::image::ImageLayout;
-use crate::sync::AccessCheckError;
-use crate::sync::AccessFlags;
-use crate::sync::FlushError;
-use crate::sync::GpuFuture;
-use crate::sync::PipelineStages;
-
-use crate::VulkanObject;
+use super::{AccessCheckError, FlushError, GpuFuture, SubmitAnyBuilder};
+use crate::{
+ buffer::Buffer,
+ device::{Device, DeviceOwned, Queue},
+ image::{sys::Image, ImageLayout},
+ swapchain::Swapchain,
+ DeviceSize, VulkanObject,
+};
+use std::{ops::Range, sync::Arc};
/// Joins two futures together.
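+///
+/// A minimal sketch (assuming `a` and `b` are futures from the same device; illustrative only):
+///
+/// ```ignore
+/// // `joined` represents the moment when both `a` and `b` have happened.
+/// let joined = a.join(b);
+/// ```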
// TODO: handle errors
-#[inline]
pub fn join<F, S>(first: F, second: S) -> JoinFuture<F, S>
where
F: GpuFuture,
S: GpuFuture,
{
- assert_eq!(
- first.device().internal_object(),
- second.device().internal_object()
- );
+ assert_eq!(first.device().handle(), second.device().handle());
if !first.queue_change_allowed() && !second.queue_change_allowed() {
- assert!(first.queue().unwrap().is_same(&second.queue().unwrap()));
+ assert!(first.queue().unwrap() == second.queue().unwrap());
}
- JoinFuture {
- first: first,
- second: second,
- }
+ JoinFuture { first, second }
}
/// Two futures joined into one.
@@ -59,13 +45,9 @@ where
A: DeviceOwned,
B: DeviceOwned,
{
- #[inline]
fn device(&self) -> &Arc<Device> {
let device = self.first.device();
- debug_assert_eq!(
- self.second.device().internal_object(),
- device.internal_object()
- );
+ debug_assert_eq!(self.second.device().handle(), device.handle());
device
}
}
@@ -75,22 +57,20 @@ where
A: GpuFuture,
B: GpuFuture,
{
- #[inline]
fn cleanup_finished(&mut self) {
self.first.cleanup_finished();
self.second.cleanup_finished();
}
- #[inline]
fn flush(&self) -> Result<(), FlushError> {
// Since each future remembers whether it has been flushed, there's no safety issue here
// if we call this function multiple times.
self.first.flush()?;
self.second.flush()?;
+
Ok(())
}
- #[inline]
unsafe fn build_submission(&self) -> Result<SubmitAnyBuilder, FlushError> {
// TODO: review this function
let first = self.first.build_submission()?;
@@ -102,90 +82,106 @@ where
(SubmitAnyBuilder::Empty, b) => b,
(a, SubmitAnyBuilder::Empty) => a,
(SubmitAnyBuilder::SemaphoresWait(mut a), SubmitAnyBuilder::SemaphoresWait(b)) => {
- a.merge(b);
+ a.extend(b);
SubmitAnyBuilder::SemaphoresWait(a)
}
- (SubmitAnyBuilder::SemaphoresWait(a), SubmitAnyBuilder::CommandBuffer(b)) => {
+ (SubmitAnyBuilder::SemaphoresWait(a), SubmitAnyBuilder::CommandBuffer(_, _)) => {
self.second.flush()?;
SubmitAnyBuilder::SemaphoresWait(a)
}
- (SubmitAnyBuilder::CommandBuffer(a), SubmitAnyBuilder::SemaphoresWait(b)) => {
+ (SubmitAnyBuilder::CommandBuffer(_, _), SubmitAnyBuilder::SemaphoresWait(b)) => {
self.first.flush()?;
SubmitAnyBuilder::SemaphoresWait(b)
}
- (SubmitAnyBuilder::SemaphoresWait(a), SubmitAnyBuilder::QueuePresent(b)) => {
+ (SubmitAnyBuilder::SemaphoresWait(a), SubmitAnyBuilder::QueuePresent(_)) => {
self.second.flush()?;
SubmitAnyBuilder::SemaphoresWait(a)
}
- (SubmitAnyBuilder::QueuePresent(a), SubmitAnyBuilder::SemaphoresWait(b)) => {
+ (SubmitAnyBuilder::QueuePresent(_), SubmitAnyBuilder::SemaphoresWait(b)) => {
self.first.flush()?;
SubmitAnyBuilder::SemaphoresWait(b)
}
- (SubmitAnyBuilder::SemaphoresWait(a), SubmitAnyBuilder::BindSparse(b)) => {
+ (SubmitAnyBuilder::SemaphoresWait(a), SubmitAnyBuilder::BindSparse(_, _)) => {
self.second.flush()?;
SubmitAnyBuilder::SemaphoresWait(a)
}
- (SubmitAnyBuilder::BindSparse(a), SubmitAnyBuilder::SemaphoresWait(b)) => {
+ (SubmitAnyBuilder::BindSparse(_, _), SubmitAnyBuilder::SemaphoresWait(b)) => {
self.first.flush()?;
SubmitAnyBuilder::SemaphoresWait(b)
}
- (SubmitAnyBuilder::CommandBuffer(a), SubmitAnyBuilder::CommandBuffer(b)) => {
- // TODO: we may want to add debug asserts here
- let new = a.merge(b);
- SubmitAnyBuilder::CommandBuffer(new)
+ (
+ SubmitAnyBuilder::CommandBuffer(mut submit_info_a, fence_a),
+ SubmitAnyBuilder::CommandBuffer(submit_info_b, fence_b),
+ ) => {
+ assert!(
+ fence_a.is_none() || fence_b.is_none(),
+ "Can't merge two queue submits that both have a fence"
+ );
+
+ submit_info_a
+ .wait_semaphores
+ .extend(submit_info_b.wait_semaphores);
+ submit_info_a
+ .command_buffers
+ .extend(submit_info_b.command_buffers);
+ submit_info_a
+ .signal_semaphores
+ .extend(submit_info_b.signal_semaphores);
+
+ SubmitAnyBuilder::CommandBuffer(submit_info_a, fence_a.or(fence_b))
}
- (SubmitAnyBuilder::QueuePresent(a), SubmitAnyBuilder::QueuePresent(b)) => {
+ (SubmitAnyBuilder::QueuePresent(_), SubmitAnyBuilder::QueuePresent(_)) => {
self.first.flush()?;
self.second.flush()?;
SubmitAnyBuilder::Empty
}
- (SubmitAnyBuilder::CommandBuffer(a), SubmitAnyBuilder::QueuePresent(b)) => {
+ (SubmitAnyBuilder::CommandBuffer(_, _), SubmitAnyBuilder::QueuePresent(_)) => {
unimplemented!()
}
- (SubmitAnyBuilder::QueuePresent(a), SubmitAnyBuilder::CommandBuffer(b)) => {
+ (SubmitAnyBuilder::QueuePresent(_), SubmitAnyBuilder::CommandBuffer(_, _)) => {
unimplemented!()
}
- (SubmitAnyBuilder::BindSparse(a), SubmitAnyBuilder::QueuePresent(b)) => {
+ (SubmitAnyBuilder::BindSparse(_, _), SubmitAnyBuilder::QueuePresent(_)) => {
unimplemented!()
}
- (SubmitAnyBuilder::QueuePresent(a), SubmitAnyBuilder::BindSparse(b)) => {
+ (SubmitAnyBuilder::QueuePresent(_), SubmitAnyBuilder::BindSparse(_, _)) => {
unimplemented!()
}
- (SubmitAnyBuilder::BindSparse(a), SubmitAnyBuilder::CommandBuffer(b)) => {
+ (SubmitAnyBuilder::BindSparse(_, _), SubmitAnyBuilder::CommandBuffer(_, _)) => {
unimplemented!()
}
- (SubmitAnyBuilder::CommandBuffer(a), SubmitAnyBuilder::BindSparse(b)) => {
+ (SubmitAnyBuilder::CommandBuffer(_, _), SubmitAnyBuilder::BindSparse(_, _)) => {
unimplemented!()
}
- (SubmitAnyBuilder::BindSparse(mut a), SubmitAnyBuilder::BindSparse(b)) => {
- match a.merge(b) {
- Ok(()) => SubmitAnyBuilder::BindSparse(a),
- Err(_) => {
- // TODO: this happens if both bind sparse have been given a fence already
- // annoying, but not impossible, to handle
- unimplemented!()
- }
+ (
+ SubmitAnyBuilder::BindSparse(mut bind_infos_a, fence_a),
+ SubmitAnyBuilder::BindSparse(bind_infos_b, fence_b),
+ ) => {
+ if fence_a.is_some() && fence_b.is_some() {
+ // TODO: this happens if both bind sparse have been given a fence already
+ // annoying, but not impossible, to handle
+ unimplemented!()
}
+
+ bind_infos_a.extend(bind_infos_b);
+ SubmitAnyBuilder::BindSparse(bind_infos_a, fence_a)
}
})
}
- #[inline]
unsafe fn signal_finished(&self) {
self.first.signal_finished();
self.second.signal_finished();
}
- #[inline]
fn queue_change_allowed(&self) -> bool {
self.first.queue_change_allowed() && self.second.queue_change_allowed()
}
- #[inline]
fn queue(&self) -> Option<Arc<Queue>> {
match (self.first.queue(), self.second.queue()) {
(Some(q1), Some(q2)) => {
- if q1.is_same(&q2) {
+ if q1 == q2 {
Some(q1)
} else if self.first.queue_change_allowed() {
Some(q2)
@@ -201,68 +197,93 @@ where
}
}
- #[inline]
fn check_buffer_access(
&self,
- buffer: &dyn BufferAccess,
+ buffer: &Buffer,
+ range: Range<DeviceSize>,
exclusive: bool,
queue: &Queue,
- ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError> {
- let first = self.first.check_buffer_access(buffer, exclusive, queue);
- let second = self.second.check_buffer_access(buffer, exclusive, queue);
+ ) -> Result<(), AccessCheckError> {
+ let first = self
+ .first
+ .check_buffer_access(buffer, range.clone(), exclusive, queue);
+ let second = self
+ .second
+ .check_buffer_access(buffer, range, exclusive, queue);
debug_assert!(
- !exclusive || !(first.is_ok() && second.is_ok()),
+ !(exclusive && first.is_ok() && second.is_ok()),
"Two futures gave exclusive access to the same resource"
);
match (first, second) {
(v, Err(AccessCheckError::Unknown)) => v,
(Err(AccessCheckError::Unknown), v) => v,
- (Err(AccessCheckError::Denied(e1)), Err(AccessCheckError::Denied(e2))) => {
+ (Err(AccessCheckError::Denied(e1)), Err(AccessCheckError::Denied(_))) => {
Err(AccessCheckError::Denied(e1))
} // TODO: which one?
- (Ok(_), Err(AccessCheckError::Denied(_)))
- | (Err(AccessCheckError::Denied(_)), Ok(_)) => panic!(
+ (Ok(()), Err(AccessCheckError::Denied(_)))
+ | (Err(AccessCheckError::Denied(_)), Ok(())) => panic!(
"Contradictory information \
between two futures"
),
- (Ok(None), Ok(None)) => Ok(None),
- (Ok(Some(a)), Ok(None)) | (Ok(None), Ok(Some(a))) => Ok(Some(a)),
- (Ok(Some((a1, a2))), Ok(Some((b1, b2)))) => Ok(Some((a1 | b1, a2 | b2))),
+ (Ok(()), Ok(())) => Ok(()),
}
}
- #[inline]
fn check_image_access(
&self,
- image: &dyn ImageAccess,
- layout: ImageLayout,
+ image: &Image,
+ range: Range<DeviceSize>,
exclusive: bool,
+ expected_layout: ImageLayout,
queue: &Queue,
- ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError> {
+ ) -> Result<(), AccessCheckError> {
+ let first =
+ self.first
+ .check_image_access(image, range.clone(), exclusive, expected_layout, queue);
+ let second =
+ self.second
+ .check_image_access(image, range, exclusive, expected_layout, queue);
+ debug_assert!(
+ !(exclusive && first.is_ok() && second.is_ok()),
+ "Two futures gave exclusive access to the same resource"
+ );
+ match (first, second) {
+ (v, Err(AccessCheckError::Unknown)) => v,
+ (Err(AccessCheckError::Unknown), v) => v,
+ (Err(AccessCheckError::Denied(e1)), Err(AccessCheckError::Denied(_))) => {
+ Err(AccessCheckError::Denied(e1))
+ } // TODO: which one?
+ (Ok(()), Err(AccessCheckError::Denied(_)))
+ | (Err(AccessCheckError::Denied(_)), Ok(())) => {
+ panic!("Contradictory information between two futures")
+ }
+ (Ok(()), Ok(())) => Ok(()),
+ }
+ }
+
+ #[inline]
+ fn check_swapchain_image_acquired(
+ &self,
+ swapchain: &Swapchain,
+ image_index: u32,
+ _before: bool,
+ ) -> Result<(), AccessCheckError> {
let first = self
.first
- .check_image_access(image, layout, exclusive, queue);
+ .check_swapchain_image_acquired(swapchain, image_index, false);
let second = self
.second
- .check_image_access(image, layout, exclusive, queue);
- debug_assert!(
- !exclusive || !(first.is_ok() && second.is_ok()),
- "Two futures gave exclusive access to the same resource"
- );
+ .check_swapchain_image_acquired(swapchain, image_index, false);
+
match (first, second) {
(v, Err(AccessCheckError::Unknown)) => v,
(Err(AccessCheckError::Unknown), v) => v,
- (Err(AccessCheckError::Denied(e1)), Err(AccessCheckError::Denied(e2))) => {
+ (Err(AccessCheckError::Denied(e1)), Err(AccessCheckError::Denied(_))) => {
Err(AccessCheckError::Denied(e1))
} // TODO: which one?
- (Ok(_), Err(AccessCheckError::Denied(_)))
- | (Err(AccessCheckError::Denied(_)), Ok(_)) => panic!(
- "Contradictory information \
- between two futures"
- ),
- (Ok(None), Ok(None)) => Ok(None),
- (Ok(Some(a)), Ok(None)) | (Ok(None), Ok(Some(a))) => Ok(Some(a)),
- (Ok(Some((a1, a2))), Ok(Some((b1, b2)))) => Ok(Some((a1 | b1, a2 | b2))),
+ (Ok(()), Err(AccessCheckError::Denied(_)))
+ | (Err(AccessCheckError::Denied(_)), Ok(())) => Ok(()),
+ (Ok(()), Ok(())) => Ok(()), // TODO: Double Acquired?
}
}
}
diff --git a/src/sync/future/mod.rs b/src/sync/future/mod.rs
index e6fde7a..e03e54b 100644
--- a/src/sync/future/mod.rs
+++ b/src/sync/future/mod.rs
@@ -7,33 +7,118 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
-pub use self::fence_signal::{FenceSignalFuture, FenceSignalFutureBehavior};
-pub use self::join::JoinFuture;
-pub use self::now::{now, NowFuture};
-pub use self::semaphore_signal::SemaphoreSignalFuture;
-use crate::buffer::BufferAccess;
-use crate::command_buffer::submit::SubmitAnyBuilder;
-use crate::command_buffer::submit::SubmitBindSparseError;
-use crate::command_buffer::submit::SubmitCommandBufferError;
-use crate::command_buffer::submit::SubmitPresentError;
-use crate::command_buffer::CommandBufferExecError;
-use crate::command_buffer::CommandBufferExecFuture;
-use crate::command_buffer::PrimaryCommandBuffer;
-use crate::device::DeviceOwned;
-use crate::device::Queue;
-use crate::image::ImageAccess;
-use crate::image::ImageLayout;
-use crate::swapchain;
-use crate::swapchain::PresentFuture;
-use crate::swapchain::PresentRegion;
-use crate::swapchain::Swapchain;
-use crate::sync::AccessFlags;
-use crate::sync::FenceWaitError;
-use crate::sync::PipelineStages;
-use crate::OomError;
-use std::error;
-use std::fmt;
-use std::sync::Arc;
+//! Represents an event that will happen on the GPU in the future.
+//!
+//! Whenever you ask the GPU to start an operation by using a function of the vulkano library (for
+//! example executing a command buffer), this function will return a *future*. A future is an
+//! object that implements [the `GpuFuture` trait](crate::sync::GpuFuture) and that represents the
+//! point in time when this operation is over.
+//!
+//! No function in vulkano immediately sends an operation to the GPU (with the exception of some
+//! unsafe low-level functions). Instead they return a future that is in the pending state. Before
+//! the GPU actually starts doing anything, you have to *flush* the future by calling the `flush()`
+//! method or one of its derivatives.
+//!
+//! Futures serve several roles:
+//!
+//! - Futures can be used to build dependencies between operations, making it possible to ask
+//! that an operation start only after a previous operation has finished.
+//! - Submitting an operation to the GPU is costly. By chaining multiple operations with futures
+//! you submit them all at once instead of one by one, thereby reducing this cost.
+//! - Futures keep alive the resources and objects used by the GPU so that they don't get destroyed
+//! while they are still in use.
+//!
+//! The last point means that you should keep futures alive in your program for as long as their
+//! corresponding operation is potentially still being executed by the GPU. Dropping a future
+//! earlier will block the current thread (after flushing, if necessary) until the GPU has finished
+//! the operation, which is usually not what you want.
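+//!
+//! A minimal sketch of the usual pattern (keeping the previous frame's future alive across
+//! frames; names are illustrative):
+//!
+//! ```ignore
+//! // Stored in the application state between frames.
+//! let mut previous_frame_end: Box<dyn GpuFuture> = vulkano::sync::now(device.clone()).boxed();
+//!
+//! // At the start of each frame, free the resources of submissions that have finished.
+//! previous_frame_end.cleanup_finished();
+//! ```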
+//!
+//! If you write a function that submits an operation to the GPU in your program, you are
+//! encouraged to let this function return the corresponding future and let the caller handle it.
+//! This way the caller will be able to chain multiple futures together and decide when it wants to
+//! keep the future alive or drop it.
+//!
+//! # Executing an operation after a future
+//!
+//! Respecting the order of operations on the GPU is important, as it is what *proves* to vulkano
+//! that what you are doing is indeed safe. For example, if you submit two operations that modify the
+//! same buffer, then you need to execute one after the other instead of submitting them
+//! independently. Failing to do so would mean that these two operations could potentially execute
+//! simultaneously on the GPU, which would be unsafe.
+//!
+//! This is done by calling one of the methods of the `GpuFuture` trait. For example calling
+//! `prev_future.then_execute(command_buffer)` takes ownership of `prev_future` and will make sure
+//! to only start executing `command_buffer` after the moment corresponding to `prev_future`
+//! happens. The object returned by the `then_execute` function is itself a future that corresponds
+//! to the moment when the execution of `command_buffer` ends.
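+//!
+//! A minimal sketch of such a chain (assuming `device`, `queue` and `command_buffer` have already
+//! been created; names are illustrative):
+//!
+//! ```ignore
+//! let future = vulkano::sync::now(device.clone())
+//!     .then_execute(queue.clone(), command_buffer)
+//!     .unwrap();
+//! // Nothing is sent to the GPU until the future is flushed.
+//! future.flush().unwrap();
+//! ```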
+//!
+//! ## Between two different GPU queues
+//!
+//! When you want to perform an operation after another operation on two different queues, you
+//! **must** put a *semaphore* between them. Failure to do so would result in a runtime error.
+//! Adding a semaphore is a simple as replacing `prev_future.then_execute(...)` with
+//! `prev_future.then_signal_semaphore().then_execute(...)`.
+//!
+//! > **Note**: A common use case is using a transfer queue (i.e. a queue that is only capable of
+//! > performing transfer operations) to write data to a buffer, then read that data from the
+//! > rendering queue.
+//!
+//! What happens when you do so is that the first queue will execute the first set of operations
+//! (represented by `prev_future` in the example), then put a semaphore in the signalled state.
+//! Meanwhile the second queue blocks (if necessary) until that same semaphore gets signalled, and
+//! then only will execute the second set of operations.
+//!
+//! Since you want to avoid blocking the second queue as much as possible, you probably want to
+//! flush the operation to the first queue as soon as possible. This can easily be done by calling
+//! `then_signal_semaphore_and_flush()` instead of `then_signal_semaphore()`.
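+//!
+//! A minimal sketch of the transfer-then-render pattern described above (assuming the queues and
+//! command buffers have already been created; names are illustrative):
+//!
+//! ```ignore
+//! let future = vulkano::sync::now(device.clone())
+//!     // Runs on the transfer queue.
+//!     .then_execute(transfer_queue, upload_command_buffer)
+//!     .unwrap()
+//!     // The semaphore orders the two queues; flushing submits the upload right away.
+//!     .then_signal_semaphore_and_flush()
+//!     .unwrap()
+//!     // Runs on the rendering queue once the semaphore is signalled.
+//!     .then_execute(render_queue, draw_command_buffer)
+//!     .unwrap();
+//! ```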
+//!
+//! ## Between several different GPU queues
+//!
+//! The `then_signal_semaphore()` method is appropriate when you perform an operation in one queue,
+//! and want to see the result in another queue. However in some situations you want to start
+//! multiple operations on several different queues.
+//!
+//! TODO: this is not yet implemented
+//!
+//! # Fences
+//!
+//! A `Fence` is an object that is used to signal the CPU when an operation on the GPU is finished.
+//!
+//! Signalling a fence is done by calling `then_signal_fence()` on a future. Just like semaphores,
+//! you are encouraged to use `then_signal_fence_and_flush()` instead.
+//!
+//! Signalling a fence is kind of a "terminator" to a chain of futures.
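+//!
+//! A minimal sketch (assuming `future` is a chain like the ones above):
+//!
+//! ```ignore
+//! let future = future.then_signal_fence_and_flush().unwrap();
+//! // Block the current thread until the GPU has finished the whole chain.
+//! future.wait(None).unwrap();
+//! ```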
+
+pub use self::{
+ fence_signal::{FenceSignalFuture, FenceSignalFutureBehavior},
+ join::JoinFuture,
+ now::{now, NowFuture},
+ semaphore_signal::SemaphoreSignalFuture,
+};
+use super::{
+ fence::{Fence, FenceError},
+ semaphore::Semaphore,
+};
+use crate::{
+ buffer::Buffer,
+ command_buffer::{
+ CommandBufferExecError, CommandBufferExecFuture, PrimaryCommandBufferAbstract,
+ ResourceUseRef, SubmitInfo,
+ },
+ device::{DeviceOwned, Queue},
+ image::{sys::Image, ImageLayout},
+ memory::BindSparseInfo,
+ swapchain::{self, PresentFuture, PresentInfo, Swapchain, SwapchainPresentInfo},
+ DeviceSize, OomError, VulkanError,
+};
+use smallvec::SmallVec;
+use std::{
+ error::Error,
+ fmt::{Display, Error as FmtError, Formatter},
+ ops::Range,
+ sync::Arc,
+};
mod fence_signal;
mod join;
@@ -108,24 +193,19 @@ pub unsafe trait GpuFuture: DeviceOwned {
/// Checks whether submitting something after this future grants access (exclusive or shared,
/// depending on the parameter) to the given buffer on the given queue.
///
- /// If the access is granted, returns the pipeline stage and access flags of the latest usage
- /// of this resource, or `None` if irrelevant.
- ///
/// > **Note**: Returning `Ok` means "access granted", while returning `Err` means
/// > "don't know". Therefore returning `Err` is never unsafe.
fn check_buffer_access(
&self,
- buffer: &dyn BufferAccess,
+ buffer: &Buffer,
+ range: Range<DeviceSize>,
exclusive: bool,
queue: &Queue,
- ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError>;
+ ) -> Result<(), AccessCheckError>;
/// Checks whether submitting something after this future grants access (exclusive or shared,
/// depending on the parameter) to the given image on the given queue.
///
- /// If the access is granted, returns the pipeline stage and access flags of the latest usage
- /// of this resource, or `None` if irrelevant.
- ///
/// Implementations must ensure that the image is in the given layout. However if the `layout`
/// is `Undefined` then the implementation should accept any actual layout.
///
@@ -136,11 +216,23 @@ pub unsafe trait GpuFuture: DeviceOwned {
/// > access.
fn check_image_access(
&self,
- image: &dyn ImageAccess,
- layout: ImageLayout,
+ image: &Image,
+ range: Range<DeviceSize>,
exclusive: bool,
+ expected_layout: ImageLayout,
queue: &Queue,
- ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError>;
+ ) -> Result<(), AccessCheckError>;
+
+ /// Checks whether accessing a swapchain image is permitted.
+ ///
+ /// > **Note**: Setting `before` to `true` should skip checking the current future and always
+ /// > forward the call to the future before.
+ fn check_swapchain_image_acquired(
+ &self,
+ swapchain: &Swapchain,
+ image_index: u32,
+ before: bool,
+ ) -> Result<(), AccessCheckError>;
/// Joins this future with another one, representing the moment when both events have happened.
// TODO: handle errors
@@ -156,15 +248,14 @@ pub unsafe trait GpuFuture: DeviceOwned {
///
/// > **Note**: This is just a shortcut function. The actual implementation is in the
/// > `CommandBuffer` trait.
- #[inline]
fn then_execute<Cb>(
self,
queue: Arc<Queue>,
command_buffer: Cb,
- ) -> Result<CommandBufferExecFuture<Self, Cb>, CommandBufferExecError>
+ ) -> Result<CommandBufferExecFuture<Self>, CommandBufferExecError>
where
Self: Sized,
- Cb: PrimaryCommandBuffer + 'static,
+ Cb: PrimaryCommandBufferAbstract + 'static,
{
command_buffer.execute_after(self, queue)
}
@@ -173,16 +264,15 @@ pub unsafe trait GpuFuture: DeviceOwned {
///
/// > **Note**: This is just a shortcut function. The actual implementation is in the
/// > `CommandBuffer` trait.
- #[inline]
fn then_execute_same_queue<Cb>(
self,
command_buffer: Cb,
- ) -> Result<CommandBufferExecFuture<Self, Cb>, CommandBufferExecError>
+ ) -> Result<CommandBufferExecFuture<Self>, CommandBufferExecError>
where
Self: Sized,
- Cb: PrimaryCommandBuffer + 'static,
+ Cb: PrimaryCommandBufferAbstract + 'static,
{
- let queue = self.queue().unwrap().clone();
+ let queue = self.queue().unwrap();
command_buffer.execute_after(self, queue)
}
@@ -218,6 +308,7 @@ pub unsafe trait GpuFuture: DeviceOwned {
{
let f = self.then_signal_semaphore();
f.flush()?;
+
Ok(f)
}
@@ -243,6 +334,7 @@ pub unsafe trait GpuFuture: DeviceOwned {
{
let f = self.then_signal_fence();
f.flush()?;
+
Ok(f)
}
@@ -253,41 +345,58 @@ pub unsafe trait GpuFuture: DeviceOwned {
///
/// > **Note**: This is just a shortcut for the `Swapchain::present()` function.
#[inline]
- fn then_swapchain_present<W>(
+ fn then_swapchain_present(
self,
queue: Arc<Queue>,
- swapchain: Arc<Swapchain<W>>,
- image_index: usize,
- ) -> PresentFuture<Self, W>
+ swapchain_info: SwapchainPresentInfo,
+ ) -> PresentFuture<Self>
where
Self: Sized,
{
- swapchain::present(swapchain, self, queue, image_index)
+ swapchain::present(self, queue, swapchain_info)
}
- /// Same as `then_swapchain_present`, except it allows specifying a present region.
+ /// Turn the current future into a `Box<dyn GpuFuture>`.
///
- /// > **Note**: This is just a shortcut for the `Swapchain::present_incremental()` function.
+ /// This is a helper function that calls `Box::new(yourFuture) as Box<dyn GpuFuture>`.
#[inline]
- fn then_swapchain_present_incremental<W>(
- self,
- queue: Arc<Queue>,
- swapchain: Arc<Swapchain<W>>,
- image_index: usize,
- present_region: PresentRegion,
- ) -> PresentFuture<Self, W>
+ fn boxed(self) -> Box<dyn GpuFuture>
where
- Self: Sized,
+ Self: Sized + 'static,
{
- swapchain::present_incremental(swapchain, self, queue, image_index, present_region)
+ Box::new(self) as _
}
- /// Turn the current future into a `Box<dyn GpuFuture>`.
+ /// Turn the current future into a `Box<dyn GpuFuture + Send>`.
///
- /// This is a helper function that calls `Box::new(yourFuture) as Box<dyn GpuFuture>`.
- fn boxed(self) -> Box<dyn GpuFuture>
+ /// This is a helper function that calls `Box::new(yourFuture) as Box<dyn GpuFuture + Send>`.
+ #[inline]
+ fn boxed_send(self) -> Box<dyn GpuFuture + Send>
where
- Self: Sized + 'static,
+ Self: Sized + Send + 'static,
+ {
+ Box::new(self) as _
+ }
+
+ /// Turn the current future into a `Box<dyn GpuFuture + Sync>`.
+ ///
+ /// This is a helper function that calls `Box::new(yourFuture) as Box<dyn GpuFuture + Sync>`.
+ #[inline]
+ fn boxed_sync(self) -> Box<dyn GpuFuture + Sync>
+ where
+ Self: Sized + Sync + 'static,
+ {
+ Box::new(self) as _
+ }
+
+ /// Turn the current future into a `Box<dyn GpuFuture + Send + Sync>`.
+ ///
+ /// This is a helper function that calls `Box::new(yourFuture) as Box<dyn GpuFuture + Send +
+ /// Sync>`.
+ #[inline]
+ fn boxed_send_sync(self) -> Box<dyn GpuFuture + Send + Sync>
+ where
+ Self: Sized + Send + Sync + 'static,
{
Box::new(self) as _
}
@@ -297,55 +406,77 @@ unsafe impl<F: ?Sized> GpuFuture for Box<F>
where
F: GpuFuture,
{
- #[inline]
fn cleanup_finished(&mut self) {
(**self).cleanup_finished()
}
- #[inline]
unsafe fn build_submission(&self) -> Result<SubmitAnyBuilder, FlushError> {
(**self).build_submission()
}
- #[inline]
fn flush(&self) -> Result<(), FlushError> {
(**self).flush()
}
- #[inline]
unsafe fn signal_finished(&self) {
(**self).signal_finished()
}
- #[inline]
fn queue_change_allowed(&self) -> bool {
(**self).queue_change_allowed()
}
- #[inline]
fn queue(&self) -> Option<Arc<Queue>> {
(**self).queue()
}
- #[inline]
fn check_buffer_access(
&self,
- buffer: &dyn BufferAccess,
+ buffer: &Buffer,
+ range: Range<DeviceSize>,
exclusive: bool,
queue: &Queue,
- ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError> {
- (**self).check_buffer_access(buffer, exclusive, queue)
+ ) -> Result<(), AccessCheckError> {
+ (**self).check_buffer_access(buffer, range, exclusive, queue)
}
- #[inline]
fn check_image_access(
&self,
- image: &dyn ImageAccess,
- layout: ImageLayout,
+ image: &Image,
+ range: Range<DeviceSize>,
exclusive: bool,
+ expected_layout: ImageLayout,
queue: &Queue,
- ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError> {
- (**self).check_image_access(image, layout, exclusive, queue)
+ ) -> Result<(), AccessCheckError> {
+ (**self).check_image_access(image, range, exclusive, expected_layout, queue)
+ }
+
+ #[inline]
+ fn check_swapchain_image_acquired(
+ &self,
+ swapchain: &Swapchain,
+ image_index: u32,
+ before: bool,
+ ) -> Result<(), AccessCheckError> {
+ (**self).check_swapchain_image_acquired(swapchain, image_index, before)
+ }
+}
+
+/// Contains all the possible submission builders.
+#[derive(Debug)]
+pub enum SubmitAnyBuilder {
+ Empty,
+ SemaphoresWait(SmallVec<[Arc<Semaphore>; 8]>),
+ CommandBuffer(SubmitInfo, Option<Arc<Fence>>),
+ QueuePresent(PresentInfo),
+ BindSparse(SmallVec<[BindSparseInfo; 1]>, Option<Arc<Fence>>),
+}
+
+impl SubmitAnyBuilder {
+ /// Returns true if equal to `SubmitAnyBuilder::Empty`.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ matches!(self, SubmitAnyBuilder::Empty)
}
}
@@ -374,18 +505,17 @@ pub enum AccessError {
BufferNotInitialized,
/// Trying to use a swapchain image without depending on a corresponding acquire image future.
- SwapchainImageAcquireOnly,
+ SwapchainImageNotAcquired,
}
-impl error::Error for AccessError {}
+impl Error for AccessError {}
-impl fmt::Display for AccessError {
- #[inline]
- fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+impl Display for AccessError {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
write!(
- fmt,
+ f,
"{}",
- match *self {
+ match self {
AccessError::ExclusiveDenied => "only shared access is allowed for this resource",
AccessError::AlreadyInUse => {
"the resource is already in use, and there is no tracking of concurrent usages"
@@ -395,14 +525,14 @@ impl fmt::Display for AccessError {
}
AccessError::ImageNotInitialized { .. } => {
"trying to use an image without transitioning it from the undefined or \
- preinitialized layouts first"
+ preinitialized layouts first"
}
AccessError::BufferNotInitialized => {
"trying to use a buffer that still contains garbage data"
}
- AccessError::SwapchainImageAcquireOnly => {
+ AccessError::SwapchainImageNotAcquired => {
"trying to use a swapchain image without depending on a corresponding acquire \
- image future"
+ image future"
}
}
)
@@ -418,15 +548,14 @@ pub enum AccessCheckError {
Unknown,
}
-impl error::Error for AccessCheckError {}
+impl Error for AccessCheckError {}
-impl fmt::Display for AccessCheckError {
- #[inline]
- fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+impl Display for AccessCheckError {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
write!(
- fmt,
+ f,
"{}",
- match *self {
+ match self {
AccessCheckError::Denied(_) => "access to the resource has been denied",
AccessCheckError::Unknown => "the resource is unknown",
}
@@ -435,7 +564,6 @@ impl fmt::Display for AccessCheckError {
}
impl From<AccessError> for AccessCheckError {
- #[inline]
fn from(err: AccessError) -> AccessCheckError {
AccessCheckError::Denied(err)
}
@@ -460,43 +588,72 @@ pub enum FlushError {
/// surface's new properties and recreate a new swapchain if you want to continue drawing.
OutOfDate,
- /// The swapchain has lost or doesn't have fullscreen exclusivity possibly for
+ /// The swapchain has lost or doesn't have full screen exclusivity possibly for
/// implementation-specific reasons outside of the application’s control.
- FullscreenExclusiveLost,
+ FullScreenExclusiveModeLost,
/// The flush operation needed to block, but the timeout has elapsed.
Timeout,
+
+ /// A non-zero present_id must be greater than any non-zero present_id passed previously
+ /// for the same swapchain.
+ PresentIdLessThanOrEqual,
+
+ /// Access to a resource has been denied.
+ ResourceAccessError {
+ error: AccessError,
+ use_ref: Option<ResourceUseRef>,
+ },
+
+ /// The command buffer or one of the secondary command buffers it executes was created with the
+ /// "one time submit" flag, but has already been submitted it the past.
+ OneTimeSubmitAlreadySubmitted,
+
+ /// The command buffer or one of the secondary command buffers it executes is already in use by
+ /// the GPU and was not created with the "concurrent" flag.
+ ExclusiveAlreadyInUse,
}
-impl error::Error for FlushError {
- #[inline]
- fn source(&self) -> Option<&(dyn error::Error + 'static)> {
- match *self {
- FlushError::AccessError(ref err) => Some(err),
- FlushError::OomError(ref err) => Some(err),
+impl Error for FlushError {
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ match self {
+ FlushError::AccessError(err) => Some(err),
+ FlushError::OomError(err) => Some(err),
+ FlushError::ResourceAccessError { error, .. } => Some(error),
_ => None,
}
}
}
-impl fmt::Display for FlushError {
- #[inline]
- fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+impl Display for FlushError {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
write!(
- fmt,
+ f,
"{}",
- match *self {
+ match self {
FlushError::AccessError(_) => "access to a resource has been denied",
FlushError::OomError(_) => "not enough memory",
FlushError::DeviceLost => "the connection to the device has been lost",
FlushError::SurfaceLost => "the surface of this swapchain is no longer valid",
FlushError::OutOfDate => "the swapchain needs to be recreated",
- FlushError::FullscreenExclusiveLost => {
- "the swapchain no longer has fullscreen exclusivity"
+ FlushError::FullScreenExclusiveModeLost => {
+ "the swapchain no longer has full screen exclusivity"
}
FlushError::Timeout => {
- "the flush operation needed to block, but the timeout has \
- elapsed"
+ "the flush operation needed to block, but the timeout has elapsed"
+ }
+ FlushError::PresentIdLessThanOrEqual => {
+ "present id is less than or equal to previous"
+ }
+ FlushError::ResourceAccessError { .. } => "access to a resource has been denied",
+ FlushError::OneTimeSubmitAlreadySubmitted => {
+ "the command buffer or one of the secondary command buffers it executes was \
+ created with the \"one time submit\" flag, but has already been submitted in \
+ the past"
+ }
+ FlushError::ExclusiveAlreadyInUse => {
+ "the command buffer or one of the secondary command buffers it executes is \
+                        already in use and was not created with the \"concurrent\" flag"
}
}
)
@@ -504,52 +661,33 @@ impl fmt::Display for FlushError {
}
impl From<AccessError> for FlushError {
- #[inline]
fn from(err: AccessError) -> FlushError {
FlushError::AccessError(err)
}
}
-impl From<SubmitPresentError> for FlushError {
- #[inline]
- fn from(err: SubmitPresentError) -> FlushError {
+impl From<VulkanError> for FlushError {
+ fn from(err: VulkanError) -> Self {
match err {
- SubmitPresentError::OomError(err) => FlushError::OomError(err),
- SubmitPresentError::DeviceLost => FlushError::DeviceLost,
- SubmitPresentError::SurfaceLost => FlushError::SurfaceLost,
- SubmitPresentError::OutOfDate => FlushError::OutOfDate,
- SubmitPresentError::FullscreenExclusiveLost => FlushError::FullscreenExclusiveLost,
- }
- }
-}
-
-impl From<SubmitCommandBufferError> for FlushError {
- #[inline]
- fn from(err: SubmitCommandBufferError) -> FlushError {
- match err {
- SubmitCommandBufferError::OomError(err) => FlushError::OomError(err),
- SubmitCommandBufferError::DeviceLost => FlushError::DeviceLost,
- }
- }
-}
-
-impl From<SubmitBindSparseError> for FlushError {
- #[inline]
- fn from(err: SubmitBindSparseError) -> FlushError {
- match err {
- SubmitBindSparseError::OomError(err) => FlushError::OomError(err),
- SubmitBindSparseError::DeviceLost => FlushError::DeviceLost,
+ VulkanError::OutOfHostMemory | VulkanError::OutOfDeviceMemory => {
+ Self::OomError(err.into())
+ }
+ VulkanError::DeviceLost => Self::DeviceLost,
+ VulkanError::SurfaceLost => Self::SurfaceLost,
+ VulkanError::OutOfDate => Self::OutOfDate,
+ VulkanError::FullScreenExclusiveModeLost => Self::FullScreenExclusiveModeLost,
+ _ => panic!("unexpected error: {:?}", err),
}
}
}
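
A hedged sketch of the mapping above: the out-of-memory codes are folded into `OomError`, the losses map one-to-one, and any code a flush cannot legitimately return panics:

    // `matches!` avoids requiring `PartialEq` on the error type.
    assert!(matches!(
        FlushError::from(VulkanError::DeviceLost),
        FlushError::DeviceLost
    ));
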
-impl From<FenceWaitError> for FlushError {
- #[inline]
- fn from(err: FenceWaitError) -> FlushError {
+impl From<FenceError> for FlushError {
+ fn from(err: FenceError) -> FlushError {
match err {
- FenceWaitError::OomError(err) => FlushError::OomError(err),
- FenceWaitError::Timeout => FlushError::Timeout,
- FenceWaitError::DeviceLostError => FlushError::DeviceLost,
+ FenceError::OomError(err) => FlushError::OomError(err),
+ FenceError::Timeout => FlushError::Timeout,
+ FenceError::DeviceLost => FlushError::DeviceLost,
+ _ => unreachable!(),
}
}
}
diff --git a/src/sync/future/now.rs b/src/sync/future/now.rs
index 131eed6..11e53db 100644
--- a/src/sync/future/now.rs
+++ b/src/sync/future/now.rs
@@ -7,25 +7,20 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
-use std::sync::Arc;
-
-use crate::buffer::BufferAccess;
-use crate::command_buffer::submit::SubmitAnyBuilder;
-use crate::device::Device;
-use crate::device::DeviceOwned;
-use crate::device::Queue;
-use crate::image::ImageAccess;
-use crate::image::ImageLayout;
-use crate::sync::AccessCheckError;
-use crate::sync::AccessFlags;
-use crate::sync::FlushError;
-use crate::sync::GpuFuture;
-use crate::sync::PipelineStages;
+use super::{AccessCheckError, FlushError, GpuFuture, SubmitAnyBuilder};
+use crate::{
+ buffer::Buffer,
+ device::{Device, DeviceOwned, Queue},
+ image::{sys::Image, ImageLayout},
+ swapchain::Swapchain,
+ DeviceSize,
+};
+use std::{ops::Range, sync::Arc};
/// Builds a future that represents "now".
#[inline]
pub fn now(device: Arc<Device>) -> NowFuture {
- NowFuture { device: device }
+ NowFuture { device }
}
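
A minimal usage sketch, assuming `device: Arc<Device>` was created elsewhere: `now` is the usual starting point of a future chain and carries no GPU work of its own:

    // Start an empty chain, then fence and flush it; the fence signals
    // immediately because "now" submits nothing.
    let future = now(device.clone())
        .then_signal_fence_and_flush()
        .unwrap();
    future.wait(None).unwrap();
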
/// A dummy future that represents "now".
@@ -63,21 +58,33 @@ unsafe impl GpuFuture for NowFuture {
#[inline]
fn check_buffer_access(
&self,
- buffer: &dyn BufferAccess,
- _: bool,
- _: &Queue,
- ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError> {
+ _buffer: &Buffer,
+ _range: Range<DeviceSize>,
+ _exclusive: bool,
+ _queue: &Queue,
+ ) -> Result<(), AccessCheckError> {
Err(AccessCheckError::Unknown)
}
#[inline]
fn check_image_access(
&self,
- _: &dyn ImageAccess,
- _: ImageLayout,
- _: bool,
- _: &Queue,
- ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError> {
+ _image: &Image,
+ _range: Range<DeviceSize>,
+ _exclusive: bool,
+ _expected_layout: ImageLayout,
+ _queue: &Queue,
+ ) -> Result<(), AccessCheckError> {
+ Err(AccessCheckError::Unknown)
+ }
+
+ #[inline]
+ fn check_swapchain_image_acquired(
+ &self,
+ _swapchain: &Swapchain,
+ _image_index: u32,
+ _before: bool,
+ ) -> Result<(), AccessCheckError> {
Err(AccessCheckError::Unknown)
}
}
diff --git a/src/sync/future/semaphore_signal.rs b/src/sync/future/semaphore_signal.rs
index 829860b..f31d37a 100644
--- a/src/sync/future/semaphore_signal.rs
+++ b/src/sync/future/semaphore_signal.rs
@@ -7,29 +7,28 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
-use std::sync::atomic::AtomicBool;
-use std::sync::atomic::Ordering;
-use std::sync::Arc;
-use std::sync::Mutex;
-
-use crate::buffer::BufferAccess;
-use crate::command_buffer::submit::SubmitAnyBuilder;
-use crate::command_buffer::submit::SubmitCommandBufferBuilder;
-use crate::command_buffer::submit::SubmitSemaphoresWaitBuilder;
-use crate::device::Device;
-use crate::device::DeviceOwned;
-use crate::device::Queue;
-use crate::image::ImageAccess;
-use crate::image::ImageLayout;
-use crate::sync::AccessCheckError;
-use crate::sync::AccessFlags;
-use crate::sync::FlushError;
-use crate::sync::GpuFuture;
-use crate::sync::PipelineStages;
-use crate::sync::Semaphore;
+use super::{AccessCheckError, FlushError, GpuFuture, SubmitAnyBuilder};
+use crate::{
+ buffer::Buffer,
+ command_buffer::{SemaphoreSubmitInfo, SubmitInfo},
+ device::{Device, DeviceOwned, Queue},
+ image::{sys::Image, ImageLayout},
+ swapchain::Swapchain,
+ sync::{future::AccessError, semaphore::Semaphore, PipelineStages},
+ DeviceSize,
+};
+use parking_lot::Mutex;
+use smallvec::smallvec;
+use std::{
+ ops::Range,
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc,
+ },
+ thread,
+};
/// Builds a new semaphore signal future.
-#[inline]
pub fn then_signal_semaphore<F>(future: F) -> SemaphoreSignalFuture<F>
where
F: GpuFuture,
@@ -40,7 +39,7 @@ where
SemaphoreSignalFuture {
previous: future,
- semaphore: Semaphore::from_pool(device).unwrap(),
+ semaphore: Arc::new(Semaphore::from_pool(device).unwrap()),
wait_submitted: Mutex::new(false),
finished: AtomicBool::new(false),
}
@@ -54,7 +53,7 @@ where
F: GpuFuture,
{
previous: F,
- semaphore: Semaphore,
+ semaphore: Arc<Semaphore>,
// True if the signaling command has already been submitted.
// If flush is called multiple times, we want to block so that only one flushing is executed.
// Therefore we use a `Mutex<bool>` and not an `AtomicBool`.
@@ -66,58 +65,120 @@ unsafe impl<F> GpuFuture for SemaphoreSignalFuture<F>
where
F: GpuFuture,
{
- #[inline]
fn cleanup_finished(&mut self) {
self.previous.cleanup_finished();
}
- #[inline]
unsafe fn build_submission(&self) -> Result<SubmitAnyBuilder, FlushError> {
// Flushing the signaling part, since it must always be submitted before the waiting part.
self.flush()?;
+ let sem = smallvec![self.semaphore.clone()];
- let mut sem = SubmitSemaphoresWaitBuilder::new();
- sem.add_wait_semaphore(&self.semaphore);
Ok(SubmitAnyBuilder::SemaphoresWait(sem))
}
fn flush(&self) -> Result<(), FlushError> {
unsafe {
- let mut wait_submitted = self.wait_submitted.lock().unwrap();
+ let mut wait_submitted = self.wait_submitted.lock();
if *wait_submitted {
return Ok(());
}
- let queue = self.previous.queue().unwrap().clone();
+ let queue = self.previous.queue().unwrap();
match self.previous.build_submission()? {
SubmitAnyBuilder::Empty => {
- let mut builder = SubmitCommandBufferBuilder::new();
- builder.add_signal_semaphore(&self.semaphore);
- builder.submit(&queue)?;
+ queue.with(|mut q| {
+ q.submit_unchecked(
+ [SubmitInfo {
+ signal_semaphores: vec![SemaphoreSubmitInfo::semaphore(
+ self.semaphore.clone(),
+ )],
+ ..Default::default()
+ }],
+ None,
+ )
+ })?;
}
- SubmitAnyBuilder::SemaphoresWait(sem) => {
- let mut builder: SubmitCommandBufferBuilder = sem.into();
- builder.add_signal_semaphore(&self.semaphore);
- builder.submit(&queue)?;
+ SubmitAnyBuilder::SemaphoresWait(semaphores) => {
+ queue.with(|mut q| {
+ q.submit_unchecked(
+ [SubmitInfo {
+ wait_semaphores: semaphores
+ .into_iter()
+ .map(|semaphore| {
+ SemaphoreSubmitInfo {
+ // TODO: correct stages ; hard
+ stages: PipelineStages::ALL_COMMANDS,
+ ..SemaphoreSubmitInfo::semaphore(semaphore)
+ }
+ })
+ .collect(),
+ signal_semaphores: vec![SemaphoreSubmitInfo::semaphore(
+ self.semaphore.clone(),
+ )],
+ ..Default::default()
+ }],
+ None,
+ )
+ })?;
}
- SubmitAnyBuilder::CommandBuffer(mut builder) => {
- debug_assert_eq!(builder.num_signal_semaphores(), 0);
- builder.add_signal_semaphore(&self.semaphore);
- builder.submit(&queue)?;
+ SubmitAnyBuilder::CommandBuffer(mut submit_info, fence) => {
+ debug_assert!(submit_info.signal_semaphores.is_empty());
+
+ submit_info
+ .signal_semaphores
+ .push(SemaphoreSubmitInfo::semaphore(self.semaphore.clone()));
+
+ queue.with(|mut q| {
+ q.submit_with_future(submit_info, fence, &self.previous, &queue)
+ })?;
}
- SubmitAnyBuilder::BindSparse(_) => {
+ SubmitAnyBuilder::BindSparse(_, _) => {
unimplemented!() // TODO: how to do that?
/*debug_assert_eq!(builder.num_signal_semaphores(), 0);
builder.add_signal_semaphore(&self.semaphore);
builder.submit(&queue)?;*/
}
- SubmitAnyBuilder::QueuePresent(present) => {
- present.submit(&queue)?;
- let mut builder = SubmitCommandBufferBuilder::new();
- builder.add_signal_semaphore(&self.semaphore);
- builder.submit(&queue)?; // FIXME: problematic because if we return an error and flush() is called again, then we'll submit the present twice
+ SubmitAnyBuilder::QueuePresent(present_info) => {
+ // VUID-VkPresentIdKHR-presentIds-04999
+ for swapchain_info in &present_info.swapchain_infos {
+ if swapchain_info.present_id.map_or(false, |present_id| {
+ !swapchain_info.swapchain.try_claim_present_id(present_id)
+ }) {
+ return Err(FlushError::PresentIdLessThanOrEqual);
+ }
+
+ match self.previous.check_swapchain_image_acquired(
+ &swapchain_info.swapchain,
+ swapchain_info.image_index,
+ true,
+ ) {
+ Ok(_) => (),
+ Err(AccessCheckError::Unknown) => {
+ return Err(AccessError::SwapchainImageNotAcquired.into())
+ }
+ Err(AccessCheckError::Denied(e)) => return Err(e.into()),
+ }
+ }
+
+ queue.with(|mut q| {
+ q.present_unchecked(present_info)?
+ .map(|r| r.map(|_| ()))
+ .fold(Ok(()), Result::and)?;
+ // FIXME: problematic because if we return an error and flush() is called again, then we'll submit the present twice
+ q.submit_unchecked(
+ [SubmitInfo {
+ signal_semaphores: vec![SemaphoreSubmitInfo::semaphore(
+ self.semaphore.clone(),
+ )],
+ ..Default::default()
+ }],
+ None,
+ )?;
+ Ok::<_, FlushError>(())
+ })?;
}
};
@@ -127,46 +188,52 @@ where
}
}
- #[inline]
unsafe fn signal_finished(&self) {
- debug_assert!(*self.wait_submitted.lock().unwrap());
+ debug_assert!(*self.wait_submitted.lock());
self.finished.store(true, Ordering::SeqCst);
self.previous.signal_finished();
}
- #[inline]
fn queue_change_allowed(&self) -> bool {
true
}
- #[inline]
fn queue(&self) -> Option<Arc<Queue>> {
self.previous.queue()
}
- #[inline]
fn check_buffer_access(
&self,
- buffer: &dyn BufferAccess,
+ buffer: &Buffer,
+ range: Range<DeviceSize>,
exclusive: bool,
queue: &Queue,
- ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError> {
+ ) -> Result<(), AccessCheckError> {
self.previous
- .check_buffer_access(buffer, exclusive, queue)
- .map(|_| None)
+ .check_buffer_access(buffer, range, exclusive, queue)
}
- #[inline]
fn check_image_access(
&self,
- image: &dyn ImageAccess,
- layout: ImageLayout,
+ image: &Image,
+ range: Range<DeviceSize>,
exclusive: bool,
+ expected_layout: ImageLayout,
queue: &Queue,
- ) -> Result<Option<(PipelineStages, AccessFlags)>, AccessCheckError> {
+ ) -> Result<(), AccessCheckError> {
self.previous
- .check_image_access(image, layout, exclusive, queue)
- .map(|_| None)
+ .check_image_access(image, range, exclusive, expected_layout, queue)
+ }
+
+ #[inline]
+ fn check_swapchain_image_acquired(
+ &self,
+ swapchain: &Swapchain,
+ image_index: u32,
+ _before: bool,
+ ) -> Result<(), AccessCheckError> {
+ self.previous
+ .check_swapchain_image_acquired(swapchain, image_index, false)
}
}
@@ -174,7 +241,6 @@ unsafe impl<F> DeviceOwned for SemaphoreSignalFuture<F>
where
F: GpuFuture,
{
- #[inline]
fn device(&self) -> &Arc<Device> {
self.semaphore.device()
}
@@ -185,14 +251,12 @@ where
F: GpuFuture,
{
fn drop(&mut self) {
- unsafe {
- if !*self.finished.get_mut() {
- // TODO: handle errors?
- self.flush().unwrap();
- // Block until the queue finished.
- self.queue().unwrap().wait().unwrap();
- self.previous.signal_finished();
- }
+ if !*self.finished.get_mut() && !thread::panicking() {
+ // TODO: handle errors?
+ self.flush().unwrap();
+ // Block until the queue finished.
+ self.queue().unwrap().with(|mut q| q.wait_idle()).unwrap();
+ unsafe { self.previous.signal_finished() };
}
}
}
diff --git a/src/sync/mod.rs b/src/sync/mod.rs
index decbd7d..512c02e 100644
--- a/src/sync/mod.rs
+++ b/src/sync/mod.rs
@@ -15,122 +15,24 @@
//!
//! This safety is enforced at runtime by vulkano but it is not magic and you will require some
//! knowledge if you want to avoid errors.
-//!
-//! # Futures
-//!
-//! Whenever you ask the GPU to start an operation by using a function of the vulkano library (for
-//! example executing a command buffer), this function will return a *future*. A future is an
-//! object that implements [the `GpuFuture` trait](trait.GpuFuture.html) and that represents the
-//! point in time when this operation is over.
-//!
-//! No function in vulkano immediately sends an operation to the GPU (with the exception of some
-//! unsafe low-level functions). Instead they return a future that is in the pending state. Before
-//! the GPU actually starts doing anything, you have to *flush* the future by calling the `flush()`
-//! method or one of its derivatives.
-//!
-//! Futures serve several roles:
-//!
-//! - Futures can be used to build dependencies between operations and makes it possible to ask
-//! that an operation starts only after a previous operation is finished.
-//! - Submitting an operation to the GPU is a costly operation. By chaining multiple operations
-//! with futures you will submit them all at once instead of one by one, thereby reducing this
-//! cost.
-//! - Futures keep alive the resources and objects used by the GPU so that they don't get destroyed
-//! while they are still in use.
-//!
-//! The last point means that you should keep futures alive in your program for as long as their
-//! corresponding operation is potentially still being executed by the GPU. Dropping a future
-//! earlier will block the current thread (after flushing, if necessary) until the GPU has finished
-//! the operation, which is usually not what you want.
-//!
-//! If you write a function that submits an operation to the GPU in your program, you are
-//! encouraged to let this function return the corresponding future and let the caller handle it.
-//! This way the caller will be able to chain multiple futures together and decide when it wants to
-//! keep the future alive or drop it.
-//!
-//! # Executing an operation after a future
-//!
-//! Respecting the order of operations on the GPU is important, as it is what *proves* vulkano that
-//! what you are doing is indeed safe. For example if you submit two operations that modify the
-//! same buffer, then you need to execute one after the other instead of submitting them
-//! independently. Failing to do so would mean that these two operations could potentially execute
-//! simultaneously on the GPU, which would be unsafe.
-//!
-//! This is done by calling one of the methods of the `GpuFuture` trait. For example calling
-//! `prev_future.then_execute(command_buffer)` takes ownership of `prev_future` and will make sure
-//! to only start executing `command_buffer` after the moment corresponding to `prev_future`
-//! happens. The object returned by the `then_execute` function is itself a future that corresponds
-//! to the moment when the execution of `command_buffer` ends.
-//!
-//! ## Between two different GPU queues
-//!
-//! When you want to perform an operation after another operation on two different queues, you
-//! **must** put a *semaphore* between them. Failure to do so would result in a runtime error.
-//! Adding a semaphore is a simple as replacing `prev_future.then_execute(...)` with
-//! `prev_future.then_signal_semaphore().then_execute(...)`.
-//!
-//! > **Note**: A common use-case is using a transfer queue (ie. a queue that is only capable of
-//! > performing transfer operations) to write data to a buffer, then read that data from the
-//! > rendering queue.
-//!
-//! What happens when you do so is that the first queue will execute the first set of operations
-//! (represented by `prev_future` in the example), then put a semaphore in the signalled state.
-//! Meanwhile the second queue blocks (if necessary) until that same semaphore gets signalled, and
-//! then only will execute the second set of operations.
-//!
-//! Since you want to avoid blocking the second queue as much as possible, you probably want to
-//! flush the operation to the first queue as soon as possible. This can easily be done by calling
-//! `then_signal_semaphore_and_flush()` instead of `then_signal_semaphore()`.
-//!
-//! ## Between several different GPU queues
-//!
-//! The `then_signal_semaphore()` method is appropriate when you perform an operation in one queue,
-//! and want to see the result in another queue. However in some situations you want to start
-//! multiple operations on several different queues.
-//!
-//! TODO: this is not yet implemented
-//!
-//! # Fences
-//!
-//! A `Fence` is an object that is used to signal the CPU when an operation on the GPU is finished.
-//!
-//! Signalling a fence is done by calling `then_signal_fence()` on a future. Just like semaphores,
-//! you are encouraged to use `then_signal_fence_and_flush()` instead.
-//!
-//! Signalling a fence is kind of a "terminator" to a chain of futures.
-//!
-//! TODO: lots of problems with how to use fences
-//! TODO: talk about fence + semaphore simultaneously
-//! TODO: talk about using fences to clean up
+pub(crate) use self::pipeline::{PipelineStageAccess, PipelineStageAccessSet};
+pub use self::{
+ future::{now, FlushError, GpuFuture},
+ pipeline::{
+ AccessFlags, BufferMemoryBarrier, DependencyFlags, DependencyInfo, ImageMemoryBarrier,
+ MemoryBarrier, PipelineMemoryAccess, PipelineStage, PipelineStages,
+ QueueFamilyOwnershipTransfer,
+ },
+};
use crate::device::Queue;
use std::sync::Arc;
-pub use self::event::Event;
-pub use self::fence::Fence;
-pub use self::fence::FenceWaitError;
-pub use self::future::now;
-pub use self::future::AccessCheckError;
-pub use self::future::AccessError;
-pub use self::future::FenceSignalFuture;
-pub use self::future::FlushError;
-pub use self::future::GpuFuture;
-pub use self::future::JoinFuture;
-pub use self::future::NowFuture;
-pub use self::future::SemaphoreSignalFuture;
-pub use self::pipeline::AccessFlags;
-pub use self::pipeline::PipelineMemoryAccess;
-pub use self::pipeline::PipelineStage;
-pub use self::pipeline::PipelineStages;
-pub use self::semaphore::ExternalSemaphoreHandleType;
-pub use self::semaphore::Semaphore;
-pub use self::semaphore::SemaphoreError;
-
-mod event;
-mod fence;
-mod future;
+pub mod event;
+pub mod fence;
+pub mod future;
mod pipeline;
-pub(crate) mod semaphore;
+pub mod semaphore;
/// Declares in which queue(s) a resource can be used.
///
@@ -148,7 +50,7 @@ pub enum SharingMode {
impl<'a> From<&'a Arc<Queue>> for SharingMode {
#[inline]
- fn from(queue: &'a Arc<Queue>) -> SharingMode {
+ fn from(_queue: &'a Arc<Queue>) -> SharingMode {
SharingMode::Exclusive
}
}
@@ -156,7 +58,12 @@ impl<'a> From<&'a Arc<Queue>> for SharingMode {
impl<'a> From<&'a [&'a Arc<Queue>]> for SharingMode {
#[inline]
fn from(queues: &'a [&'a Arc<Queue>]) -> SharingMode {
- SharingMode::Concurrent(queues.iter().map(|queue| queue.family().id()).collect())
+ SharingMode::Concurrent(
+ queues
+ .iter()
+ .map(|queue| queue.queue_family_index())
+ .collect(),
+ )
}
}
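
A hedged sketch of the two conversions, assuming `graphics_queue` and `transfer_queue` are `Arc<Queue>`s from different queue families:

    // One queue yields `Exclusive`; a slice of queues yields `Concurrent`
    // over the queues' family indices.
    let exclusive: SharingMode = (&graphics_queue).into();
    let queues = [&graphics_queue, &transfer_queue];
    let concurrent: SharingMode = (&queues[..]).into();
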
@@ -164,10 +71,26 @@ impl<'a> From<&'a [&'a Arc<Queue>]> for SharingMode {
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Sharing<I>
where
- I: Iterator<Item = u32>,
+ I: IntoIterator<Item = u32>,
{
    /// The resource is used in only one queue family.
Exclusive,
/// The resource is used in multiple queue families. Can be slower than `Exclusive`.
Concurrent(I),
}
+
+/// How the memory of a resource is currently being accessed.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub(crate) enum CurrentAccess {
+ /// The resource is currently being accessed exclusively by the CPU.
+ CpuExclusive,
+
+ /// The resource is currently being accessed exclusively by the GPU.
+ /// The GPU can have multiple exclusive accesses, if they are separated by synchronization.
+ ///
+ /// `gpu_writes` must not be 0. If it's decremented to 0, switch to `Shared`.
+ GpuExclusive { gpu_reads: usize, gpu_writes: usize },
+
+ /// The resource is not currently being accessed, or is being accessed for reading only.
+ Shared { cpu_reads: usize, gpu_reads: usize },
+}
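
A sketch of the invariant documented on `GpuExclusive`, using a hypothetical helper (`end_gpu_write` is not part of this diff) that releases one GPU write lock:

    // When the last GPU write is released, fall back to `Shared` and keep the
    // reader count; otherwise just decrement the write count.
    fn end_gpu_write(access: &mut CurrentAccess) {
        match access {
            CurrentAccess::GpuExclusive { gpu_reads, gpu_writes } if *gpu_writes == 1 => {
                *access = CurrentAccess::Shared { cpu_reads: 0, gpu_reads: *gpu_reads };
            }
            CurrentAccess::GpuExclusive { gpu_writes, .. } => *gpu_writes -= 1,
            _ => unreachable!("a GPU write must have been started first"),
        }
    }
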
diff --git a/src/sync/pipeline.rs b/src/sync/pipeline.rs
index 62cd95c..53e3941 100644
--- a/src/sync/pipeline.rs
+++ b/src/sync/pipeline.rs
@@ -7,80 +7,550 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
-use std::ops;
+use crate::{
+ buffer::Buffer,
+ descriptor_set::layout::DescriptorType,
+ device::{Device, QueueFlags},
+ image::{sys::Image, ImageAspects, ImageLayout, ImageSubresourceRange},
+ macros::{vulkan_bitflags, vulkan_bitflags_enum},
+ shader::ShaderStages,
+ DeviceSize, RequirementNotMet, Version,
+};
+use ahash::HashMap;
+use once_cell::sync::Lazy;
+use smallvec::SmallVec;
+use std::{ops::Range, sync::Arc};
-macro_rules! pipeline_stages {
- ($($elem:ident, $var:ident => $val:expr, $queue:expr;)+) => (
- #[derive(Debug, Copy, Clone, PartialEq, Eq)]
- pub struct PipelineStages {
- $(
- pub $elem: bool,
- )+
+vulkan_bitflags_enum! {
+ #[non_exhaustive]
+ /// A set of [`PipelineStage`] values.
+ PipelineStages impl {
+ /// Returns whether `self` contains stages that are only available in
+ /// `VkPipelineStageFlagBits2`.
+ pub(crate) fn is_2(self) -> bool {
+ !(self
+ - (PipelineStages::TOP_OF_PIPE
+ | PipelineStages::DRAW_INDIRECT
+ | PipelineStages::VERTEX_INPUT
+ | PipelineStages::VERTEX_SHADER
+ | PipelineStages::TESSELLATION_CONTROL_SHADER
+ | PipelineStages::TESSELLATION_EVALUATION_SHADER
+ | PipelineStages::GEOMETRY_SHADER
+ | PipelineStages::FRAGMENT_SHADER
+ | PipelineStages::EARLY_FRAGMENT_TESTS
+ | PipelineStages::LATE_FRAGMENT_TESTS
+ | PipelineStages::COLOR_ATTACHMENT_OUTPUT
+ | PipelineStages::COMPUTE_SHADER
+ | PipelineStages::ALL_TRANSFER
+ | PipelineStages::BOTTOM_OF_PIPE
+ | PipelineStages::HOST
+ | PipelineStages::ALL_GRAPHICS
+ | PipelineStages::ALL_COMMANDS
+ | PipelineStages::TRANSFORM_FEEDBACK
+ | PipelineStages::CONDITIONAL_RENDERING
+ | PipelineStages::ACCELERATION_STRUCTURE_BUILD
+ | PipelineStages::RAY_TRACING_SHADER
+ | PipelineStages::FRAGMENT_DENSITY_PROCESS
+ | PipelineStages::FRAGMENT_SHADING_RATE_ATTACHMENT
+ | PipelineStages::COMMAND_PREPROCESS
+ | PipelineStages::TASK_SHADER
+ | PipelineStages::MESH_SHADER))
+ .is_empty()
}
- impl PipelineStages {
- /// Builds an `PipelineStages` struct with none of the stages set.
- pub fn none() -> PipelineStages {
- PipelineStages {
- $(
- $elem: false,
- )+
- }
+ /// Replaces and unsets flags that are equivalent to multiple other flags.
+ ///
+ /// This may set flags that are not supported by the device, so this is for internal use only
+ /// and should not be passed on to Vulkan.
+ pub(crate) fn expand(mut self, queue_flags: QueueFlags) -> Self {
+ if self.intersects(PipelineStages::ALL_COMMANDS) {
+ self -= PipelineStages::ALL_COMMANDS;
+ self |= queue_flags.into();
}
- }
- impl From<PipelineStages> for ash::vk::PipelineStageFlags {
- #[inline]
- fn from(val: PipelineStages) -> Self {
- let mut result = ash::vk::PipelineStageFlags::empty();
- $(
- if val.$elem { result |= $val }
- )+
- result
+ if self.intersects(PipelineStages::ALL_GRAPHICS) {
+ self -= PipelineStages::ALL_GRAPHICS;
+ self |= QueueFlags::GRAPHICS.into();
}
- }
- impl ops::BitOr for PipelineStages {
- type Output = PipelineStages;
+ if self.intersects(PipelineStages::VERTEX_INPUT) {
+ self -= PipelineStages::VERTEX_INPUT;
+ self |= PipelineStages::INDEX_INPUT | PipelineStages::VERTEX_ATTRIBUTE_INPUT;
+ }
- #[inline]
- fn bitor(self, rhs: PipelineStages) -> PipelineStages {
- PipelineStages {
- $(
- $elem: self.$elem || rhs.$elem,
- )+
- }
+ if self.intersects(PipelineStages::PRE_RASTERIZATION_SHADERS) {
+ self -= PipelineStages::PRE_RASTERIZATION_SHADERS;
+ self |= PipelineStages::VERTEX_SHADER
+ | PipelineStages::TESSELLATION_CONTROL_SHADER
+ | PipelineStages::TESSELLATION_EVALUATION_SHADER
+ | PipelineStages::GEOMETRY_SHADER
+ | PipelineStages::TASK_SHADER
+ | PipelineStages::MESH_SHADER;
}
- }
- impl ops::BitOrAssign for PipelineStages {
- #[inline]
- fn bitor_assign(&mut self, rhs: PipelineStages) {
- $(
- self.$elem = self.$elem || rhs.$elem;
- )+
+ if self.intersects(PipelineStages::ALL_TRANSFER) {
+ self -= PipelineStages::ALL_TRANSFER;
+ self |= PipelineStages::COPY
+ | PipelineStages::RESOLVE
+ | PipelineStages::BLIT
+ | PipelineStages::CLEAR
+ | PipelineStages::ACCELERATION_STRUCTURE_COPY;
}
+
+ self
}
- #[derive(Debug, Copy, Clone, PartialEq, Eq)]
- #[repr(u32)]
- pub enum PipelineStage {
+ pub(crate) fn with_earlier(self) -> Self {
+ STAGE_ORDER.iter().rev().fold(
+ self,
+ |stages, &(before, after)| if stages.intersects(after) {
+ stages.union(before)
+ } else {
+ stages
+ }
+ )
+ }
+
+ pub(crate) fn with_later(self) -> Self {
+ STAGE_ORDER.iter().fold(
+ self,
+ |stages, &(before, after)| if stages.intersects(before) {
+ stages.union(after)
+ } else {
+ stages
+ }
+ )
+ }
+ },
+
+ /// A single stage in the device's processing pipeline.
+ PipelineStage,
+
+ = PipelineStageFlags2(u64);
+
+ /// A pseudo-stage representing the start of the pipeline.
+ TOP_OF_PIPE, TopOfPipe = TOP_OF_PIPE,
+
+ /// Indirect buffers are read.
+ DRAW_INDIRECT, DrawIndirect = DRAW_INDIRECT,
+
+ /// Vertex and index buffers are read.
+ ///
+ /// It is currently equivalent to setting all of the following flags, but automatically
+ /// omitting any that are not supported in a given context. It also implicitly includes future
+ /// flags that are added to Vulkan, if they are not yet supported by Vulkano.
+ /// - `index_input`
+ /// - `vertex_attribute_input`
+ VERTEX_INPUT, VertexInput = VERTEX_INPUT,
+
+ /// Vertex shaders are executed.
+ VERTEX_SHADER, VertexShader = VERTEX_SHADER,
+
+ /// Tessellation control shaders are executed.
+ TESSELLATION_CONTROL_SHADER, TessellationControlShader = TESSELLATION_CONTROL_SHADER,
+
+ /// Tessellation evaluation shaders are executed.
+ TESSELLATION_EVALUATION_SHADER, TessellationEvaluationShader = TESSELLATION_EVALUATION_SHADER,
+
+ /// Geometry shaders are executed.
+ GEOMETRY_SHADER, GeometryShader = GEOMETRY_SHADER,
+
+ /// Fragment shaders are executed.
+ FRAGMENT_SHADER, FragmentShader = FRAGMENT_SHADER,
+
+ /// Early fragment tests (depth and stencil tests before fragment shading) are performed.
+ /// Subpass load operations for framebuffer attachments with a depth/stencil format are
+ /// performed.
+ EARLY_FRAGMENT_TESTS, EarlyFragmentTests = EARLY_FRAGMENT_TESTS,
+
+ /// Late fragment tests (depth and stencil tests after fragment shading) are performed.
+ /// Subpass store operations for framebuffer attachments with a depth/stencil format are
+ /// performed.
+ LATE_FRAGMENT_TESTS, LateFragmentTests = LATE_FRAGMENT_TESTS,
+
+ /// The final color values are output from the pipeline after blending.
+ /// Subpass load and store operations, multisample resolve operations for framebuffer
+ /// attachments with a color or depth/stencil format, and `clear_attachments` are performed.
+ COLOR_ATTACHMENT_OUTPUT, ColorAttachmentOutput = COLOR_ATTACHMENT_OUTPUT,
+
+ /// Compute shaders are executed.
+ COMPUTE_SHADER, ComputeShader = COMPUTE_SHADER,
+
+ /// The set of all current and future transfer pipeline stages.
+ ///
+ /// It is currently equivalent to setting all of the following flags, but automatically
+ /// omitting any that are not supported in a given context. It also implicitly includes future
+ /// flags that are added to Vulkan, if they are not yet supported by Vulkano.
+ /// - `copy`
+ /// - `blit`
+ /// - `resolve`
+ /// - `clear`
+ /// - `acceleration_structure_copy`
+ ALL_TRANSFER, AllTransfer = ALL_TRANSFER,
+
+ /// A pseudo-stage representing the end of the pipeline.
+ BOTTOM_OF_PIPE, BottomOfPipe = BOTTOM_OF_PIPE,
+
+ /// A pseudo-stage representing reads and writes to device memory on the host.
+ HOST, Host = HOST,
+
+ /// The set of all current and future graphics pipeline stages.
+ ///
+ /// It is currently equivalent to setting all of the following flags, but automatically
+ /// omitting any that are not supported in a given context. It also implicitly includes future
+ /// flags that are added to Vulkan, if they are not yet supported by Vulkano.
+ /// - `draw_indirect`
+ /// - `task_shader`
+ /// - `mesh_shader`
+ /// - `vertex_input`
+ /// - `vertex_shader`
+ /// - `tessellation_control_shader`
+ /// - `tessellation_evaluation_shader`
+ /// - `geometry_shader`
+ /// - `fragment_shader`
+ /// - `early_fragment_tests`
+ /// - `late_fragment_tests`
+ /// - `color_attachment_output`
+ /// - `conditional_rendering`
+ /// - `transform_feedback`
+ /// - `fragment_shading_rate_attachment`
+ /// - `fragment_density_process`
+ /// - `invocation_mask`
+ ALL_GRAPHICS, AllGraphics = ALL_GRAPHICS,
+
+ /// The set of all current and future pipeline stages of all types.
+ ///
+ /// It is currently equivalent to setting all flags in `PipelineStages`, but automatically
+ /// omitting any that are not supported in a given context. It also implicitly includes future
+ /// flags that are added to Vulkan, if they are not yet supported by Vulkano.
+ ALL_COMMANDS, AllCommands = ALL_COMMANDS,
+
+ /// The `copy_buffer`, `copy_image`, `copy_buffer_to_image`, `copy_image_to_buffer` and
+ /// `copy_query_pool_results` commands are executed.
+ COPY, Copy = COPY {
+ api_version: V1_3,
+ device_extensions: [khr_synchronization2],
+ },
+
+ /// The `resolve_image` command is executed.
+ RESOLVE, Resolve = RESOLVE {
+ api_version: V1_3,
+ device_extensions: [khr_synchronization2],
+ },
+
+ /// The `blit_image` command is executed.
+ BLIT, Blit = BLIT {
+ api_version: V1_3,
+ device_extensions: [khr_synchronization2],
+ },
+
+ /// The `clear_color_image`, `clear_depth_stencil_image`, `fill_buffer` and `update_buffer`
+ /// commands are executed.
+ CLEAR, Clear = CLEAR {
+ api_version: V1_3,
+ device_extensions: [khr_synchronization2],
+ },
+
+ /// Index buffers are read.
+ INDEX_INPUT, IndexInput = INDEX_INPUT {
+ api_version: V1_3,
+ device_extensions: [khr_synchronization2],
+ },
+
+ /// Vertex buffers are read.
+ VERTEX_ATTRIBUTE_INPUT, VertexAttributeInput = VERTEX_ATTRIBUTE_INPUT {
+ api_version: V1_3,
+ device_extensions: [khr_synchronization2],
+ },
+
+ /// The various pre-rasterization shader types are executed.
+ ///
+ /// It is currently equivalent to setting all of the following flags, but automatically
+ /// omitting any that are not supported in a given context. It also implicitly includes future
+ /// flags that are added to Vulkan, if they are not yet supported by Vulkano.
+ /// - `vertex_shader`
+ /// - `tessellation_control_shader`
+ /// - `tessellation_evaluation_shader`
+ /// - `geometry_shader`
+ /// - `task_shader`
+ /// - `mesh_shader`
+ PRE_RASTERIZATION_SHADERS, PreRasterizationShaders = PRE_RASTERIZATION_SHADERS {
+ api_version: V1_3,
+ device_extensions: [khr_synchronization2],
+ },
+
+ /// Video decode operations are performed.
+ VIDEO_DECODE, VideoDecode = VIDEO_DECODE_KHR {
+ device_extensions: [khr_video_decode_queue],
+ },
+
+ /// Video encode operations are performed.
+ VIDEO_ENCODE, VideoEncode = VIDEO_ENCODE_KHR {
+ device_extensions: [khr_video_encode_queue],
+ },
+
+ /// Vertex attribute output values are written to the transform feedback buffers.
+ TRANSFORM_FEEDBACK, TransformFeedback = TRANSFORM_FEEDBACK_EXT {
+ device_extensions: [ext_transform_feedback],
+ },
+
+ /// The predicate of conditional rendering is read.
+ CONDITIONAL_RENDERING, ConditionalRendering = CONDITIONAL_RENDERING_EXT {
+ device_extensions: [ext_conditional_rendering],
+ },
+
+    /// Acceleration structure commands are executed.
+ ACCELERATION_STRUCTURE_BUILD, AccelerationStructureBuild = ACCELERATION_STRUCTURE_BUILD_KHR {
+ device_extensions: [khr_acceleration_structure, nv_ray_tracing],
+ },
+
+ /// The various ray tracing shader types are executed.
+ RAY_TRACING_SHADER, RayTracingShader = RAY_TRACING_SHADER_KHR {
+ device_extensions: [khr_ray_tracing_pipeline, nv_ray_tracing],
+ },
+
+ /// The fragment density map is read to generate the fragment areas.
+ FRAGMENT_DENSITY_PROCESS, FragmentDensityProcess = FRAGMENT_DENSITY_PROCESS_EXT {
+ device_extensions: [ext_fragment_density_map],
+ },
+
+ /// The fragment shading rate attachment or shading rate image is read to determine the
+ /// fragment shading rate for portions of a rasterized primitive.
+ FRAGMENT_SHADING_RATE_ATTACHMENT, FragmentShadingRateAttachment = FRAGMENT_SHADING_RATE_ATTACHMENT_KHR {
+ device_extensions: [khr_fragment_shading_rate],
+ },
+
+ /// Device-side preprocessing for generated commands via the `preprocess_generated_commands`
+ /// command is handled.
+ COMMAND_PREPROCESS, CommandPreprocess = COMMAND_PREPROCESS_NV {
+ device_extensions: [nv_device_generated_commands],
+ },
+
+ /// Task shaders are executed.
+ TASK_SHADER, TaskShader = TASK_SHADER_EXT {
+ device_extensions: [ext_mesh_shader, nv_mesh_shader],
+ },
+
+ /// Mesh shaders are executed.
+ MESH_SHADER, MeshShader = MESH_SHADER_EXT {
+ device_extensions: [ext_mesh_shader, nv_mesh_shader],
+ },
+
+ /// Subpass shading shaders are executed.
+ SUBPASS_SHADING, SubpassShading = SUBPASS_SHADING_HUAWEI {
+ device_extensions: [huawei_subpass_shading],
+ },
+
+ /// The invocation mask image is read to optimize ray dispatch.
+ INVOCATION_MASK, InvocationMask = INVOCATION_MASK_HUAWEI {
+ device_extensions: [huawei_invocation_mask],
+ },
+
+ /// The `copy_acceleration_structure` command is executed.
+ ACCELERATION_STRUCTURE_COPY, AccelerationStructureCopy = ACCELERATION_STRUCTURE_COPY_KHR {
+ device_extensions: [khr_ray_tracing_maintenance1],
+ },
+
+ /// Micromap commands are executed.
+ MICROMAP_BUILD, MicromapBuild = MICROMAP_BUILD_EXT {
+ device_extensions: [ext_opacity_micromap],
+ },
+
+ /// Optical flow operations are performed.
+ OPTICAL_FLOW, OpticalFlow = OPTICAL_FLOW_NV {
+ device_extensions: [nv_optical_flow],
+ },
+}
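
A hedged sketch of `expand` (a crate-internal helper) on a transfer-capable queue; the meta-stage is replaced by the concrete stages it stands for:

    // `ALL_TRANSFER` expands into the individual transfer stages; the result
    // may include stages the device does not support, hence internal use only.
    let stages = PipelineStages::ALL_TRANSFER.expand(QueueFlags::TRANSFER);
    assert!(stages.contains(PipelineStages::COPY | PipelineStages::BLIT | PipelineStages::CLEAR));
    assert!(!stages.intersects(PipelineStages::ALL_TRANSFER));
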
+
+macro_rules! stage_order {
+ {
+ $((
+ $($before:ident)|+,
+ $($after:ident)|+,
+ ),)+
+ } => {
+ static STAGE_ORDER: [(PipelineStages, PipelineStages); 15] = [
$(
- $var = $val.as_raw(),
+ (
+ PipelineStages::empty()
+ $(.union(PipelineStages::$before))+
+ ,
+ PipelineStages::empty()
+ $(.union(PipelineStages::$after))+
+ ),
)+
+ ];
+ };
+}
+
+// Per
+// https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-pipeline-stages-types
+stage_order! {
+ (
+ TOP_OF_PIPE,
+ DRAW_INDIRECT
+ | COPY | RESOLVE | BLIT | CLEAR
+ | VIDEO_DECODE | VIDEO_ENCODE
+ | CONDITIONAL_RENDERING
+ | COMMAND_PREPROCESS
+ | ACCELERATION_STRUCTURE_BUILD
+ | SUBPASS_SHADING
+ | ACCELERATION_STRUCTURE_COPY
+ | MICROMAP_BUILD
+ | OPTICAL_FLOW,
+ ),
+
+ (
+ DRAW_INDIRECT,
+ COMPUTE_SHADER | INDEX_INPUT | RAY_TRACING_SHADER | TASK_SHADER,
+ ),
+
+ (
+ INDEX_INPUT,
+ VERTEX_ATTRIBUTE_INPUT,
+ ),
+
+ (
+ VERTEX_ATTRIBUTE_INPUT,
+ VERTEX_SHADER,
+ ),
+
+ (
+ VERTEX_SHADER,
+ TESSELLATION_CONTROL_SHADER,
+ ),
+
+ (
+ TESSELLATION_CONTROL_SHADER,
+ TESSELLATION_EVALUATION_SHADER,
+ ),
+
+ (
+ TESSELLATION_EVALUATION_SHADER,
+ GEOMETRY_SHADER,
+ ),
+
+ (
+ GEOMETRY_SHADER,
+ TRANSFORM_FEEDBACK,
+ ),
+
+ (
+ TASK_SHADER,
+ MESH_SHADER,
+ ),
+
+ (
+ TRANSFORM_FEEDBACK | MESH_SHADER,
+ FRAGMENT_SHADING_RATE_ATTACHMENT,
+ ),
+
+ (
+ FRAGMENT_DENSITY_PROCESS | FRAGMENT_SHADING_RATE_ATTACHMENT,
+ EARLY_FRAGMENT_TESTS,
+ ),
+
+ (
+ EARLY_FRAGMENT_TESTS,
+ FRAGMENT_SHADER,
+ ),
+
+ (
+ FRAGMENT_SHADER,
+ LATE_FRAGMENT_TESTS,
+ ),
+
+ (
+ LATE_FRAGMENT_TESTS,
+ COLOR_ATTACHMENT_OUTPUT,
+ ),
+
+ (
+ COLOR_ATTACHMENT_OUTPUT
+ | COMPUTE_SHADER
+ | COPY | RESOLVE | BLIT | CLEAR
+ | VIDEO_DECODE | VIDEO_ENCODE
+ | CONDITIONAL_RENDERING
+ | COMMAND_PREPROCESS
+ | ACCELERATION_STRUCTURE_BUILD | RAY_TRACING_SHADER
+ | SUBPASS_SHADING
+ | ACCELERATION_STRUCTURE_COPY
+ | MICROMAP_BUILD
+ | OPTICAL_FLOW,
+ BOTTOM_OF_PIPE,
+ ),
+}
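
A hedged sketch of how the table is consulted: `with_earlier` folds over it in reverse, repeatedly pulling in every logically earlier stage (both helpers are crate-internal):

    // The source scope of a barrier on the fragment shader implicitly covers
    // every stage ordered before it, back to the top of the pipe.
    let src = PipelineStages::FRAGMENT_SHADER.with_earlier();
    assert!(src.contains(
        PipelineStages::EARLY_FRAGMENT_TESTS
            | PipelineStages::VERTEX_SHADER
            | PipelineStages::TOP_OF_PIPE
    ));
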
+
+impl From<QueueFlags> for PipelineStages {
+ /// Corresponds to the table "[Supported pipeline stage flags]" in the Vulkan specification.
+ ///
+ /// [Supported pipeline stage flags]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-pipeline-stages-supported
+ #[inline]
+ fn from(val: QueueFlags) -> Self {
+ let mut result = PipelineStages::TOP_OF_PIPE
+ | PipelineStages::BOTTOM_OF_PIPE
+ | PipelineStages::HOST
+ | PipelineStages::ALL_COMMANDS;
+
+ if val.intersects(QueueFlags::GRAPHICS | QueueFlags::COMPUTE | QueueFlags::TRANSFER) {
+ result |= PipelineStages::ALL_TRANSFER
+ | PipelineStages::COPY
+ | PipelineStages::RESOLVE
+ | PipelineStages::BLIT
+ | PipelineStages::CLEAR
+ | PipelineStages::ACCELERATION_STRUCTURE_COPY;
}
- impl PipelineStage {
- #[inline]
- pub fn required_queue_flags(&self) -> ash::vk::QueueFlags {
- match self {
- $(
- Self::$var => $queue,
- )+
- }
- }
+ if val.intersects(QueueFlags::GRAPHICS) {
+ result |= PipelineStages::DRAW_INDIRECT
+ | PipelineStages::VERTEX_INPUT
+ | PipelineStages::VERTEX_SHADER
+ | PipelineStages::TESSELLATION_CONTROL_SHADER
+ | PipelineStages::TESSELLATION_EVALUATION_SHADER
+ | PipelineStages::GEOMETRY_SHADER
+ | PipelineStages::FRAGMENT_SHADER
+ | PipelineStages::EARLY_FRAGMENT_TESTS
+ | PipelineStages::LATE_FRAGMENT_TESTS
+ | PipelineStages::COLOR_ATTACHMENT_OUTPUT
+ | PipelineStages::ALL_GRAPHICS
+ | PipelineStages::INDEX_INPUT
+ | PipelineStages::VERTEX_ATTRIBUTE_INPUT
+ | PipelineStages::PRE_RASTERIZATION_SHADERS
+ | PipelineStages::CONDITIONAL_RENDERING
+ | PipelineStages::TRANSFORM_FEEDBACK
+ | PipelineStages::COMMAND_PREPROCESS
+ | PipelineStages::FRAGMENT_SHADING_RATE_ATTACHMENT
+ | PipelineStages::TASK_SHADER
+ | PipelineStages::MESH_SHADER
+ | PipelineStages::FRAGMENT_DENSITY_PROCESS
+ | PipelineStages::SUBPASS_SHADING
+ | PipelineStages::INVOCATION_MASK;
+ }
+
+ if val.intersects(QueueFlags::COMPUTE) {
+ result |= PipelineStages::DRAW_INDIRECT
+ | PipelineStages::COMPUTE_SHADER
+ | PipelineStages::CONDITIONAL_RENDERING
+ | PipelineStages::COMMAND_PREPROCESS
+ | PipelineStages::ACCELERATION_STRUCTURE_BUILD
+ | PipelineStages::RAY_TRACING_SHADER
+ | PipelineStages::MICROMAP_BUILD;
+ }
+
+ if val.intersects(QueueFlags::VIDEO_DECODE) {
+ result |= PipelineStages::VIDEO_DECODE;
+ }
+
+ if val.intersects(QueueFlags::VIDEO_ENCODE) {
+ result |= PipelineStages::VIDEO_ENCODE;
}
- );
+
+ if val.intersects(QueueFlags::OPTICAL_FLOW) {
+ result |= PipelineStages::OPTICAL_FLOW;
+ }
+
+ result
+ }
}
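
A hedged sketch of the conversion for a compute-only queue:

    // A compute-only queue supports dispatch and indirect reads, plus the
    // transfer stages, but none of the graphics-only stages.
    let supported = PipelineStages::from(QueueFlags::COMPUTE);
    assert!(supported.contains(PipelineStages::COMPUTE_SHADER | PipelineStages::DRAW_INDIRECT));
    assert!(!supported.intersects(PipelineStages::FRAGMENT_SHADER));
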
impl From<PipelineStage> for ash::vk::PipelineStageFlags {
@@ -90,177 +560,434 @@ impl From<PipelineStage> for ash::vk::PipelineStageFlags {
}
}
-pipeline_stages! {
- top_of_pipe, TopOfPipe => ash::vk::PipelineStageFlags::TOP_OF_PIPE, ash::vk::QueueFlags::empty();
- draw_indirect, DrawIndirect => ash::vk::PipelineStageFlags::DRAW_INDIRECT, ash::vk::QueueFlags::GRAPHICS | ash::vk::QueueFlags::COMPUTE;
- vertex_input, VertexInput => ash::vk::PipelineStageFlags::VERTEX_INPUT, ash::vk::QueueFlags::GRAPHICS;
- vertex_shader, VertexShader => ash::vk::PipelineStageFlags::VERTEX_SHADER, ash::vk::QueueFlags::GRAPHICS;
- tessellation_control_shader, TessellationControlShader => ash::vk::PipelineStageFlags::TESSELLATION_CONTROL_SHADER, ash::vk::QueueFlags::GRAPHICS;
- tessellation_evaluation_shader, TessellationEvaluationShader => ash::vk::PipelineStageFlags::TESSELLATION_EVALUATION_SHADER, ash::vk::QueueFlags::GRAPHICS;
- geometry_shader, GeometryShader => ash::vk::PipelineStageFlags::GEOMETRY_SHADER, ash::vk::QueueFlags::GRAPHICS;
- fragment_shader, FragmentShader => ash::vk::PipelineStageFlags::FRAGMENT_SHADER, ash::vk::QueueFlags::GRAPHICS;
- early_fragment_tests, EarlyFragmentTests => ash::vk::PipelineStageFlags::EARLY_FRAGMENT_TESTS, ash::vk::QueueFlags::GRAPHICS;
- late_fragment_tests, LateFragmentTests => ash::vk::PipelineStageFlags::LATE_FRAGMENT_TESTS, ash::vk::QueueFlags::GRAPHICS;
- color_attachment_output, ColorAttachmentOutput => ash::vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT, ash::vk::QueueFlags::GRAPHICS;
- compute_shader, ComputeShader => ash::vk::PipelineStageFlags::COMPUTE_SHADER, ash::vk::QueueFlags::COMPUTE;
- transfer, Transfer => ash::vk::PipelineStageFlags::TRANSFER, ash::vk::QueueFlags::GRAPHICS | ash::vk::QueueFlags::COMPUTE | ash::vk::QueueFlags::TRANSFER;
- bottom_of_pipe, BottomOfPipe => ash::vk::PipelineStageFlags::BOTTOM_OF_PIPE, ash::vk::QueueFlags::empty();
- host, Host => ash::vk::PipelineStageFlags::HOST, ash::vk::QueueFlags::empty();
- all_graphics, AllGraphics => ash::vk::PipelineStageFlags::ALL_GRAPHICS, ash::vk::QueueFlags::GRAPHICS;
- all_commands, AllCommands => ash::vk::PipelineStageFlags::ALL_COMMANDS, ash::vk::QueueFlags::empty();
-}
-
-macro_rules! access_flags {
- ($($elem:ident => $val:expr,)+) => (
- #[derive(Debug, Copy, Clone)]
- #[allow(missing_docs)]
- pub struct AccessFlags {
- $(
- pub $elem: bool,
- )+
+impl From<PipelineStages> for ash::vk::PipelineStageFlags {
+ #[inline]
+ fn from(val: PipelineStages) -> Self {
+ Self::from_raw(ash::vk::PipelineStageFlags2::from(val).as_raw() as u32)
+ }
+}
+
+vulkan_bitflags! {
+ #[non_exhaustive]
+
+ /// A set of memory access types that are included in a memory dependency.
+ AccessFlags impl {
+        /// Returns whether `self` contains flags that are only available in
+ /// `VkAccessFlagBits2`.
+ pub(crate) fn is_2(self) -> bool {
+ !(self
+ - (AccessFlags::INDIRECT_COMMAND_READ
+ | AccessFlags::INDEX_READ
+ | AccessFlags::VERTEX_ATTRIBUTE_READ
+ | AccessFlags::UNIFORM_READ
+ | AccessFlags::INPUT_ATTACHMENT_READ
+ | AccessFlags::SHADER_READ
+ | AccessFlags::SHADER_WRITE
+ | AccessFlags::COLOR_ATTACHMENT_READ
+ | AccessFlags::COLOR_ATTACHMENT_WRITE
+ | AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ
+ | AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE
+ | AccessFlags::TRANSFER_READ
+ | AccessFlags::TRANSFER_WRITE
+ | AccessFlags::HOST_READ
+ | AccessFlags::HOST_WRITE
+ | AccessFlags::MEMORY_READ
+ | AccessFlags::MEMORY_WRITE
+ | AccessFlags::SHADER_SAMPLED_READ
+ | AccessFlags::SHADER_STORAGE_READ
+ | AccessFlags::SHADER_STORAGE_WRITE
+ | AccessFlags::VIDEO_DECODE_READ
+ | AccessFlags::VIDEO_DECODE_WRITE
+ | AccessFlags::VIDEO_ENCODE_READ
+ | AccessFlags::VIDEO_ENCODE_WRITE
+ | AccessFlags::TRANSFORM_FEEDBACK_WRITE
+ | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ
+ | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_WRITE
+ | AccessFlags::CONDITIONAL_RENDERING_READ
+ | AccessFlags::COMMAND_PREPROCESS_READ
+ | AccessFlags::COMMAND_PREPROCESS_WRITE
+ | AccessFlags::FRAGMENT_SHADING_RATE_ATTACHMENT_READ
+ | AccessFlags::ACCELERATION_STRUCTURE_READ
+ | AccessFlags::ACCELERATION_STRUCTURE_WRITE
+ | AccessFlags::FRAGMENT_DENSITY_MAP_READ
+ | AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT
+ | AccessFlags::INVOCATION_MASK_READ))
+ .is_empty()
}
- impl AccessFlags {
- /// Builds an `AccessFlags` struct with all bits set.
- pub fn all() -> AccessFlags {
- AccessFlags {
- $(
- $elem: true,
- )+
- }
+ /// Replaces and unsets flags that are equivalent to multiple other flags.
+ ///
+ /// This may set flags that are not supported by the device, so this is for internal use
+ /// only and should not be passed on to Vulkan.
+ #[allow(dead_code)] // TODO: use this function
+ pub(crate) fn expand(mut self) -> Self {
+ if self.intersects(AccessFlags::SHADER_READ) {
+ self -= AccessFlags::SHADER_READ;
+ self |= AccessFlags::UNIFORM_READ
+ | AccessFlags::SHADER_SAMPLED_READ
+ | AccessFlags::SHADER_STORAGE_READ
+ | AccessFlags::SHADER_BINDING_TABLE_READ;
}
- /// Builds an `AccessFlags` struct with none of the bits set.
- pub fn none() -> AccessFlags {
- AccessFlags {
- $(
- $elem: false,
- )+
- }
+ if self.intersects(AccessFlags::SHADER_WRITE) {
+ self -= AccessFlags::SHADER_WRITE;
+ self |= AccessFlags::SHADER_STORAGE_WRITE;
}
- }
- impl From<AccessFlags> for ash::vk::AccessFlags {
- #[inline]
- fn from(val: AccessFlags) -> Self {
- let mut result = ash::vk::AccessFlags::empty();
- $(
- if val.$elem { result |= $val }
- )+
- result
- }
+ self
}
+ }
+ = AccessFlags2(u64);
- impl ops::BitOr for AccessFlags {
- type Output = AccessFlags;
+ /// Read access to an indirect buffer.
+ INDIRECT_COMMAND_READ = INDIRECT_COMMAND_READ,
- #[inline]
- fn bitor(self, rhs: AccessFlags) -> AccessFlags {
- AccessFlags {
- $(
- $elem: self.$elem || rhs.$elem,
- )+
- }
- }
- }
+ /// Read access to an index buffer.
+ INDEX_READ = INDEX_READ,
- impl ops::BitOrAssign for AccessFlags {
- #[inline]
- fn bitor_assign(&mut self, rhs: AccessFlags) {
- $(
- self.$elem = self.$elem || rhs.$elem;
- )+
- }
- }
- );
-}
+ /// Read access to a vertex buffer.
+ VERTEX_ATTRIBUTE_READ = VERTEX_ATTRIBUTE_READ,
+
+ /// Read access to a uniform buffer in a shader.
+ UNIFORM_READ = UNIFORM_READ,
+
+ /// Read access to an input attachment in a fragment shader, within a render pass.
+ INPUT_ATTACHMENT_READ = INPUT_ATTACHMENT_READ,
+
+ /// Read access to a buffer or image in a shader.
+ ///
+ /// It is currently equivalent to setting all of the following flags, but automatically
+ /// omitting any that are not supported in a given context. It also implicitly includes future
+ /// flags that are added to Vulkan, if they are not yet supported by Vulkano.
+ /// - `uniform_read`
+ /// - `shader_sampled_read`
+ /// - `shader_storage_read`
+ /// - `shader_binding_table_read`
+ SHADER_READ = SHADER_READ,
+
+ /// Write access to a buffer or image in a shader.
+ ///
+ /// It is currently equivalent to `shader_storage_write`. It also implicitly includes future
+ /// flags that are added to Vulkan, if they are not yet supported by Vulkano.
+ SHADER_WRITE = SHADER_WRITE,
+
+ /// Read access to a color attachment during blending, logic operations or
+ /// subpass load operations.
+ COLOR_ATTACHMENT_READ = COLOR_ATTACHMENT_READ,
+
+ /// Write access to a color, resolve or depth/stencil resolve attachment during a render pass
+ /// or subpass store operations.
+ COLOR_ATTACHMENT_WRITE = COLOR_ATTACHMENT_WRITE,
+
+ /// Read access to a depth/stencil attachment during depth/stencil operations or
+ /// subpass load operations.
+ DEPTH_STENCIL_ATTACHMENT_READ = DEPTH_STENCIL_ATTACHMENT_READ,
+
+ /// Write access to a depth/stencil attachment during depth/stencil operations or
+ /// subpass store operations.
+ DEPTH_STENCIL_ATTACHMENT_WRITE = DEPTH_STENCIL_ATTACHMENT_WRITE,
+
+ /// Read access to a buffer or image during a copy, blit or resolve command.
+ TRANSFER_READ = TRANSFER_READ,
+
+ /// Write access to a buffer or image during a copy, blit, resolve or clear command.
+ TRANSFER_WRITE = TRANSFER_WRITE,
+
+ /// Read access performed by the host.
+ HOST_READ = HOST_READ,
+
+ /// Write access performed by the host.
+ HOST_WRITE = HOST_WRITE,
+
+ /// Any type of read access.
+ ///
+ /// This is equivalent to setting all `_read` flags that are allowed in the given context.
+ MEMORY_READ = MEMORY_READ,
+
+ /// Any type of write access.
+ ///
+ /// This is equivalent to setting all `_write` flags that are allowed in the given context.
+ MEMORY_WRITE = MEMORY_WRITE,
+
+ /// Read access to a uniform texel buffer or sampled image in a shader.
+ SHADER_SAMPLED_READ = SHADER_SAMPLED_READ {
+ api_version: V1_3,
+ device_extensions: [khr_synchronization2],
+ },
+
+ /// Read access to a storage buffer, storage texel buffer or storage image in a shader.
+ SHADER_STORAGE_READ = SHADER_STORAGE_READ {
+ api_version: V1_3,
+ device_extensions: [khr_synchronization2],
+ },
+
+ /// Write access to a storage buffer, storage texel buffer or storage image in a shader.
+ SHADER_STORAGE_WRITE = SHADER_STORAGE_WRITE {
+ api_version: V1_3,
+ device_extensions: [khr_synchronization2],
+ },
+
+ /// Read access to an image or buffer as part of a video decode operation.
+ VIDEO_DECODE_READ = VIDEO_DECODE_READ_KHR {
+ device_extensions: [khr_video_decode_queue],
+ },
+
+ /// Write access to an image or buffer as part of a video decode operation.
+ VIDEO_DECODE_WRITE = VIDEO_DECODE_WRITE_KHR {
+ device_extensions: [khr_video_decode_queue],
+ },
+
+ /// Read access to an image or buffer as part of a video encode operation.
+ VIDEO_ENCODE_READ = VIDEO_ENCODE_READ_KHR {
+ device_extensions: [khr_video_encode_queue],
+ },
+
+ /// Write access to an image or buffer as part of a video encode operation.
+ VIDEO_ENCODE_WRITE = VIDEO_ENCODE_WRITE_KHR {
+ device_extensions: [khr_video_encode_queue],
+ },
+
+ /// Write access to a transform feedback buffer during transform feedback operations.
+ TRANSFORM_FEEDBACK_WRITE = TRANSFORM_FEEDBACK_WRITE_EXT {
+ device_extensions: [ext_transform_feedback],
+ },
+
+ /// Read access to a transform feedback counter buffer during transform feedback operations.
+ TRANSFORM_FEEDBACK_COUNTER_READ = TRANSFORM_FEEDBACK_COUNTER_READ_EXT {
+ device_extensions: [ext_transform_feedback],
+ },
+
+ /// Write access to a transform feedback counter buffer during transform feedback operations.
+ TRANSFORM_FEEDBACK_COUNTER_WRITE = TRANSFORM_FEEDBACK_COUNTER_WRITE_EXT {
+ device_extensions: [ext_transform_feedback],
+ },
+
+ /// Read access to a predicate during conditional rendering.
+ CONDITIONAL_RENDERING_READ = CONDITIONAL_RENDERING_READ_EXT {
+ device_extensions: [ext_conditional_rendering],
+ },
+
+ /// Read access to preprocess buffers input to `preprocess_generated_commands`.
+ COMMAND_PREPROCESS_READ = COMMAND_PREPROCESS_READ_NV {
+ device_extensions: [nv_device_generated_commands],
+ },
+
+    /// Write access to sequences buffers output by `preprocess_generated_commands`.
+ COMMAND_PREPROCESS_WRITE = COMMAND_PREPROCESS_WRITE_NV {
+ device_extensions: [nv_device_generated_commands],
+ },
+
+ /// Read access to a fragment shading rate attachment during rasterization.
+ FRAGMENT_SHADING_RATE_ATTACHMENT_READ = FRAGMENT_SHADING_RATE_ATTACHMENT_READ_KHR {
+ device_extensions: [khr_fragment_shading_rate],
+ },
-access_flags! {
- indirect_command_read => ash::vk::AccessFlags::INDIRECT_COMMAND_READ,
- index_read => ash::vk::AccessFlags::INDEX_READ,
- vertex_attribute_read => ash::vk::AccessFlags::VERTEX_ATTRIBUTE_READ,
- uniform_read => ash::vk::AccessFlags::UNIFORM_READ,
- input_attachment_read => ash::vk::AccessFlags::INPUT_ATTACHMENT_READ,
- shader_read => ash::vk::AccessFlags::SHADER_READ,
- shader_write => ash::vk::AccessFlags::SHADER_WRITE,
- color_attachment_read => ash::vk::AccessFlags::COLOR_ATTACHMENT_READ,
- color_attachment_write => ash::vk::AccessFlags::COLOR_ATTACHMENT_WRITE,
- depth_stencil_attachment_read => ash::vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ,
- depth_stencil_attachment_write => ash::vk::AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE,
- transfer_read => ash::vk::AccessFlags::TRANSFER_READ,
- transfer_write => ash::vk::AccessFlags::TRANSFER_WRITE,
- host_read => ash::vk::AccessFlags::HOST_READ,
- host_write => ash::vk::AccessFlags::HOST_WRITE,
- memory_read => ash::vk::AccessFlags::MEMORY_READ,
- memory_write => ash::vk::AccessFlags::MEMORY_WRITE,
+ /// Read access to an acceleration structure or acceleration structure scratch buffer during
+ /// trace, build or copy commands.
+ ACCELERATION_STRUCTURE_READ = ACCELERATION_STRUCTURE_READ_KHR {
+ device_extensions: [khr_acceleration_structure, nv_ray_tracing],
+ },
+
+ /// Write access to an acceleration structure or acceleration structure scratch buffer during
+ /// trace, build or copy commands.
+ ACCELERATION_STRUCTURE_WRITE = ACCELERATION_STRUCTURE_WRITE_KHR {
+ device_extensions: [khr_acceleration_structure, nv_ray_tracing],
+ },
+
+ /// Read access to a fragment density map attachment during dynamic fragment density map
+ /// operations.
+ FRAGMENT_DENSITY_MAP_READ = FRAGMENT_DENSITY_MAP_READ_EXT {
+ device_extensions: [ext_fragment_density_map],
+ },
+
+ /// Read access to color attachments when performing advanced blend operations.
+ COLOR_ATTACHMENT_READ_NONCOHERENT = COLOR_ATTACHMENT_READ_NONCOHERENT_EXT {
+ device_extensions: [ext_blend_operation_advanced],
+ },
+
+ /// Read access to an invocation mask image.
+ INVOCATION_MASK_READ = INVOCATION_MASK_READ_HUAWEI {
+ device_extensions: [huawei_invocation_mask],
+ },
+
+ /// Read access to a shader binding table.
+ SHADER_BINDING_TABLE_READ = SHADER_BINDING_TABLE_READ_KHR {
+ device_extensions: [khr_ray_tracing_maintenance1],
+ },
+
+ /// Read access to a micromap object.
+ MICROMAP_READ = MICROMAP_READ_EXT {
+ device_extensions: [ext_opacity_micromap],
+ },
+
+ /// Write access to a micromap object.
+ MICROMAP_WRITE = MICROMAP_WRITE_EXT {
+ device_extensions: [ext_opacity_micromap],
+ },
+
+ /// Read access to a buffer or image during optical flow operations.
+ OPTICAL_FLOW_READ = OPTICAL_FLOW_READ_NV {
+ device_extensions: [nv_optical_flow],
+ },
+
+ /// Write access to a buffer or image during optical flow operations.
+ OPTICAL_FLOW_WRITE = OPTICAL_FLOW_WRITE_NV {
+ device_extensions: [nv_optical_flow],
+ },
}
-impl AccessFlags {
- /// Returns true if the access flags can be used with the given pipeline stages.
+impl From<PipelineStages> for AccessFlags {
+ /// Corresponds to the table "[Supported access types]" in the Vulkan specification.
///
- /// Corresponds to `Table 4. Supported access types` in section `6.1.3. Access Types` of the
- /// Vulkan specs.
- pub fn is_compatible_with(&self, stages: &PipelineStages) -> bool {
- if stages.all_commands {
- return true;
+ /// [Supported access types]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-access-types-supported
+ #[inline]
+ fn from(mut val: PipelineStages) -> Self {
+ if val.is_empty() {
+ return AccessFlags::empty();
}
- if self.indirect_command_read && !stages.draw_indirect && !stages.all_graphics {
- return false;
+ val = val.expand(QueueFlags::GRAPHICS | QueueFlags::COMPUTE | QueueFlags::TRANSFER);
+ let mut result = AccessFlags::MEMORY_READ | AccessFlags::MEMORY_WRITE;
+
+ if val.intersects(PipelineStages::DRAW_INDIRECT) {
+ result |=
+ AccessFlags::INDIRECT_COMMAND_READ | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ;
}
- if (self.index_read || self.vertex_attribute_read)
- && !stages.vertex_input
- && !stages.all_graphics
- {
- return false;
+ if val.intersects(
+ PipelineStages::VERTEX_SHADER
+ | PipelineStages::TESSELLATION_CONTROL_SHADER
+ | PipelineStages::TESSELLATION_EVALUATION_SHADER
+ | PipelineStages::GEOMETRY_SHADER
+ | PipelineStages::FRAGMENT_SHADER
+ | PipelineStages::COMPUTE_SHADER
+ | PipelineStages::RAY_TRACING_SHADER
+ | PipelineStages::TASK_SHADER
+ | PipelineStages::MESH_SHADER,
+ ) {
+ result |= AccessFlags::SHADER_READ
+ | AccessFlags::UNIFORM_READ
+ | AccessFlags::SHADER_SAMPLED_READ
+ | AccessFlags::SHADER_STORAGE_READ
+ | AccessFlags::SHADER_WRITE
+ | AccessFlags::SHADER_STORAGE_WRITE
+ | AccessFlags::ACCELERATION_STRUCTURE_READ;
}
- if (self.uniform_read || self.shader_read || self.shader_write)
- && !stages.vertex_shader
- && !stages.tessellation_control_shader
- && !stages.tessellation_evaluation_shader
- && !stages.geometry_shader
- && !stages.fragment_shader
- && !stages.compute_shader
- && !stages.all_graphics
+ if val.intersects(PipelineStages::FRAGMENT_SHADER | PipelineStages::SUBPASS_SHADING) {
+ result |= AccessFlags::INPUT_ATTACHMENT_READ;
+ }
+
+ if val
+ .intersects(PipelineStages::EARLY_FRAGMENT_TESTS | PipelineStages::LATE_FRAGMENT_TESTS)
{
- return false;
+ result |= AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ
+ | AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE;
}
- if self.input_attachment_read && !stages.fragment_shader && !stages.all_graphics {
- return false;
+ if val.intersects(PipelineStages::COLOR_ATTACHMENT_OUTPUT) {
+ result |= AccessFlags::COLOR_ATTACHMENT_READ
+ | AccessFlags::COLOR_ATTACHMENT_WRITE
+ | AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT;
}
- if (self.color_attachment_read || self.color_attachment_write)
- && !stages.color_attachment_output
- && !stages.all_graphics
- {
- return false;
+ if val.intersects(PipelineStages::HOST) {
+ result |= AccessFlags::HOST_READ | AccessFlags::HOST_WRITE;
}
- if (self.depth_stencil_attachment_read || self.depth_stencil_attachment_write)
- && !stages.early_fragment_tests
- && !stages.late_fragment_tests
- && !stages.all_graphics
- {
- return false;
+ if val.intersects(
+ PipelineStages::COPY
+ | PipelineStages::RESOLVE
+ | PipelineStages::BLIT
+ | PipelineStages::ACCELERATION_STRUCTURE_COPY,
+ ) {
+ result |= AccessFlags::TRANSFER_READ | AccessFlags::TRANSFER_WRITE;
+ }
+
+ if val.intersects(PipelineStages::CLEAR) {
+ result |= AccessFlags::TRANSFER_WRITE;
}
- if (self.transfer_read || self.transfer_write) && !stages.transfer {
- return false;
+ if val.intersects(PipelineStages::INDEX_INPUT) {
+ result |= AccessFlags::INDEX_READ;
}
- if (self.host_read || self.host_write) && !stages.host {
- return false;
+ if val.intersects(PipelineStages::VERTEX_ATTRIBUTE_INPUT) {
+ result |= AccessFlags::VERTEX_ATTRIBUTE_READ;
}
- true
+ if val.intersects(PipelineStages::VIDEO_DECODE) {
+ result |= AccessFlags::VIDEO_DECODE_READ | AccessFlags::VIDEO_DECODE_WRITE;
+ }
+
+ if val.intersects(PipelineStages::VIDEO_ENCODE) {
+ result |= AccessFlags::VIDEO_ENCODE_READ | AccessFlags::VIDEO_ENCODE_WRITE;
+ }
+
+ if val.intersects(PipelineStages::TRANSFORM_FEEDBACK) {
+ result |= AccessFlags::TRANSFORM_FEEDBACK_WRITE
+ | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_WRITE
+ | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ;
+ }
+
+ if val.intersects(PipelineStages::CONDITIONAL_RENDERING) {
+ result |= AccessFlags::CONDITIONAL_RENDERING_READ;
+ }
+
+ if val.intersects(PipelineStages::ACCELERATION_STRUCTURE_BUILD) {
+ result |= AccessFlags::INDIRECT_COMMAND_READ
+ | AccessFlags::SHADER_READ
+ | AccessFlags::SHADER_SAMPLED_READ
+ | AccessFlags::SHADER_STORAGE_READ
+ | AccessFlags::SHADER_STORAGE_WRITE
+ | AccessFlags::TRANSFER_READ
+ | AccessFlags::TRANSFER_WRITE
+ | AccessFlags::ACCELERATION_STRUCTURE_READ
+ | AccessFlags::ACCELERATION_STRUCTURE_WRITE
+ | AccessFlags::MICROMAP_READ;
+ }
+
+ if val.intersects(PipelineStages::RAY_TRACING_SHADER) {
+ result |= AccessFlags::SHADER_BINDING_TABLE_READ;
+ }
+
+ if val.intersects(PipelineStages::FRAGMENT_DENSITY_PROCESS) {
+ result |= AccessFlags::FRAGMENT_DENSITY_MAP_READ;
+ }
+
+ if val.intersects(PipelineStages::FRAGMENT_SHADING_RATE_ATTACHMENT) {
+ result |= AccessFlags::FRAGMENT_SHADING_RATE_ATTACHMENT_READ;
+ }
+
+ if val.intersects(PipelineStages::COMMAND_PREPROCESS) {
+ result |= AccessFlags::COMMAND_PREPROCESS_READ | AccessFlags::COMMAND_PREPROCESS_WRITE;
+ }
+
+ if val.intersects(PipelineStages::INVOCATION_MASK) {
+ result |= AccessFlags::INVOCATION_MASK_READ;
+ }
+
+ if val.intersects(PipelineStages::MICROMAP_BUILD) {
+ result |= AccessFlags::MICROMAP_READ | AccessFlags::MICROMAP_WRITE;
+ }
+
+ if val.intersects(PipelineStages::OPTICAL_FLOW) {
+ result |= AccessFlags::OPTICAL_FLOW_READ | AccessFlags::OPTICAL_FLOW_WRITE;
+ }
+
+ result
+ }
+}
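+
+// A minimal illustrative check of the conversion above (not part of the library's test
+// suite; assumes the `contains` method generated by the crate's `vulkan_bitflags!` macro):
+#[cfg(test)]
+mod supported_access_types_sketch {
+    use super::*;
+
+    #[test]
+    fn color_attachment_output_accesses() {
+        let access = AccessFlags::from(PipelineStages::COLOR_ATTACHMENT_OUTPUT);
+
+        // Per the branch above, this stage supports color attachment reads and writes...
+        assert!(access.contains(
+            AccessFlags::COLOR_ATTACHMENT_READ | AccessFlags::COLOR_ATTACHMENT_WRITE
+        ));
+        // ...and every non-empty stage mask supports MEMORY_READ | MEMORY_WRITE.
+        assert!(access.contains(AccessFlags::MEMORY_READ | AccessFlags::MEMORY_WRITE));
+    }
+}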
+
+impl From<AccessFlags> for ash::vk::AccessFlags {
+ #[inline]
+ fn from(val: AccessFlags) -> Self {
+ Self::from_raw(ash::vk::AccessFlags2::from(val).as_raw() as u32)
}
}
/// The full specification of memory access by the pipeline for a particular resource.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct PipelineMemoryAccess {
/// The pipeline stages the resource will be accessed in.
pub stages: PipelineStages,
@@ -269,3 +996,1736 @@ pub struct PipelineMemoryAccess {
/// Whether the resource needs exclusive (mutable) access or can be shared.
pub exclusive: bool,
}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+#[allow(non_camel_case_types, dead_code)]
+#[repr(u8)]
+pub(crate) enum PipelineStageAccess {
+ // There is no stage/access for this, but it is a memory write operation nonetheless.
+ ImageLayoutTransition,
+
+ DrawIndirect_IndirectCommandRead,
+ DrawIndirect_TransformFeedbackCounterRead,
+ VertexShader_UniformRead,
+ VertexShader_ShaderSampledRead,
+ VertexShader_ShaderStorageRead,
+ VertexShader_ShaderStorageWrite,
+ VertexShader_AccelerationStructureRead,
+ TessellationControlShader_UniformRead,
+ TessellationControlShader_ShaderSampledRead,
+ TessellationControlShader_ShaderStorageRead,
+ TessellationControlShader_ShaderStorageWrite,
+ TessellationControlShader_AccelerationStructureRead,
+ TessellationEvaluationShader_UniformRead,
+ TessellationEvaluationShader_ShaderSampledRead,
+ TessellationEvaluationShader_ShaderStorageRead,
+ TessellationEvaluationShader_ShaderStorageWrite,
+ TessellationEvaluationShader_AccelerationStructureRead,
+ GeometryShader_UniformRead,
+ GeometryShader_ShaderSampledRead,
+ GeometryShader_ShaderStorageRead,
+ GeometryShader_ShaderStorageWrite,
+ GeometryShader_AccelerationStructureRead,
+ FragmentShader_UniformRead,
+ FragmentShader_InputAttachmentRead,
+ FragmentShader_ShaderSampledRead,
+ FragmentShader_ShaderStorageRead,
+ FragmentShader_ShaderStorageWrite,
+ FragmentShader_AccelerationStructureRead,
+ EarlyFragmentTests_DepthStencilAttachmentRead,
+ EarlyFragmentTests_DepthStencilAttachmentWrite,
+ LateFragmentTests_DepthStencilAttachmentRead,
+ LateFragmentTests_DepthStencilAttachmentWrite,
+ ColorAttachmentOutput_ColorAttachmentRead,
+ ColorAttachmentOutput_ColorAttachmentWrite,
+ ColorAttachmentOutput_ColorAttachmentReadNoncoherent,
+ ComputeShader_UniformRead,
+ ComputeShader_ShaderSampledRead,
+ ComputeShader_ShaderStorageRead,
+ ComputeShader_ShaderStorageWrite,
+ ComputeShader_AccelerationStructureRead,
+ Host_HostRead,
+ Host_HostWrite,
+ Copy_TransferRead,
+ Copy_TransferWrite,
+ Resolve_TransferRead,
+ Resolve_TransferWrite,
+ Blit_TransferRead,
+ Blit_TransferWrite,
+ Clear_TransferWrite,
+ IndexInput_IndexRead,
+ VertexAttributeInput_VertexAttributeRead,
+ VideoDecode_VideoDecodeRead,
+ VideoDecode_VideoDecodeWrite,
+ VideoEncode_VideoEncodeRead,
+ VideoEncode_VideoEncodeWrite,
+ TransformFeedback_TransformFeedbackWrite,
+ TransformFeedback_TransformFeedbackCounterRead,
+ TransformFeedback_TransformFeedbackCounterWrite,
+ ConditionalRendering_ConditionalRenderingRead,
+ AccelerationStructureBuild_IndirectCommandRead,
+ AccelerationStructureBuild_UniformRead,
+ AccelerationStructureBuild_TransferRead,
+ AccelerationStructureBuild_TransferWrite,
+ AccelerationStructureBuild_ShaderSampledRead,
+ AccelerationStructureBuild_ShaderStorageRead,
+ AccelerationStructureBuild_AccelerationStructureRead,
+ AccelerationStructureBuild_AccelerationStructureWrite,
+ AccelerationStructureBuild_MicromapRead,
+ RayTracingShader_UniformRead,
+ RayTracingShader_ShaderSampledRead,
+ RayTracingShader_ShaderStorageRead,
+ RayTracingShader_ShaderStorageWrite,
+ RayTracingShader_AccelerationStructureRead,
+ RayTracingShader_ShaderBindingTableRead,
+ FragmentDensityProcess_FragmentDensityMapRead,
+ FragmentShadingRateAttachment_FragmentShadingRateAttachmentRead,
+ CommandPreprocess_CommandPreprocessRead,
+ CommandPreprocess_CommandPreprocessWrite,
+ TaskShader_UniformRead,
+ TaskShader_ShaderSampledRead,
+ TaskShader_ShaderStorageRead,
+ TaskShader_ShaderStorageWrite,
+ TaskShader_AccelerationStructureRead,
+ MeshShader_UniformRead,
+ MeshShader_ShaderSampledRead,
+ MeshShader_ShaderStorageRead,
+ MeshShader_ShaderStorageWrite,
+ MeshShader_AccelerationStructureRead,
+ SubpassShading_InputAttachmentRead,
+ InvocationMask_InvocationMaskRead,
+ AccelerationStructureCopy_TransferRead,
+ AccelerationStructureCopy_TransferWrite,
+ OpticalFlow_OpticalFlowRead,
+ OpticalFlow_OpticalFlowWrite,
+ MicromapBuild_MicromapRead,
+ MicromapBuild_MicromapWrite,
+
+ // If there are ever more than 128 preceding values, then there will be a compile error:
+ // "discriminant value `128` assigned more than once"
+ __MAX_VALUE__ = 128,
+}
+
+impl PipelineStageAccess {
+ #[inline]
+ pub(crate) const fn is_write(self) -> bool {
+ matches!(
+ self,
+ PipelineStageAccess::ImageLayoutTransition
+ | PipelineStageAccess::VertexShader_ShaderStorageWrite
+ | PipelineStageAccess::TessellationControlShader_ShaderStorageWrite
+ | PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite
+ | PipelineStageAccess::GeometryShader_ShaderStorageWrite
+ | PipelineStageAccess::FragmentShader_ShaderStorageWrite
+ | PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentWrite
+ | PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentWrite
+ | PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentWrite
+ | PipelineStageAccess::ComputeShader_ShaderStorageWrite
+ | PipelineStageAccess::Host_HostWrite
+ | PipelineStageAccess::Copy_TransferWrite
+ | PipelineStageAccess::Resolve_TransferWrite
+ | PipelineStageAccess::Blit_TransferWrite
+ | PipelineStageAccess::Clear_TransferWrite
+ | PipelineStageAccess::VideoDecode_VideoDecodeWrite
+ | PipelineStageAccess::VideoEncode_VideoEncodeWrite
+ | PipelineStageAccess::TransformFeedback_TransformFeedbackWrite
+ | PipelineStageAccess::TransformFeedback_TransformFeedbackCounterWrite
+ | PipelineStageAccess::AccelerationStructureBuild_TransferWrite
+ | PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureWrite
+ | PipelineStageAccess::RayTracingShader_ShaderStorageWrite
+ | PipelineStageAccess::CommandPreprocess_CommandPreprocessWrite
+ | PipelineStageAccess::TaskShader_ShaderStorageWrite
+ | PipelineStageAccess::MeshShader_ShaderStorageWrite
+ | PipelineStageAccess::AccelerationStructureCopy_TransferWrite
+ | PipelineStageAccess::OpticalFlow_OpticalFlowWrite
+ | PipelineStageAccess::MicromapBuild_MicromapWrite
+ )
+ }
+
+ pub(crate) fn iter_descriptor_stages(
+ descriptor_type: DescriptorType,
+ stages_read: ShaderStages,
+ stages_write: ShaderStages,
+ ) -> impl Iterator<Item = Self> + 'static {
+ static MAP_READ: Lazy<
+ HashMap<DescriptorType, HashMap<PipelineStage, PipelineStageAccess>>,
+ > = Lazy::new(|| {
+ let uniform_read = [
+ DescriptorType::UniformBuffer,
+ DescriptorType::UniformBufferDynamic,
+ ]
+ .into_iter()
+ .map(|descriptor_type| {
+ (
+ descriptor_type,
+ [
+ (
+ PipelineStage::VertexShader,
+ PipelineStageAccess::VertexShader_UniformRead,
+ ),
+ (
+ PipelineStage::TessellationControlShader,
+ PipelineStageAccess::TessellationControlShader_UniformRead,
+ ),
+ (
+ PipelineStage::TessellationEvaluationShader,
+ PipelineStageAccess::TessellationEvaluationShader_UniformRead,
+ ),
+ (
+ PipelineStage::GeometryShader,
+ PipelineStageAccess::GeometryShader_UniformRead,
+ ),
+ (
+ PipelineStage::FragmentShader,
+ PipelineStageAccess::FragmentShader_UniformRead,
+ ),
+ (
+ PipelineStage::ComputeShader,
+ PipelineStageAccess::ComputeShader_UniformRead,
+ ),
+ (
+ PipelineStage::RayTracingShader,
+ PipelineStageAccess::RayTracingShader_UniformRead,
+ ),
+ (
+ PipelineStage::TaskShader,
+ PipelineStageAccess::TaskShader_UniformRead,
+ ),
+ (
+ PipelineStage::MeshShader,
+ PipelineStageAccess::MeshShader_UniformRead,
+ ),
+ ]
+ .into_iter()
+ .collect(),
+ )
+ });
+
+ let shader_sampled_read = [
+ DescriptorType::CombinedImageSampler,
+ DescriptorType::SampledImage,
+ DescriptorType::UniformTexelBuffer,
+ ]
+ .into_iter()
+ .map(|descriptor_type| {
+ (
+ descriptor_type,
+ [
+ (
+ PipelineStage::VertexShader,
+ PipelineStageAccess::VertexShader_ShaderSampledRead,
+ ),
+ (
+ PipelineStage::TessellationControlShader,
+ PipelineStageAccess::TessellationControlShader_ShaderSampledRead,
+ ),
+ (
+ PipelineStage::TessellationEvaluationShader,
+ PipelineStageAccess::TessellationEvaluationShader_ShaderSampledRead,
+ ),
+ (
+ PipelineStage::GeometryShader,
+ PipelineStageAccess::GeometryShader_ShaderSampledRead,
+ ),
+ (
+ PipelineStage::FragmentShader,
+ PipelineStageAccess::FragmentShader_ShaderSampledRead,
+ ),
+ (
+ PipelineStage::ComputeShader,
+ PipelineStageAccess::ComputeShader_ShaderSampledRead,
+ ),
+ (
+ PipelineStage::RayTracingShader,
+ PipelineStageAccess::RayTracingShader_ShaderSampledRead,
+ ),
+ (
+ PipelineStage::TaskShader,
+ PipelineStageAccess::TaskShader_ShaderSampledRead,
+ ),
+ (
+ PipelineStage::MeshShader,
+ PipelineStageAccess::MeshShader_ShaderSampledRead,
+ ),
+ ]
+ .into_iter()
+ .collect(),
+ )
+ });
+
+ let shader_storage_read = [
+ DescriptorType::StorageImage,
+ DescriptorType::StorageTexelBuffer,
+ DescriptorType::StorageBuffer,
+ DescriptorType::StorageBufferDynamic,
+ ]
+ .into_iter()
+ .map(|descriptor_type| {
+ (
+ descriptor_type,
+ [
+ (
+ PipelineStage::VertexShader,
+ PipelineStageAccess::VertexShader_ShaderStorageRead,
+ ),
+ (
+ PipelineStage::TessellationControlShader,
+ PipelineStageAccess::TessellationControlShader_ShaderStorageRead,
+ ),
+ (
+ PipelineStage::TessellationEvaluationShader,
+ PipelineStageAccess::TessellationEvaluationShader_ShaderStorageRead,
+ ),
+ (
+ PipelineStage::GeometryShader,
+ PipelineStageAccess::GeometryShader_ShaderStorageRead,
+ ),
+ (
+ PipelineStage::FragmentShader,
+ PipelineStageAccess::FragmentShader_ShaderStorageRead,
+ ),
+ (
+ PipelineStage::ComputeShader,
+ PipelineStageAccess::ComputeShader_ShaderStorageRead,
+ ),
+ (
+ PipelineStage::RayTracingShader,
+ PipelineStageAccess::RayTracingShader_ShaderStorageRead,
+ ),
+ (
+ PipelineStage::TaskShader,
+ PipelineStageAccess::TaskShader_ShaderStorageRead,
+ ),
+ (
+ PipelineStage::MeshShader,
+ PipelineStageAccess::MeshShader_ShaderStorageRead,
+ ),
+ ]
+ .into_iter()
+ .collect(),
+ )
+ });
+
+ let input_attachment_read =
+ [DescriptorType::InputAttachment]
+ .into_iter()
+ .map(|descriptor_type| {
+ (
+ descriptor_type,
+ [(
+ PipelineStage::FragmentShader,
+ PipelineStageAccess::FragmentShader_InputAttachmentRead,
+ )]
+ .into_iter()
+ .collect(),
+ )
+ });
+
+ uniform_read
+ .chain(shader_sampled_read)
+ .chain(shader_storage_read)
+ .chain(input_attachment_read)
+ .collect()
+ });
+ static MAP_WRITE: Lazy<
+ HashMap<DescriptorType, HashMap<PipelineStage, PipelineStageAccess>>,
+ > = Lazy::new(|| {
+ let shader_storage_write = [
+ DescriptorType::StorageImage,
+ DescriptorType::StorageTexelBuffer,
+ DescriptorType::StorageBuffer,
+ DescriptorType::StorageBufferDynamic,
+ ]
+ .into_iter()
+ .map(|descriptor_type| {
+ (
+ descriptor_type,
+ [
+ (
+ PipelineStage::VertexShader,
+ PipelineStageAccess::VertexShader_ShaderStorageWrite,
+ ),
+ (
+ PipelineStage::TessellationControlShader,
+ PipelineStageAccess::TessellationControlShader_ShaderStorageWrite,
+ ),
+ (
+ PipelineStage::TessellationEvaluationShader,
+ PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite,
+ ),
+ (
+ PipelineStage::GeometryShader,
+ PipelineStageAccess::GeometryShader_ShaderStorageWrite,
+ ),
+ (
+ PipelineStage::FragmentShader,
+ PipelineStageAccess::FragmentShader_ShaderStorageWrite,
+ ),
+ (
+ PipelineStage::ComputeShader,
+ PipelineStageAccess::ComputeShader_ShaderStorageWrite,
+ ),
+ (
+ PipelineStage::RayTracingShader,
+ PipelineStageAccess::RayTracingShader_ShaderStorageWrite,
+ ),
+ (
+ PipelineStage::TaskShader,
+ PipelineStageAccess::TaskShader_ShaderStorageWrite,
+ ),
+ (
+ PipelineStage::MeshShader,
+ PipelineStageAccess::MeshShader_ShaderStorageWrite,
+ ),
+ ]
+ .into_iter()
+ .collect(),
+ )
+ });
+
+ shader_storage_write.collect()
+ });
+
+ [
+ (stages_read, &*MAP_READ, "read"),
+ (stages_write, &*MAP_WRITE, "write"),
+ ]
+ .into_iter()
+ .filter(|(stages, _, _)| !stages.is_empty())
+ .flat_map(move |(stages, descriptor_map, access)| {
+ let stages_map = descriptor_map.get(&descriptor_type).unwrap_or_else(|| {
+ panic!(
+ "DescriptorType::{:?} does not {} memory",
+ descriptor_type, access,
+ )
+ });
+
+ PipelineStages::from(stages).into_iter().map(move |stage| {
+ *stages_map.get(&stage).unwrap_or_else(|| {
+ panic!(
+ "DescriptorType::{:?} does not {} memory in PipelineStage::{:?}",
+ descriptor_type, access, stage,
+ )
+ })
+ })
+ })
+ }
+}
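+
+// An illustrative sketch of `iter_descriptor_stages` (the `ShaderStages` flag names are
+// assumed): a storage buffer read in the fragment shader and written in the compute
+// shader yields one stage-access per (stage, direction) pair.
+//
+//     let accesses: Vec<_> = PipelineStageAccess::iter_descriptor_stages(
+//         DescriptorType::StorageBuffer,
+//         ShaderStages::FRAGMENT, // stages_read
+//         ShaderStages::COMPUTE,  // stages_write
+//     )
+//     .collect();
+//     // -> [FragmentShader_ShaderStorageRead, ComputeShader_ShaderStorageWrite]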
+
+impl TryFrom<PipelineStageAccess> for PipelineStage {
+ type Error = ();
+
+ #[inline]
+ fn try_from(val: PipelineStageAccess) -> Result<Self, Self::Error> {
+ Ok(match val {
+ PipelineStageAccess::ImageLayoutTransition => return Err(()),
+ PipelineStageAccess::DrawIndirect_IndirectCommandRead
+ | PipelineStageAccess::DrawIndirect_TransformFeedbackCounterRead => PipelineStage::DrawIndirect,
+ PipelineStageAccess::VertexShader_UniformRead
+ | PipelineStageAccess::VertexShader_ShaderSampledRead
+ | PipelineStageAccess::VertexShader_ShaderStorageRead
+ | PipelineStageAccess::VertexShader_ShaderStorageWrite
+ | PipelineStageAccess::VertexShader_AccelerationStructureRead => PipelineStage::VertexShader,
+ PipelineStageAccess::TessellationControlShader_UniformRead
+ | PipelineStageAccess::TessellationControlShader_ShaderSampledRead
+ | PipelineStageAccess::TessellationControlShader_ShaderStorageRead
+ | PipelineStageAccess::TessellationControlShader_ShaderStorageWrite
+ | PipelineStageAccess::TessellationControlShader_AccelerationStructureRead => PipelineStage::TessellationControlShader,
+ PipelineStageAccess::TessellationEvaluationShader_UniformRead
+ | PipelineStageAccess::TessellationEvaluationShader_ShaderSampledRead
+ | PipelineStageAccess::TessellationEvaluationShader_ShaderStorageRead
+ | PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite
+ | PipelineStageAccess::TessellationEvaluationShader_AccelerationStructureRead => PipelineStage::TessellationEvaluationShader,
+ PipelineStageAccess::GeometryShader_UniformRead
+ | PipelineStageAccess::GeometryShader_ShaderSampledRead
+ | PipelineStageAccess::GeometryShader_ShaderStorageRead
+ | PipelineStageAccess::GeometryShader_ShaderStorageWrite
+ | PipelineStageAccess::GeometryShader_AccelerationStructureRead => PipelineStage::GeometryShader,
+ PipelineStageAccess::FragmentShader_UniformRead
+ | PipelineStageAccess::FragmentShader_InputAttachmentRead
+ | PipelineStageAccess::FragmentShader_ShaderSampledRead
+ | PipelineStageAccess::FragmentShader_ShaderStorageRead
+ | PipelineStageAccess::FragmentShader_ShaderStorageWrite
+ | PipelineStageAccess::FragmentShader_AccelerationStructureRead => PipelineStage::FragmentShader,
+ PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentRead
+ | PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentWrite => PipelineStage::EarlyFragmentTests,
+ PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentRead
+ | PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentWrite => PipelineStage::LateFragmentTests,
+ PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentRead
+ | PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentWrite
+ | PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentReadNoncoherent => PipelineStage::ColorAttachmentOutput,
+ PipelineStageAccess::ComputeShader_UniformRead
+ | PipelineStageAccess::ComputeShader_ShaderSampledRead
+ | PipelineStageAccess::ComputeShader_ShaderStorageRead
+ | PipelineStageAccess::ComputeShader_ShaderStorageWrite
+ | PipelineStageAccess::ComputeShader_AccelerationStructureRead => PipelineStage::ComputeShader,
+ PipelineStageAccess::Host_HostRead
+ | PipelineStageAccess::Host_HostWrite => PipelineStage::Host,
+ PipelineStageAccess::Copy_TransferRead
+ | PipelineStageAccess::Copy_TransferWrite => PipelineStage::Copy,
+ PipelineStageAccess::Resolve_TransferRead
+ | PipelineStageAccess::Resolve_TransferWrite => PipelineStage::Resolve,
+ PipelineStageAccess::Blit_TransferRead
+ | PipelineStageAccess::Blit_TransferWrite => PipelineStage::Blit,
+ PipelineStageAccess::Clear_TransferWrite => PipelineStage::Clear,
+ PipelineStageAccess::IndexInput_IndexRead => PipelineStage::IndexInput,
+ PipelineStageAccess::VertexAttributeInput_VertexAttributeRead => PipelineStage::VertexAttributeInput,
+ PipelineStageAccess::VideoDecode_VideoDecodeRead
+ | PipelineStageAccess::VideoDecode_VideoDecodeWrite => PipelineStage::VideoDecode,
+ PipelineStageAccess::VideoEncode_VideoEncodeRead
+ | PipelineStageAccess::VideoEncode_VideoEncodeWrite => PipelineStage::VideoEncode,
+ PipelineStageAccess::TransformFeedback_TransformFeedbackWrite
+ | PipelineStageAccess::TransformFeedback_TransformFeedbackCounterRead
+ | PipelineStageAccess::TransformFeedback_TransformFeedbackCounterWrite => PipelineStage::TransformFeedback,
+ PipelineStageAccess::ConditionalRendering_ConditionalRenderingRead => PipelineStage::ConditionalRendering,
+ PipelineStageAccess::AccelerationStructureBuild_IndirectCommandRead
+ | PipelineStageAccess::AccelerationStructureBuild_UniformRead
+ | PipelineStageAccess::AccelerationStructureBuild_TransferRead
+ | PipelineStageAccess::AccelerationStructureBuild_TransferWrite
+ | PipelineStageAccess::AccelerationStructureBuild_ShaderSampledRead
+ | PipelineStageAccess::AccelerationStructureBuild_ShaderStorageRead
+ | PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureRead
+ | PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureWrite
+ | PipelineStageAccess::AccelerationStructureBuild_MicromapRead => PipelineStage::AccelerationStructureBuild,
+ PipelineStageAccess::RayTracingShader_UniformRead
+ | PipelineStageAccess::RayTracingShader_ShaderSampledRead
+ | PipelineStageAccess::RayTracingShader_ShaderStorageRead
+ | PipelineStageAccess::RayTracingShader_ShaderStorageWrite
+ | PipelineStageAccess::RayTracingShader_AccelerationStructureRead
+ | PipelineStageAccess::RayTracingShader_ShaderBindingTableRead => PipelineStage::RayTracingShader,
+ PipelineStageAccess::FragmentDensityProcess_FragmentDensityMapRead => PipelineStage::FragmentDensityProcess,
+ PipelineStageAccess::FragmentShadingRateAttachment_FragmentShadingRateAttachmentRead => PipelineStage::FragmentShadingRateAttachment,
+ PipelineStageAccess::CommandPreprocess_CommandPreprocessRead
+ | PipelineStageAccess::CommandPreprocess_CommandPreprocessWrite => PipelineStage::CommandPreprocess,
+ PipelineStageAccess::TaskShader_UniformRead
+ | PipelineStageAccess::TaskShader_ShaderSampledRead
+ | PipelineStageAccess::TaskShader_ShaderStorageRead
+ | PipelineStageAccess::TaskShader_ShaderStorageWrite
+ | PipelineStageAccess::TaskShader_AccelerationStructureRead => PipelineStage::TaskShader,
+ PipelineStageAccess::MeshShader_UniformRead
+ | PipelineStageAccess::MeshShader_ShaderSampledRead
+ | PipelineStageAccess::MeshShader_ShaderStorageRead
+ | PipelineStageAccess::MeshShader_ShaderStorageWrite
+ | PipelineStageAccess::MeshShader_AccelerationStructureRead => PipelineStage::MeshShader,
+ PipelineStageAccess::SubpassShading_InputAttachmentRead => PipelineStage::SubpassShading,
+ PipelineStageAccess::InvocationMask_InvocationMaskRead => PipelineStage::InvocationMask,
+ PipelineStageAccess::AccelerationStructureCopy_TransferRead
+ | PipelineStageAccess::AccelerationStructureCopy_TransferWrite => PipelineStage::AccelerationStructureCopy,
+ PipelineStageAccess::OpticalFlow_OpticalFlowRead
+ | PipelineStageAccess::OpticalFlow_OpticalFlowWrite => PipelineStage::OpticalFlow,
+ PipelineStageAccess::MicromapBuild_MicromapRead
+ | PipelineStageAccess::MicromapBuild_MicromapWrite => PipelineStage::MicromapBuild,
+ PipelineStageAccess::__MAX_VALUE__ => unreachable!(),
+ })
+ }
+}
+
+impl From<PipelineStageAccess> for AccessFlags {
+ #[inline]
+ fn from(val: PipelineStageAccess) -> Self {
+ match val {
+ PipelineStageAccess::ImageLayoutTransition => AccessFlags::empty(),
+ PipelineStageAccess::DrawIndirect_IndirectCommandRead
+ | PipelineStageAccess::AccelerationStructureBuild_IndirectCommandRead => AccessFlags::INDIRECT_COMMAND_READ,
+ PipelineStageAccess::IndexInput_IndexRead => AccessFlags::INDEX_READ,
+ PipelineStageAccess::VertexAttributeInput_VertexAttributeRead => AccessFlags::VERTEX_ATTRIBUTE_READ,
+ PipelineStageAccess::VertexShader_UniformRead
+ | PipelineStageAccess::TessellationControlShader_UniformRead
+ | PipelineStageAccess::TessellationEvaluationShader_UniformRead
+ | PipelineStageAccess::GeometryShader_UniformRead
+ | PipelineStageAccess::FragmentShader_UniformRead
+ | PipelineStageAccess::ComputeShader_UniformRead
+ | PipelineStageAccess::AccelerationStructureBuild_UniformRead
+ | PipelineStageAccess::RayTracingShader_UniformRead
+ | PipelineStageAccess::TaskShader_UniformRead
+ | PipelineStageAccess::MeshShader_UniformRead => AccessFlags::UNIFORM_READ,
+ PipelineStageAccess::FragmentShader_InputAttachmentRead
+ | PipelineStageAccess::SubpassShading_InputAttachmentRead => AccessFlags::INPUT_ATTACHMENT_READ,
+ PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentRead => AccessFlags::COLOR_ATTACHMENT_READ,
+ PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentWrite => AccessFlags::COLOR_ATTACHMENT_WRITE,
+ PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentRead
+ | PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentRead => AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ,
+ PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentWrite
+ | PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentWrite => AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE,
+ PipelineStageAccess::Copy_TransferRead
+ | PipelineStageAccess::Resolve_TransferRead
+ | PipelineStageAccess::Blit_TransferRead
+ | PipelineStageAccess::AccelerationStructureBuild_TransferRead
+ | PipelineStageAccess::AccelerationStructureCopy_TransferRead => AccessFlags::TRANSFER_READ,
+ PipelineStageAccess::Copy_TransferWrite
+ | PipelineStageAccess::Resolve_TransferWrite
+ | PipelineStageAccess::Blit_TransferWrite
+ | PipelineStageAccess::Clear_TransferWrite
+ | PipelineStageAccess::AccelerationStructureBuild_TransferWrite
+ | PipelineStageAccess::AccelerationStructureCopy_TransferWrite => AccessFlags::TRANSFER_WRITE,
+ PipelineStageAccess::Host_HostRead => AccessFlags::HOST_READ,
+ PipelineStageAccess::Host_HostWrite => AccessFlags::HOST_WRITE,
+ PipelineStageAccess::VertexShader_ShaderSampledRead
+ | PipelineStageAccess::TessellationControlShader_ShaderSampledRead
+ | PipelineStageAccess::TessellationEvaluationShader_ShaderSampledRead
+ | PipelineStageAccess::GeometryShader_ShaderSampledRead
+ | PipelineStageAccess::FragmentShader_ShaderSampledRead
+ | PipelineStageAccess::ComputeShader_ShaderSampledRead
+ | PipelineStageAccess::AccelerationStructureBuild_ShaderSampledRead
+ | PipelineStageAccess::RayTracingShader_ShaderSampledRead
+ | PipelineStageAccess::TaskShader_ShaderSampledRead
+ | PipelineStageAccess::MeshShader_ShaderSampledRead => AccessFlags::SHADER_SAMPLED_READ,
+ PipelineStageAccess::VertexShader_ShaderStorageRead
+ | PipelineStageAccess::TessellationControlShader_ShaderStorageRead
+ | PipelineStageAccess::TessellationEvaluationShader_ShaderStorageRead
+ | PipelineStageAccess::GeometryShader_ShaderStorageRead
+ | PipelineStageAccess::FragmentShader_ShaderStorageRead
+ | PipelineStageAccess::ComputeShader_ShaderStorageRead
+ | PipelineStageAccess::AccelerationStructureBuild_ShaderStorageRead
+ | PipelineStageAccess::RayTracingShader_ShaderStorageRead
+ | PipelineStageAccess::TaskShader_ShaderStorageRead
+ | PipelineStageAccess::MeshShader_ShaderStorageRead => AccessFlags::SHADER_STORAGE_READ,
+ PipelineStageAccess::VertexShader_ShaderStorageWrite
+ | PipelineStageAccess::TessellationControlShader_ShaderStorageWrite
+ | PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite
+ | PipelineStageAccess::GeometryShader_ShaderStorageWrite
+ | PipelineStageAccess::FragmentShader_ShaderStorageWrite
+ | PipelineStageAccess::ComputeShader_ShaderStorageWrite
+ | PipelineStageAccess::RayTracingShader_ShaderStorageWrite
+ | PipelineStageAccess::TaskShader_ShaderStorageWrite
+ | PipelineStageAccess::MeshShader_ShaderStorageWrite => AccessFlags::SHADER_STORAGE_WRITE,
+ PipelineStageAccess::VideoDecode_VideoDecodeRead => AccessFlags::VIDEO_DECODE_READ,
+ PipelineStageAccess::VideoDecode_VideoDecodeWrite => AccessFlags::VIDEO_DECODE_WRITE,
+ PipelineStageAccess::VideoEncode_VideoEncodeRead => AccessFlags::VIDEO_ENCODE_READ,
+ PipelineStageAccess::VideoEncode_VideoEncodeWrite => AccessFlags::VIDEO_ENCODE_WRITE,
+ PipelineStageAccess::TransformFeedback_TransformFeedbackWrite => AccessFlags::TRANSFORM_FEEDBACK_WRITE,
+ PipelineStageAccess::DrawIndirect_TransformFeedbackCounterRead
+ | PipelineStageAccess::TransformFeedback_TransformFeedbackCounterRead => AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ,
+ PipelineStageAccess::TransformFeedback_TransformFeedbackCounterWrite => AccessFlags::TRANSFORM_FEEDBACK_COUNTER_WRITE,
+ PipelineStageAccess::ConditionalRendering_ConditionalRenderingRead => AccessFlags::CONDITIONAL_RENDERING_READ,
+ PipelineStageAccess::CommandPreprocess_CommandPreprocessRead => AccessFlags::COMMAND_PREPROCESS_READ,
+ PipelineStageAccess::CommandPreprocess_CommandPreprocessWrite => AccessFlags::COMMAND_PREPROCESS_WRITE,
+ PipelineStageAccess::FragmentShadingRateAttachment_FragmentShadingRateAttachmentRead => AccessFlags::FRAGMENT_SHADING_RATE_ATTACHMENT_READ,
+ PipelineStageAccess::VertexShader_AccelerationStructureRead
+ | PipelineStageAccess::TessellationControlShader_AccelerationStructureRead
+ | PipelineStageAccess::TessellationEvaluationShader_AccelerationStructureRead
+ | PipelineStageAccess::GeometryShader_AccelerationStructureRead
+ | PipelineStageAccess::FragmentShader_AccelerationStructureRead
+ | PipelineStageAccess::ComputeShader_AccelerationStructureRead
+ | PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureRead
+ | PipelineStageAccess::RayTracingShader_AccelerationStructureRead
+ | PipelineStageAccess::TaskShader_AccelerationStructureRead
+ | PipelineStageAccess::MeshShader_AccelerationStructureRead => AccessFlags::ACCELERATION_STRUCTURE_READ,
+ PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureWrite => AccessFlags::ACCELERATION_STRUCTURE_WRITE,
+ PipelineStageAccess::FragmentDensityProcess_FragmentDensityMapRead => AccessFlags::FRAGMENT_DENSITY_MAP_READ,
+ PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentReadNoncoherent => AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT,
+ PipelineStageAccess::InvocationMask_InvocationMaskRead => AccessFlags::INVOCATION_MASK_READ,
+ PipelineStageAccess::RayTracingShader_ShaderBindingTableRead => AccessFlags::SHADER_BINDING_TABLE_READ,
+ PipelineStageAccess::AccelerationStructureBuild_MicromapRead
+ | PipelineStageAccess::MicromapBuild_MicromapRead => AccessFlags::MICROMAP_READ,
+ PipelineStageAccess::MicromapBuild_MicromapWrite => AccessFlags::MICROMAP_WRITE,
+ PipelineStageAccess::OpticalFlow_OpticalFlowRead => AccessFlags::OPTICAL_FLOW_READ,
+ PipelineStageAccess::OpticalFlow_OpticalFlowWrite => AccessFlags::OPTICAL_FLOW_WRITE,
+ PipelineStageAccess::__MAX_VALUE__ => unreachable!(),
+ }
+ }
+}
+
+#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash)]
+pub(crate) struct PipelineStageAccessSet(u128);
+
+#[allow(dead_code)]
+impl PipelineStageAccessSet {
+ #[inline]
+ pub(crate) const fn empty() -> Self {
+ Self(0)
+ }
+
+ #[inline]
+ pub(crate) const fn count(self) -> u32 {
+ self.0.count_ones()
+ }
+
+ #[inline]
+ pub(crate) const fn is_empty(self) -> bool {
+ self.0 == 0
+ }
+
+ #[inline]
+ pub(crate) const fn intersects(self, other: Self) -> bool {
+ self.0 & other.0 != 0
+ }
+
+ #[inline]
+ pub(crate) const fn contains(self, other: Self) -> bool {
+ self.0 & other.0 == other.0
+ }
+
+ #[inline]
+ pub(crate) const fn union(self, other: Self) -> Self {
+ Self(self.0 | other.0)
+ }
+
+ #[inline]
+ pub(crate) const fn intersection(self, other: Self) -> Self {
+ Self(self.0 & other.0)
+ }
+
+ #[inline]
+ pub(crate) const fn difference(self, other: Self) -> Self {
+ Self(self.0 & !other.0)
+ }
+
+ #[inline]
+ pub(crate) const fn symmetric_difference(self, other: Self) -> Self {
+ Self(self.0 ^ other.0)
+ }
+
+ #[inline]
+ pub(crate) fn contains_enum(self, val: PipelineStageAccess) -> bool {
+ self.intersects(val.into())
+ }
+}
+
+impl std::ops::BitAnd for PipelineStageAccessSet {
+ type Output = Self;
+
+ #[inline]
+ fn bitand(self, rhs: Self) -> Self {
+ self.intersection(rhs)
+ }
+}
+
+impl std::ops::BitAndAssign for PipelineStageAccessSet {
+ #[inline]
+ fn bitand_assign(&mut self, rhs: Self) {
+ *self = self.intersection(rhs);
+ }
+}
+
+impl std::ops::BitOr for PipelineStageAccessSet {
+ type Output = Self;
+
+ #[inline]
+ fn bitor(self, rhs: Self) -> Self {
+ self.union(rhs)
+ }
+}
+
+impl std::ops::BitOrAssign for PipelineStageAccessSet {
+ #[inline]
+ fn bitor_assign(&mut self, rhs: Self) {
+ *self = self.union(rhs);
+ }
+}
+
+impl std::ops::BitXor for PipelineStageAccessSet {
+ type Output = Self;
+
+ #[inline]
+ fn bitxor(self, rhs: Self) -> Self {
+ self.symmetric_difference(rhs)
+ }
+}
+
+impl std::ops::BitXorAssign for PipelineStageAccessSet {
+ #[inline]
+ fn bitxor_assign(&mut self, rhs: Self) {
+ *self = self.symmetric_difference(rhs);
+ }
+}
+
+impl std::ops::Sub for PipelineStageAccessSet {
+ type Output = Self;
+
+ #[inline]
+ fn sub(self, rhs: Self) -> Self {
+ self.difference(rhs)
+ }
+}
+
+impl std::ops::SubAssign for PipelineStageAccessSet {
+ #[inline]
+ fn sub_assign(&mut self, rhs: Self) {
+ *self = self.difference(rhs);
+ }
+}
+
+impl From<PipelineStageAccess> for PipelineStageAccessSet {
+ #[inline]
+ fn from(val: PipelineStageAccess) -> Self {
+ debug_assert!(val != PipelineStageAccess::__MAX_VALUE__); // `__MAX_VALUE__` is a sentinel, never a real stage/access.
+ Self(1u128 << val as u8)
+ }
+}
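+
+// A minimal usage sketch of the bitset (illustrative only): each enum value maps to one
+// bit of the `u128`, so sets build up with the operators above and are queried cheaply.
+//
+//     let mut set = PipelineStageAccessSet::from(PipelineStageAccess::Copy_TransferWrite);
+//     set |= PipelineStageAccessSet::from(PipelineStageAccess::ComputeShader_ShaderStorageRead);
+//     assert!(set.contains_enum(PipelineStageAccess::Copy_TransferWrite));
+//     assert_eq!(set.count(), 2);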
+
+impl From<PipelineStages> for PipelineStageAccessSet {
+ #[inline]
+ fn from(stages: PipelineStages) -> Self {
+ let mut result = Self::empty();
+
+ if stages.intersects(PipelineStages::DRAW_INDIRECT) {
+ result |= Self::from(PipelineStageAccess::DrawIndirect_IndirectCommandRead)
+ | Self::from(PipelineStageAccess::DrawIndirect_TransformFeedbackCounterRead)
+ }
+
+ if stages.intersects(PipelineStages::VERTEX_SHADER) {
+ result |= Self::from(PipelineStageAccess::VertexShader_UniformRead)
+ | Self::from(PipelineStageAccess::VertexShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::VertexShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::VertexShader_ShaderStorageWrite)
+ | Self::from(PipelineStageAccess::VertexShader_AccelerationStructureRead)
+ }
+
+ if stages.intersects(PipelineStages::TESSELLATION_CONTROL_SHADER) {
+ result |= Self::from(PipelineStageAccess::TessellationControlShader_UniformRead)
+ | Self::from(PipelineStageAccess::TessellationControlShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::TessellationControlShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::TessellationControlShader_ShaderStorageWrite)
+ | Self::from(
+ PipelineStageAccess::TessellationControlShader_AccelerationStructureRead,
+ )
+ }
+
+ if stages.intersects(PipelineStages::TESSELLATION_EVALUATION_SHADER) {
+ result |= Self::from(PipelineStageAccess::TessellationEvaluationShader_UniformRead)
+ | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite)
+ | Self::from(
+ PipelineStageAccess::TessellationEvaluationShader_AccelerationStructureRead,
+ )
+ }
+
+ if stages.intersects(PipelineStages::GEOMETRY_SHADER) {
+ result |= Self::from(PipelineStageAccess::GeometryShader_UniformRead)
+ | Self::from(PipelineStageAccess::GeometryShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::GeometryShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::GeometryShader_ShaderStorageWrite)
+ | Self::from(PipelineStageAccess::GeometryShader_AccelerationStructureRead)
+ }
+
+ if stages.intersects(PipelineStages::FRAGMENT_SHADER) {
+ result |= Self::from(PipelineStageAccess::FragmentShader_UniformRead)
+ | Self::from(PipelineStageAccess::FragmentShader_InputAttachmentRead)
+ | Self::from(PipelineStageAccess::FragmentShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::FragmentShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::FragmentShader_ShaderStorageWrite)
+ | Self::from(PipelineStageAccess::FragmentShader_AccelerationStructureRead)
+ }
+
+ if stages.intersects(PipelineStages::EARLY_FRAGMENT_TESTS) {
+ result |= Self::from(PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentRead)
+ | Self::from(PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentWrite)
+ }
+
+ if stages.intersects(PipelineStages::LATE_FRAGMENT_TESTS) {
+ result |= Self::from(PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentRead)
+ | Self::from(PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentWrite)
+ }
+
+ if stages.intersects(PipelineStages::COLOR_ATTACHMENT_OUTPUT) {
+ result |= Self::from(PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentRead)
+ | Self::from(PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentWrite)
+ | Self::from(
+ PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentReadNoncoherent,
+ )
+ }
+
+ if stages.intersects(PipelineStages::COMPUTE_SHADER) {
+ result |= Self::from(PipelineStageAccess::ComputeShader_UniformRead)
+ | Self::from(PipelineStageAccess::ComputeShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::ComputeShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::ComputeShader_ShaderStorageWrite)
+ | Self::from(PipelineStageAccess::ComputeShader_AccelerationStructureRead)
+ }
+
+ if stages.intersects(PipelineStages::HOST) {
+ result |= Self::from(PipelineStageAccess::Host_HostRead)
+ | Self::from(PipelineStageAccess::Host_HostWrite)
+ }
+
+ if stages.intersects(PipelineStages::COPY) {
+ result |= Self::from(PipelineStageAccess::Copy_TransferRead)
+ | Self::from(PipelineStageAccess::Copy_TransferWrite)
+ }
+
+ if stages.intersects(PipelineStages::RESOLVE) {
+ result |= Self::from(PipelineStageAccess::Resolve_TransferRead)
+ | Self::from(PipelineStageAccess::Resolve_TransferWrite)
+ }
+
+ if stages.intersects(PipelineStages::BLIT) {
+ result |= Self::from(PipelineStageAccess::Blit_TransferRead)
+ | Self::from(PipelineStageAccess::Blit_TransferWrite)
+ }
+
+ if stages.intersects(PipelineStages::CLEAR) {
+ result |= Self::from(PipelineStageAccess::Clear_TransferWrite)
+ }
+
+ if stages.intersects(PipelineStages::INDEX_INPUT) {
+ result |= Self::from(PipelineStageAccess::IndexInput_IndexRead)
+ }
+
+ if stages.intersects(PipelineStages::VERTEX_ATTRIBUTE_INPUT) {
+ result |= Self::from(PipelineStageAccess::VertexAttributeInput_VertexAttributeRead)
+ }
+
+ if stages.intersects(PipelineStages::VIDEO_DECODE) {
+ result |= Self::from(PipelineStageAccess::VideoDecode_VideoDecodeRead)
+ | Self::from(PipelineStageAccess::VideoDecode_VideoDecodeWrite)
+ }
+
+ if stages.intersects(PipelineStages::VIDEO_ENCODE) {
+ result |= Self::from(PipelineStageAccess::VideoEncode_VideoEncodeRead)
+ | Self::from(PipelineStageAccess::VideoEncode_VideoEncodeWrite)
+ }
+
+ if stages.intersects(PipelineStages::TRANSFORM_FEEDBACK) {
+ result |= Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackWrite)
+ | Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackCounterRead)
+ | Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackCounterWrite)
+ }
+
+ if stages.intersects(PipelineStages::CONDITIONAL_RENDERING) {
+ result |= Self::from(PipelineStageAccess::ConditionalRendering_ConditionalRenderingRead)
+ }
+
+ if stages.intersects(PipelineStages::ACCELERATION_STRUCTURE_BUILD) {
+ result |=
+ Self::from(PipelineStageAccess::AccelerationStructureBuild_IndirectCommandRead)
+ | Self::from(PipelineStageAccess::AccelerationStructureBuild_UniformRead)
+ | Self::from(PipelineStageAccess::AccelerationStructureBuild_TransferRead)
+ | Self::from(PipelineStageAccess::AccelerationStructureBuild_TransferWrite)
+ | Self::from(PipelineStageAccess::AccelerationStructureBuild_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::AccelerationStructureBuild_ShaderStorageRead)
+ | Self::from(
+ PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureRead,
+ )
+ | Self::from(
+ PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureWrite,
+ )
+ // | Self::from(PipelineStageAccess::AccelerationStructureBuild_MicromapRead)
+ }
+
+ if stages.intersects(PipelineStages::RAY_TRACING_SHADER) {
+ result |= Self::from(PipelineStageAccess::RayTracingShader_UniformRead)
+ | Self::from(PipelineStageAccess::RayTracingShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::RayTracingShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::RayTracingShader_ShaderStorageWrite)
+ | Self::from(PipelineStageAccess::RayTracingShader_AccelerationStructureRead)
+ // | Self::from(PipelineStageAccess::RayTracingShader_ShaderBindingTableRead)
+ }
+
+ if stages.intersects(PipelineStages::FRAGMENT_DENSITY_PROCESS) {
+ result |= Self::from(PipelineStageAccess::FragmentDensityProcess_FragmentDensityMapRead)
+ }
+
+ if stages.intersects(PipelineStages::FRAGMENT_SHADING_RATE_ATTACHMENT) {
+ result |=
+ PipelineStageAccess::FragmentShadingRateAttachment_FragmentShadingRateAttachmentRead
+ .into()
+ }
+
+ if stages.intersects(PipelineStages::COMMAND_PREPROCESS) {
+ result |= Self::from(PipelineStageAccess::CommandPreprocess_CommandPreprocessRead)
+ | Self::from(PipelineStageAccess::CommandPreprocess_CommandPreprocessWrite)
+ }
+
+ if stages.intersects(PipelineStages::TASK_SHADER) {
+ result |= Self::from(PipelineStageAccess::TaskShader_UniformRead)
+ | Self::from(PipelineStageAccess::TaskShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::TaskShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::TaskShader_ShaderStorageWrite)
+ | Self::from(PipelineStageAccess::TaskShader_AccelerationStructureRead)
+ }
+
+ if stages.intersects(PipelineStages::MESH_SHADER) {
+ result |= Self::from(PipelineStageAccess::MeshShader_UniformRead)
+ | Self::from(PipelineStageAccess::MeshShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::MeshShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::MeshShader_ShaderStorageWrite)
+ | Self::from(PipelineStageAccess::MeshShader_AccelerationStructureRead)
+ }
+
+ if stages.intersects(PipelineStages::SUBPASS_SHADING) {
+ result |= Self::from(PipelineStageAccess::SubpassShading_InputAttachmentRead)
+ }
+
+ if stages.intersects(PipelineStages::INVOCATION_MASK) {
+ result |= Self::from(PipelineStageAccess::InvocationMask_InvocationMaskRead)
+ }
+
+ /*
+ if stages.intersects(PipelineStages::OPTICAL_FLOW) {
+ result |= Self::from(PipelineStageAccess::OpticalFlow_OpticalFlowRead)
+ | Self::from(PipelineStageAccess::OpticalFlow_OpticalFlowWrite)
+ }
+
+ if stages.intersects(PipelineStages::MICROMAP_BUILD) {
+ result |= Self::from(PipelineStageAccess::MicromapBuild_MicromapWrite)
+ | Self::from(PipelineStageAccess::MicromapBuild_MicromapRead)
+ }
+ */
+
+ result
+ }
+}
+
+impl From<AccessFlags> for PipelineStageAccessSet {
+ #[inline]
+ fn from(access: AccessFlags) -> Self {
+ let mut result = Self::empty();
+
+ if access.intersects(AccessFlags::INDIRECT_COMMAND_READ) {
+ result |= Self::from(PipelineStageAccess::DrawIndirect_IndirectCommandRead)
+ | Self::from(PipelineStageAccess::AccelerationStructureBuild_IndirectCommandRead)
+ }
+
+ if access.intersects(AccessFlags::INDEX_READ) {
+ result |= Self::from(PipelineStageAccess::IndexInput_IndexRead)
+ }
+
+ if access.intersects(AccessFlags::VERTEX_ATTRIBUTE_READ) {
+ result |= Self::from(PipelineStageAccess::VertexAttributeInput_VertexAttributeRead)
+ }
+
+ if access.intersects(AccessFlags::UNIFORM_READ) {
+ result |= Self::from(PipelineStageAccess::VertexShader_UniformRead)
+ | Self::from(PipelineStageAccess::TessellationControlShader_UniformRead)
+ | Self::from(PipelineStageAccess::TessellationEvaluationShader_UniformRead)
+ | Self::from(PipelineStageAccess::GeometryShader_UniformRead)
+ | Self::from(PipelineStageAccess::FragmentShader_UniformRead)
+ | Self::from(PipelineStageAccess::ComputeShader_UniformRead)
+ | Self::from(PipelineStageAccess::AccelerationStructureBuild_UniformRead)
+ | Self::from(PipelineStageAccess::RayTracingShader_UniformRead)
+ | Self::from(PipelineStageAccess::TaskShader_UniformRead)
+ | Self::from(PipelineStageAccess::MeshShader_UniformRead)
+ }
+
+ if access.intersects(AccessFlags::INPUT_ATTACHMENT_READ) {
+ result |= Self::from(PipelineStageAccess::FragmentShader_InputAttachmentRead)
+ | Self::from(PipelineStageAccess::SubpassShading_InputAttachmentRead)
+ }
+
+ if access.intersects(AccessFlags::COLOR_ATTACHMENT_READ) {
+ result |= Self::from(PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentRead)
+ }
+
+ if access.intersects(AccessFlags::COLOR_ATTACHMENT_WRITE) {
+ result |= Self::from(PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentWrite)
+ }
+
+ if access.intersects(AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ) {
+ result |= Self::from(PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentRead)
+ | Self::from(PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentRead)
+ }
+
+ if access.intersects(AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE) {
+ result |=
+ Self::from(PipelineStageAccess::EarlyFragmentTests_DepthStencilAttachmentWrite)
+ | Self::from(PipelineStageAccess::LateFragmentTests_DepthStencilAttachmentWrite)
+ }
+
+ if access.intersects(AccessFlags::TRANSFER_READ) {
+ result |= Self::from(PipelineStageAccess::Copy_TransferRead)
+ | Self::from(PipelineStageAccess::Resolve_TransferRead)
+ | Self::from(PipelineStageAccess::Blit_TransferRead)
+ | Self::from(PipelineStageAccess::AccelerationStructureBuild_TransferRead)
+ }
+
+ if access.intersects(AccessFlags::TRANSFER_WRITE) {
+ result |= Self::from(PipelineStageAccess::Copy_TransferWrite)
+ | Self::from(PipelineStageAccess::Resolve_TransferWrite)
+ | Self::from(PipelineStageAccess::Blit_TransferWrite)
+ | Self::from(PipelineStageAccess::Clear_TransferWrite)
+ | Self::from(PipelineStageAccess::AccelerationStructureBuild_TransferWrite)
+ }
+
+ if access.intersects(AccessFlags::HOST_READ) {
+ result |= Self::from(PipelineStageAccess::Host_HostRead)
+ }
+
+ if access.intersects(AccessFlags::HOST_WRITE) {
+ result |= Self::from(PipelineStageAccess::Host_HostWrite)
+ }
+
+ if access.intersects(AccessFlags::SHADER_SAMPLED_READ) {
+ result |= Self::from(PipelineStageAccess::VertexShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::TessellationControlShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::GeometryShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::FragmentShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::ComputeShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::AccelerationStructureBuild_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::RayTracingShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::TaskShader_ShaderSampledRead)
+ | Self::from(PipelineStageAccess::MeshShader_ShaderSampledRead)
+ }
+
+ if access.intersects(AccessFlags::SHADER_STORAGE_READ) {
+ result |= Self::from(PipelineStageAccess::VertexShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::TessellationControlShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::GeometryShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::FragmentShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::ComputeShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::AccelerationStructureBuild_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::RayTracingShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::TaskShader_ShaderStorageRead)
+ | Self::from(PipelineStageAccess::MeshShader_ShaderStorageRead)
+ }
+
+ if access.intersects(AccessFlags::SHADER_STORAGE_WRITE) {
+ result |= Self::from(PipelineStageAccess::VertexShader_ShaderStorageWrite)
+ | Self::from(PipelineStageAccess::TessellationControlShader_ShaderStorageWrite)
+ | Self::from(PipelineStageAccess::TessellationEvaluationShader_ShaderStorageWrite)
+ | Self::from(PipelineStageAccess::GeometryShader_ShaderStorageWrite)
+ | Self::from(PipelineStageAccess::FragmentShader_ShaderStorageWrite)
+ | Self::from(PipelineStageAccess::ComputeShader_ShaderStorageWrite)
+ | Self::from(PipelineStageAccess::RayTracingShader_ShaderStorageWrite)
+ | Self::from(PipelineStageAccess::TaskShader_ShaderStorageWrite)
+ | Self::from(PipelineStageAccess::MeshShader_ShaderStorageWrite)
+ }
+
+ if access.intersects(AccessFlags::VIDEO_DECODE_READ) {
+ result |= Self::from(PipelineStageAccess::VideoDecode_VideoDecodeRead)
+ }
+
+ if access.intersects(AccessFlags::VIDEO_DECODE_WRITE) {
+ result |= Self::from(PipelineStageAccess::VideoDecode_VideoDecodeWrite)
+ }
+
+ if access.intersects(AccessFlags::VIDEO_ENCODE_READ) {
+ result |= Self::from(PipelineStageAccess::VideoEncode_VideoEncodeRead)
+ }
+
+ if access.intersects(AccessFlags::VIDEO_ENCODE_WRITE) {
+ result |= Self::from(PipelineStageAccess::VideoEncode_VideoEncodeWrite)
+ }
+
+ if access.intersects(AccessFlags::TRANSFORM_FEEDBACK_WRITE) {
+ result |= Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackWrite)
+ }
+
+ if access.intersects(AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ) {
+ result |= Self::from(PipelineStageAccess::DrawIndirect_TransformFeedbackCounterRead)
+ | Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackCounterRead)
+ }
+
+ if access.intersects(AccessFlags::TRANSFORM_FEEDBACK_COUNTER_WRITE) {
+ result |=
+ Self::from(PipelineStageAccess::TransformFeedback_TransformFeedbackCounterWrite)
+ }
+
+ if access.intersects(AccessFlags::CONDITIONAL_RENDERING_READ) {
+ result |= Self::from(PipelineStageAccess::ConditionalRendering_ConditionalRenderingRead)
+ }
+
+ if access.intersects(AccessFlags::COMMAND_PREPROCESS_READ) {
+ result |= Self::from(PipelineStageAccess::CommandPreprocess_CommandPreprocessRead)
+ }
+
+ if access.intersects(AccessFlags::COMMAND_PREPROCESS_WRITE) {
+ result |= Self::from(PipelineStageAccess::CommandPreprocess_CommandPreprocessWrite)
+ }
+
+ if access.intersects(AccessFlags::FRAGMENT_SHADING_RATE_ATTACHMENT_READ) {
+ result |=
+ Self::from(PipelineStageAccess::FragmentShadingRateAttachment_FragmentShadingRateAttachmentRead)
+ }
+
+ if access.intersects(AccessFlags::ACCELERATION_STRUCTURE_READ) {
+ result |= Self::from(PipelineStageAccess::VertexShader_AccelerationStructureRead)
+ | Self::from(
+ PipelineStageAccess::TessellationControlShader_AccelerationStructureRead,
+ )
+ | Self::from(
+ PipelineStageAccess::TessellationEvaluationShader_AccelerationStructureRead,
+ )
+ | Self::from(PipelineStageAccess::GeometryShader_AccelerationStructureRead)
+ | Self::from(PipelineStageAccess::FragmentShader_AccelerationStructureRead)
+ | Self::from(PipelineStageAccess::ComputeShader_AccelerationStructureRead)
+ | Self::from(
+ PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureRead,
+ )
+ | Self::from(PipelineStageAccess::RayTracingShader_AccelerationStructureRead)
+ | Self::from(PipelineStageAccess::TaskShader_AccelerationStructureRead)
+ | Self::from(PipelineStageAccess::MeshShader_AccelerationStructureRead)
+ }
+
+ if access.intersects(AccessFlags::ACCELERATION_STRUCTURE_WRITE) {
+ result |= Self::from(
+ PipelineStageAccess::AccelerationStructureBuild_AccelerationStructureWrite,
+ )
+ }
+
+ if access.intersects(AccessFlags::FRAGMENT_DENSITY_MAP_READ) {
+ result |= Self::from(PipelineStageAccess::FragmentDensityProcess_FragmentDensityMapRead)
+ }
+
+ if access.intersects(AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT) {
+ result |= Self::from(
+ PipelineStageAccess::ColorAttachmentOutput_ColorAttachmentReadNoncoherent,
+ )
+ }
+
+ if access.intersects(AccessFlags::INVOCATION_MASK_READ) {
+ result |= Self::from(PipelineStageAccess::InvocationMask_InvocationMaskRead)
+ }
+
+ /*
+ if access.intersects(AccessFlags::SHADER_BINDING_TABLE_READ) {
+ result |= Self::from(PipelineStageAccess::RayTracingShader_ShaderBindingTableRead)
+ }
+
+ if access.intersects(AccessFlags::MICROMAP_READ) {
+ result |= Self::from(PipelineStageAccess::AccelerationStructureBuild_MicromapRead)
+ | Self::from(PipelineStageAccess::MicromapBuild_MicromapRead)
+ }
+
+ if access.intersects(AccessFlags::MICROMAP_WRITE) {
+ result |= Self::from(PipelineStageAccess::MicromapBuild_MicromapWrite)
+ }
+
+ if access.intersects(AccessFlags::OPTICAL_FLOW_READ) {
+ result |= Self::from(PipelineStageAccess::OpticalFlow_OpticalFlowRead)
+ }
+
+ if access.intersects(AccessFlags::OPTICAL_FLOW_WRITE) {
+ result |= Self::from(PipelineStageAccess::OpticalFlow_OpticalFlowWrite)
+ }
+ */
+
+ result
+ }
+}
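+
+// Illustrative: converting a stage mask and an access mask separately and intersecting
+// the results narrows a (stages, access) pair down to the exact stage-access
+// combinations it can denote.
+//
+//     let candidates = PipelineStageAccessSet::from(stages)
+//         & PipelineStageAccessSet::from(access);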
+
+/// Dependency info for barriers in a pipeline barrier or event command.
+///
+/// A pipeline barrier creates a dependency between commands submitted before the barrier (the
+/// source scope) and commands submitted after it (the destination scope). An event command acts
+/// like a split pipeline barrier: the source scope and destination scope are defined
+/// relative to different commands. Each `DependencyInfo` consists of multiple individual
+/// barriers, each of which either concerns a single resource or operates globally.
+///
+/// Each barrier has a set of source/destination pipeline stages and source/destination memory
+/// access types. The pipeline stages create an *execution dependency*: the `src_stages` of
+/// commands submitted before the barrier must be completely finished before any of the
+/// `dst_stages` of commands after the barrier are allowed to start. The memory access types
+/// create a *memory dependency*: in addition to the execution dependency, any `src_access`
+/// performed before the barrier must be made available and visible before any `dst_access`
+/// is performed after the barrier.
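+///
+/// # Examples
+///
+/// A minimal sketch of a global barrier that makes earlier compute-shader writes available and
+/// visible to later compute-shader reads. The specific stage and access flag names are
+/// assumptions based on this module's [`PipelineStages`] and [`AccessFlags`] types:
+///
+/// ```
+/// # use vulkano::sync::{AccessFlags, DependencyInfo, MemoryBarrier, PipelineStages};
+/// let mut dependency_info = DependencyInfo::default();
+/// dependency_info.memory_barriers.push(MemoryBarrier {
+///     src_stages: PipelineStages::COMPUTE_SHADER,
+///     src_access: AccessFlags::SHADER_WRITE,
+///     dst_stages: PipelineStages::COMPUTE_SHADER,
+///     dst_access: AccessFlags::SHADER_READ,
+///     ..Default::default()
+/// });
+/// assert!(!dependency_info.is_empty());
+/// ```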
+#[derive(Clone, Debug)]
+pub struct DependencyInfo {
+ /// Flags to modify how the execution and memory dependencies are formed.
+ ///
+ /// The default value is empty.
+ pub dependency_flags: DependencyFlags,
+
+ /// Memory barriers for global operations and accesses, not limited to a single resource.
+ ///
+ /// The default value is empty.
+ pub memory_barriers: SmallVec<[MemoryBarrier; 2]>,
+
+ /// Memory barriers for individual buffers.
+ ///
+ /// The default value is empty.
+ pub buffer_memory_barriers: SmallVec<[BufferMemoryBarrier; 8]>,
+
+ /// Memory barriers for individual images.
+ ///
+ /// The default value is empty.
+ pub image_memory_barriers: SmallVec<[ImageMemoryBarrier; 8]>,
+
+ pub _ne: crate::NonExhaustive,
+}
+
+impl DependencyInfo {
+ /// Returns whether `self` contains any barriers.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.memory_barriers.is_empty()
+ && self.buffer_memory_barriers.is_empty()
+ && self.image_memory_barriers.is_empty()
+ }
+
+ /// Clears all barriers.
+ #[inline]
+ pub fn clear(&mut self) {
+ self.memory_barriers.clear();
+ self.buffer_memory_barriers.clear();
+ self.image_memory_barriers.clear();
+ }
+}
+
+impl Default for DependencyInfo {
+ #[inline]
+ fn default() -> Self {
+ Self {
+ dependency_flags: DependencyFlags::empty(),
+ memory_barriers: SmallVec::new(),
+ buffer_memory_barriers: SmallVec::new(),
+ image_memory_barriers: SmallVec::new(),
+ _ne: crate::NonExhaustive(()),
+ }
+ }
+}
+
+vulkan_bitflags! {
+ #[non_exhaustive]
+
+ /// Flags that modify how execution and memory dependencies are formed.
+ DependencyFlags = DependencyFlags(u32);
+
+ /// For framebuffer-space pipeline stages, specifies that the dependency is framebuffer-local.
+    /// The implementation can start the destination operation for a given pixel as long as the
+    /// source operation is finished for that same pixel.
+ ///
+ /// Framebuffer-local dependencies are usually more efficient, especially on tile-based
+ /// architectures.
+ BY_REGION = BY_REGION,
+
+ /// For devices that consist of multiple physical devices, specifies that the dependency is
+ /// device-local. The dependency will only apply to the operations on each physical device
+ /// individually, rather than applying to all physical devices as a whole. This allows each
+ /// physical device to operate independently of the others.
+ ///
+ /// The device API version must be at least 1.1, or the [`khr_device_group`] extension must be
+ /// enabled on the device.
+ ///
+ /// [`khr_device_group`]: crate::device::DeviceExtensions::khr_device_group
+ DEVICE_GROUP = DEVICE_GROUP {
+ api_version: V1_1,
+ device_extensions: [khr_device_group],
+ },
+
+ /// For subpass dependencies, and pipeline barriers executing within a render pass instance,
+ /// if the render pass uses multiview rendering, specifies that the dependency is view-local.
+    /// Each view in the destination subpass will only depend on a single view in the source
+    /// subpass, instead of all views.
+ ///
+ /// The device API version must be at least 1.1, or the [`khr_multiview`] extension must be
+ /// enabled on the device.
+ ///
+ /// [`khr_multiview`]: crate::device::DeviceExtensions::khr_multiview
+ VIEW_LOCAL = VIEW_LOCAL {
+ api_version: V1_1,
+ device_extensions: [khr_multiview],
+ },
+}
+
+/// A memory barrier that is applied globally.
+#[derive(Clone, Debug)]
+pub struct MemoryBarrier {
+ /// The pipeline stages in the source scope to wait for.
+ ///
+ /// The default value is [`PipelineStages::empty()`].
+ pub src_stages: PipelineStages,
+
+ /// The memory accesses in the source scope to make available and visible.
+ ///
+ /// The default value is [`AccessFlags::empty()`].
+ pub src_access: AccessFlags,
+
+ /// The pipeline stages in the destination scope that must wait for `src_stages`.
+ ///
+ /// The default value is [`PipelineStages::empty()`].
+ pub dst_stages: PipelineStages,
+
+    /// The memory accesses in the destination scope that must wait for `src_access` to be made
+    /// available and visible.
+    ///
+    /// The default value is [`AccessFlags::empty()`].
+    pub dst_access: AccessFlags,
+
+ pub _ne: crate::NonExhaustive,
+}
+
+impl Default for MemoryBarrier {
+ #[inline]
+ fn default() -> Self {
+ Self {
+ src_stages: PipelineStages::empty(),
+ src_access: AccessFlags::empty(),
+ dst_stages: PipelineStages::empty(),
+ dst_access: AccessFlags::empty(),
+ _ne: crate::NonExhaustive(()),
+ }
+ }
+}
+
+/// A memory barrier that is applied to a single buffer.
+#[derive(Clone, Debug)]
+pub struct BufferMemoryBarrier {
+ /// The pipeline stages in the source scope to wait for.
+ ///
+ /// The default value is [`PipelineStages::empty()`].
+ pub src_stages: PipelineStages,
+
+ /// The memory accesses in the source scope to make available and visible.
+ ///
+ /// The default value is [`AccessFlags::empty()`].
+ pub src_access: AccessFlags,
+
+ /// The pipeline stages in the destination scope that must wait for `src_stages`.
+ ///
+ /// The default value is [`PipelineStages::empty()`].
+ pub dst_stages: PipelineStages,
+
+    /// The memory accesses in the destination scope that must wait for `src_access` to be made
+    /// available and visible.
+    ///
+    /// The default value is [`AccessFlags::empty()`].
+    pub dst_access: AccessFlags,
+
+ /// For resources created with [`Sharing::Exclusive`](crate::sync::Sharing), transfers
+ /// ownership of a resource from one queue family to another.
+ pub queue_family_ownership_transfer: Option<QueueFamilyOwnershipTransfer>,
+
+ /// The buffer to apply the barrier to.
+ pub buffer: Arc<Buffer>,
+
+ /// The byte range of `buffer` to apply the barrier to.
+ pub range: Range<DeviceSize>,
+
+ pub _ne: crate::NonExhaustive,
+}
+
+impl BufferMemoryBarrier {
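+    /// Returns a `BufferMemoryBarrier` with the specified `buffer`, and all other fields set to
+    /// their default values. Note that the default `range` of `0..0` is empty, and must be
+    /// replaced with the actual byte range before the barrier is used.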
+ #[inline]
+ pub fn buffer(buffer: Arc<Buffer>) -> Self {
+ Self {
+ src_stages: PipelineStages::empty(),
+ src_access: AccessFlags::empty(),
+ dst_stages: PipelineStages::empty(),
+ dst_access: AccessFlags::empty(),
+ queue_family_ownership_transfer: None,
+ buffer,
+ range: 0..0,
+ _ne: crate::NonExhaustive(()),
+ }
+ }
+}
+
+/// A memory barrier that is applied to a single image.
+#[derive(Clone, Debug)]
+pub struct ImageMemoryBarrier {
+ /// The pipeline stages in the source scope to wait for.
+ ///
+ /// The default value is [`PipelineStages::empty()`].
+ pub src_stages: PipelineStages,
+
+ /// The memory accesses in the source scope to make available and visible.
+ ///
+ /// The default value is [`AccessFlags::empty()`].
+ pub src_access: AccessFlags,
+
+ /// The pipeline stages in the destination scope that must wait for `src_stages`.
+ ///
+ /// The default value is [`PipelineStages::empty()`].
+ pub dst_stages: PipelineStages,
+
+    /// The memory accesses in the destination scope that must wait for `src_access` to be made
+    /// available and visible.
+    ///
+    /// The default value is [`AccessFlags::empty()`].
+    pub dst_access: AccessFlags,
+
+ /// The layout that the specified `subresource_range` of `image` is expected to be in when the
+ /// source scope completes.
+ pub old_layout: ImageLayout,
+
+ /// The layout that the specified `subresource_range` of `image` will be transitioned to before
+ /// the destination scope begins.
+ pub new_layout: ImageLayout,
+
+ /// For resources created with [`Sharing::Exclusive`](crate::sync::Sharing), transfers
+ /// ownership of a resource from one queue family to another.
+ pub queue_family_ownership_transfer: Option<QueueFamilyOwnershipTransfer>,
+
+ /// The image to apply the barrier to.
+ pub image: Arc<Image>,
+
+ /// The subresource range of `image` to apply the barrier to.
+ pub subresource_range: ImageSubresourceRange,
+
+ pub _ne: crate::NonExhaustive,
+}
+
+impl ImageMemoryBarrier {
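+    /// Returns an `ImageMemoryBarrier` with the specified `image`, and all other fields set to
+    /// their default values. The empty `subresource_range` must be overridden before the barrier
+    /// is used, and the two layouts will usually need to be set as well.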
+ #[inline]
+ pub fn image(image: Arc<Image>) -> Self {
+ Self {
+ src_stages: PipelineStages::empty(),
+ src_access: AccessFlags::empty(),
+ dst_stages: PipelineStages::empty(),
+ dst_access: AccessFlags::empty(),
+ old_layout: ImageLayout::Undefined,
+ new_layout: ImageLayout::Undefined,
+ queue_family_ownership_transfer: None,
+ image,
+ subresource_range: ImageSubresourceRange {
+ aspects: ImageAspects::empty(), // Can't use image format aspects because `color` can't be specified with `planeN`.
+ mip_levels: 0..0,
+ array_layers: 0..0,
+ },
+ _ne: crate::NonExhaustive(()),
+ }
+ }
+}
+
+/// Specifies a queue family ownership transfer for a resource.
+///
+/// There are three classes of queues that can be used in an ownership transfer:
+/// - A **local** queue exists on the current [`Instance`] and [`Device`].
+/// - An **external** queue does not exist on the current [`Instance`], but has the same
+/// [`device_uuid`] and [`driver_uuid`] as the current [`Device`].
+/// - A **foreign** queue can be an external queue, or any queue on another device whose
+///   [`device_uuid`] or [`driver_uuid`] does not match those of the current [`Device`].
+///
+/// [`Instance`]: crate::instance::Instance
+/// [`Device`]: crate::device::Device
+/// [`device_uuid`]: crate::device::Properties::device_uuid
+/// [`driver_uuid`]: crate::device::Properties::driver_uuid
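+///
+/// # Examples
+///
+/// A sketch of the release half of an exclusive ownership transfer between two local queue
+/// families. The buffer and the queue family indices are assumed to be in scope, and the
+/// matching acquire barrier must be submitted separately on the destination queue family:
+///
+/// ```ignore
+/// let mut barrier = BufferMemoryBarrier::buffer(buffer.clone());
+/// barrier.src_stages = PipelineStages::COMPUTE_SHADER;
+/// barrier.src_access = AccessFlags::SHADER_WRITE;
+/// barrier.range = 0..buffer.size();
+/// barrier.queue_family_ownership_transfer =
+///     Some(QueueFamilyOwnershipTransfer::ExclusiveBetweenLocal {
+///         src_index: compute_queue_family_index,
+///         dst_index: graphics_queue_family_index,
+///     });
+/// ```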
+#[derive(Clone, Copy, Debug)]
+pub enum QueueFamilyOwnershipTransfer {
+ /// For a resource with [`Sharing::Exclusive`], transfers ownership between two local queues.
+ ///
+ /// [`Sharing::Exclusive`]: crate::sync::Sharing::Exclusive
+ ExclusiveBetweenLocal {
+ /// The queue family that currently owns the resource.
+ src_index: u32,
+
+ /// The queue family to transfer ownership to.
+ dst_index: u32,
+ },
+
+ /// For a resource with [`Sharing::Exclusive`], transfers ownership from a local queue to an
+ /// external queue.
+ ///
+ /// The device API version must be at least 1.1, or the [`khr_external_memory`] extension must
+ /// be enabled on the device.
+ ///
+ /// [`Sharing::Exclusive`]: crate::sync::Sharing::Exclusive
+ /// [`khr_external_memory`]: crate::device::DeviceExtensions::khr_external_memory
+ ExclusiveToExternal {
+ /// The queue family that currently owns the resource.
+ src_index: u32,
+ },
+
+ /// For a resource with [`Sharing::Exclusive`], transfers ownership from an external queue to a
+ /// local queue.
+ ///
+ /// The device API version must be at least 1.1, or the [`khr_external_memory`] extension must
+ /// be enabled on the device.
+ ///
+ /// [`Sharing::Exclusive`]: crate::sync::Sharing::Exclusive
+ /// [`khr_external_memory`]: crate::device::DeviceExtensions::khr_external_memory
+ ExclusiveFromExternal {
+ /// The queue family to transfer ownership to.
+ dst_index: u32,
+ },
+
+ /// For a resource with [`Sharing::Exclusive`], transfers ownership from a local queue to a
+ /// foreign queue.
+ ///
+ /// The [`ext_queue_family_foreign`] extension must be enabled on the device.
+ ///
+ /// [`Sharing::Exclusive`]: crate::sync::Sharing::Exclusive
+ /// [`ext_queue_family_foreign`]: crate::device::DeviceExtensions::ext_queue_family_foreign
+ ExclusiveToForeign {
+ /// The queue family that currently owns the resource.
+ src_index: u32,
+ },
+
+ /// For a resource with [`Sharing::Exclusive`], transfers ownership from a foreign queue to a
+ /// local queue.
+ ///
+ /// The [`ext_queue_family_foreign`] extension must be enabled on the device.
+ ///
+ /// [`Sharing::Exclusive`]: crate::sync::Sharing::Exclusive
+ /// [`ext_queue_family_foreign`]: crate::device::DeviceExtensions::ext_queue_family_foreign
+ ExclusiveFromForeign {
+ /// The queue family to transfer ownership to.
+ dst_index: u32,
+ },
+
+ /// For a resource with [`Sharing::Concurrent`], transfers ownership from its local queues to
+ /// an external queue.
+ ///
+ /// The device API version must be at least 1.1, or the [`khr_external_memory`] extension must
+ /// be enabled on the device.
+ ///
+ /// [`Sharing::Concurrent`]: crate::sync::Sharing::Concurrent
+ /// [`khr_external_memory`]: crate::device::DeviceExtensions::khr_external_memory
+ ConcurrentToExternal,
+
+ /// For a resource with [`Sharing::Concurrent`], transfers ownership from an external queue to
+ /// its local queues.
+ ///
+ /// The device API version must be at least 1.1, or the [`khr_external_memory`] extension must
+ /// be enabled on the device.
+ ///
+ /// [`Sharing::Concurrent`]: crate::sync::Sharing::Concurrent
+ /// [`khr_external_memory`]: crate::device::DeviceExtensions::khr_external_memory
+ ConcurrentFromExternal,
+
+ /// For a resource with [`Sharing::Concurrent`], transfers ownership from its local queues to
+ /// a foreign queue.
+ ///
+ /// The [`ext_queue_family_foreign`] extension must be enabled on the device.
+ ///
+ /// [`Sharing::Concurrent`]: crate::sync::Sharing::Concurrent
+ /// [`ext_queue_family_foreign`]: crate::device::DeviceExtensions::ext_queue_family_foreign
+ ConcurrentToForeign,
+
+ /// For a resource with [`Sharing::Concurrent`], transfers ownership from a foreign queue to
+ /// its local queues.
+ ///
+ /// The [`ext_queue_family_foreign`] extension must be enabled on the device.
+ ///
+ /// [`Sharing::Concurrent`]: crate::sync::Sharing::Concurrent
+ /// [`ext_queue_family_foreign`]: crate::device::DeviceExtensions::ext_queue_family_foreign
+ ConcurrentFromForeign,
+}
+
+impl QueueFamilyOwnershipTransfer {
+ pub(crate) fn validate_device(self, device: &Device) -> Result<(), RequirementNotMet> {
+ match self {
+ QueueFamilyOwnershipTransfer::ExclusiveToExternal { .. } => {
+ if !(device.api_version() >= Version::V1_1
+ || device.enabled_extensions().khr_external_memory)
+ {
+ return Err(crate::RequirementNotMet {
+                        required_for: "`QueueFamilyOwnershipTransfer::ExclusiveToExternal`",
+ requires_one_of: crate::RequiresOneOf {
+ api_version: Some(Version::V1_1),
+ device_extensions: &["khr_external_memory"],
+ ..Default::default()
+ },
+ });
+ }
+ }
+ QueueFamilyOwnershipTransfer::ExclusiveFromExternal { .. } => {
+ if !(device.api_version() >= Version::V1_1
+ || device.enabled_extensions().khr_external_memory)
+ {
+ return Err(crate::RequirementNotMet {
+                        required_for: "`QueueFamilyOwnershipTransfer::ExclusiveFromExternal`",
+ requires_one_of: crate::RequiresOneOf {
+ api_version: Some(Version::V1_1),
+ device_extensions: &["khr_external_memory"],
+ ..Default::default()
+ },
+ });
+ }
+ }
+ QueueFamilyOwnershipTransfer::ExclusiveToForeign { .. } => {
+ if !device.enabled_extensions().ext_queue_family_foreign {
+ return Err(crate::RequirementNotMet {
+                        required_for: "`QueueFamilyOwnershipTransfer::ExclusiveToForeign`",
+ requires_one_of: crate::RequiresOneOf {
+ device_extensions: &["ext_queue_family_foreign"],
+ ..Default::default()
+ },
+ });
+ }
+ }
+ QueueFamilyOwnershipTransfer::ExclusiveFromForeign { .. } => {
+ if !device.enabled_extensions().ext_queue_family_foreign {
+ return Err(crate::RequirementNotMet {
+                        required_for: "`QueueFamilyOwnershipTransfer::ExclusiveFromForeign`",
+ requires_one_of: crate::RequiresOneOf {
+ device_extensions: &["ext_queue_family_foreign"],
+ ..Default::default()
+ },
+ });
+ }
+ }
+ QueueFamilyOwnershipTransfer::ConcurrentToExternal => {
+ if !(device.api_version() >= Version::V1_1
+ || device.enabled_extensions().khr_external_memory)
+ {
+ return Err(crate::RequirementNotMet {
+                        required_for: "`QueueFamilyOwnershipTransfer::ConcurrentToExternal`",
+ requires_one_of: crate::RequiresOneOf {
+ api_version: Some(Version::V1_1),
+ device_extensions: &["khr_external_memory"],
+ ..Default::default()
+ },
+ });
+ }
+ }
+ QueueFamilyOwnershipTransfer::ConcurrentFromExternal => {
+ if !(device.api_version() >= Version::V1_1
+ || device.enabled_extensions().khr_external_memory)
+ {
+ return Err(crate::RequirementNotMet {
+                        required_for: "`QueueFamilyOwnershipTransfer::ConcurrentFromExternal`",
+ requires_one_of: crate::RequiresOneOf {
+ api_version: Some(Version::V1_1),
+ device_extensions: &["khr_external_memory"],
+ ..Default::default()
+ },
+ });
+ }
+ }
+ QueueFamilyOwnershipTransfer::ConcurrentToForeign => {
+ if !device.enabled_extensions().ext_queue_family_foreign {
+ return Err(crate::RequirementNotMet {
+                        required_for: "`QueueFamilyOwnershipTransfer::ConcurrentToForeign`",
+ requires_one_of: crate::RequiresOneOf {
+ device_extensions: &["ext_queue_family_foreign"],
+ ..Default::default()
+ },
+ });
+ }
+ }
+ QueueFamilyOwnershipTransfer::ConcurrentFromForeign => {
+ if !device.enabled_extensions().ext_queue_family_foreign {
+ return Err(crate::RequirementNotMet {
+                        required_for: "`QueueFamilyOwnershipTransfer::ConcurrentFromForeign`",
+ requires_one_of: crate::RequiresOneOf {
+ device_extensions: &["ext_queue_family_foreign"],
+ ..Default::default()
+ },
+ });
+ }
+ }
+ _ => (),
+ }
+
+ Ok(())
+ }
+}
+
+impl From<QueueFamilyOwnershipTransfer> for (u32, u32) {
+ fn from(val: QueueFamilyOwnershipTransfer) -> Self {
+ match val {
+ QueueFamilyOwnershipTransfer::ExclusiveBetweenLocal {
+ src_index,
+ dst_index,
+ } => (src_index, dst_index),
+ QueueFamilyOwnershipTransfer::ExclusiveToExternal { src_index } => {
+ (src_index, ash::vk::QUEUE_FAMILY_EXTERNAL)
+ }
+ QueueFamilyOwnershipTransfer::ExclusiveFromExternal { dst_index } => {
+ (ash::vk::QUEUE_FAMILY_EXTERNAL, dst_index)
+ }
+ QueueFamilyOwnershipTransfer::ExclusiveToForeign { src_index } => {
+ (src_index, ash::vk::QUEUE_FAMILY_FOREIGN_EXT)
+ }
+ QueueFamilyOwnershipTransfer::ExclusiveFromForeign { dst_index } => {
+ (ash::vk::QUEUE_FAMILY_FOREIGN_EXT, dst_index)
+ }
+ QueueFamilyOwnershipTransfer::ConcurrentToExternal => (
+ ash::vk::QUEUE_FAMILY_IGNORED,
+ ash::vk::QUEUE_FAMILY_EXTERNAL,
+ ),
+ QueueFamilyOwnershipTransfer::ConcurrentFromExternal => (
+ ash::vk::QUEUE_FAMILY_EXTERNAL,
+ ash::vk::QUEUE_FAMILY_IGNORED,
+ ),
+ QueueFamilyOwnershipTransfer::ConcurrentToForeign => (
+ ash::vk::QUEUE_FAMILY_IGNORED,
+ ash::vk::QUEUE_FAMILY_FOREIGN_EXT,
+ ),
+ QueueFamilyOwnershipTransfer::ConcurrentFromForeign => (
+ ash::vk::QUEUE_FAMILY_FOREIGN_EXT,
+ ash::vk::QUEUE_FAMILY_IGNORED,
+ ),
+ }
+ }
+}
diff --git a/src/sync/semaphore.rs b/src/sync/semaphore.rs
new file mode 100644
index 0000000..ac0ca56
--- /dev/null
+++ b/src/sync/semaphore.rs
@@ -0,0 +1,1667 @@
+// Copyright (c) 2016 The vulkano developers
+// Licensed under the Apache License, Version 2.0
+// <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
+// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
+// at your option. All files in the project carrying such
+// notice may not be copied, modified, or distributed except
+// according to those terms.
+
+//! A semaphore provides synchronization between multiple queues, between a queue and
+//! non-command-buffer commands on the same queue, or between the device and an external source.
+
+use crate::{
+ device::{Device, DeviceOwned, Queue},
+ macros::{impl_id_counter, vulkan_bitflags, vulkan_bitflags_enum},
+ OomError, RequirementNotMet, RequiresOneOf, Version, VulkanError, VulkanObject,
+};
+use parking_lot::{Mutex, MutexGuard};
+#[cfg(unix)]
+use std::fs::File;
+use std::{
+ error::Error,
+ fmt::{Display, Error as FmtError, Formatter},
+ mem::MaybeUninit,
+ num::NonZeroU64,
+ ptr,
+ sync::{Arc, Weak},
+};
+
+/// Used to provide synchronization between command buffers during their execution.
+///
+/// It is similar to a fence, except that it is purely on the GPU side. The CPU can't query a
+/// semaphore's status or wait for it to be signaled.
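+///
+/// # Examples
+///
+/// A minimal creation sketch, assuming a `device: Arc<Device>` is in scope:
+///
+/// ```ignore
+/// let semaphore = Semaphore::new(device.clone(), SemaphoreCreateInfo::default())?;
+/// ```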
+#[derive(Debug)]
+pub struct Semaphore {
+ handle: ash::vk::Semaphore,
+ device: Arc<Device>,
+ id: NonZeroU64,
+ must_put_in_pool: bool,
+
+ export_handle_types: ExternalSemaphoreHandleTypes,
+
+ state: Mutex<SemaphoreState>,
+}
+
+impl Semaphore {
+ /// Creates a new `Semaphore`.
+ #[inline]
+ pub fn new(
+ device: Arc<Device>,
+ create_info: SemaphoreCreateInfo,
+ ) -> Result<Semaphore, SemaphoreError> {
+ Self::validate_new(&device, &create_info)?;
+
+ unsafe { Ok(Self::new_unchecked(device, create_info)?) }
+ }
+
+ fn validate_new(
+ device: &Device,
+ create_info: &SemaphoreCreateInfo,
+ ) -> Result<(), SemaphoreError> {
+ let &SemaphoreCreateInfo {
+ export_handle_types,
+ _ne: _,
+ } = create_info;
+
+ if !export_handle_types.is_empty() {
+ if !(device.api_version() >= Version::V1_1
+ || device.enabled_extensions().khr_external_semaphore)
+ {
+ return Err(SemaphoreError::RequirementNotMet {
+ required_for: "`create_info.export_handle_types` is not empty",
+ requires_one_of: RequiresOneOf {
+ api_version: Some(Version::V1_1),
+ device_extensions: &["khr_external_semaphore"],
+ ..Default::default()
+ },
+ });
+ }
+
+ // VUID-VkExportSemaphoreCreateInfo-handleTypes-parameter
+ export_handle_types.validate_device(device)?;
+
+ // VUID-VkExportSemaphoreCreateInfo-handleTypes-01124
+ for handle_type in export_handle_types.into_iter() {
+ let external_semaphore_properties = unsafe {
+ device
+ .physical_device()
+ .external_semaphore_properties_unchecked(
+ ExternalSemaphoreInfo::handle_type(handle_type),
+ )
+ };
+
+ if !external_semaphore_properties.exportable {
+ return Err(SemaphoreError::HandleTypeNotExportable { handle_type });
+ }
+
+ if !external_semaphore_properties
+ .compatible_handle_types
+ .contains(export_handle_types)
+ {
+ return Err(SemaphoreError::ExportHandleTypesNotCompatible);
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+ #[inline]
+ pub unsafe fn new_unchecked(
+ device: Arc<Device>,
+ create_info: SemaphoreCreateInfo,
+ ) -> Result<Semaphore, VulkanError> {
+ let SemaphoreCreateInfo {
+ export_handle_types,
+ _ne: _,
+ } = create_info;
+
+ let mut create_info_vk = ash::vk::SemaphoreCreateInfo {
+ flags: ash::vk::SemaphoreCreateFlags::empty(),
+ ..Default::default()
+ };
+ let mut export_semaphore_create_info_vk = None;
+
+ if !export_handle_types.is_empty() {
+ let _ = export_semaphore_create_info_vk.insert(ash::vk::ExportSemaphoreCreateInfo {
+ handle_types: export_handle_types.into(),
+ ..Default::default()
+ });
+ };
+
+ if let Some(info) = export_semaphore_create_info_vk.as_mut() {
+ info.p_next = create_info_vk.p_next;
+ create_info_vk.p_next = info as *const _ as *const _;
+ }
+
+ let handle = {
+ let fns = device.fns();
+ let mut output = MaybeUninit::uninit();
+ (fns.v1_0.create_semaphore)(
+ device.handle(),
+ &create_info_vk,
+ ptr::null(),
+ output.as_mut_ptr(),
+ )
+ .result()
+ .map_err(VulkanError::from)?;
+ output.assume_init()
+ };
+
+ Ok(Semaphore {
+ handle,
+ device,
+ id: Self::next_id(),
+ must_put_in_pool: false,
+ export_handle_types,
+ state: Mutex::new(Default::default()),
+ })
+ }
+
+ /// Takes a semaphore from the vulkano-provided semaphore pool.
+ /// If the pool is empty, a new semaphore will be allocated.
+ /// Upon `drop`, the semaphore is put back into the pool.
+ ///
+ /// For most applications, using the pool should be preferred,
+ /// in order to avoid creating new semaphores every frame.
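+    ///
+    /// A usage sketch, assuming a `device: Arc<Device>` is in scope:
+    ///
+    /// ```ignore
+    /// // The semaphore is returned to the pool when it is dropped.
+    /// let semaphore = Semaphore::from_pool(device.clone())?;
+    /// ```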
+ #[inline]
+ pub fn from_pool(device: Arc<Device>) -> Result<Semaphore, SemaphoreError> {
+ let handle = device.semaphore_pool().lock().pop();
+ let semaphore = match handle {
+ Some(handle) => Semaphore {
+ handle,
+ device,
+ id: Self::next_id(),
+ must_put_in_pool: true,
+ export_handle_types: ExternalSemaphoreHandleTypes::empty(),
+ state: Mutex::new(Default::default()),
+ },
+ None => {
+ // Pool is empty, alloc new semaphore
+ let mut semaphore = Semaphore::new(device, Default::default())?;
+ semaphore.must_put_in_pool = true;
+ semaphore
+ }
+ };
+
+ Ok(semaphore)
+ }
+
+ /// Creates a new `Semaphore` from a raw object handle.
+ ///
+ /// # Safety
+ ///
+ /// - `handle` must be a valid Vulkan object handle created from `device`.
+ /// - `create_info` must match the info used to create the object.
+ #[inline]
+ pub unsafe fn from_handle(
+ device: Arc<Device>,
+ handle: ash::vk::Semaphore,
+ create_info: SemaphoreCreateInfo,
+ ) -> Semaphore {
+ let SemaphoreCreateInfo {
+ export_handle_types,
+ _ne: _,
+ } = create_info;
+
+ Semaphore {
+ handle,
+ device,
+ id: Self::next_id(),
+ must_put_in_pool: false,
+ export_handle_types,
+ state: Mutex::new(Default::default()),
+ }
+ }
+
+    /// Exports the semaphore into a POSIX file descriptor. The caller owns the returned `File`.
+    ///
+    /// The [`khr_external_semaphore_fd`](crate::device::DeviceExtensions::khr_external_semaphore_fd)
+    /// extension must be enabled on the device.
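+    ///
+    /// A usage sketch, assuming the semaphore was created with
+    /// `ExternalSemaphoreHandleTypes::OPAQUE_FD` in its `export_handle_types`:
+    ///
+    /// ```ignore
+    /// let file = semaphore.export_fd(ExternalSemaphoreHandleType::OpaqueFd)?;
+    /// // `file` can now be passed to another Vulkan instance or a compatible API.
+    /// ```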
+ #[cfg(unix)]
+ #[inline]
+ pub fn export_fd(
+ &self,
+ handle_type: ExternalSemaphoreHandleType,
+ ) -> Result<File, SemaphoreError> {
+ let mut state = self.state.lock();
+ self.validate_export_fd(handle_type, &state)?;
+
+ unsafe { Ok(self.export_fd_unchecked_locked(handle_type, &mut state)?) }
+ }
+
+ #[cfg(unix)]
+ fn validate_export_fd(
+ &self,
+ handle_type: ExternalSemaphoreHandleType,
+ state: &SemaphoreState,
+ ) -> Result<(), SemaphoreError> {
+ if !self.device.enabled_extensions().khr_external_semaphore_fd {
+ return Err(SemaphoreError::RequirementNotMet {
+ required_for: "`Semaphore::export_fd`",
+ requires_one_of: RequiresOneOf {
+ device_extensions: &["khr_external_semaphore_fd"],
+ ..Default::default()
+ },
+ });
+ }
+
+ // VUID-VkSemaphoreGetFdInfoKHR-handleType-parameter
+ handle_type.validate_device(&self.device)?;
+
+ // VUID-VkSemaphoreGetFdInfoKHR-handleType-01132
+ if !self.export_handle_types.intersects(handle_type.into()) {
+ return Err(SemaphoreError::HandleTypeNotEnabled);
+ }
+
+ // VUID-VkSemaphoreGetFdInfoKHR-semaphore-01133
+ if let Some(imported_handle_type) = state.current_import {
+ match imported_handle_type {
+ ImportType::SwapchainAcquire => {
+ return Err(SemaphoreError::ImportedForSwapchainAcquire)
+ }
+ ImportType::ExternalSemaphore(imported_handle_type) => {
+ let external_semaphore_properties = unsafe {
+ self.device
+ .physical_device()
+ .external_semaphore_properties_unchecked(
+ ExternalSemaphoreInfo::handle_type(handle_type),
+ )
+ };
+
+ if !external_semaphore_properties
+ .export_from_imported_handle_types
+ .intersects(imported_handle_type.into())
+ {
+ return Err(SemaphoreError::ExportFromImportedNotSupported {
+ imported_handle_type,
+ });
+ }
+ }
+ }
+ }
+
+ if handle_type.has_copy_transference() {
+ // VUID-VkSemaphoreGetFdInfoKHR-handleType-01134
+ if state.is_wait_pending() {
+ return Err(SemaphoreError::QueueIsWaiting);
+ }
+
+ // VUID-VkSemaphoreGetFdInfoKHR-handleType-01135
+ // VUID-VkSemaphoreGetFdInfoKHR-handleType-03254
+ if !(state.is_signaled().unwrap_or(false) || state.is_signal_pending()) {
+ return Err(SemaphoreError::HandleTypeCopyNotSignaled);
+ }
+ }
+
+ // VUID-VkSemaphoreGetFdInfoKHR-handleType-01136
+ if !matches!(
+ handle_type,
+ ExternalSemaphoreHandleType::OpaqueFd | ExternalSemaphoreHandleType::SyncFd
+ ) {
+ return Err(SemaphoreError::HandleTypeNotFd);
+ }
+
+ Ok(())
+ }
+
+ #[cfg(unix)]
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+ #[inline]
+ pub unsafe fn export_fd_unchecked(
+ &self,
+ handle_type: ExternalSemaphoreHandleType,
+ ) -> Result<File, VulkanError> {
+ let mut state = self.state.lock();
+ self.export_fd_unchecked_locked(handle_type, &mut state)
+ }
+
+ #[cfg(unix)]
+ unsafe fn export_fd_unchecked_locked(
+ &self,
+ handle_type: ExternalSemaphoreHandleType,
+ state: &mut SemaphoreState,
+ ) -> Result<File, VulkanError> {
+ use std::os::unix::io::FromRawFd;
+
+ let info = ash::vk::SemaphoreGetFdInfoKHR {
+ semaphore: self.handle,
+ handle_type: handle_type.into(),
+ ..Default::default()
+ };
+
+ let mut output = MaybeUninit::uninit();
+ let fns = self.device.fns();
+ (fns.khr_external_semaphore_fd.get_semaphore_fd_khr)(
+ self.device.handle(),
+ &info,
+ output.as_mut_ptr(),
+ )
+ .result()
+ .map_err(VulkanError::from)?;
+
+ state.export(handle_type);
+
+ Ok(File::from_raw_fd(output.assume_init()))
+ }
+
+ /// Exports the semaphore into a Win32 handle.
+ ///
+ /// The [`khr_external_semaphore_win32`](crate::device::DeviceExtensions::khr_external_semaphore_win32)
+ /// extension must be enabled on the device.
+ #[cfg(windows)]
+ #[inline]
+ pub fn export_win32_handle(
+ &self,
+ handle_type: ExternalSemaphoreHandleType,
+ ) -> Result<*mut std::ffi::c_void, SemaphoreError> {
+ let mut state = self.state.lock();
+ self.validate_export_win32_handle(handle_type, &state)?;
+
+ unsafe { Ok(self.export_win32_handle_unchecked_locked(handle_type, &mut state)?) }
+ }
+
+ #[cfg(windows)]
+ fn validate_export_win32_handle(
+ &self,
+ handle_type: ExternalSemaphoreHandleType,
+ state: &SemaphoreState,
+ ) -> Result<(), SemaphoreError> {
+ if !self
+ .device
+ .enabled_extensions()
+ .khr_external_semaphore_win32
+ {
+ return Err(SemaphoreError::RequirementNotMet {
+ required_for: "`Semaphore::export_win32_handle`",
+ requires_one_of: RequiresOneOf {
+ device_extensions: &["khr_external_semaphore_win32"],
+ ..Default::default()
+ },
+ });
+ }
+
+ // VUID-VkSemaphoreGetWin32HandleInfoKHR-handleType-parameter
+ handle_type.validate_device(&self.device)?;
+
+ // VUID-VkSemaphoreGetWin32HandleInfoKHR-handleType-01126
+ if !self.export_handle_types.intersects(handle_type.into()) {
+ return Err(SemaphoreError::HandleTypeNotEnabled);
+ }
+
+ // VUID-VkSemaphoreGetWin32HandleInfoKHR-handleType-01127
+ if matches!(
+ handle_type,
+ ExternalSemaphoreHandleType::OpaqueWin32 | ExternalSemaphoreHandleType::D3D12Fence
+ ) && state.is_exported(handle_type)
+ {
+ return Err(SemaphoreError::AlreadyExported);
+ }
+
+ // VUID-VkSemaphoreGetWin32HandleInfoKHR-semaphore-01128
+ if let Some(imported_handle_type) = state.current_import {
+ match imported_handle_type {
+ ImportType::SwapchainAcquire => {
+ return Err(SemaphoreError::ImportedForSwapchainAcquire)
+ }
+ ImportType::ExternalSemaphore(imported_handle_type) => {
+ let external_semaphore_properties = unsafe {
+ self.device
+ .physical_device()
+ .external_semaphore_properties_unchecked(
+ ExternalSemaphoreInfo::handle_type(handle_type),
+ )
+ };
+
+ if !external_semaphore_properties
+ .export_from_imported_handle_types
+ .intersects(imported_handle_type.into())
+ {
+ return Err(SemaphoreError::ExportFromImportedNotSupported {
+ imported_handle_type,
+ });
+ }
+ }
+ }
+ }
+
+ if handle_type.has_copy_transference() {
+ // VUID-VkSemaphoreGetWin32HandleInfoKHR-handleType-01129
+ if state.is_wait_pending() {
+ return Err(SemaphoreError::QueueIsWaiting);
+ }
+
+ // VUID-VkSemaphoreGetWin32HandleInfoKHR-handleType-01130
+ if !(state.is_signaled().unwrap_or(false) || state.is_signal_pending()) {
+ return Err(SemaphoreError::HandleTypeCopyNotSignaled);
+ }
+ }
+
+ // VUID-VkSemaphoreGetWin32HandleInfoKHR-handleType-01131
+ if !matches!(
+ handle_type,
+ ExternalSemaphoreHandleType::OpaqueWin32
+ | ExternalSemaphoreHandleType::OpaqueWin32Kmt
+ | ExternalSemaphoreHandleType::D3D12Fence
+ ) {
+ return Err(SemaphoreError::HandleTypeNotWin32);
+ }
+
+ Ok(())
+ }
+
+ #[cfg(windows)]
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+ #[inline]
+ pub unsafe fn export_win32_handle_unchecked(
+ &self,
+ handle_type: ExternalSemaphoreHandleType,
+ ) -> Result<*mut std::ffi::c_void, VulkanError> {
+ let mut state = self.state.lock();
+ self.export_win32_handle_unchecked_locked(handle_type, &mut state)
+ }
+
+ #[cfg(windows)]
+ unsafe fn export_win32_handle_unchecked_locked(
+ &self,
+ handle_type: ExternalSemaphoreHandleType,
+ state: &mut SemaphoreState,
+ ) -> Result<*mut std::ffi::c_void, VulkanError> {
+ let info_vk = ash::vk::SemaphoreGetWin32HandleInfoKHR {
+ semaphore: self.handle,
+ handle_type: handle_type.into(),
+ ..Default::default()
+ };
+
+ let mut output = MaybeUninit::uninit();
+ let fns = self.device.fns();
+ (fns.khr_external_semaphore_win32
+ .get_semaphore_win32_handle_khr)(
+ self.device.handle(), &info_vk, output.as_mut_ptr()
+ )
+ .result()
+ .map_err(VulkanError::from)?;
+
+ state.export(handle_type);
+
+ Ok(output.assume_init())
+ }
+
+    /// Exports the semaphore into a Zircon event handle.
+    ///
+    /// The [`fuchsia_external_semaphore`](crate::device::DeviceExtensions::fuchsia_external_semaphore)
+    /// extension must be enabled on the device.
+ #[cfg(target_os = "fuchsia")]
+ #[inline]
+ pub fn export_zircon_handle(
+ &self,
+ handle_type: ExternalSemaphoreHandleType,
+ ) -> Result<ash::vk::zx_handle_t, SemaphoreError> {
+ let mut state = self.state.lock();
+ self.validate_export_zircon_handle(handle_type, &state)?;
+
+ unsafe { Ok(self.export_zircon_handle_unchecked_locked(handle_type, &mut state)?) }
+ }
+
+ #[cfg(target_os = "fuchsia")]
+ fn validate_export_zircon_handle(
+ &self,
+ handle_type: ExternalSemaphoreHandleType,
+ state: &SemaphoreState,
+ ) -> Result<(), SemaphoreError> {
+ if !self.device.enabled_extensions().fuchsia_external_semaphore {
+ return Err(SemaphoreError::RequirementNotMet {
+ required_for: "`Semaphore::export_zircon_handle`",
+ requires_one_of: RequiresOneOf {
+ device_extensions: &["fuchsia_external_semaphore"],
+ ..Default::default()
+ },
+ });
+ }
+
+ // VUID-VkSemaphoreGetZirconHandleInfoFUCHSIA-handleType-parameter
+ handle_type.validate_device(&self.device)?;
+
+ // VUID-VkSemaphoreGetZirconHandleInfoFUCHSIA-handleType-04758
+        if !self.export_handle_types.intersects(handle_type.into()) {
+ return Err(SemaphoreError::HandleTypeNotEnabled);
+ }
+
+ // VUID-VkSemaphoreGetZirconHandleInfoFUCHSIA-semaphore-04759
+ if let Some(imported_handle_type) = state.current_import {
+ match imported_handle_type {
+ ImportType::SwapchainAcquire => {
+ return Err(SemaphoreError::ImportedForSwapchainAcquire)
+ }
+ ImportType::ExternalSemaphore(imported_handle_type) => {
+ let external_semaphore_properties = unsafe {
+ self.device
+ .physical_device()
+ .external_semaphore_properties_unchecked(
+ ExternalSemaphoreInfo::handle_type(handle_type),
+ )
+ };
+
+ if !external_semaphore_properties
+ .export_from_imported_handle_types
+                        .intersects(imported_handle_type.into())
+ {
+ return Err(SemaphoreError::ExportFromImportedNotSupported {
+ imported_handle_type,
+ });
+ }
+ }
+ }
+ }
+
+ if handle_type.has_copy_transference() {
+ // VUID-VkSemaphoreGetZirconHandleInfoFUCHSIA-handleType-04760
+ if state.is_wait_pending() {
+ return Err(SemaphoreError::QueueIsWaiting);
+ }
+
+ // VUID-VkSemaphoreGetZirconHandleInfoFUCHSIA-handleType-04761
+ if !(state.is_signaled().unwrap_or(false) || state.is_signal_pending()) {
+ return Err(SemaphoreError::HandleTypeCopyNotSignaled);
+ }
+ }
+
+ // VUID-VkSemaphoreGetZirconHandleInfoFUCHSIA-handleType-04762
+ if !matches!(handle_type, ExternalSemaphoreHandleType::ZirconEvent) {
+ return Err(SemaphoreError::HandleTypeNotZircon);
+ }
+
+ Ok(())
+ }
+
+ #[cfg(target_os = "fuchsia")]
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+ #[inline]
+ pub unsafe fn export_zircon_handle_unchecked(
+ &self,
+ handle_type: ExternalSemaphoreHandleType,
+ ) -> Result<ash::vk::zx_handle_t, VulkanError> {
+ let mut state = self.state.lock();
+ self.export_zircon_handle_unchecked_locked(handle_type, &mut state)
+ }
+
+ #[cfg(target_os = "fuchsia")]
+ unsafe fn export_zircon_handle_unchecked_locked(
+ &self,
+ handle_type: ExternalSemaphoreHandleType,
+ state: &mut SemaphoreState,
+ ) -> Result<ash::vk::zx_handle_t, VulkanError> {
+ let info = ash::vk::SemaphoreGetZirconHandleInfoFUCHSIA {
+ semaphore: self.handle,
+ handle_type: handle_type.into(),
+ ..Default::default()
+ };
+
+ let mut output = MaybeUninit::uninit();
+ let fns = self.device.fns();
+ (fns.fuchsia_external_semaphore
+ .get_semaphore_zircon_handle_fuchsia)(
+ self.device.handle(), &info, output.as_mut_ptr()
+ )
+ .result()
+ .map_err(VulkanError::from)?;
+
+ state.export(handle_type);
+
+ Ok(output.assume_init())
+ }
+
+ /// Imports a semaphore from a POSIX file descriptor.
+ ///
+ /// The [`khr_external_semaphore_fd`](crate::device::DeviceExtensions::khr_external_semaphore_fd)
+ /// extension must be enabled on the device.
+ ///
+ /// # Safety
+ ///
+    /// - If in `import_semaphore_fd_info`, `handle_type` is
+    ///   `ExternalSemaphoreHandleType::OpaqueFd`, then `file` must represent a binary semaphore
+    ///   that was exported from Vulkan or a compatible API, with a driver and device UUID equal
+    ///   to those of the device that owns `self`.
+ #[cfg(unix)]
+ #[inline]
+ pub unsafe fn import_fd(
+ &self,
+ import_semaphore_fd_info: ImportSemaphoreFdInfo,
+ ) -> Result<(), SemaphoreError> {
+ let mut state = self.state.lock();
+ self.validate_import_fd(&import_semaphore_fd_info, &state)?;
+
+ Ok(self.import_fd_unchecked_locked(import_semaphore_fd_info, &mut state)?)
+ }
+
+ #[cfg(unix)]
+ fn validate_import_fd(
+ &self,
+ import_semaphore_fd_info: &ImportSemaphoreFdInfo,
+ state: &SemaphoreState,
+ ) -> Result<(), SemaphoreError> {
+ if !self.device.enabled_extensions().khr_external_semaphore_fd {
+ return Err(SemaphoreError::RequirementNotMet {
+ required_for: "`Semaphore::import_fd`",
+ requires_one_of: RequiresOneOf {
+ device_extensions: &["khr_external_semaphore_fd"],
+ ..Default::default()
+ },
+ });
+ }
+
+ // VUID-vkImportSemaphoreFdKHR-semaphore-01142
+ if state.is_in_queue() {
+ return Err(SemaphoreError::InQueue);
+ }
+
+ let &ImportSemaphoreFdInfo {
+ flags,
+ handle_type,
+ file: _,
+ _ne: _,
+ } = import_semaphore_fd_info;
+
+ // VUID-VkImportSemaphoreFdInfoKHR-flags-parameter
+ flags.validate_device(&self.device)?;
+
+ // VUID-VkImportSemaphoreFdInfoKHR-handleType-parameter
+ handle_type.validate_device(&self.device)?;
+
+ // VUID-VkImportSemaphoreFdInfoKHR-handleType-01143
+ if !matches!(
+ handle_type,
+ ExternalSemaphoreHandleType::OpaqueFd | ExternalSemaphoreHandleType::SyncFd
+ ) {
+ return Err(SemaphoreError::HandleTypeNotFd);
+ }
+
+ // VUID-VkImportSemaphoreFdInfoKHR-fd-01544
+ // VUID-VkImportSemaphoreFdInfoKHR-handleType-03263
+ // Can't validate, therefore unsafe
+
+ // VUID-VkImportSemaphoreFdInfoKHR-handleType-07307
+ if handle_type.has_copy_transference() && !flags.intersects(SemaphoreImportFlags::TEMPORARY)
+ {
+ return Err(SemaphoreError::HandletypeCopyNotTemporary);
+ }
+
+ Ok(())
+ }
+
+ #[cfg(unix)]
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+ #[inline]
+ pub unsafe fn import_fd_unchecked(
+ &self,
+ import_semaphore_fd_info: ImportSemaphoreFdInfo,
+ ) -> Result<(), VulkanError> {
+ let mut state = self.state.lock();
+ self.import_fd_unchecked_locked(import_semaphore_fd_info, &mut state)
+ }
+
+ #[cfg(unix)]
+ unsafe fn import_fd_unchecked_locked(
+ &self,
+ import_semaphore_fd_info: ImportSemaphoreFdInfo,
+ state: &mut SemaphoreState,
+ ) -> Result<(), VulkanError> {
+ use std::os::unix::io::IntoRawFd;
+
+ let ImportSemaphoreFdInfo {
+ flags,
+ handle_type,
+ file,
+ _ne: _,
+ } = import_semaphore_fd_info;
+
+ let info_vk = ash::vk::ImportSemaphoreFdInfoKHR {
+ semaphore: self.handle,
+ flags: flags.into(),
+ handle_type: handle_type.into(),
+ fd: file.map_or(-1, |file| file.into_raw_fd()),
+ ..Default::default()
+ };
+
+ let fns = self.device.fns();
+ (fns.khr_external_semaphore_fd.import_semaphore_fd_khr)(self.device.handle(), &info_vk)
+ .result()
+ .map_err(VulkanError::from)?;
+
+ state.import(
+ handle_type,
+ flags.intersects(SemaphoreImportFlags::TEMPORARY),
+ );
+
+ Ok(())
+ }
+
+ /// Imports a semaphore from a Win32 handle.
+ ///
+ /// The [`khr_external_semaphore_win32`](crate::device::DeviceExtensions::khr_external_semaphore_win32)
+ /// extension must be enabled on the device.
+ ///
+ /// # Safety
+ ///
+ /// - In `import_semaphore_win32_handle_info`, `handle` must represent a binary semaphore that
+ /// was exported from Vulkan or a compatible API, with a driver and device UUID equal to
+ /// those of the device that owns `self`.
+ #[cfg(windows)]
+ #[inline]
+ pub unsafe fn import_win32_handle(
+ &self,
+ import_semaphore_win32_handle_info: ImportSemaphoreWin32HandleInfo,
+ ) -> Result<(), SemaphoreError> {
+ let mut state = self.state.lock();
+ self.validate_import_win32_handle(&import_semaphore_win32_handle_info, &state)?;
+
+ Ok(self
+ .import_win32_handle_unchecked_locked(import_semaphore_win32_handle_info, &mut state)?)
+ }
+
+ #[cfg(windows)]
+ fn validate_import_win32_handle(
+ &self,
+ import_semaphore_win32_handle_info: &ImportSemaphoreWin32HandleInfo,
+ state: &SemaphoreState,
+ ) -> Result<(), SemaphoreError> {
+ if !self
+ .device
+ .enabled_extensions()
+ .khr_external_semaphore_win32
+ {
+ return Err(SemaphoreError::RequirementNotMet {
+ required_for: "`Semaphore::import_win32_handle`",
+ requires_one_of: RequiresOneOf {
+ device_extensions: &["khr_external_semaphore_win32"],
+ ..Default::default()
+ },
+ });
+ }
+
+ // VUID?
+ if state.is_in_queue() {
+ return Err(SemaphoreError::InQueue);
+ }
+
+ let &ImportSemaphoreWin32HandleInfo {
+ flags,
+ handle_type,
+ handle: _,
+ _ne: _,
+ } = import_semaphore_win32_handle_info;
+
+ // VUID-VkImportSemaphoreWin32HandleInfoKHR-flags-parameter
+ flags.validate_device(&self.device)?;
+
+        // VUID-VkImportSemaphoreWin32HandleInfoKHR-handleType-parameter
+ handle_type.validate_device(&self.device)?;
+
+ // VUID-VkImportSemaphoreWin32HandleInfoKHR-handleType-01140
+ if !matches!(
+ handle_type,
+ ExternalSemaphoreHandleType::OpaqueWin32
+ | ExternalSemaphoreHandleType::OpaqueWin32Kmt
+ | ExternalSemaphoreHandleType::D3D12Fence
+ ) {
+ return Err(SemaphoreError::HandleTypeNotWin32);
+ }
+
+ // VUID-VkImportSemaphoreWin32HandleInfoKHR-handle-01542
+ // Can't validate, therefore unsafe
+
+ // VUID?
+ if handle_type.has_copy_transference() && !flags.intersects(SemaphoreImportFlags::TEMPORARY)
+ {
+ return Err(SemaphoreError::HandletypeCopyNotTemporary);
+ }
+
+ Ok(())
+ }
+
+ #[cfg(windows)]
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+ #[inline]
+ pub unsafe fn import_win32_handle_unchecked(
+ &self,
+ import_semaphore_win32_handle_info: ImportSemaphoreWin32HandleInfo,
+ ) -> Result<(), VulkanError> {
+ let mut state = self.state.lock();
+ self.import_win32_handle_unchecked_locked(import_semaphore_win32_handle_info, &mut state)
+ }
+
+ #[cfg(windows)]
+ unsafe fn import_win32_handle_unchecked_locked(
+ &self,
+ import_semaphore_win32_handle_info: ImportSemaphoreWin32HandleInfo,
+ state: &mut SemaphoreState,
+ ) -> Result<(), VulkanError> {
+ let ImportSemaphoreWin32HandleInfo {
+ flags,
+ handle_type,
+ handle,
+ _ne: _,
+ } = import_semaphore_win32_handle_info;
+
+ let info_vk = ash::vk::ImportSemaphoreWin32HandleInfoKHR {
+ semaphore: self.handle,
+ flags: flags.into(),
+ handle_type: handle_type.into(),
+ handle,
+ name: ptr::null(), // TODO: support?
+ ..Default::default()
+ };
+
+ let fns = self.device.fns();
+ (fns.khr_external_semaphore_win32
+ .import_semaphore_win32_handle_khr)(self.device.handle(), &info_vk)
+ .result()
+ .map_err(VulkanError::from)?;
+
+ state.import(
+ handle_type,
+ flags.intersects(SemaphoreImportFlags::TEMPORARY),
+ );
+
+ Ok(())
+ }
+
+ /// Imports a semaphore from a Zircon event handle.
+ ///
+ /// The [`fuchsia_external_semaphore`](crate::device::DeviceExtensions::fuchsia_external_semaphore)
+ /// extension must be enabled on the device.
+ ///
+ /// # Safety
+ ///
+ /// - In `import_semaphore_zircon_handle_info`, `zircon_handle` must have `ZX_RIGHTS_BASIC` and
+ /// `ZX_RIGHTS_SIGNAL`.
+ #[cfg(target_os = "fuchsia")]
+ #[inline]
+ pub unsafe fn import_zircon_handle(
+ &self,
+ import_semaphore_zircon_handle_info: ImportSemaphoreZirconHandleInfo,
+ ) -> Result<(), SemaphoreError> {
+ let mut state = self.state.lock();
+ self.validate_import_zircon_handle(&import_semaphore_zircon_handle_info, &state)?;
+
+ Ok(self.import_zircon_handle_unchecked_locked(
+ import_semaphore_zircon_handle_info,
+ &mut state,
+ )?)
+ }
+
+ #[cfg(target_os = "fuchsia")]
+ fn validate_import_zircon_handle(
+ &self,
+ import_semaphore_zircon_handle_info: &ImportSemaphoreZirconHandleInfo,
+ state: &SemaphoreState,
+ ) -> Result<(), SemaphoreError> {
+ if !self.device.enabled_extensions().fuchsia_external_semaphore {
+ return Err(SemaphoreError::RequirementNotMet {
+ required_for: "`Semaphore::import_zircon_handle`",
+ requires_one_of: RequiresOneOf {
+ device_extensions: &["fuchsia_external_semaphore"],
+ ..Default::default()
+ },
+ });
+ }
+
+ // VUID-vkImportSemaphoreZirconHandleFUCHSIA-semaphore-04764
+ if state.is_in_queue() {
+ return Err(SemaphoreError::InQueue);
+ }
+
+ let &ImportSemaphoreZirconHandleInfo {
+ flags,
+ handle_type,
+ zircon_handle: _,
+ _ne: _,
+ } = import_semaphore_zircon_handle_info;
+
+ // VUID-VkImportSemaphoreZirconHandleInfoFUCHSIA-flags-parameter
+ flags.validate_device(&self.device)?;
+
+ // VUID-VkImportSemaphoreZirconHandleInfoFUCHSIA-handleType-parameter
+ handle_type.validate_device(&self.device)?;
+
+ // VUID-VkImportSemaphoreZirconHandleInfoFUCHSIA-handleType-04765
+ if !matches!(handle_type, ExternalSemaphoreHandleType::ZirconEvent) {
+            return Err(SemaphoreError::HandleTypeNotZircon);
+ }
+
+ // VUID-VkImportSemaphoreZirconHandleInfoFUCHSIA-zirconHandle-04766
+ // VUID-VkImportSemaphoreZirconHandleInfoFUCHSIA-zirconHandle-04767
+ // Can't validate, therefore unsafe
+
+ if handle_type.has_copy_transference() && !flags.intersects(SemaphoreImportFlags::TEMPORARY)
+ {
+ return Err(SemaphoreError::HandletypeCopyNotTemporary);
+ }
+
+ Ok(())
+ }
+
+ #[cfg(target_os = "fuchsia")]
+ #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+ #[inline]
+ pub unsafe fn import_zircon_handle_unchecked(
+ &self,
+ import_semaphore_zircon_handle_info: ImportSemaphoreZirconHandleInfo,
+ ) -> Result<(), VulkanError> {
+ let mut state = self.state.lock();
+ self.import_zircon_handle_unchecked_locked(import_semaphore_zircon_handle_info, &mut state)
+ }
+
+ #[cfg(target_os = "fuchsia")]
+ unsafe fn import_zircon_handle_unchecked_locked(
+ &self,
+ import_semaphore_zircon_handle_info: ImportSemaphoreZirconHandleInfo,
+ state: &mut SemaphoreState,
+ ) -> Result<(), VulkanError> {
+ let ImportSemaphoreZirconHandleInfo {
+ flags,
+ handle_type,
+ zircon_handle,
+ _ne: _,
+ } = import_semaphore_zircon_handle_info;
+
+ let info_vk = ash::vk::ImportSemaphoreZirconHandleInfoFUCHSIA {
+ semaphore: self.handle,
+ flags: flags.into(),
+ handle_type: handle_type.into(),
+ zircon_handle,
+ ..Default::default()
+ };
+
+ let fns = self.device.fns();
+ (fns.fuchsia_external_semaphore
+ .import_semaphore_zircon_handle_fuchsia)(self.device.handle(), &info_vk)
+ .result()
+ .map_err(VulkanError::from)?;
+
+ state.import(
+ handle_type,
+ flags.intersects(SemaphoreImportFlags::TEMPORARY),
+ );
+
+ Ok(())
+ }
+
+ pub(crate) fn state(&self) -> MutexGuard<'_, SemaphoreState> {
+ self.state.lock()
+ }
+}
+
+impl Drop for Semaphore {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ if self.must_put_in_pool {
+ let raw_sem = self.handle;
+ self.device.semaphore_pool().lock().push(raw_sem);
+ } else {
+ let fns = self.device.fns();
+ (fns.v1_0.destroy_semaphore)(self.device.handle(), self.handle, ptr::null());
+ }
+ }
+ }
+}
+
+unsafe impl VulkanObject for Semaphore {
+ type Handle = ash::vk::Semaphore;
+
+ #[inline]
+ fn handle(&self) -> Self::Handle {
+ self.handle
+ }
+}
+
+unsafe impl DeviceOwned for Semaphore {
+ #[inline]
+ fn device(&self) -> &Arc<Device> {
+ &self.device
+ }
+}
+
+impl_id_counter!(Semaphore);
+
+#[derive(Debug, Default)]
+pub(crate) struct SemaphoreState {
+ is_signaled: bool,
+ pending_signal: Option<SignalType>,
+ pending_wait: Option<Weak<Queue>>,
+
+ reference_exported: bool,
+ exported_handle_types: ExternalSemaphoreHandleTypes,
+ current_import: Option<ImportType>,
+ permanent_import: Option<ExternalSemaphoreHandleType>,
+}
+
+impl SemaphoreState {
+ /// If the semaphore does not have a pending operation and has no external references,
+ /// returns the current status.
+ #[inline]
+ fn is_signaled(&self) -> Option<bool> {
+ // If any of these is true, we can't be certain of the status.
+ if self.pending_signal.is_some()
+ || self.pending_wait.is_some()
+ || self.has_external_reference()
+ {
+ None
+ } else {
+ Some(self.is_signaled)
+ }
+ }
+
+ #[inline]
+ fn is_signal_pending(&self) -> bool {
+ self.pending_signal.is_some()
+ }
+
+ #[inline]
+ fn is_wait_pending(&self) -> bool {
+ self.pending_wait.is_some()
+ }
+
+ #[inline]
+ fn is_in_queue(&self) -> bool {
+ matches!(self.pending_signal, Some(SignalType::Queue(_))) || self.pending_wait.is_some()
+ }
+
+ /// Returns whether there are any potential external references to the semaphore payload.
+ /// That is, the semaphore has been exported by reference transference, or imported.
+ #[inline]
+ fn has_external_reference(&self) -> bool {
+ self.reference_exported || self.current_import.is_some()
+ }
+
+ #[allow(dead_code)]
+ #[inline]
+ fn is_exported(&self, handle_type: ExternalSemaphoreHandleType) -> bool {
+ self.exported_handle_types.intersects(handle_type.into())
+ }
+
+ #[inline]
+ pub(crate) unsafe fn add_queue_signal(&mut self, queue: &Arc<Queue>) {
+ self.pending_signal = Some(SignalType::Queue(Arc::downgrade(queue)));
+ }
+
+ #[inline]
+ pub(crate) unsafe fn add_queue_wait(&mut self, queue: &Arc<Queue>) {
+ self.pending_wait = Some(Arc::downgrade(queue));
+ }
+
+ /// Called when a queue is unlocking resources.
+ #[inline]
+ pub(crate) unsafe fn set_signal_finished(&mut self) {
+ self.pending_signal = None;
+ self.is_signaled = true;
+ }
+
+ /// Called when a queue is unlocking resources.
+ #[inline]
+ pub(crate) unsafe fn set_wait_finished(&mut self) {
+ self.pending_wait = None;
+ self.current_import = self.permanent_import.map(Into::into);
+ self.is_signaled = false;
+ }
+
+ #[allow(dead_code)]
+ #[inline]
+ unsafe fn export(&mut self, handle_type: ExternalSemaphoreHandleType) {
+ self.exported_handle_types |= handle_type.into();
+
+ if handle_type.has_copy_transference() {
+ self.current_import = self.permanent_import.map(Into::into);
+ self.is_signaled = false;
+ } else {
+ self.reference_exported = true;
+ }
+ }
+
+ #[allow(dead_code)]
+ #[inline]
+ unsafe fn import(&mut self, handle_type: ExternalSemaphoreHandleType, temporary: bool) {
+ self.current_import = Some(handle_type.into());
+
+ if !temporary {
+ self.permanent_import = Some(handle_type);
+ }
+ }
+
+ #[inline]
+ pub(crate) unsafe fn swapchain_acquire(&mut self) {
+ self.pending_signal = Some(SignalType::SwapchainAcquire);
+ self.current_import = Some(ImportType::SwapchainAcquire);
+ }
+}
+
+#[derive(Clone, Debug)]
+enum SignalType {
+ Queue(Weak<Queue>),
+ SwapchainAcquire,
+}
+
+#[derive(Clone, Copy, Debug)]
+enum ImportType {
+ SwapchainAcquire,
+ ExternalSemaphore(ExternalSemaphoreHandleType),
+}
+
+impl From<ExternalSemaphoreHandleType> for ImportType {
+ #[inline]
+ fn from(handle_type: ExternalSemaphoreHandleType) -> Self {
+ Self::ExternalSemaphore(handle_type)
+ }
+}
+
+/// Parameters to create a new `Semaphore`.
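+///
+/// A sketch of creating a semaphore that can be exported as an opaque POSIX file descriptor,
+/// assuming the required external-semaphore extensions are enabled on the device:
+///
+/// ```ignore
+/// let semaphore = Semaphore::new(
+///     device.clone(),
+///     SemaphoreCreateInfo {
+///         export_handle_types: ExternalSemaphoreHandleTypes::OPAQUE_FD,
+///         ..Default::default()
+///     },
+/// )?;
+/// ```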
+#[derive(Clone, Debug)]
+pub struct SemaphoreCreateInfo {
+ /// The handle types that can be exported from the semaphore.
+ ///
+ /// The default value is [`ExternalSemaphoreHandleTypes::empty()`].
+ pub export_handle_types: ExternalSemaphoreHandleTypes,
+
+ pub _ne: crate::NonExhaustive,
+}
+
+impl Default for SemaphoreCreateInfo {
+ #[inline]
+ fn default() -> Self {
+ Self {
+ export_handle_types: ExternalSemaphoreHandleTypes::empty(),
+ _ne: crate::NonExhaustive(()),
+ }
+ }
+}
+
+vulkan_bitflags_enum! {
+ #[non_exhaustive]
+
+ /// A set of [`ExternalSemaphoreHandleType`] values.
+ ExternalSemaphoreHandleTypes,
+
+ /// The handle type used to export or import semaphores to/from an external source.
+ ExternalSemaphoreHandleType impl {
+ /// Returns whether the given handle type has *copy transference* rather than *reference
+ /// transference*.
+ ///
+    /// Imports of handles with copy transference must always be temporary. Exports of such
+    /// handles must only occur if no queue is waiting on the semaphore, and only if the
+    /// semaphore is either already signaled or has a signal operation pending in a queue.
+ #[inline]
+ pub fn has_copy_transference(self) -> bool {
+ // As defined by
+ // https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-semaphore-handletypes-win32
+ // https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-semaphore-handletypes-fd
+ // https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-semaphore-handletypes-fuchsia
+ matches!(self, Self::SyncFd)
+ }
+ },
+
+ = ExternalSemaphoreHandleTypeFlags(u32);
+
+ /// A POSIX file descriptor handle that is only usable with Vulkan and compatible APIs.
+ ///
+ /// This handle type has *reference transference*.
+ OPAQUE_FD, OpaqueFd = OPAQUE_FD,
+
+ /// A Windows NT handle that is only usable with Vulkan and compatible APIs.
+ ///
+ /// This handle type has *reference transference*.
+ OPAQUE_WIN32, OpaqueWin32 = OPAQUE_WIN32,
+
+ /// A Windows global share handle that is only usable with Vulkan and compatible APIs.
+ ///
+ /// This handle type has *reference transference*.
+ OPAQUE_WIN32_KMT, OpaqueWin32Kmt = OPAQUE_WIN32_KMT,
+
+ /// A Windows NT handle that refers to a Direct3D 11 or 12 fence.
+ ///
+ /// This handle type has *reference transference*.
+ D3D12_FENCE, D3D12Fence = D3D12_FENCE,
+
+ /// A POSIX file descriptor handle to a Linux Sync File or Android Fence object.
+ ///
+ /// This handle type has *copy transference*.
+ SYNC_FD, SyncFd = SYNC_FD,
+
+ /// A handle to a Zircon event object.
+ ///
+ /// This handle type has *reference transference*.
+ ///
+ /// The [`fuchsia_external_semaphore`] extension must be enabled on the device.
+ ///
+ /// [`fuchsia_external_semaphore`]: crate::device::DeviceExtensions::fuchsia_external_semaphore
+ ZIRCON_EVENT, ZirconEvent = ZIRCON_EVENT_FUCHSIA {
+ device_extensions: [fuchsia_external_semaphore],
+ },
+}
+
+vulkan_bitflags! {
+ #[non_exhaustive]
+
+ /// Additional parameters for a semaphore payload import.
+ SemaphoreImportFlags = SemaphoreImportFlags(u32);
+
+ /// The semaphore payload will be imported only temporarily, regardless of the permanence of the
+ /// imported handle type.
+ TEMPORARY = TEMPORARY,
+}
+
+#[cfg(unix)]
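+/// Parameters to import a semaphore payload from a POSIX file descriptor.
+///
+/// A sketch of a temporary sync-fd import; the `file` and `semaphore` values are assumed to be
+/// in scope, and sync-fd handles have *copy transference*, so the import must be temporary:
+///
+/// ```ignore
+/// let info = ImportSemaphoreFdInfo {
+///     flags: SemaphoreImportFlags::TEMPORARY,
+///     file: Some(file),
+///     ..ImportSemaphoreFdInfo::handle_type(ExternalSemaphoreHandleType::SyncFd)
+/// };
+/// unsafe { semaphore.import_fd(info) }?;
+/// ```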
+#[derive(Debug)]
+pub struct ImportSemaphoreFdInfo {
+ /// Additional parameters for the import operation.
+ ///
+ /// If `handle_type` has *copy transference*, this must include the `temporary` flag.
+ ///
+ /// The default value is [`SemaphoreImportFlags::empty()`].
+ pub flags: SemaphoreImportFlags,
+
+ /// The handle type of `file`.
+ ///
+ /// There is no default value.
+ pub handle_type: ExternalSemaphoreHandleType,
+
+ /// The file to import the semaphore from.
+ ///
+    /// If `handle_type` is `ExternalSemaphoreHandleType::SyncFd`, then `file` can be `None`.
+    /// In that case, the special value `-1` is passed in place of a file descriptor, which
+    /// represents a semaphore payload that is always signaled.
+    ///
+    /// The default value is `None`, which must be overridden if `handle_type` is not
+    /// `ExternalSemaphoreHandleType::SyncFd`.
+ pub file: Option<File>,
+
+ pub _ne: crate::NonExhaustive,
+}
+
+#[cfg(unix)]
+impl ImportSemaphoreFdInfo {
+ /// Returns an `ImportSemaphoreFdInfo` with the specified `handle_type`.
+ #[inline]
+ pub fn handle_type(handle_type: ExternalSemaphoreHandleType) -> Self {
+ Self {
+ flags: SemaphoreImportFlags::empty(),
+ handle_type,
+ file: None,
+ _ne: crate::NonExhaustive(()),
+ }
+ }
+}
+
+#[cfg(windows)]
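+/// Parameters to import a semaphore payload from a Win32 handle.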
+#[derive(Debug)]
+pub struct ImportSemaphoreWin32HandleInfo {
+ /// Additional parameters for the import operation.
+ ///
+ /// If `handle_type` has *copy transference*, this must include the `temporary` flag.
+ ///
+ /// The default value is [`SemaphoreImportFlags::empty()`].
+ pub flags: SemaphoreImportFlags,
+
+ /// The handle type of `handle`.
+ ///
+ /// There is no default value.
+ pub handle_type: ExternalSemaphoreHandleType,
+
+ /// The handle to import the semaphore from.
+ ///
+ /// The default value is `null`, which must be overridden.
+ pub handle: *mut std::ffi::c_void,
+
+ pub _ne: crate::NonExhaustive,
+}
+
+#[cfg(windows)]
+impl ImportSemaphoreWin32HandleInfo {
+ /// Returns an `ImportSemaphoreWin32HandleInfo` with the specified `handle_type`.
+ #[inline]
+ pub fn handle_type(handle_type: ExternalSemaphoreHandleType) -> Self {
+ Self {
+ flags: SemaphoreImportFlags::empty(),
+ handle_type,
+ handle: ptr::null_mut(),
+ _ne: crate::NonExhaustive(()),
+ }
+ }
+}
+
+#[cfg(target_os = "fuchsia")]
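+/// Parameters to import a semaphore payload from a Zircon event handle.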
+#[derive(Debug)]
+pub struct ImportSemaphoreZirconHandleInfo {
+ /// Additional parameters for the import operation.
+ ///
+ /// If `handle_type` has *copy transference*, this must include the `temporary` flag.
+ ///
+ /// The default value is [`SemaphoreImportFlags::empty()`].
+ pub flags: SemaphoreImportFlags,
+
+ /// The handle type of `handle`.
+ ///
+ /// There is no default value.
+ pub handle_type: ExternalSemaphoreHandleType,
+
+ /// The handle to import the semaphore from.
+ ///
+ /// The default value is `ZX_HANDLE_INVALID`, which must be overridden.
+ pub zircon_handle: ash::vk::zx_handle_t,
+
+ pub _ne: crate::NonExhaustive,
+}
+
+#[cfg(target_os = "fuchsia")]
+impl ImportSemaphoreZirconHandleInfo {
+ /// Returns an `ImportSemaphoreZirconHandleInfo` with the specified `handle_type`.
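+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; `handle` stands for a raw Zircon event handle exported by another
+    /// API (here it is only the invalid placeholder handle `0`):
+    ///
+    /// ```
+    /// use vulkano::sync::semaphore::{
+    ///     ExternalSemaphoreHandleType, ImportSemaphoreZirconHandleInfo,
+    /// };
+    ///
+    /// # let handle = 0;
+    /// let import_info = ImportSemaphoreZirconHandleInfo {
+    ///     zircon_handle: handle,
+    ///     ..ImportSemaphoreZirconHandleInfo::handle_type(ExternalSemaphoreHandleType::ZirconEvent)
+    /// };
+    /// ```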
+ #[inline]
+ pub fn handle_type(handle_type: ExternalSemaphoreHandleType) -> Self {
+ Self {
+ flags: SemaphoreImportFlags::empty(),
+ handle_type,
+            zircon_handle: 0, // `ZX_HANDLE_INVALID`
+ _ne: crate::NonExhaustive(()),
+ }
+ }
+}
+
+/// The semaphore configuration to query in
+/// [`PhysicalDevice::external_semaphore_properties`](crate::device::physical::PhysicalDevice::external_semaphore_properties).
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct ExternalSemaphoreInfo {
+ /// The external handle type that will be used with the semaphore.
+ pub handle_type: ExternalSemaphoreHandleType,
+
+ pub _ne: crate::NonExhaustive,
+}
+
+impl ExternalSemaphoreInfo {
+ /// Returns an `ExternalSemaphoreInfo` with the specified `handle_type`.
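+    ///
+    /// # Examples
+    ///
+    /// A sketch of querying whether a handle type is usable on a physical device; the exact
+    /// return type of [`external_semaphore_properties`] is assumed to be a `Result` here:
+    ///
+    /// ```
+    /// use vulkano::sync::semaphore::{ExternalSemaphoreHandleType, ExternalSemaphoreInfo};
+    ///
+    /// # fn doc(physical_device: &vulkano::device::physical::PhysicalDevice) {
+    /// let info = ExternalSemaphoreInfo::handle_type(ExternalSemaphoreHandleType::OpaqueFd);
+    /// let properties = physical_device
+    ///     .external_semaphore_properties(info)
+    ///     .unwrap();
+    ///
+    /// if properties.exportable && properties.importable {
+    ///     // `OpaqueFd` handles can be both exported and imported on this device.
+    /// }
+    /// # }
+    /// ```
+    ///
+    /// [`external_semaphore_properties`]: crate::device::physical::PhysicalDevice::external_semaphore_properties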
+ #[inline]
+ pub fn handle_type(handle_type: ExternalSemaphoreHandleType) -> Self {
+ Self {
+ handle_type,
+ _ne: crate::NonExhaustive(()),
+ }
+ }
+}
+
+/// The properties for exporting or importing external handles, when a semaphore is created
+/// with a specific configuration.
+#[derive(Clone, Debug)]
+#[non_exhaustive]
+pub struct ExternalSemaphoreProperties {
+ /// Whether a handle can be exported to an external source with the queried
+ /// external handle type.
+ pub exportable: bool,
+
+ /// Whether a handle can be imported from an external source with the queried
+ /// external handle type.
+ pub importable: bool,
+
+ /// Which external handle types can be re-exported after the queried external handle type has
+ /// been imported.
+ pub export_from_imported_handle_types: ExternalSemaphoreHandleTypes,
+
+ /// Which external handle types can be enabled along with the queried external handle type
+ /// when creating the semaphore.
+ pub compatible_handle_types: ExternalSemaphoreHandleTypes,
+}
+
+/// Error that can be returned from operations on a semaphore.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum SemaphoreError {
+ /// Not enough memory available.
+ OomError(OomError),
+
+    /// A requirement for the operation was not met.
+    RequirementNotMet {
+ required_for: &'static str,
+ requires_one_of: RequiresOneOf,
+ },
+
+ /// The provided handle type does not permit more than one export,
+ /// and a handle of this type was already exported previously.
+ AlreadyExported,
+
+    /// The provided handle type cannot be exported from the currently imported handle type.
+ ExportFromImportedNotSupported {
+ imported_handle_type: ExternalSemaphoreHandleType,
+ },
+
+    /// One of the export handle types is not compatible with the other provided handle types.
+ ExportHandleTypesNotCompatible,
+
+ /// A handle type with copy transference was provided, but the semaphore is not signaled and
+ /// there is no pending queue operation that will signal it.
+ HandleTypeCopyNotSignaled,
+
+    /// A handle type with copy transference was provided,
+    /// but the `TEMPORARY` import flag was not set.
+    HandleTypeCopyNotTemporary,
+
+ /// The provided export handle type was not set in `export_handle_types` when creating the
+ /// semaphore.
+ HandleTypeNotEnabled,
+
+ /// Exporting is not supported for the provided handle type.
+ HandleTypeNotExportable {
+ handle_type: ExternalSemaphoreHandleType,
+ },
+
+ /// The provided handle type is not a POSIX file descriptor handle.
+ HandleTypeNotFd,
+
+ /// The provided handle type is not a Win32 handle.
+ HandleTypeNotWin32,
+
+ /// The provided handle type is not a Zircon event handle.
+ HandleTypeNotZircon,
+
+ /// The semaphore currently has a temporary import for a swapchain acquire operation.
+ ImportedForSwapchainAcquire,
+
+ /// The semaphore is currently in use by a queue.
+ InQueue,
+
+ /// A queue is currently waiting on the semaphore.
+ QueueIsWaiting,
+}
+
+impl Error for SemaphoreError {
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ match self {
+ Self::OomError(err) => Some(err),
+ _ => None,
+ }
+ }
+}
+
+impl Display for SemaphoreError {
+ fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
+ match self {
+ Self::OomError(_) => write!(f, "not enough memory available"),
+ Self::RequirementNotMet {
+ required_for,
+ requires_one_of,
+ } => write!(
+ f,
+ "a requirement was not met for: {}; requires one of: {}",
+ required_for, requires_one_of,
+ ),
+
+ Self::AlreadyExported => write!(
+ f,
+ "the provided handle type does not permit more than one export, and a handle of \
+ this type was already exported previously",
+ ),
+ Self::ExportFromImportedNotSupported {
+ imported_handle_type,
+ } => write!(
+ f,
+ "the provided handle type cannot be exported from the current imported handle type \
+ {:?}",
+ imported_handle_type,
+ ),
+ Self::ExportHandleTypesNotCompatible => write!(
+ f,
+ "one of the export handle types is not compatible with the other provided handles",
+ ),
+ Self::HandleTypeCopyNotSignaled => write!(
+ f,
+ "a handle type with copy transference was provided, but the semaphore is not \
+ signaled and there is no pending queue operation that will signal it",
+ ),
+            Self::HandleTypeCopyNotTemporary => write!(
+                f,
+                "a handle type with copy transference was provided, but the `TEMPORARY` \
+                import flag was not set",
+            ),
+ Self::HandleTypeNotEnabled => write!(
+ f,
+ "the provided export handle type was not set in `export_handle_types` when \
+ creating the semaphore",
+ ),
+ Self::HandleTypeNotExportable { handle_type } => write!(
+ f,
+ "exporting is not supported for handles of type {:?}",
+ handle_type,
+ ),
+ Self::HandleTypeNotFd => write!(
+ f,
+ "the provided handle type is not a POSIX file descriptor handle",
+ ),
+ Self::HandleTypeNotWin32 => {
+ write!(f, "the provided handle type is not a Win32 handle")
+ }
+ Self::HandleTypeNotZircon => {
+ write!(f, "the provided handle type is not a Zircon event handle")
+ }
+ Self::ImportedForSwapchainAcquire => write!(
+ f,
+ "the semaphore currently has a temporary import for a swapchain acquire operation",
+ ),
+ Self::InQueue => write!(f, "the semaphore is currently in use by a queue"),
+ Self::QueueIsWaiting => write!(f, "a queue is currently waiting on the semaphore"),
+ }
+ }
+}
+
+impl From<VulkanError> for SemaphoreError {
+ fn from(err: VulkanError) -> Self {
+ match err {
+ e @ VulkanError::OutOfHostMemory | e @ VulkanError::OutOfDeviceMemory => {
+ Self::OomError(e.into())
+ }
+ _ => panic!("unexpected error: {:?}", err),
+ }
+ }
+}
+
+impl From<OomError> for SemaphoreError {
+ fn from(err: OomError) -> Self {
+ Self::OomError(err)
+ }
+}
+
+impl From<RequirementNotMet> for SemaphoreError {
+ fn from(err: RequirementNotMet) -> Self {
+ Self::RequirementNotMet {
+ required_for: err.required_for,
+ requires_one_of: err.requires_one_of,
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ #[cfg(unix)]
+ use crate::{
+ device::{Device, DeviceCreateInfo, DeviceExtensions, QueueCreateInfo},
+ instance::{Instance, InstanceCreateInfo, InstanceExtensions},
+ sync::semaphore::{
+ ExternalSemaphoreHandleType, ExternalSemaphoreHandleTypes, SemaphoreCreateInfo,
+ },
+ VulkanLibrary,
+ };
+ use crate::{sync::semaphore::Semaphore, VulkanObject};
+
+ #[test]
+ fn semaphore_create() {
+ let (device, _) = gfx_dev_and_queue!();
+ let _ = Semaphore::new(device, Default::default());
+ }
+
+ #[test]
+ fn semaphore_pool() {
+ let (device, _) = gfx_dev_and_queue!();
+
+ assert_eq!(device.semaphore_pool().lock().len(), 0);
+ let sem1_internal_obj = {
+ let sem = Semaphore::from_pool(device.clone()).unwrap();
+ assert_eq!(device.semaphore_pool().lock().len(), 0);
+ sem.handle()
+ };
+
+ assert_eq!(device.semaphore_pool().lock().len(), 1);
+ let sem2 = Semaphore::from_pool(device.clone()).unwrap();
+ assert_eq!(device.semaphore_pool().lock().len(), 0);
+ assert_eq!(sem2.handle(), sem1_internal_obj);
+ }
+
+ #[test]
+ #[cfg(unix)]
+ fn semaphore_export_fd() {
+ let library = match VulkanLibrary::new() {
+ Ok(x) => x,
+ Err(_) => return,
+ };
+
+ let instance = match Instance::new(
+ library,
+ InstanceCreateInfo {
+ enabled_extensions: InstanceExtensions {
+ khr_get_physical_device_properties2: true,
+ khr_external_semaphore_capabilities: true,
+ ..InstanceExtensions::empty()
+ },
+ ..Default::default()
+ },
+ ) {
+ Ok(x) => x,
+ Err(_) => return,
+ };
+
+ let physical_device = match instance.enumerate_physical_devices() {
+ Ok(mut x) => x.next().unwrap(),
+ Err(_) => return,
+ };
+
+ let (device, _) = match Device::new(
+ physical_device,
+ DeviceCreateInfo {
+ enabled_extensions: DeviceExtensions {
+ khr_external_semaphore: true,
+ khr_external_semaphore_fd: true,
+ ..DeviceExtensions::empty()
+ },
+ queue_create_infos: vec![QueueCreateInfo {
+ queue_family_index: 0,
+ ..Default::default()
+ }],
+ ..Default::default()
+ },
+ ) {
+ Ok(x) => x,
+ Err(_) => return,
+ };
+
+ let sem = Semaphore::new(
+ device,
+ SemaphoreCreateInfo {
+ export_handle_types: ExternalSemaphoreHandleTypes::OPAQUE_FD,
+ ..Default::default()
+ },
+ )
+ .unwrap();
+ let _fd = sem
+ .export_fd(ExternalSemaphoreHandleType::OpaqueFd)
+ .unwrap();
+ }
+}
diff --git a/src/sync/semaphore/external_semaphore_handle_type.rs b/src/sync/semaphore/external_semaphore_handle_type.rs
deleted file mode 100644
index 35af2fa..0000000
--- a/src/sync/semaphore/external_semaphore_handle_type.rs
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright (c) 2021 The vulkano developers
-// Licensed under the Apache License, Version 2.0
-// <LICENSE-APACHE or
-// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
-// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
-// at your option. All files in the project carrying such
-// notice may not be copied, modified, or distributed except
-// according to those terms.
-
-use std::ops::BitOr;
-
-/// Describes the handle type used for Vulkan external semaphore APIs. This is **not**
-/// just a suggestion. Check out VkExternalSemaphoreHandleTypeFlagBits in the Vulkan
-/// spec.
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
-pub struct ExternalSemaphoreHandleType {
- pub opaque_fd: bool,
- pub opaque_win32: bool,
- pub opaque_win32_kmt: bool,
- pub d3d12_fence: bool,
- pub sync_fd: bool,
-}
-
-impl ExternalSemaphoreHandleType {
- /// Builds a `ExternalSemaphoreHandleType` with all values set to false. Useful as a default value.
- ///
- /// # Example
- ///
- /// ```rust
- /// use vulkano::sync::ExternalSemaphoreHandleType as ExternalSemaphoreHandleType;
- ///
- /// let _handle_type = ExternalSemaphoreHandleType {
- /// opaque_fd: true,
- /// .. ExternalSemaphoreHandleType::none()
- /// };
- /// ```
- #[inline]
- pub fn none() -> ExternalSemaphoreHandleType {
- ExternalSemaphoreHandleType {
- opaque_fd: false,
- opaque_win32: false,
- opaque_win32_kmt: false,
- d3d12_fence: false,
- sync_fd: false,
- }
- }
-
- /// Builds an `ExternalSemaphoreHandleType` for a posix file descriptor.
- ///
- /// # Example
- ///
- /// ```rust
- /// use vulkano::sync::ExternalSemaphoreHandleType as ExternalSemaphoreHandleType;
- ///
- /// let _handle_type = ExternalSemaphoreHandleType::posix();
- /// ```
- #[inline]
- pub fn posix() -> ExternalSemaphoreHandleType {
- ExternalSemaphoreHandleType {
- opaque_fd: true,
- ..ExternalSemaphoreHandleType::none()
- }
- }
-}
-
-impl From<ExternalSemaphoreHandleType> for ash::vk::ExternalSemaphoreHandleTypeFlags {
- #[inline]
- fn from(val: ExternalSemaphoreHandleType) -> Self {
- let mut result = ash::vk::ExternalSemaphoreHandleTypeFlags::empty();
- if val.opaque_fd {
- result |= ash::vk::ExternalSemaphoreHandleTypeFlags::OPAQUE_FD;
- }
- if val.opaque_win32 {
- result |= ash::vk::ExternalSemaphoreHandleTypeFlags::OPAQUE_WIN32;
- }
- if val.opaque_win32_kmt {
- result |= ash::vk::ExternalSemaphoreHandleTypeFlags::OPAQUE_WIN32_KMT;
- }
- if val.d3d12_fence {
- result |= ash::vk::ExternalSemaphoreHandleTypeFlags::D3D12_FENCE;
- }
- if val.sync_fd {
- result |= ash::vk::ExternalSemaphoreHandleTypeFlags::SYNC_FD;
- }
- result
- }
-}
-
-impl BitOr for ExternalSemaphoreHandleType {
- type Output = Self;
-
- fn bitor(self, rhs: Self) -> Self {
- ExternalSemaphoreHandleType {
- opaque_fd: self.opaque_fd || rhs.opaque_fd,
- opaque_win32: self.opaque_win32 || rhs.opaque_win32,
- opaque_win32_kmt: self.opaque_win32_kmt || rhs.opaque_win32_kmt,
- d3d12_fence: self.d3d12_fence || rhs.d3d12_fence,
- sync_fd: self.sync_fd || rhs.sync_fd,
- }
- }
-}
diff --git a/src/sync/semaphore/mod.rs b/src/sync/semaphore/mod.rs
deleted file mode 100644
index 6fc7688..0000000
--- a/src/sync/semaphore/mod.rs
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright (c) 2021 The vulkano developers
-// Licensed under the Apache License, Version 2.0
-// <LICENSE-APACHE or
-// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
-// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
-// at your option. All files in the project carrying such
-// notice may not be copied, modified, or distributed except
-// according to those terms.
-
-pub use self::external_semaphore_handle_type::ExternalSemaphoreHandleType;
-pub use self::semaphore::Semaphore;
-pub use self::semaphore::SemaphoreError;
-
-mod external_semaphore_handle_type;
-mod semaphore;
diff --git a/src/sync/semaphore/semaphore.rs b/src/sync/semaphore/semaphore.rs
deleted file mode 100644
index 29c9952..0000000
--- a/src/sync/semaphore/semaphore.rs
+++ /dev/null
@@ -1,355 +0,0 @@
-// Copyright (c) 2016 The vulkano developers
-// Licensed under the Apache License, Version 2.0
-// <LICENSE-APACHE or
-// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
-// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
-// at your option. All files in the project carrying such
-// notice may not be copied, modified, or distributed except
-// according to those terms.
-
-use crate::check_errors;
-use crate::device::Device;
-use crate::device::DeviceOwned;
-use crate::Error;
-use crate::OomError;
-use crate::SafeDeref;
-use crate::VulkanObject;
-use std::fmt;
-#[cfg(target_os = "linux")]
-use std::fs::File;
-use std::mem::MaybeUninit;
-#[cfg(target_os = "linux")]
-use std::os::unix::io::FromRawFd;
-use std::ptr;
-use std::sync::Arc;
-
-use crate::sync::semaphore::ExternalSemaphoreHandleType;
-
-/// Used to provide synchronization between command buffers during their execution.
-///
-/// It is similar to a fence, except that it is purely on the GPU side. The CPU can't query a
-/// semaphore's status or wait for it to be signaled.
-#[derive(Debug)]
-pub struct Semaphore<D = Arc<Device>>
-where
- D: SafeDeref<Target = Device>,
-{
- semaphore: ash::vk::Semaphore,
- device: D,
- must_put_in_pool: bool,
-}
-
-// TODO: Add support for VkExportSemaphoreWin32HandleInfoKHR
-// TODO: Add suport for importable semaphores
-pub struct SemaphoreBuilder<D = Arc<Device>>
-where
- D: SafeDeref<Target = Device>,
-{
- device: D,
- export_info: Option<ash::vk::ExportSemaphoreCreateInfo>,
- create: ash::vk::SemaphoreCreateInfo,
- must_put_in_pool: bool,
-}
-
-impl<D> SemaphoreBuilder<D>
-where
- D: SafeDeref<Target = Device>,
-{
- pub fn new(device: D) -> Self {
- let create = ash::vk::SemaphoreCreateInfo::default();
-
- Self {
- device,
- export_info: None,
- create,
- must_put_in_pool: false,
- }
- }
- /// Configures the semaphore to be added to the semaphore pool once it is destroyed.
- pub(crate) fn in_pool(mut self) -> Self {
- self.must_put_in_pool = true;
- self
- }
-
- /// Sets an optional field for exportable allocations in the `SemaphoreBuilder`.
- ///
- /// # Panic
- ///
- /// - Panics if the export info has already been set.
- pub fn export_info(mut self, handle_types: ExternalSemaphoreHandleType) -> Self {
- assert!(self.export_info.is_none());
- let export_info = ash::vk::ExportSemaphoreCreateInfo {
- handle_types: handle_types.into(),
- ..Default::default()
- };
-
- self.export_info = Some(export_info);
- self.create.p_next = unsafe { std::mem::transmute(&export_info) };
-
- self
- }
-
- pub fn build(self) -> Result<Semaphore<D>, SemaphoreError> {
- if self.export_info.is_some()
- && !self
- .device
- .instance()
- .enabled_extensions()
- .khr_external_semaphore_capabilities
- {
- Err(SemaphoreError::MissingExtension(
- "khr_external_semaphore_capabilities",
- ))
- } else {
- let semaphore = unsafe {
- let fns = self.device.fns();
- let mut output = MaybeUninit::uninit();
- check_errors(fns.v1_0.create_semaphore(
- self.device.internal_object(),
- &self.create,
- ptr::null(),
- output.as_mut_ptr(),
- ))?;
- output.assume_init()
- };
-
- Ok(Semaphore {
- device: self.device,
- semaphore,
- must_put_in_pool: self.must_put_in_pool,
- })
- }
- }
-}
-
-impl<D> Semaphore<D>
-where
- D: SafeDeref<Target = Device>,
-{
- /// Takes a semaphore from the vulkano-provided semaphore pool.
- /// If the pool is empty, a new semaphore will be allocated.
- /// Upon `drop`, the semaphore is put back into the pool.
- ///
- /// For most applications, using the pool should be preferred,
- /// in order to avoid creating new semaphores every frame.
- pub fn from_pool(device: D) -> Result<Semaphore<D>, SemaphoreError> {
- let maybe_raw_sem = device.semaphore_pool().lock().unwrap().pop();
- match maybe_raw_sem {
- Some(raw_sem) => Ok(Semaphore {
- device,
- semaphore: raw_sem,
- must_put_in_pool: true,
- }),
- None => {
- // Pool is empty, alloc new semaphore
- SemaphoreBuilder::new(device).in_pool().build()
- }
- }
- }
-
- /// Builds a new semaphore.
- #[inline]
- pub fn alloc(device: D) -> Result<Semaphore<D>, SemaphoreError> {
- SemaphoreBuilder::new(device).build()
- }
-
- /// Same as `alloc`, but allows exportable opaque file descriptor on Linux
- #[inline]
- #[cfg(target_os = "linux")]
- pub fn alloc_with_exportable_fd(device: D) -> Result<Semaphore<D>, SemaphoreError> {
- SemaphoreBuilder::new(device)
- .export_info(ExternalSemaphoreHandleType::posix())
- .build()
- }
-
- #[cfg(target_os = "linux")]
- pub fn export_opaque_fd(&self) -> Result<File, SemaphoreError> {
- let fns = self.device.fns();
-
- assert!(self.device.enabled_extensions().khr_external_semaphore);
- assert!(self.device.enabled_extensions().khr_external_semaphore_fd);
-
- let fd = unsafe {
- let info = ash::vk::SemaphoreGetFdInfoKHR {
- semaphore: self.semaphore,
- handle_type: ash::vk::ExternalSemaphoreHandleTypeFlagsKHR::OPAQUE_FD,
- ..Default::default()
- };
-
- let mut output = MaybeUninit::uninit();
- check_errors(fns.khr_external_semaphore_fd.get_semaphore_fd_khr(
- self.device.internal_object(),
- &info,
- output.as_mut_ptr(),
- ))?;
- output.assume_init()
- };
- let file = unsafe { File::from_raw_fd(fd) };
- Ok(file)
- }
-}
-
-unsafe impl DeviceOwned for Semaphore {
- #[inline]
- fn device(&self) -> &Arc<Device> {
- &self.device
- }
-}
-
-unsafe impl<D> VulkanObject for Semaphore<D>
-where
- D: SafeDeref<Target = Device>,
-{
- type Object = ash::vk::Semaphore;
-
- #[inline]
- fn internal_object(&self) -> ash::vk::Semaphore {
- self.semaphore
- }
-}
-
-impl<D> Drop for Semaphore<D>
-where
- D: SafeDeref<Target = Device>,
-{
- #[inline]
- fn drop(&mut self) {
- unsafe {
- if self.must_put_in_pool {
- let raw_sem = self.semaphore;
- self.device.semaphore_pool().lock().unwrap().push(raw_sem);
- } else {
- let fns = self.device.fns();
- fns.v1_0.destroy_semaphore(
- self.device.internal_object(),
- self.semaphore,
- ptr::null(),
- );
- }
- }
- }
-}
-
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum SemaphoreError {
- /// Not enough memory available.
- OomError(OomError),
- /// An extensions is missing.
- MissingExtension(&'static str),
-}
-
-impl fmt::Display for SemaphoreError {
- fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- match *self {
- SemaphoreError::OomError(_) => write!(fmt, "not enough memory available"),
- SemaphoreError::MissingExtension(s) => {
- write!(fmt, "Missing the following extension: {}", s)
- }
- }
- }
-}
-
-impl From<Error> for SemaphoreError {
- #[inline]
- fn from(err: Error) -> SemaphoreError {
- match err {
- e @ Error::OutOfHostMemory | e @ Error::OutOfDeviceMemory => {
- SemaphoreError::OomError(e.into())
- }
- _ => panic!("unexpected error: {:?}", err),
- }
- }
-}
-
-impl std::error::Error for SemaphoreError {
- #[inline]
- fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
- match *self {
- SemaphoreError::OomError(ref err) => Some(err),
- _ => None,
- }
- }
-}
-
-impl From<OomError> for SemaphoreError {
- #[inline]
- fn from(err: OomError) -> SemaphoreError {
- SemaphoreError::OomError(err)
- }
-}
-
-#[cfg(test)]
-mod tests {
- use crate::device::physical::PhysicalDevice;
- use crate::device::{Device, DeviceExtensions};
- use crate::instance::{Instance, InstanceExtensions};
- use crate::VulkanObject;
- use crate::{sync::Semaphore, Version};
-
- #[test]
- fn semaphore_create() {
- let (device, _) = gfx_dev_and_queue!();
- let _ = Semaphore::alloc(device.clone());
- }
-
- #[test]
- fn semaphore_pool() {
- let (device, _) = gfx_dev_and_queue!();
-
- assert_eq!(device.semaphore_pool().lock().unwrap().len(), 0);
- let sem1_internal_obj = {
- let sem = Semaphore::from_pool(device.clone()).unwrap();
- assert_eq!(device.semaphore_pool().lock().unwrap().len(), 0);
- sem.internal_object()
- };
-
- assert_eq!(device.semaphore_pool().lock().unwrap().len(), 1);
- let sem2 = Semaphore::from_pool(device.clone()).unwrap();
- assert_eq!(device.semaphore_pool().lock().unwrap().len(), 0);
- assert_eq!(sem2.internal_object(), sem1_internal_obj);
- }
-
- #[test]
- #[cfg(target_os = "linux")]
- fn semaphore_export() {
- let supported_ext = InstanceExtensions::supported_by_core().unwrap();
- if supported_ext.khr_get_display_properties2
- && supported_ext.khr_external_semaphore_capabilities
- {
- let instance = Instance::new(
- None,
- Version::V1_1,
- &InstanceExtensions {
- khr_get_physical_device_properties2: true,
- khr_external_semaphore_capabilities: true,
- ..InstanceExtensions::none()
- },
- None,
- )
- .unwrap();
-
- let physical = PhysicalDevice::enumerate(&instance).next().unwrap();
-
- let queue_family = physical.queue_families().next().unwrap();
-
- let device_ext = DeviceExtensions {
- khr_external_semaphore: true,
- khr_external_semaphore_fd: true,
- ..DeviceExtensions::none()
- };
- let (device, _) = Device::new(
- physical,
- physical.supported_features(),
- &device_ext,
- [(queue_family, 0.5)].iter().cloned(),
- )
- .unwrap();
-
- let supported_ext = physical.supported_extensions();
- if supported_ext.khr_external_semaphore && supported_ext.khr_external_semaphore_fd {
- let sem = Semaphore::alloc_with_exportable_fd(device.clone()).unwrap();
- let fd = sem.export_opaque_fd().unwrap();
- }
- }
- }
-}