author     Jon Ashburn <jon@lunarg.com>    2016-03-08 17:48:44 -0700
committer  Jon Ashburn <jon@lunarg.com>    2016-03-08 17:48:44 -0700
commit     491a3cd11793892b996a8b5771479cc539198f99 (patch)
tree       d0a3fb1fa8659b6ab78f63ef8d7ffba505050b0a
parent     c3fbcf827392dd7ad90b9b960931a458228679fb (diff)
download   vulkan-validation-layers-491a3cd11793892b996a8b5771479cc539198f99.tar.gz
layers: clang-format layers directory
Change-Id: I318e3759829f33441e57aafedec1e9ec06d658c4
-rw-r--r--  layers/.clang-format                     6
-rw-r--r--  layers/device_limits.cpp               574
-rw-r--r--  layers/device_limits.h                  56
-rw-r--r--  layers/draw_state.cpp                 6529
-rw-r--r--  layers/draw_state.h                    632
-rw-r--r--  layers/image.cpp                       969
-rw-r--r--  layers/image.h                          38
-rw-r--r--  layers/mem_tracker.cpp                2498
-rw-r--r--  layers/mem_tracker.h                   125
-rw-r--r--  layers/object_tracker.h                752
-rw-r--r--  layers/param_checker.cpp              6010
-rw-r--r--  layers/param_checker_utils.h           193
-rw-r--r--  layers/swapchain.cpp                  1537
-rw-r--r--  layers/swapchain.h                     224
-rw-r--r--  layers/threading.cpp                   192
-rw-r--r--  layers/threading.h                     145
-rw-r--r--  layers/unique_objects.h                336
-rw-r--r--  layers/vk_layer_config.cpp              96
-rw-r--r--  layers/vk_layer_config.h                 2
-rw-r--r--  layers/vk_layer_data.h                  10
-rw-r--r--  layers/vk_layer_extension_utils.cpp     16
-rw-r--r--  layers/vk_layer_extension_utils.h       15
-rw-r--r--  layers/vk_layer_logging.h              191
-rw-r--r--  layers/vk_layer_table.cpp              103
-rw-r--r--  layers/vk_layer_table.h                 22
-rw-r--r--  layers/vk_layer_utils.cpp              465
-rw-r--r--  layers/vk_layer_utils.h                131
27 files changed, 9194 insertions, 12673 deletions
diff --git a/layers/.clang-format b/layers/.clang-format
new file mode 100644
index 000000000..cd70ac163
--- /dev/null
+++ b/layers/.clang-format
@@ -0,0 +1,6 @@
+---
+# We'll use defaults from the LLVM style, but with 4 columns indentation.
+BasedOnStyle: LLVM
+IndentWidth: 4
+ColumnLimit: 132
+...
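
The settings above keep the LLVM defaults but indent by four spaces and allow lines up to 132 columns, which is why the reformatted hunks below pack long signatures and log_msg() calls onto wide lines. As a rough illustration only (hypothetical code, not part of this commit), a function formatted under BasedOnStyle: LLVM with IndentWidth: 4 and ColumnLimit: 132 comes out in the shape seen throughout this diff:

#include <vulkan/vulkan.h>

// Hypothetical sketch: with ColumnLimit 132 the whole signature fits on a single line, and IndentWidth 4 indents the body.
static VkResult copy_extension_count(uint32_t available_count, const VkExtensionProperties *pAvailable, uint32_t *pCount) {
    if (pCount == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }
    (void)pAvailable; // the property contents themselves are ignored in this sketch
    *pCount = available_count;
    return VK_SUCCESS;
}
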
diff --git a/layers/device_limits.cpp b/layers/device_limits.cpp
index c82631430..75422bc5e 100644
--- a/layers/device_limits.cpp
+++ b/layers/device_limits.cpp
@@ -54,15 +54,15 @@
// This struct will be stored in a map hashed by the dispatchable object
struct layer_data {
- debug_report_data *report_data;
- std::vector<VkDebugReportCallbackEXT> logging_callback;
- VkLayerDispatchTable *device_dispatch_table;
- VkLayerInstanceDispatchTable *instance_dispatch_table;
+ debug_report_data *report_data;
+ std::vector<VkDebugReportCallbackEXT> logging_callback;
+ VkLayerDispatchTable *device_dispatch_table;
+ VkLayerInstanceDispatchTable *instance_dispatch_table;
// Track state of each instance
- unique_ptr<INSTANCE_STATE> instanceState;
- unique_ptr<PHYSICAL_DEVICE_STATE> physicalDeviceState;
- VkPhysicalDeviceFeatures actualPhysicalDeviceFeatures;
- VkPhysicalDeviceFeatures requestedPhysicalDeviceFeatures;
+ unique_ptr<INSTANCE_STATE> instanceState;
+ unique_ptr<PHYSICAL_DEVICE_STATE> physicalDeviceState;
+ VkPhysicalDeviceFeatures actualPhysicalDeviceFeatures;
+ VkPhysicalDeviceFeatures requestedPhysicalDeviceFeatures;
unordered_map<VkDevice, VkPhysicalDeviceProperties> physDevPropertyMap;
// Track physical device per logical device
@@ -70,16 +70,9 @@ struct layer_data {
// Vector indices correspond to queueFamilyIndex
vector<unique_ptr<VkQueueFamilyProperties>> queueFamilyProperties;
- layer_data() :
- report_data(nullptr),
- device_dispatch_table(nullptr),
- instance_dispatch_table(nullptr),
- instanceState(nullptr),
- physicalDeviceState(nullptr),
- actualPhysicalDeviceFeatures(),
- requestedPhysicalDeviceFeatures(),
- physicalDevice()
- {};
+ layer_data()
+ : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), instanceState(nullptr),
+ physicalDeviceState(nullptr), actualPhysicalDeviceFeatures(), requestedPhysicalDeviceFeatures(), physicalDevice(){};
};
static unordered_map<void *, layer_data *> layer_data_map;
@@ -88,12 +81,9 @@ static unordered_map<void *, layer_data *> layer_data_map;
static int globalLockInitialized = 0;
static loader_platform_thread_mutex globalLock;
-template layer_data *get_my_data_ptr<layer_data>(
- void *data_key,
- std::unordered_map<void *, layer_data *> &data_map);
+template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);
-static void init_device_limits(layer_data *my_data, const VkAllocationCallbacks *pAllocator)
-{
+static void init_device_limits(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
uint32_t report_flags = 0;
uint32_t debug_action = 0;
FILE *log_output = NULL;
@@ -101,10 +91,9 @@ static void init_device_limits(layer_data *my_data, const VkAllocationCallbacks
VkDebugReportCallbackEXT callback;
// initialize device_limits options
report_flags = getLayerOptionFlags("lunarg_device_limits.report_flags", 0);
- getLayerOptionEnum("lunarg_device_limits.debug_action", (uint32_t *) &debug_action);
+ getLayerOptionEnum("lunarg_device_limits.debug_action", (uint32_t *)&debug_action);
- if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
- {
+ if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG) {
option_str = getLayerOption("lunarg_device_limits.log_filename");
log_output = getLayerLogOutput(option_str, "lunarg_device_limits");
VkDebugReportCallbackCreateInfoEXT dbgCreateInfo;
@@ -112,7 +101,7 @@ static void init_device_limits(layer_data *my_data, const VkAllocationCallbacks
dbgCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
dbgCreateInfo.flags = report_flags;
dbgCreateInfo.pfnCallback = log_callback;
- dbgCreateInfo.pUserData = (void *) log_output;
+ dbgCreateInfo.pUserData = (void *)log_output;
layer_create_msg_callback(my_data->report_data, &dbgCreateInfo, pAllocator, &callback);
my_data->logging_callback.push_back(callback);
}
@@ -128,8 +117,7 @@ static void init_device_limits(layer_data *my_data, const VkAllocationCallbacks
my_data->logging_callback.push_back(callback);
}
- if (!globalLockInitialized)
- {
+ if (!globalLockInitialized) {
// TODO/TBD: Need to delete this mutex sometime. How??? One
// suggestion is to call this during vkCreateInstance(), and then we
// can clean it up during vkDestroyInstance(). However, that requires
@@ -140,68 +128,46 @@ static void init_device_limits(layer_data *my_data, const VkAllocationCallbacks
}
}
-static const VkExtensionProperties instance_extensions[] = {
- {
- VK_EXT_DEBUG_REPORT_EXTENSION_NAME,
- VK_EXT_DEBUG_REPORT_SPEC_VERSION
- }
-};
+static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(
- const char *pLayerName,
- uint32_t *pCount,
- VkExtensionProperties* pProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
-vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
- const char *pLayerName, uint32_t *pCount,
- VkExtensionProperties *pProperties) {
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
+ const char *pLayerName, uint32_t *pCount,
+ VkExtensionProperties *pProperties) {
if (pLayerName == NULL) {
dispatch_key key = get_dispatch_key(physicalDevice);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
- return my_data->instance_dispatch_table
- ->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount,
- pProperties);
+ return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
} else {
return util_GetExtensionProperties(0, nullptr, pCount, pProperties);
}
}
-static const VkLayerProperties dl_global_layers[] = {
- {
- "VK_LAYER_LUNARG_device_limits",
- VK_API_VERSION,
- 1,
- "LunarG Validation Layer",
- }
-};
+static const VkLayerProperties dl_global_layers[] = {{
+ "VK_LAYER_LUNARG_device_limits", VK_API_VERSION, 1, "LunarG Validation Layer",
+}};
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(
- uint32_t *pCount,
- VkLayerProperties* pProperties)
-{
- return util_GetLayerProperties(ARRAY_SIZE(dl_global_layers),
- dl_global_layers,
- pCount, pProperties);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
+ return util_GetLayerProperties(ARRAY_SIZE(dl_global_layers), dl_global_layers, pCount, pProperties);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(
- VkPhysicalDevice physicalDevice, uint32_t *pCount,
- VkLayerProperties *pProperties) {
- return util_GetLayerProperties(ARRAY_SIZE(dl_global_layers),
- dl_global_layers, pCount, pProperties);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
+ return util_GetLayerProperties(ARRAY_SIZE(dl_global_layers), dl_global_layers, pCount, pProperties);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
- PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance) fpGetInstanceProcAddr(NULL, "vkCreateInstance");
+ PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
if (fpCreateInstance == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -217,11 +183,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstance
my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);
- my_data->report_data = debug_report_create_instance(
- my_data->instance_dispatch_table,
- *pInstance,
- pCreateInfo->enabledExtensionCount,
- pCreateInfo->ppEnabledExtensionNames);
+ my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
+ pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
init_device_limits(my_data, pAllocator);
my_data->instanceState = unique_ptr<INSTANCE_STATE>(new INSTANCE_STATE());
@@ -230,8 +193,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstance
}
/* hook DestroyInstance to remove tableInstanceMap entry */
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
dispatch_key key = get_dispatch_key(instance);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
@@ -254,8 +216,8 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices) {
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
if (my_data->instanceState) {
@@ -265,117 +227,143 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices(VkInst
} else {
if (UNCALLED == my_data->instanceState->vkEnumeratePhysicalDevicesState) {
// Flag error here, shouldn't be calling this without having queried count
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
- "Invalid call sequence to vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0,
+ __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
+ "Invalid call sequence to vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
+ "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
} // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
else if (my_data->instanceState->physicalDevicesCount != *pPhysicalDeviceCount) {
// TODO: Having actual count match count from app is not a requirement, so this can be a warning
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
- "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count supported by this instance is %u.", *pPhysicalDeviceCount, my_data->instanceState->physicalDevicesCount);
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
+ "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
+ "supported by this instance is %u.",
+ *pPhysicalDeviceCount, my_data->instanceState->physicalDevicesCount);
}
my_data->instanceState->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
}
if (skipCall)
return VK_ERROR_VALIDATION_FAILED_EXT;
- VkResult result = my_data->instance_dispatch_table->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
+ VkResult result =
+ my_data->instance_dispatch_table->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
if (NULL == pPhysicalDevices) {
my_data->instanceState->physicalDevicesCount = *pPhysicalDeviceCount;
} else { // Save physical devices
- for (uint32_t i=0; i < *pPhysicalDeviceCount; i++) {
+ for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(pPhysicalDevices[i]), layer_data_map);
phy_dev_data->physicalDeviceState = unique_ptr<PHYSICAL_DEVICE_STATE>(new PHYSICAL_DEVICE_STATE());
// Init actual features for each physical device
- my_data->instance_dispatch_table->GetPhysicalDeviceFeatures(pPhysicalDevices[i], &(phy_dev_data->actualPhysicalDeviceFeatures));
+ my_data->instance_dispatch_table->GetPhysicalDeviceFeatures(pPhysicalDevices[i],
+ &(phy_dev_data->actualPhysicalDeviceFeatures));
}
}
return result;
} else {
- log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, DEVLIMITS_INVALID_INSTANCE, "DL",
- "Invalid instance (%#" PRIxLEAST64 ") passed into vkEnumeratePhysicalDevices().", (uint64_t)instance);
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__,
+ DEVLIMITS_INVALID_INSTANCE, "DL", "Invalid instance (%#" PRIxLEAST64 ") passed into vkEnumeratePhysicalDevices().",
+ (uint64_t)instance);
}
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures *pFeatures) {
layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceFeaturesState = QUERY_DETAILS;
phy_dev_data->instance_dispatch_table->GetPhysicalDeviceFeatures(physicalDevice, pFeatures);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties)
-{
- get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map)->instance_dispatch_table->GetPhysicalDeviceFormatProperties(
- physicalDevice, format, pFormatProperties);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties *pFormatProperties) {
+ get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map)
+ ->instance_dispatch_table->GetPhysicalDeviceFormatProperties(physicalDevice, format, pFormatProperties);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties)
-{
- return get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map)->instance_dispatch_table->GetPhysicalDeviceImageFormatProperties(physicalDevice, format, type, tiling, usage, flags, pImageFormatProperties);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkGetPhysicalDeviceImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling,
+ VkImageUsageFlags usage, VkImageCreateFlags flags,
+ VkImageFormatProperties *pImageFormatProperties) {
+ return get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map)
+ ->instance_dispatch_table->GetPhysicalDeviceImageFormatProperties(physicalDevice, format, type, tiling, usage, flags,
+ pImageFormatProperties);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties *pProperties) {
layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
phy_dev_data->instance_dispatch_table->GetPhysicalDeviceProperties(physicalDevice, pProperties);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t* pCount, VkQueueFamilyProperties* pQueueFamilyProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
+ VkQueueFamilyProperties *pQueueFamilyProperties) {
VkBool32 skipCall = VK_FALSE;
layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
if (phy_dev_data->physicalDeviceState) {
if (NULL == pQueueFamilyProperties) {
phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
} else {
- // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to get count
+ // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to
+ // get count
if (UNCALLED == phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
- skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
- "Invalid call sequence to vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ NULL pQueueFamilyProperties to query pCount.");
+ skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
+ "Invalid call sequence to vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL "
+ "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ "
+ "NULL pQueueFamilyProperties to query pCount.");
}
// Then verify that pCount that is passed in on second call matches what was returned
if (phy_dev_data->physicalDeviceState->queueFamilyPropertiesCount != *pCount) {
- // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so provide as warning
- skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
- "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count supported by this physicalDevice is %u.", *pCount, phy_dev_data->physicalDeviceState->queueFamilyPropertiesCount);
+ // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
+ // provide as warning
+ skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
+ "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count "
+ "supported by this physicalDevice is %u.",
+ *pCount, phy_dev_data->physicalDeviceState->queueFamilyPropertiesCount);
}
phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
}
if (skipCall)
return;
- phy_dev_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, pQueueFamilyProperties);
+ phy_dev_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount,
+ pQueueFamilyProperties);
if (NULL == pQueueFamilyProperties) {
phy_dev_data->physicalDeviceState->queueFamilyPropertiesCount = *pCount;
} else { // Save queue family properties
phy_dev_data->queueFamilyProperties.reserve(*pCount);
- for (uint32_t i=0; i < *pCount; i++) {
+ for (uint32_t i = 0; i < *pCount; i++) {
phy_dev_data->queueFamilyProperties.emplace_back(new VkQueueFamilyProperties(pQueueFamilyProperties[i]));
}
}
return;
} else {
- log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_PHYSICAL_DEVICE, "DL",
- "Invalid physicalDevice (%#" PRIxLEAST64 ") passed into vkGetPhysicalDeviceQueueFamilyProperties().", (uint64_t)physicalDevice);
+ log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
+ __LINE__, DEVLIMITS_INVALID_PHYSICAL_DEVICE, "DL",
+ "Invalid physicalDevice (%#" PRIxLEAST64 ") passed into vkGetPhysicalDeviceQueueFamilyProperties().",
+ (uint64_t)physicalDevice);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties)
-{
- get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map)->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
+ get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map)
+ ->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pNumProperties, VkSparseImageFormatProperties* pProperties)
-{
- get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map)->instance_dispatch_table->GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling, pNumProperties, pProperties);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type,
+ VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling,
+ uint32_t *pNumProperties, VkSparseImageFormatProperties *pProperties) {
+ get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map)
+ ->instance_dispatch_table->GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage,
+ tiling, pNumProperties, pProperties);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport(
- VkCommandBuffer commandBuffer,
- uint32_t firstViewport,
- uint32_t viewportCount,
- const VkViewport* pViewports)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
VkBool32 skipCall = VK_FALSE;
/* TODO: Verify viewportCount < maxViewports from VkPhysicalDeviceLimits */
if (VK_FALSE == skipCall) {
@@ -384,12 +372,8 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport(
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor(
- VkCommandBuffer commandBuffer,
- uint32_t firstScissor,
- uint32_t scissorCount,
- const VkRect2D* pScissors)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
VkBool32 skipCall = VK_FALSE;
/* TODO: Verify scissorCount < maxViewports from VkPhysicalDeviceLimits */
/* TODO: viewportCount and scissorCount must match at draw time */
@@ -400,56 +384,71 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor(
}
// Verify that features have been queried and verify that requested features are available
-static VkBool32 validate_features_request(layer_data *phy_dev_data)
-{
+static VkBool32 validate_features_request(layer_data *phy_dev_data) {
VkBool32 skipCall = VK_FALSE;
// Verify that all of the requested features are available
// Get ptrs into actual and requested structs and if requested is 1 but actual is 0, request is invalid
- VkBool32* actual = (VkBool32*)&(phy_dev_data->actualPhysicalDeviceFeatures);
- VkBool32* requested = (VkBool32*)&(phy_dev_data->requestedPhysicalDeviceFeatures);
+ VkBool32 *actual = (VkBool32 *)&(phy_dev_data->actualPhysicalDeviceFeatures);
+ VkBool32 *requested = (VkBool32 *)&(phy_dev_data->requestedPhysicalDeviceFeatures);
// TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
// Need to provide the struct member name with the issue. To do that seems like we'll
// have to loop through each struct member which should be done w/ codegen to keep in synch.
uint32_t errors = 0;
- uint32_t totalBools = sizeof(VkPhysicalDeviceFeatures)/sizeof(VkBool32);
+ uint32_t totalBools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
for (uint32_t i = 0; i < totalBools; i++) {
if (requested[i] > actual[i]) {
- skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
- "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, which is not available on this device.", i);
+ skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
+ "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
+ "which is not available on this device.",
+ i);
errors++;
}
}
if (errors && (UNCALLED == phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceFeaturesState)) {
// If user didn't request features, notify them that they should
// TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
- skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
- "You requested features that are unavailable on this device. You should first query feature availability by calling vkGetPhysicalDeviceFeatures().");
+ skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED, "DL",
+ "You requested features that are unavailable on this device. You should first query feature "
+ "availability by calling vkGetPhysicalDeviceFeatures().");
}
return skipCall;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
VkBool32 skipCall = VK_FALSE;
layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
// First check is app has actually requested queueFamilyProperties
if (!phy_dev_data->physicalDeviceState) {
- skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
- "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
+ skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
+ "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
} else if (QUERY_DETAILS != phy_dev_data->physicalDeviceState->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
// TODO: This is not called out as an invalid use in the spec so make more informative recommendation.
- skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
- "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
+ skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
+ "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
} else {
// Check that the requested queue properties are valid
- for (uint32_t i=0; i<pCreateInfo->queueCreateInfoCount; i++) {
+ for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
uint32_t requestedIndex = pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex;
- if (phy_dev_data->queueFamilyProperties.size() <= requestedIndex) { // requested index is out of bounds for this physical device
- skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
+ if (phy_dev_data->queueFamilyProperties.size() <=
+ requestedIndex) { // requested index is out of bounds for this physical device
+ skipCall |= log_msg(
+ phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
+ __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
"Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
- } else if (pCreateInfo->pQueueCreateInfos[i].queueCount > phy_dev_data->queueFamilyProperties[requestedIndex]->queueCount) {
- skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
- "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but requested queueCount is %u.", requestedIndex, phy_dev_data->queueFamilyProperties[requestedIndex]->queueCount, pCreateInfo->pQueueCreateInfos[i].queueCount);
+ } else if (pCreateInfo->pQueueCreateInfos[i].queueCount >
+ phy_dev_data->queueFamilyProperties[requestedIndex]->queueCount) {
+ skipCall |=
+ log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
+ "DL", "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
+ "requested queueCount is %u.",
+ requestedIndex, phy_dev_data->queueFamilyProperties[requestedIndex]->queueCount,
+ pCreateInfo->pQueueCreateInfos[i].queueCount);
}
}
}
@@ -466,7 +465,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice g
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
- PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice) fpGetInstanceProcAddr(NULL, "vkCreateDevice");
+ PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
if (fpCreateDevice == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -491,8 +490,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice g
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
// Free device lifetime allocations
dispatch_key key = get_dispatch_key(device);
layer_data *my_device_data = get_my_data_ptr(key, layer_data_map);
@@ -501,93 +499,102 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, cons
layer_data_map.erase(key);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkCommandPool *pCommandPool) {
// TODO : Verify that requested QueueFamilyIndex for this pool exists
- VkResult result = get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
+ VkResult result = get_my_data_ptr(get_dispatch_key(device), layer_data_map)
+ ->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator)
-{
- get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
+ get_my_data_ptr(get_dispatch_key(device), layer_data_map)
+ ->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags)
-{
- VkResult result = get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
+ VkResult result = get_my_data_ptr(get_dispatch_key(device), layer_data_map)
+ ->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo* pCreateInfo, VkCommandBuffer* pCommandBuffer)
-{
- VkResult result = get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
+ VkResult result = get_my_data_ptr(get_dispatch_key(device), layer_data_map)
+ ->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t count, const VkCommandBuffer* pCommandBuffers)
-{
- get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->FreeCommandBuffers(device, commandPool, count, pCommandBuffers);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t count, const VkCommandBuffer *pCommandBuffers) {
+ get_my_data_ptr(get_dispatch_key(device), layer_data_map)
+ ->device_dispatch_table->FreeCommandBuffers(device, commandPool, count, pCommandBuffers);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
bool skipCall = false;
layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
if (dev_data->actualPhysicalDeviceFeatures.inheritedQueries == VK_FALSE && pInfo && pInfo->occlusionQueryEnable != VK_FALSE) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer), __LINE__,
- DEVLIMITS_INVALID_INHERITED_QUERY, "DL",
- "Cannot set inherited occlusionQueryEnable in vkBeginCommandBuffer() when device does not support inheritedQueries.");
+ skipCall |= log_msg(
+ dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DEVLIMITS_INVALID_INHERITED_QUERY, "DL",
+ "Cannot set inherited occlusionQueryEnable in vkBeginCommandBuffer() when device does not support inheritedQueries.");
}
if (dev_data->actualPhysicalDeviceFeatures.inheritedQueries != VK_FALSE && pInfo && pInfo->occlusionQueryEnable != VK_FALSE &&
!validate_VkQueryControlFlagBits(VkQueryControlFlagBits(pInfo->queryFlags))) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer), __LINE__,
- DEVLIMITS_INVALID_INHERITED_QUERY, "DL",
- "Cannot enable in occlusion queries in vkBeginCommandBuffer() and set queryFlags to %d which is not a valid combination of VkQueryControlFlagBits.",
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DEVLIMITS_INVALID_INHERITED_QUERY, "DL",
+ "Cannot enable in occlusion queries in vkBeginCommandBuffer() and set queryFlags to %d which is not a "
+ "valid combination of VkQueryControlFlagBits.",
pInfo->queryFlags);
}
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
if (!skipCall)
- result = dev_data->device_dispatch_table->BeginCommandBuffer(
- commandBuffer, pBeginInfo);
+ result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
VkBool32 skipCall = VK_FALSE;
layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkPhysicalDevice gpu = dev_data->physicalDevice;
layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
- if (queueFamilyIndex >= phy_dev_data->queueFamilyProperties.size()) { // requested index is out of bounds for this physical device
- skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
- "Invalid queueFamilyIndex %u requested in vkGetDeviceQueue().", queueFamilyIndex);
+ if (queueFamilyIndex >=
+ phy_dev_data->queueFamilyProperties.size()) { // requested index is out of bounds for this physical device
+ skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
+ "DL", "Invalid queueFamilyIndex %u requested in vkGetDeviceQueue().", queueFamilyIndex);
} else if (queueIndex >= phy_dev_data->queueFamilyProperties[queueFamilyIndex]->queueCount) {
- skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
- "Invalid queue request in vkGetDeviceQueue(). QueueFamilyIndex %u only has %u queues, but requested queueIndex is %u.", queueFamilyIndex, phy_dev_data->queueFamilyProperties[queueFamilyIndex]->queueCount, queueIndex);
+ skipCall |= log_msg(
+ phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
+ DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
+ "Invalid queue request in vkGetDeviceQueue(). QueueFamilyIndex %u only has %u queues, but requested queueIndex is %u.",
+ queueFamilyIndex, phy_dev_data->queueFamilyProperties[queueFamilyIndex]->queueCount, queueIndex);
}
if (skipCall)
return;
dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory(
- VkDevice device,
- VkBuffer buffer,
- VkDeviceMemory mem,
- VkDeviceSize memoryOffset)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
VkDeviceSize uniformAlignment = dev_data->physDevPropertyMap[device].limits.minUniformBufferOffsetAlignment;
if (vk_safe_modulo(memoryOffset, uniformAlignment) != 0) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
- __LINE__, DEVLIMITS_INVALID_UNIFORM_BUFFER_OFFSET, "DL",
- "vkBindBufferMemory(): memoryOffset %#" PRIxLEAST64 " must be a multiple of device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
- memoryOffset, uniformAlignment);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
+ 0, __LINE__, DEVLIMITS_INVALID_UNIFORM_BUFFER_OFFSET, "DL",
+ "vkBindBufferMemory(): memoryOffset %#" PRIxLEAST64
+ " must be a multiple of device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
+ memoryOffset, uniformAlignment);
}
if (VK_FALSE == skipCall) {
@@ -596,60 +603,56 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets(
- VkDevice device,
- uint32_t descriptorWriteCount,
- const VkWriteDescriptorSet *pDescriptorWrites,
- uint32_t descriptorCopyCount,
- const VkCopyDescriptorSet *pDescriptorCopies)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkBool32 skipCall = VK_FALSE;
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
+ uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ VkBool32 skipCall = VK_FALSE;
for (uint32_t i = 0; i < descriptorWriteCount; i++) {
- if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
- (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)) {
+ if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
+ (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)) {
VkDeviceSize uniformAlignment = dev_data->physDevPropertyMap[device].limits.minUniformBufferOffsetAlignment;
for (uint32_t j = 0; j < pDescriptorWrites[i].descriptorCount; j++) {
if (vk_safe_modulo(pDescriptorWrites[i].pBufferInfo[j].offset, uniformAlignment) != 0) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
- __LINE__, DEVLIMITS_INVALID_UNIFORM_BUFFER_OFFSET, "DL",
- "vkUpdateDescriptorSets(): pDescriptorWrites[%d].pBufferInfo[%d].offset (%#" PRIxLEAST64 ") must be a multiple of device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
- i, j, pDescriptorWrites[i].pBufferInfo[j].offset, uniformAlignment);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
+ DEVLIMITS_INVALID_UNIFORM_BUFFER_OFFSET, "DL",
+ "vkUpdateDescriptorSets(): pDescriptorWrites[%d].pBufferInfo[%d].offset (%#" PRIxLEAST64
+ ") must be a multiple of device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
+ i, j, pDescriptorWrites[i].pBufferInfo[j].offset, uniformAlignment);
}
}
- } else if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) ||
- (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
+ } else if ((pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) ||
+ (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
VkDeviceSize storageAlignment = dev_data->physDevPropertyMap[device].limits.minStorageBufferOffsetAlignment;
for (uint32_t j = 0; j < pDescriptorWrites[i].descriptorCount; j++) {
if (vk_safe_modulo(pDescriptorWrites[i].pBufferInfo[j].offset, storageAlignment) != 0) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
- __LINE__, DEVLIMITS_INVALID_STORAGE_BUFFER_OFFSET, "DL",
- "vkUpdateDescriptorSets(): pDescriptorWrites[%d].pBufferInfo[%d].offset (%#" PRIxLEAST64 ") must be a multiple of device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
- i, j, pDescriptorWrites[i].pBufferInfo[j].offset, storageAlignment);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
+ DEVLIMITS_INVALID_STORAGE_BUFFER_OFFSET, "DL",
+ "vkUpdateDescriptorSets(): pDescriptorWrites[%d].pBufferInfo[%d].offset (%#" PRIxLEAST64
+ ") must be a multiple of device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
+ i, j, pDescriptorWrites[i].pBufferInfo[j].offset, storageAlignment);
}
}
}
}
if (skipCall == VK_FALSE) {
- dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
+ dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
+ pDescriptorCopies);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(
- VkCommandBuffer commandBuffer,
- VkBuffer dstBuffer,
- VkDeviceSize dstOffset,
- VkDeviceSize dataSize,
- const uint32_t* pData)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
+ VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
// dstOffset is the byte offset into the buffer to start updating and must be a multiple of 4.
if (dstOffset & 3) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "DL",
- "vkCmdUpdateBuffer parameter, VkDeviceSize dstOffset, is not a multiple of 4")) {
+ "vkCmdUpdateBuffer parameter, VkDeviceSize dstOffset, is not a multiple of 4")) {
return;
}
}
@@ -658,7 +661,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(
if (dataSize & 3) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "DL",
- "vkCmdUpdateBuffer parameter, VkDeviceSize dataSize, is not a multiple of 4")) {
+ "vkCmdUpdateBuffer parameter, VkDeviceSize dataSize, is not a multiple of 4")) {
return;
}
}
@@ -666,20 +669,15 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(
dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(
- VkCommandBuffer commandBuffer,
- VkBuffer dstBuffer,
- VkDeviceSize dstOffset,
- VkDeviceSize size,
- uint32_t data)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
// dstOffset is the byte offset into the buffer to start filling and must be a multiple of 4.
if (dstOffset & 3) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "DL",
- "vkCmdFillBuffer parameter, VkDeviceSize dstOffset, is not a multiple of 4")) {
+ "vkCmdFillBuffer parameter, VkDeviceSize dstOffset, is not a multiple of 4")) {
return;
}
}
@@ -688,7 +686,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(
if (size & 3) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "DL",
- "vkCmdFillBuffer parameter, VkDeviceSize size, is not a multiple of 4")) {
+ "vkCmdFillBuffer parameter, VkDeviceSize size, is not a multiple of 4")) {
return;
}
}
@@ -696,12 +694,9 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(
dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
- VkInstance instance,
- const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDebugReportCallbackEXT* pMsgCallback)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
VkResult res = my_data->instance_dispatch_table->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
if (VK_SUCCESS == res) {
@@ -710,64 +705,55 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
return res;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
- VkInstance instance,
- VkDebugReportCallbackEXT msgCallback,
- const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
+ VkDebugReportCallbackEXT msgCallback,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
my_data->instance_dispatch_table->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(
- VkInstance instance,
- VkDebugReportFlagsEXT flags,
- VkDebugReportObjectTypeEXT objType,
- uint64_t object,
- size_t location,
- int32_t msgCode,
- const char* pLayerPrefix,
- const char* pMsg)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
+ size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
- my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
+ my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
+ pMsg);
}
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char* funcName)
-{
+VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
if (!strcmp(funcName, "vkGetDeviceProcAddr"))
- return (PFN_vkVoidFunction) vkGetDeviceProcAddr;
+ return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
if (!strcmp(funcName, "vkDestroyDevice"))
- return (PFN_vkVoidFunction) vkDestroyDevice;
+ return (PFN_vkVoidFunction)vkDestroyDevice;
if (!strcmp(funcName, "vkGetDeviceQueue"))
- return (PFN_vkVoidFunction) vkGetDeviceQueue;
+ return (PFN_vkVoidFunction)vkGetDeviceQueue;
if (!strcmp(funcName, "CreateCommandPool"))
- return (PFN_vkVoidFunction) vkCreateCommandPool;
+ return (PFN_vkVoidFunction)vkCreateCommandPool;
if (!strcmp(funcName, "DestroyCommandPool"))
- return (PFN_vkVoidFunction) vkDestroyCommandPool;
+ return (PFN_vkVoidFunction)vkDestroyCommandPool;
if (!strcmp(funcName, "ResetCommandPool"))
- return (PFN_vkVoidFunction) vkResetCommandPool;
+ return (PFN_vkVoidFunction)vkResetCommandPool;
if (!strcmp(funcName, "vkAllocateCommandBuffers"))
- return (PFN_vkVoidFunction) vkAllocateCommandBuffers;
+ return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
if (!strcmp(funcName, "vkFreeCommandBuffers"))
- return (PFN_vkVoidFunction) vkFreeCommandBuffers;
+ return (PFN_vkVoidFunction)vkFreeCommandBuffers;
if (!strcmp(funcName, "vkBeginCommandBuffer"))
- return (PFN_vkVoidFunction) vkBeginCommandBuffer;
+ return (PFN_vkVoidFunction)vkBeginCommandBuffer;
if (!strcmp(funcName, "vkCmdUpdateBuffer"))
- return (PFN_vkVoidFunction) vkCmdUpdateBuffer;
+ return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
if (!strcmp(funcName, "vkBindBufferMemory"))
- return (PFN_vkVoidFunction) vkBindBufferMemory;
+ return (PFN_vkVoidFunction)vkBindBufferMemory;
if (!strcmp(funcName, "vkUpdateDescriptorSets"))
- return (PFN_vkVoidFunction) vkUpdateDescriptorSets;
+ return (PFN_vkVoidFunction)vkUpdateDescriptorSets;
if (!strcmp(funcName, "vkCmdFillBuffer"))
- return (PFN_vkVoidFunction) vkCmdFillBuffer;
+ return (PFN_vkVoidFunction)vkCmdFillBuffer;
if (dev == NULL)
return NULL;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
- VkLayerDispatchTable* pTable = my_data->device_dispatch_table;
+ VkLayerDispatchTable *pTable = my_data->device_dispatch_table;
{
if (pTable->GetDeviceProcAddr == NULL)
return NULL;
@@ -775,47 +761,47 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkD
}
}
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char* funcName)
-{
+VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
PFN_vkVoidFunction fptr;
layer_data *my_data;
if (!strcmp(funcName, "vkGetInstanceProcAddr"))
- return (PFN_vkVoidFunction) vkGetInstanceProcAddr;
+ return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
if (!strcmp(funcName, "vkGetDeviceProcAddr"))
- return (PFN_vkVoidFunction) vkGetDeviceProcAddr;
+ return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
if (!strcmp(funcName, "vkCreateInstance"))
- return (PFN_vkVoidFunction) vkCreateInstance;
+ return (PFN_vkVoidFunction)vkCreateInstance;
if (!strcmp(funcName, "vkDestroyInstance"))
- return (PFN_vkVoidFunction) vkDestroyInstance;
+ return (PFN_vkVoidFunction)vkDestroyInstance;
if (!strcmp(funcName, "vkCreateDevice"))
- return (PFN_vkVoidFunction) vkCreateDevice;
+ return (PFN_vkVoidFunction)vkCreateDevice;
if (!strcmp(funcName, "vkEnumeratePhysicalDevices"))
- return (PFN_vkVoidFunction) vkEnumeratePhysicalDevices;
+ return (PFN_vkVoidFunction)vkEnumeratePhysicalDevices;
if (!strcmp(funcName, "vkGetPhysicalDeviceFeatures"))
- return (PFN_vkVoidFunction) vkGetPhysicalDeviceFeatures;
+ return (PFN_vkVoidFunction)vkGetPhysicalDeviceFeatures;
if (!strcmp(funcName, "vkGetPhysicalDeviceFormatProperties"))
- return (PFN_vkVoidFunction) vkGetPhysicalDeviceFormatProperties;
+ return (PFN_vkVoidFunction)vkGetPhysicalDeviceFormatProperties;
if (!strcmp(funcName, "vkGetPhysicalDeviceImageFormatProperties"))
- return (PFN_vkVoidFunction) vkGetPhysicalDeviceImageFormatProperties;
+ return (PFN_vkVoidFunction)vkGetPhysicalDeviceImageFormatProperties;
if (!strcmp(funcName, "vkGetPhysicalDeviceProperties"))
- return (PFN_vkVoidFunction) vkGetPhysicalDeviceProperties;
+ return (PFN_vkVoidFunction)vkGetPhysicalDeviceProperties;
if (!strcmp(funcName, "vkGetPhysicalDeviceQueueFamilyProperties"))
- return (PFN_vkVoidFunction) vkGetPhysicalDeviceQueueFamilyProperties;
+ return (PFN_vkVoidFunction)vkGetPhysicalDeviceQueueFamilyProperties;
if (!strcmp(funcName, "vkGetPhysicalDeviceMemoryProperties"))
- return (PFN_vkVoidFunction) vkGetPhysicalDeviceMemoryProperties;
+ return (PFN_vkVoidFunction)vkGetPhysicalDeviceMemoryProperties;
if (!strcmp(funcName, "vkGetPhysicalDeviceSparseImageFormatProperties"))
- return (PFN_vkVoidFunction) vkGetPhysicalDeviceSparseImageFormatProperties;
+ return (PFN_vkVoidFunction)vkGetPhysicalDeviceSparseImageFormatProperties;
if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
- return (PFN_vkVoidFunction) vkEnumerateInstanceLayerProperties;
+ return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
- return (PFN_vkVoidFunction) vkEnumerateInstanceExtensionProperties;
+ return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
if (!strcmp(funcName, "vkEnumerateInstanceDeviceProperties"))
return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;
- if (!instance) return NULL;
+ if (!instance)
+ return NULL;
my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
@@ -824,7 +810,7 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(V
return fptr;
{
- VkLayerInstanceDispatchTable* pTable = my_data->instance_dispatch_table;
+ VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
if (pTable->GetInstanceProcAddr == NULL)
return NULL;
return pTable->GetInstanceProcAddr(instance, funcName);
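Both lookups above follow the standard layer dispatch idiom: return the layer's own wrapper for every entry point it intercepts, and otherwise hand the name to the next GetDeviceProcAddr / GetInstanceProcAddr in the chain. A minimal application-side sketch of how one of the intercepted names resolves; the device, command buffer, and destination buffer are assumed to exist already and are not part of this commit (error handling omitted):

    #include <vulkan/vulkan.h>

    // Resolving an entry point intercepted above ("vkCmdFillBuffer"). With a
    // validation layer enabled, the pointer returned here is the layer's wrapper,
    // which typically validates the call and then forwards via its dispatch table.
    void fill_via_proc_addr(VkDevice device, VkCommandBuffer cmdBuf, VkBuffer dstBuffer) {
        PFN_vkCmdFillBuffer pfnCmdFillBuffer =
            (PFN_vkCmdFillBuffer)vkGetDeviceProcAddr(device, "vkCmdFillBuffer");
        if (pfnCmdFillBuffer)
            pfnCmdFillBuffer(cmdBuf, dstBuffer, /*dstOffset*/ 0, VK_WHOLE_SIZE, /*data*/ 0);
    }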
diff --git a/layers/device_limits.h b/layers/device_limits.h
index 0fa3b90dc..c7dfbfe2e 100644
--- a/layers/device_limits.h
+++ b/layers/device_limits.h
@@ -31,40 +31,36 @@
using namespace std;
// Device Limits ERROR codes
-typedef enum _DEV_LIMITS_ERROR
-{
- DEVLIMITS_NONE, // Used for INFO & other non-error messages
- DEVLIMITS_INVALID_INSTANCE, // Invalid instance used
- DEVLIMITS_INVALID_PHYSICAL_DEVICE, // Invalid physical device used
- DEVLIMITS_INVALID_INHERITED_QUERY, // Invalid use of inherited query
- DEVLIMITS_MUST_QUERY_COUNT, // Failed to make initial call to an API to query the count
- DEVLIMITS_MUST_QUERY_PROPERTIES, // Failed to make initial call to an API to query properties
- DEVLIMITS_INVALID_CALL_SEQUENCE, // Flag generic case of an invalid call sequence by the app
- DEVLIMITS_INVALID_FEATURE_REQUESTED, // App requested a feature not supported by physical device
- DEVLIMITS_COUNT_MISMATCH, // App requesting a count value different than actual value
- DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, // Invalid queue requested based on queue family properties
- DEVLIMITS_LIMITS_VIOLATION, // Driver-specified limits/properties were exceeded
- DEVLIMITS_INVALID_UNIFORM_BUFFER_OFFSET, // Uniform buffer offset violates device limit granularity
- DEVLIMITS_INVALID_STORAGE_BUFFER_OFFSET, // Storage buffer offset violates device limit granularity
+typedef enum _DEV_LIMITS_ERROR {
+ DEVLIMITS_NONE, // Used for INFO & other non-error messages
+ DEVLIMITS_INVALID_INSTANCE, // Invalid instance used
+ DEVLIMITS_INVALID_PHYSICAL_DEVICE, // Invalid physical device used
+ DEVLIMITS_INVALID_INHERITED_QUERY, // Invalid use of inherited query
+ DEVLIMITS_MUST_QUERY_COUNT, // Failed to make initial call to an API to query the count
+ DEVLIMITS_MUST_QUERY_PROPERTIES, // Failed to make initial call to an API to query properties
+ DEVLIMITS_INVALID_CALL_SEQUENCE, // Flag generic case of an invalid call sequence by the app
+ DEVLIMITS_INVALID_FEATURE_REQUESTED, // App requested a feature not supported by physical device
+ DEVLIMITS_COUNT_MISMATCH, // App requesting a count value different than actual value
+ DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, // Invalid queue requested based on queue family properties
+ DEVLIMITS_LIMITS_VIOLATION, // Driver-specified limits/properties were exceeded
+ DEVLIMITS_INVALID_UNIFORM_BUFFER_OFFSET, // Uniform buffer offset violates device limit granularity
+ DEVLIMITS_INVALID_STORAGE_BUFFER_OFFSET, // Storage buffer offset violates device limit granularity
} DEV_LIMITS_ERROR;
-typedef enum _CALL_STATE
-{
- UNCALLED, // Function has not been called
- QUERY_COUNT, // Function called once to query a count
- QUERY_DETAILS, // Function called w/ a count to query details
+typedef enum _CALL_STATE {
+ UNCALLED, // Function has not been called
+ QUERY_COUNT, // Function called once to query a count
+ QUERY_DETAILS, // Function called w/ a count to query details
} CALL_STATE;
-typedef struct _INSTANCE_STATE
-{
+typedef struct _INSTANCE_STATE {
// Track the call state and array size for physical devices
CALL_STATE vkEnumeratePhysicalDevicesState;
uint32_t physicalDevicesCount;
- _INSTANCE_STATE():vkEnumeratePhysicalDevicesState(UNCALLED), physicalDevicesCount(0) {};
+ _INSTANCE_STATE() : vkEnumeratePhysicalDevicesState(UNCALLED), physicalDevicesCount(0){};
} INSTANCE_STATE;
-typedef struct _PHYSICAL_DEVICE_STATE
-{
+typedef struct _PHYSICAL_DEVICE_STATE {
// Track the call state and array sizes for various query functions
CALL_STATE vkGetPhysicalDeviceQueueFamilyPropertiesState;
uint32_t queueFamilyPropertiesCount;
@@ -73,9 +69,9 @@ typedef struct _PHYSICAL_DEVICE_STATE
CALL_STATE vkGetPhysicalDeviceExtensionPropertiesState;
uint32_t deviceExtensionCount;
CALL_STATE vkGetPhysicalDeviceFeaturesState;
- _PHYSICAL_DEVICE_STATE():vkGetPhysicalDeviceQueueFamilyPropertiesState(UNCALLED), queueFamilyPropertiesCount(0),
- vkGetPhysicalDeviceLayerPropertiesState(UNCALLED), deviceLayerCount(0),
- vkGetPhysicalDeviceExtensionPropertiesState(UNCALLED), deviceExtensionCount(0),
- vkGetPhysicalDeviceFeaturesState(UNCALLED) {};
+ _PHYSICAL_DEVICE_STATE()
+ : vkGetPhysicalDeviceQueueFamilyPropertiesState(UNCALLED), queueFamilyPropertiesCount(0),
+ vkGetPhysicalDeviceLayerPropertiesState(UNCALLED), deviceLayerCount(0),
+ vkGetPhysicalDeviceExtensionPropertiesState(UNCALLED), deviceExtensionCount(0),
+ vkGetPhysicalDeviceFeaturesState(UNCALLED){};
} PHYSICAL_DEVICE_STATE;
-
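The CALL_STATE machine above (UNCALLED, QUERY_COUNT, QUERY_DETAILS) mirrors the two-call query protocol that the device_limits layer checks for, and that DEVLIMITS_MUST_QUERY_COUNT flags when it is skipped: the application first asks for a count with a null array, then calls again with storage sized from that count. A minimal sketch of the expected sequence, assuming a valid VkInstance and ignoring VkResult checking:

    #include <vector>
    #include <vulkan/vulkan.h>

    // Two-call enumeration idiom tracked by CALL_STATE:
    //   first call  -> QUERY_COUNT   (pPhysicalDevices == NULL)
    //   second call -> QUERY_DETAILS (array sized from the returned count)
    std::vector<VkPhysicalDevice> enumerate_gpus(VkInstance instance) {
        uint32_t count = 0;
        vkEnumeratePhysicalDevices(instance, &count, nullptr);
        std::vector<VkPhysicalDevice> gpus(count);
        vkEnumeratePhysicalDevices(instance, &count, gpus.data());
        return gpus;
    }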
diff --git a/layers/draw_state.cpp b/layers/draw_state.cpp
index b0d278f6a..c6d0fcdae 100644
--- a/layers/draw_state.cpp
+++ b/layers/draw_state.cpp
@@ -71,14 +71,14 @@ using std::unordered_set;
// Track command pools and their command buffers
struct CMD_POOL_INFO {
- VkCommandPoolCreateFlags createFlags;
+ VkCommandPoolCreateFlags createFlags;
uint32_t queueFamilyIndex;
- list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
+ list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};
struct devExts {
VkBool32 wsi_enabled;
- unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE*> swapchainMap;
+ unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};
@@ -87,57 +87,51 @@ struct shader_module;
struct render_pass;
struct layer_data {
- debug_report_data* report_data;
+ debug_report_data *report_data;
std::vector<VkDebugReportCallbackEXT> logging_callback;
- VkLayerDispatchTable* device_dispatch_table;
- VkLayerInstanceDispatchTable* instance_dispatch_table;
+ VkLayerDispatchTable *device_dispatch_table;
+ VkLayerInstanceDispatchTable *instance_dispatch_table;
devExts device_extensions;
vector<VkQueue> queues; // all queues under given device
// Global set of all cmdBuffers that are inFlight on this device
unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
// Layer specific data
- unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap;
- unordered_map<VkImageView, unique_ptr<VkImageViewCreateInfo>> imageViewMap;
+ unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap;
+ unordered_map<VkImageView, unique_ptr<VkImageViewCreateInfo>> imageViewMap;
unordered_map<VkImage, IMAGE_NODE> imageMap;
- unordered_map<VkBufferView, unique_ptr<VkBufferViewCreateInfo>> bufferViewMap;
- unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
- unordered_map<VkPipeline, PIPELINE_NODE*> pipelineMap;
- unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
- unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE*> descriptorPoolMap;
- unordered_map<VkDescriptorSet, SET_NODE*> setMap;
- unordered_map<VkDescriptorSetLayout, LAYOUT_NODE*> descriptorSetLayoutMap;
- unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
- unordered_map<VkDeviceMemory, VkImage> memImageMap;
- unordered_map<VkFence, FENCE_NODE> fenceMap;
- unordered_map<VkQueue, QUEUE_NODE> queueMap;
- unordered_map<VkEvent, EVENT_NODE> eventMap;
- unordered_map<QueryObject, bool> queryToStateMap;
+ unordered_map<VkBufferView, unique_ptr<VkBufferViewCreateInfo>> bufferViewMap;
+ unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
+ unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
+ unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
+ unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
+ unordered_map<VkDescriptorSet, SET_NODE *> setMap;
+ unordered_map<VkDescriptorSetLayout, LAYOUT_NODE *> descriptorSetLayoutMap;
+ unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
+ unordered_map<VkDeviceMemory, VkImage> memImageMap;
+ unordered_map<VkFence, FENCE_NODE> fenceMap;
+ unordered_map<VkQueue, QUEUE_NODE> queueMap;
+ unordered_map<VkEvent, EVENT_NODE> eventMap;
+ unordered_map<QueryObject, bool> queryToStateMap;
unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
- unordered_map<void*, GLOBAL_CB_NODE*> commandBufferMap;
+ unordered_map<void *, GLOBAL_CB_NODE *> commandBufferMap;
unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
- unordered_map<VkRenderPass, RENDER_PASS_NODE*> renderPassMap;
- unordered_map<VkShaderModule, shader_module*> shaderModuleMap;
+ unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
+ unordered_map<VkShaderModule, shader_module *> shaderModuleMap;
// Current render pass
- VkRenderPassBeginInfo renderPassBeginInfo;
- uint32_t currentSubpass;
+ VkRenderPassBeginInfo renderPassBeginInfo;
+ uint32_t currentSubpass;
// Device specific data
- PHYS_DEV_PROPERTIES_NODE physDevProperties;
-
- layer_data() :
- report_data(nullptr),
- device_dispatch_table(nullptr),
- instance_dispatch_table(nullptr),
- device_extensions()
- {};
+ PHYS_DEV_PROPERTIES_NODE physDevProperties;
+
+ layer_data() : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), device_extensions(){};
};
// Code imported from shader_checker
-static void
-build_def_index(shader_module *);
+static void build_def_index(shader_module *);
// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
@@ -147,36 +141,31 @@ struct spirv_inst_iter {
uint32_t len() { return *it >> 16; }
uint32_t opcode() { return *it & 0x0ffffu; }
- uint32_t const & word(unsigned n) { return it[n]; }
+ uint32_t const &word(unsigned n) { return it[n]; }
uint32_t offset() { return (uint32_t)(it - zero); }
spirv_inst_iter() {}
- spirv_inst_iter(std::vector<uint32_t>::const_iterator zero,
- std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}
+ spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}
- bool operator== (spirv_inst_iter const & other) {
- return it == other.it;
- }
+ bool operator==(spirv_inst_iter const &other) { return it == other.it; }
- bool operator!= (spirv_inst_iter const & other) {
- return it != other.it;
- }
+ bool operator!=(spirv_inst_iter const &other) { return it != other.it; }
- spirv_inst_iter operator++ (int) { /* x++ */
+ spirv_inst_iter operator++(int) { /* x++ */
spirv_inst_iter ii = *this;
it += len();
return ii;
}
- spirv_inst_iter operator++ () { /* ++x; */
+ spirv_inst_iter operator++() { /* ++x; */
it += len();
return *this;
}
/* The iterator and the value are the same thing. */
- spirv_inst_iter & operator* () { return *this; }
- spirv_inst_iter const & operator* () const { return *this; }
+ spirv_inst_iter &operator*() { return *this; }
+ spirv_inst_iter const &operator*() const { return *this; }
};
struct shader_module {
@@ -187,16 +176,16 @@ struct shader_module {
*/
unordered_map<unsigned, unsigned> def_index;
- shader_module(VkShaderModuleCreateInfo const *pCreateInfo) :
- words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
- def_index() {
+ shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
+ : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
+ def_index() {
build_def_index(this);
}
/* expose begin() / end() to enable range-based for */
- spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
- spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); } /* just past last insn */
+ spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
+ spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); } /* just past last insn */
/* given an offset into the module, produce an iterator there. */
spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }
@@ -211,7 +200,7 @@ struct shader_module {
};
// TODO : Do we need to guard access to layer_data_map w/ lock?
-static unordered_map<void*, layer_data*> layer_data_map;
+static unordered_map<void *, layer_data *> layer_data_map;
// TODO : This can be much smarter, using separate locks for separate global data
static int globalLockInitialized = 0;
@@ -220,9 +209,7 @@ static loader_platform_thread_mutex globalLock;
static loader_platform_thread_id g_tidMapping[MAX_TID] = {0};
static uint32_t g_maxTID = 0;
-template layer_data *get_my_data_ptr<layer_data>(
- void *data_key,
- std::unordered_map<void *, layer_data *> &data_map);
+template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);
// Map actual TID to an index value and return that index
// This keeps TIDs in range from 0-MAX_TID and simplifies compares between runs
@@ -233,116 +220,112 @@ static uint32_t getTIDIndex() {
return i;
}
// Don't yet have mapping, set it and return newly set index
- uint32_t retVal = (uint32_t) g_maxTID;
+ uint32_t retVal = (uint32_t)g_maxTID;
g_tidMapping[g_maxTID++] = tid;
assert(g_maxTID < MAX_TID);
return retVal;
}
// Return a string representation of CMD_TYPE enum
-static string cmdTypeToString(CMD_TYPE cmd)
-{
- switch (cmd)
- {
- case CMD_BINDPIPELINE:
- return "CMD_BINDPIPELINE";
- case CMD_BINDPIPELINEDELTA:
- return "CMD_BINDPIPELINEDELTA";
- case CMD_SETVIEWPORTSTATE:
- return "CMD_SETVIEWPORTSTATE";
- case CMD_SETLINEWIDTHSTATE:
- return "CMD_SETLINEWIDTHSTATE";
- case CMD_SETDEPTHBIASSTATE:
- return "CMD_SETDEPTHBIASSTATE";
- case CMD_SETBLENDSTATE:
- return "CMD_SETBLENDSTATE";
- case CMD_SETDEPTHBOUNDSSTATE:
- return "CMD_SETDEPTHBOUNDSSTATE";
- case CMD_SETSTENCILREADMASKSTATE:
- return "CMD_SETSTENCILREADMASKSTATE";
- case CMD_SETSTENCILWRITEMASKSTATE:
- return "CMD_SETSTENCILWRITEMASKSTATE";
- case CMD_SETSTENCILREFERENCESTATE:
- return "CMD_SETSTENCILREFERENCESTATE";
- case CMD_BINDDESCRIPTORSETS:
- return "CMD_BINDDESCRIPTORSETS";
- case CMD_BINDINDEXBUFFER:
- return "CMD_BINDINDEXBUFFER";
- case CMD_BINDVERTEXBUFFER:
- return "CMD_BINDVERTEXBUFFER";
- case CMD_DRAW:
- return "CMD_DRAW";
- case CMD_DRAWINDEXED:
- return "CMD_DRAWINDEXED";
- case CMD_DRAWINDIRECT:
- return "CMD_DRAWINDIRECT";
- case CMD_DRAWINDEXEDINDIRECT:
- return "CMD_DRAWINDEXEDINDIRECT";
- case CMD_DISPATCH:
- return "CMD_DISPATCH";
- case CMD_DISPATCHINDIRECT:
- return "CMD_DISPATCHINDIRECT";
- case CMD_COPYBUFFER:
- return "CMD_COPYBUFFER";
- case CMD_COPYIMAGE:
- return "CMD_COPYIMAGE";
- case CMD_BLITIMAGE:
- return "CMD_BLITIMAGE";
- case CMD_COPYBUFFERTOIMAGE:
- return "CMD_COPYBUFFERTOIMAGE";
- case CMD_COPYIMAGETOBUFFER:
- return "CMD_COPYIMAGETOBUFFER";
- case CMD_CLONEIMAGEDATA:
- return "CMD_CLONEIMAGEDATA";
- case CMD_UPDATEBUFFER:
- return "CMD_UPDATEBUFFER";
- case CMD_FILLBUFFER:
- return "CMD_FILLBUFFER";
- case CMD_CLEARCOLORIMAGE:
- return "CMD_CLEARCOLORIMAGE";
- case CMD_CLEARATTACHMENTS:
- return "CMD_CLEARCOLORATTACHMENT";
- case CMD_CLEARDEPTHSTENCILIMAGE:
- return "CMD_CLEARDEPTHSTENCILIMAGE";
- case CMD_RESOLVEIMAGE:
- return "CMD_RESOLVEIMAGE";
- case CMD_SETEVENT:
- return "CMD_SETEVENT";
- case CMD_RESETEVENT:
- return "CMD_RESETEVENT";
- case CMD_WAITEVENTS:
- return "CMD_WAITEVENTS";
- case CMD_PIPELINEBARRIER:
- return "CMD_PIPELINEBARRIER";
- case CMD_BEGINQUERY:
- return "CMD_BEGINQUERY";
- case CMD_ENDQUERY:
- return "CMD_ENDQUERY";
- case CMD_RESETQUERYPOOL:
- return "CMD_RESETQUERYPOOL";
- case CMD_COPYQUERYPOOLRESULTS:
- return "CMD_COPYQUERYPOOLRESULTS";
- case CMD_WRITETIMESTAMP:
- return "CMD_WRITETIMESTAMP";
- case CMD_INITATOMICCOUNTERS:
- return "CMD_INITATOMICCOUNTERS";
- case CMD_LOADATOMICCOUNTERS:
- return "CMD_LOADATOMICCOUNTERS";
- case CMD_SAVEATOMICCOUNTERS:
- return "CMD_SAVEATOMICCOUNTERS";
- case CMD_BEGINRENDERPASS:
- return "CMD_BEGINRENDERPASS";
- case CMD_ENDRENDERPASS:
- return "CMD_ENDRENDERPASS";
- default:
- return "UNKNOWN";
+static string cmdTypeToString(CMD_TYPE cmd) {
+ switch (cmd) {
+ case CMD_BINDPIPELINE:
+ return "CMD_BINDPIPELINE";
+ case CMD_BINDPIPELINEDELTA:
+ return "CMD_BINDPIPELINEDELTA";
+ case CMD_SETVIEWPORTSTATE:
+ return "CMD_SETVIEWPORTSTATE";
+ case CMD_SETLINEWIDTHSTATE:
+ return "CMD_SETLINEWIDTHSTATE";
+ case CMD_SETDEPTHBIASSTATE:
+ return "CMD_SETDEPTHBIASSTATE";
+ case CMD_SETBLENDSTATE:
+ return "CMD_SETBLENDSTATE";
+ case CMD_SETDEPTHBOUNDSSTATE:
+ return "CMD_SETDEPTHBOUNDSSTATE";
+ case CMD_SETSTENCILREADMASKSTATE:
+ return "CMD_SETSTENCILREADMASKSTATE";
+ case CMD_SETSTENCILWRITEMASKSTATE:
+ return "CMD_SETSTENCILWRITEMASKSTATE";
+ case CMD_SETSTENCILREFERENCESTATE:
+ return "CMD_SETSTENCILREFERENCESTATE";
+ case CMD_BINDDESCRIPTORSETS:
+ return "CMD_BINDDESCRIPTORSETS";
+ case CMD_BINDINDEXBUFFER:
+ return "CMD_BINDINDEXBUFFER";
+ case CMD_BINDVERTEXBUFFER:
+ return "CMD_BINDVERTEXBUFFER";
+ case CMD_DRAW:
+ return "CMD_DRAW";
+ case CMD_DRAWINDEXED:
+ return "CMD_DRAWINDEXED";
+ case CMD_DRAWINDIRECT:
+ return "CMD_DRAWINDIRECT";
+ case CMD_DRAWINDEXEDINDIRECT:
+ return "CMD_DRAWINDEXEDINDIRECT";
+ case CMD_DISPATCH:
+ return "CMD_DISPATCH";
+ case CMD_DISPATCHINDIRECT:
+ return "CMD_DISPATCHINDIRECT";
+ case CMD_COPYBUFFER:
+ return "CMD_COPYBUFFER";
+ case CMD_COPYIMAGE:
+ return "CMD_COPYIMAGE";
+ case CMD_BLITIMAGE:
+ return "CMD_BLITIMAGE";
+ case CMD_COPYBUFFERTOIMAGE:
+ return "CMD_COPYBUFFERTOIMAGE";
+ case CMD_COPYIMAGETOBUFFER:
+ return "CMD_COPYIMAGETOBUFFER";
+ case CMD_CLONEIMAGEDATA:
+ return "CMD_CLONEIMAGEDATA";
+ case CMD_UPDATEBUFFER:
+ return "CMD_UPDATEBUFFER";
+ case CMD_FILLBUFFER:
+ return "CMD_FILLBUFFER";
+ case CMD_CLEARCOLORIMAGE:
+ return "CMD_CLEARCOLORIMAGE";
+ case CMD_CLEARATTACHMENTS:
+ return "CMD_CLEARCOLORATTACHMENT";
+ case CMD_CLEARDEPTHSTENCILIMAGE:
+ return "CMD_CLEARDEPTHSTENCILIMAGE";
+ case CMD_RESOLVEIMAGE:
+ return "CMD_RESOLVEIMAGE";
+ case CMD_SETEVENT:
+ return "CMD_SETEVENT";
+ case CMD_RESETEVENT:
+ return "CMD_RESETEVENT";
+ case CMD_WAITEVENTS:
+ return "CMD_WAITEVENTS";
+ case CMD_PIPELINEBARRIER:
+ return "CMD_PIPELINEBARRIER";
+ case CMD_BEGINQUERY:
+ return "CMD_BEGINQUERY";
+ case CMD_ENDQUERY:
+ return "CMD_ENDQUERY";
+ case CMD_RESETQUERYPOOL:
+ return "CMD_RESETQUERYPOOL";
+ case CMD_COPYQUERYPOOLRESULTS:
+ return "CMD_COPYQUERYPOOLRESULTS";
+ case CMD_WRITETIMESTAMP:
+ return "CMD_WRITETIMESTAMP";
+ case CMD_INITATOMICCOUNTERS:
+ return "CMD_INITATOMICCOUNTERS";
+ case CMD_LOADATOMICCOUNTERS:
+ return "CMD_LOADATOMICCOUNTERS";
+ case CMD_SAVEATOMICCOUNTERS:
+ return "CMD_SAVEATOMICCOUNTERS";
+ case CMD_BEGINRENDERPASS:
+ return "CMD_BEGINRENDERPASS";
+ case CMD_ENDRENDERPASS:
+ return "CMD_ENDRENDERPASS";
+ default:
+ return "UNKNOWN";
}
}
// SPIRV utility functions
-static void
-build_def_index(shader_module *module)
-{
+static void build_def_index(shader_module *module) {
for (auto insn : *module) {
switch (insn.opcode()) {
/* Types */
@@ -405,13 +388,10 @@ build_def_index(shader_module *module)
}
}
-
-static spirv_inst_iter
-find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits)
-{
+static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
for (auto insn : *src) {
if (insn.opcode() == spv::OpEntryPoint) {
- auto entrypointName = (char const *) &insn.word(3);
+ auto entrypointName = (char const *)&insn.word(3);
auto entrypointStageBits = 1u << insn.word(1);
if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
@@ -423,10 +403,7 @@ find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stag
return src->end();
}
-
-bool
-shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo)
-{
+bool shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo) {
uint32_t *words = (uint32_t *)pCreateInfo->pCode;
size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);
@@ -434,30 +411,39 @@ shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo)
return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
}
-static char const *
-storage_class_name(unsigned sc)
-{
+static char const *storage_class_name(unsigned sc) {
switch (sc) {
- case spv::StorageClassInput: return "input";
- case spv::StorageClassOutput: return "output";
- case spv::StorageClassUniformConstant: return "const uniform";
- case spv::StorageClassUniform: return "uniform";
- case spv::StorageClassWorkgroup: return "workgroup local";
- case spv::StorageClassCrossWorkgroup: return "workgroup global";
- case spv::StorageClassPrivate: return "private global";
- case spv::StorageClassFunction: return "function";
- case spv::StorageClassGeneric: return "generic";
- case spv::StorageClassAtomicCounter: return "atomic counter";
- case spv::StorageClassImage: return "image";
- case spv::StorageClassPushConstant: return "push constant";
- default: return "unknown";
+ case spv::StorageClassInput:
+ return "input";
+ case spv::StorageClassOutput:
+ return "output";
+ case spv::StorageClassUniformConstant:
+ return "const uniform";
+ case spv::StorageClassUniform:
+ return "uniform";
+ case spv::StorageClassWorkgroup:
+ return "workgroup local";
+ case spv::StorageClassCrossWorkgroup:
+ return "workgroup global";
+ case spv::StorageClassPrivate:
+ return "private global";
+ case spv::StorageClassFunction:
+ return "function";
+ case spv::StorageClassGeneric:
+ return "generic";
+ case spv::StorageClassAtomicCounter:
+ return "atomic counter";
+ case spv::StorageClassImage:
+ return "image";
+ case spv::StorageClassPushConstant:
+ return "push constant";
+ default:
+ return "unknown";
}
}
/* get the value of an integral constant */
-unsigned
-get_constant_value(shader_module const *src, unsigned id)
-{
+unsigned get_constant_value(shader_module const *src, unsigned id) {
auto value = src->get_def(id);
assert(value != src->end());
@@ -472,50 +458,45 @@ get_constant_value(shader_module const *src, unsigned id)
}
/* returns ptr to null terminator */
-static char *
-describe_type(char *dst, shader_module const *src, unsigned type)
-{
+static char *describe_type(char *dst, shader_module const *src, unsigned type) {
auto insn = src->get_def(type);
assert(insn != src->end());
switch (insn.opcode()) {
- case spv::OpTypeBool:
- return dst + sprintf(dst, "bool");
- case spv::OpTypeInt:
- return dst + sprintf(dst, "%cint%d", insn.word(3) ? 's' : 'u', insn.word(2));
- case spv::OpTypeFloat:
- return dst + sprintf(dst, "float%d", insn.word(2));
- case spv::OpTypeVector:
- dst += sprintf(dst, "vec%d of ", insn.word(3));
- return describe_type(dst, src, insn.word(2));
- case spv::OpTypeMatrix:
- dst += sprintf(dst, "mat%d of ", insn.word(3));
- return describe_type(dst, src, insn.word(2));
- case spv::OpTypeArray:
- dst += sprintf(dst, "arr[%d] of ", get_constant_value(src, insn.word(3)));
- return describe_type(dst, src, insn.word(2));
- case spv::OpTypePointer:
- dst += sprintf(dst, "ptr to %s ", storage_class_name(insn.word(2)));
- return describe_type(dst, src, insn.word(3));
- case spv::OpTypeStruct:
- {
- dst += sprintf(dst, "struct of (");
- for (unsigned i = 2; i < insn.len(); i++) {
- dst = describe_type(dst, src, insn.word(i));
- dst += sprintf(dst, i == insn.len()-1 ? ")" : ", ");
- }
- return dst;
- }
- case spv::OpTypeSampler:
- return dst + sprintf(dst, "sampler");
- default:
- return dst + sprintf(dst, "oddtype");
+ case spv::OpTypeBool:
+ return dst + sprintf(dst, "bool");
+ case spv::OpTypeInt:
+ return dst + sprintf(dst, "%cint%d", insn.word(3) ? 's' : 'u', insn.word(2));
+ case spv::OpTypeFloat:
+ return dst + sprintf(dst, "float%d", insn.word(2));
+ case spv::OpTypeVector:
+ dst += sprintf(dst, "vec%d of ", insn.word(3));
+ return describe_type(dst, src, insn.word(2));
+ case spv::OpTypeMatrix:
+ dst += sprintf(dst, "mat%d of ", insn.word(3));
+ return describe_type(dst, src, insn.word(2));
+ case spv::OpTypeArray:
+ dst += sprintf(dst, "arr[%d] of ", get_constant_value(src, insn.word(3)));
+ return describe_type(dst, src, insn.word(2));
+ case spv::OpTypePointer:
+ dst += sprintf(dst, "ptr to %s ", storage_class_name(insn.word(2)));
+ return describe_type(dst, src, insn.word(3));
+ case spv::OpTypeStruct: {
+ dst += sprintf(dst, "struct of (");
+ for (unsigned i = 2; i < insn.len(); i++) {
+ dst = describe_type(dst, src, insn.word(i));
+ dst += sprintf(dst, i == insn.len() - 1 ? ")" : ", ");
+ }
+ return dst;
+ }
+ case spv::OpTypeSampler:
+ return dst + sprintf(dst, "sampler");
+ default:
+ return dst + sprintf(dst, "oddtype");
}
}
-static bool
-types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool b_arrayed)
-{
+static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool b_arrayed) {
/* walk two type trees together, and complain about differences */
auto a_insn = a->get_def(a_type);
auto b_insn = b->get_def(b_type);
@@ -532,65 +513,59 @@ types_match(shader_module const *a, shader_module const *b, unsigned a_type, uns
}
switch (a_insn.opcode()) {
- /* if b_arrayed and we hit a leaf type, then we can't match -- there's nowhere for the extra OpTypeArray to be! */
- case spv::OpTypeBool:
- return true && !b_arrayed;
- case spv::OpTypeInt:
- /* match on width, signedness */
- return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3) && !b_arrayed;
- case spv::OpTypeFloat:
- /* match on width */
- return a_insn.word(2) == b_insn.word(2) && !b_arrayed;
- case spv::OpTypeVector:
- case spv::OpTypeMatrix:
- /* match on element type, count. these all have the same layout. we don't get here if
- * b_arrayed -- that is handled above. */
- return !b_arrayed &&
- types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) &&
- a_insn.word(3) == b_insn.word(3);
- case spv::OpTypeArray:
- /* match on element type, count. these all have the same layout. we don't get here if
- * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
- * not a literal within OpTypeArray */
- return !b_arrayed &&
- types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) &&
- get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
- case spv::OpTypeStruct:
- /* match on all element types */
- {
- if (b_arrayed) {
- /* for the purposes of matching different levels of arrayness, structs are leaves. */
- return false;
- }
+ /* if b_arrayed and we hit a leaf type, then we can't match -- there's nowhere for the extra OpTypeArray to be! */
+ case spv::OpTypeBool:
+ return true && !b_arrayed;
+ case spv::OpTypeInt:
+ /* match on width, signedness */
+ return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3) && !b_arrayed;
+ case spv::OpTypeFloat:
+ /* match on width */
+ return a_insn.word(2) == b_insn.word(2) && !b_arrayed;
+ case spv::OpTypeVector:
+ case spv::OpTypeMatrix:
+ /* match on element type, count. these all have the same layout. we don't get here if
+ * b_arrayed -- that is handled above. */
+ return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) && a_insn.word(3) == b_insn.word(3);
+ case spv::OpTypeArray:
+ /* match on element type, count. these all have the same layout. we don't get here if
+ * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
+ * not a literal within OpTypeArray */
+ return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) &&
+ get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
+ case spv::OpTypeStruct:
+ /* match on all element types */
+ {
+ if (b_arrayed) {
+ /* for the purposes of matching different levels of arrayness, structs are leaves. */
+ return false;
+ }
- if (a_insn.len() != b_insn.len()) {
- return false; /* structs cannot match if member counts differ */
- }
+ if (a_insn.len() != b_insn.len()) {
+ return false; /* structs cannot match if member counts differ */
+ }
- for (unsigned i = 2; i < a_insn.len(); i++) {
- if (!types_match(a, b, a_insn.word(i), b_insn.word(i), b_arrayed)) {
- return false;
- }
+ for (unsigned i = 2; i < a_insn.len(); i++) {
+ if (!types_match(a, b, a_insn.word(i), b_insn.word(i), b_arrayed)) {
+ return false;
}
-
- return true;
}
- case spv::OpTypePointer:
- /* match on pointee type. storage class is expected to differ */
- return types_match(a, b, a_insn.word(3), b_insn.word(3), b_arrayed);
- default:
- /* remaining types are CLisms, or may not appear in the interfaces we
- * are interested in. Just claim no match.
- */
- return false;
+ return true;
+ }
+ case spv::OpTypePointer:
+ /* match on pointee type. storage class is expected to differ */
+ return types_match(a, b, a_insn.word(3), b_insn.word(3), b_arrayed);
+ default:
+ /* remaining types are CLisms, or may not appear in the interfaces we
+ * are interested in. Just claim no match.
+ */
+ return false;
}
}
-static int
-value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def)
-{
+static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
auto it = map.find(id);
if (it == map.end())
return def;
@@ -598,42 +573,36 @@ value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id,
return it->second;
}
-
-static unsigned
-get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level)
-{
+static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
auto insn = src->get_def(type);
assert(insn != src->end());
switch (insn.opcode()) {
- case spv::OpTypePointer:
- /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
- * we're never actually passing pointers around. */
- return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
- case spv::OpTypeArray:
- if (strip_array_level) {
- return get_locations_consumed_by_type(src, insn.word(2), false);
- }
- else {
- return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
- }
- case spv::OpTypeMatrix:
- /* num locations is the dimension * element size */
- return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
- default:
- /* everything else is just 1. */
- return 1;
+ case spv::OpTypePointer:
+ /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
+ * we're never actually passing pointers around. */
+ return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
+ case spv::OpTypeArray:
+ if (strip_array_level) {
+ return get_locations_consumed_by_type(src, insn.word(2), false);
+ } else {
+ return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
+ }
+ case spv::OpTypeMatrix:
+ /* num locations is the dimension * element size */
+ return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
+ default:
+ /* everything else is just 1. */
+ return 1;
/* TODO: extend to handle 64bit scalar types, whose vectors may need
* multiple locations. */
}
}
-
typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;
-
struct interface_var {
uint32_t id;
uint32_t type_id;
@@ -641,38 +610,26 @@ struct interface_var {
/* TODO: collect the name, too? Isn't required to be present. */
};
-
-static spirv_inst_iter
-get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts)
-{
+static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
while (true) {
if (def.opcode() == spv::OpTypePointer) {
def = src->get_def(def.word(3));
- }
- else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
+ } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
def = src->get_def(def.word(2));
is_array_of_verts = false;
- }
- else if (def.opcode() == spv::OpTypeStruct) {
+ } else if (def.opcode() == spv::OpTypeStruct) {
return def;
- }
- else {
+ } else {
return src->end();
}
}
}
-
-static void
-collect_interface_block_members(layer_data *my_data, VkDevice dev,
- shader_module const *src,
- std::map<location_t, interface_var> &out,
- std::unordered_map<unsigned, unsigned> const &blocks,
- bool is_array_of_verts,
- uint32_t id,
- uint32_t type_id)
-{
+static void collect_interface_block_members(layer_data *my_data, VkDevice dev, shader_module const *src,
+ std::map<location_t, interface_var> &out,
+ std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
+ uint32_t id, uint32_t type_id) {
/* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts);
if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
@@ -719,14 +676,9 @@ collect_interface_block_members(layer_data *my_data, VkDevice dev,
}
}
-static void
-collect_interface_by_location(layer_data *my_data, VkDevice dev,
- shader_module const *src,
- spirv_inst_iter entrypoint,
- spv::StorageClass sinterface,
- std::map<location_t, interface_var> &out,
- bool is_array_of_verts)
-{
+static void collect_interface_by_location(layer_data *my_data, VkDevice dev, shader_module const *src, spirv_inst_iter entrypoint,
+ spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
+ bool is_array_of_verts) {
std::unordered_map<unsigned, unsigned> var_locations;
std::unordered_map<unsigned, unsigned> var_builtins;
std::unordered_map<unsigned, unsigned> var_components;
@@ -756,9 +708,9 @@ collect_interface_by_location(layer_data *my_data, VkDevice dev,
}
}
- /* TODO: handle grouped decorations */
- /* TODO: handle index=1 dual source outputs from FS -- two vars will
- * have the same location, and we DONT want to clobber. */
+ /* TODO: handle grouped decorations */
+ /* TODO: handle index=1 dual source outputs from FS -- two vars will
+ * have the same location, and we DONT want to clobber. */
/* find the end of the entrypoint's name string. additional zero bytes follow the actual null
terminator, to fill out the rest of the word - so we only need to look at the last byte in
@@ -780,7 +732,7 @@ collect_interface_by_location(layer_data *my_data, VkDevice dev,
int location = value_or_default(var_locations, id, -1);
int builtin = value_or_default(var_builtins, id, -1);
- unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
+ unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
/* All variables and interface block members in the Input or Output storage classes
* must be decorated with either a builtin or an explicit location.
@@ -793,8 +745,7 @@ collect_interface_by_location(layer_data *my_data, VkDevice dev,
if (location != -1) {
/* A user-defined interface variable, with a location. Where a variable
* occupied multiple locations, emit one result for each. */
- unsigned num_locations = get_locations_consumed_by_type(src, type,
- is_array_of_verts);
+ unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts);
for (unsigned int offset = 0; offset < num_locations; offset++) {
interface_var v;
v.id = id;
@@ -802,22 +753,17 @@ collect_interface_by_location(layer_data *my_data, VkDevice dev,
v.offset = offset;
out[std::make_pair(location + offset, component)] = v;
}
- }
- else if (builtin == -1) {
+ } else if (builtin == -1) {
/* An interface block instance */
- collect_interface_block_members(my_data, dev, src, out,
- blocks, is_array_of_verts, id, type);
+ collect_interface_block_members(my_data, dev, src, out, blocks, is_array_of_verts, id, type);
}
}
}
}
-static void
-collect_interface_by_descriptor_slot(layer_data *my_data, VkDevice dev,
- shader_module const *src,
- std::unordered_set<uint32_t> const &accessible_ids,
- std::map<descriptor_slot_t, interface_var> &out)
-{
+static void collect_interface_by_descriptor_slot(layer_data *my_data, VkDevice dev, shader_module const *src,
+ std::unordered_set<uint32_t> const &accessible_ids,
+ std::map<descriptor_slot_t, interface_var> &out) {
std::unordered_map<unsigned, unsigned> var_sets;
std::unordered_map<unsigned, unsigned> var_bindings;
@@ -842,19 +788,18 @@ collect_interface_by_descriptor_slot(layer_data *my_data, VkDevice dev,
assert(insn != src->end());
if (insn.opcode() == spv::OpVariable &&
- (insn.word(3) == spv::StorageClassUniform ||
- insn.word(3) == spv::StorageClassUniformConstant)) {
+ (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
unsigned set = value_or_default(var_sets, insn.word(2), 0);
unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
auto existing_it = out.find(std::make_pair(set, binding));
if (existing_it != out.end()) {
/* conflict within spv image */
- log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/0, __LINE__,
- SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
+ __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
"var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
- insn.word(2), insn.word(1), storage_class_name(insn.word(3)),
- existing_it->first.first, existing_it->first.second);
+ insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
+ existing_it->first.second);
}
interface_var v;
@@ -865,57 +810,58 @@ collect_interface_by_descriptor_slot(layer_data *my_data, VkDevice dev,
}
}
-static bool
-validate_interface_between_stages(layer_data *my_data, VkDevice dev,
- shader_module const *producer, spirv_inst_iter producer_entrypoint, char const *producer_name,
- shader_module const *consumer, spirv_inst_iter consumer_entrypoint, char const *consumer_name,
- bool consumer_arrayed_input)
-{
+static bool validate_interface_between_stages(layer_data *my_data, VkDevice dev, shader_module const *producer,
+ spirv_inst_iter producer_entrypoint, char const *producer_name,
+ shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
+ char const *consumer_name, bool consumer_arrayed_input) {
std::map<location_t, interface_var> outputs;
std::map<location_t, interface_var> inputs;
bool pass = true;
collect_interface_by_location(my_data, dev, producer, producer_entrypoint, spv::StorageClassOutput, outputs, false);
- collect_interface_by_location(my_data, dev, consumer, consumer_entrypoint, spv::StorageClassInput, inputs, consumer_arrayed_input);
+ collect_interface_by_location(my_data, dev, consumer, consumer_entrypoint, spv::StorageClassInput, inputs,
+ consumer_arrayed_input);
auto a_it = outputs.begin();
auto b_it = inputs.begin();
/* maps sorted by key (location); walk them together to find mismatches */
- while ((outputs.size() > 0 && a_it != outputs.end()) || ( inputs.size() && b_it != inputs.end())) {
+ while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
- bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
+ bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
- if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
- "%s writes to output location %u.%u which is not consumed by %s", producer_name, a_first.first, a_first.second, consumer_name)) {
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
+ /*dev*/ 0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
+ "%s writes to output location %u.%u which is not consumed by %s", producer_name, a_first.first,
+ a_first.second, consumer_name)) {
pass = false;
}
a_it++;
- }
- else if (a_at_end || a_first > b_first) {
- if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/0, __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
- "%s consumes input location %u.%u which is not written by %s", consumer_name, b_first.first, b_first.second, producer_name)) {
+ } else if (a_at_end || a_first > b_first) {
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
+ __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
+ "%s consumes input location %u.%u which is not written by %s", consumer_name, b_first.first, b_first.second,
+ producer_name)) {
pass = false;
}
b_it++;
- }
- else {
+ } else {
if (types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id, consumer_arrayed_input)) {
/* OK! */
- }
- else {
+ } else {
char producer_type[1024];
char consumer_type[1024];
describe_type(producer_type, producer, a_it->second.type_id);
describe_type(consumer_type, consumer, b_it->second.type_id);
- if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/0, __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
- "Type mismatch on location %u.%u: '%s' vs '%s'", a_first.first, a_first.second, producer_type, consumer_type)) {
- pass = false;
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
+ __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
+ a_first.first, a_first.second, producer_type, consumer_type)) {
+ pass = false;
}
}
a_it++;
@@ -928,13 +874,12 @@ validate_interface_between_stages(layer_data *my_data, VkDevice dev,
enum FORMAT_TYPE {
FORMAT_TYPE_UNDEFINED,
- FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
+ FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
FORMAT_TYPE_SINT,
FORMAT_TYPE_UINT,
};
-static unsigned
-get_format_type(VkFormat fmt) {
+static unsigned get_format_type(VkFormat fmt) {
switch (fmt) {
case VK_FORMAT_UNDEFINED:
return FORMAT_TYPE_UNDEFINED;
@@ -979,41 +924,34 @@ get_format_type(VkFormat fmt) {
/* characterizes a SPIR-V type appearing in an interface to a FF stage,
* for comparison to a VkFormat's characterization above. */
-static unsigned
-get_fundamental_type(shader_module const *src, unsigned type)
-{
+static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
auto insn = src->get_def(type);
assert(insn != src->end());
switch (insn.opcode()) {
- case spv::OpTypeInt:
- return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
- case spv::OpTypeFloat:
- return FORMAT_TYPE_FLOAT;
- case spv::OpTypeVector:
- return get_fundamental_type(src, insn.word(2));
- case spv::OpTypeMatrix:
- return get_fundamental_type(src, insn.word(2));
- case spv::OpTypeArray:
- return get_fundamental_type(src, insn.word(2));
- case spv::OpTypePointer:
- return get_fundamental_type(src, insn.word(3));
- default:
- return FORMAT_TYPE_UNDEFINED;
+ case spv::OpTypeInt:
+ return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
+ case spv::OpTypeFloat:
+ return FORMAT_TYPE_FLOAT;
+ case spv::OpTypeVector:
+ return get_fundamental_type(src, insn.word(2));
+ case spv::OpTypeMatrix:
+ return get_fundamental_type(src, insn.word(2));
+ case spv::OpTypeArray:
+ return get_fundamental_type(src, insn.word(2));
+ case spv::OpTypePointer:
+ return get_fundamental_type(src, insn.word(3));
+ default:
+ return FORMAT_TYPE_UNDEFINED;
}
}
-
-static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage)
-{
+static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
uint32_t bit_pos = u_ffs(stage);
- return bit_pos-1;
+ return bit_pos - 1;
}
-
-static bool
-validate_vi_consistency(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi)
-{
+static bool validate_vi_consistency(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi) {
/* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
* each binding should be specified only once.
*/
@@ -1022,14 +960,14 @@ validate_vi_consistency(layer_data *my_data, VkDevice dev, VkPipelineVertexInput
for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
auto desc = &vi->pVertexBindingDescriptions[i];
- auto & binding = bindings[desc->binding];
+ auto &binding = bindings[desc->binding];
if (binding) {
- if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/0, __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
- "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
+ __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
+ "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
pass = false;
}
- }
- else {
+ } else {
binding = desc;
}
}
@@ -1037,9 +975,8 @@ validate_vi_consistency(layer_data *my_data, VkDevice dev, VkPipelineVertexInput
return pass;
}
-static bool
-validate_vi_against_vs_inputs(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi, shader_module const *vs, spirv_inst_iter entrypoint)
-{
+static bool validate_vi_against_vs_inputs(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi,
+ shader_module const *vs, spirv_inst_iter entrypoint) {
std::map<location_t, interface_var> inputs;
bool pass = true;
@@ -1057,24 +994,24 @@ validate_vi_against_vs_inputs(layer_data *my_data, VkDevice dev, VkPipelineVerte
while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
- bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
+ bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
auto a_first = a_at_end ? 0 : it_a->first;
auto b_first = b_at_end ? 0 : it_b->first.first;
if (!a_at_end && (b_at_end || a_first < b_first)) {
- if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
- "Vertex attribute at location %d not consumed by VS", a_first)) {
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
+ /*dev*/ 0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
+ "Vertex attribute at location %d not consumed by VS", a_first)) {
pass = false;
}
it_a++;
- }
- else if (!b_at_end && (a_at_end || b_first < a_first)) {
- if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/0, __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
- "VS consumes input at location %d but not provided", b_first)) {
+ } else if (!b_at_end && (a_at_end || b_first < a_first)) {
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
+ __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d but not provided",
+ b_first)) {
pass = false;
}
it_b++;
- }
- else {
+ } else {
unsigned attrib_type = get_format_type(it_a->second->format);
unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
@@ -1082,9 +1019,10 @@ validate_vi_against_vs_inputs(layer_data *my_data, VkDevice dev, VkPipelineVerte
if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
char vs_type[1024];
describe_type(vs_type, vs, it_b->second.type_id);
- if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/0, __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
- "Attribute type of `%s` at location %d does not match VS input type of `%s`",
- string_VkFormat(it_a->second->format), a_first, vs_type)) {
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
+ __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
+ "Attribute type of `%s` at location %d does not match VS input type of `%s`",
+ string_VkFormat(it_a->second->format), a_first, vs_type)) {
pass = false;
}
}
@@ -1098,9 +1036,8 @@ validate_vi_against_vs_inputs(layer_data *my_data, VkDevice dev, VkPipelineVerte
return pass;
}
-static bool
-validate_fs_outputs_against_render_pass(layer_data *my_data, VkDevice dev, shader_module const *fs, spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass)
-{
+static bool validate_fs_outputs_against_render_pass(layer_data *my_data, VkDevice dev, shader_module const *fs,
+ spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
const std::vector<VkFormat> &color_formats = rp->subpassColorFormats[subpass];
std::map<location_t, interface_var> outputs;
bool pass = true;
@@ -1117,21 +1054,20 @@ validate_fs_outputs_against_render_pass(layer_data *my_data, VkDevice dev, shade
*/
while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) {
- if (attachment == color_formats.size() || ( it != outputs.end() && it->first.first < attachment)) {
- if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
- "FS writes to output location %d with no matching attachment", it->first.first)) {
+ if (attachment == color_formats.size() || (it != outputs.end() && it->first.first < attachment)) {
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
+ __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
+ "FS writes to output location %d with no matching attachment", it->first.first)) {
pass = false;
}
it++;
- }
- else if (it == outputs.end() || it->first.first > attachment) {
- if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/0, __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
- "Attachment %d not written by FS", attachment)) {
+ } else if (it == outputs.end() || it->first.first > attachment) {
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
+ __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", attachment)) {
pass = false;
}
attachment++;
- }
- else {
+ } else {
unsigned output_type = get_fundamental_type(fs, it->second.type_id);
unsigned att_type = get_format_type(color_formats[attachment]);
@@ -1139,9 +1075,10 @@ validate_fs_outputs_against_render_pass(layer_data *my_data, VkDevice dev, shade
if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
char fs_type[1024];
describe_type(fs_type, fs, it->second.type_id);
- if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/0, __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
- "Attachment %d of type `%s` does not match FS output type of `%s`",
- attachment, string_VkFormat(color_formats[attachment]), fs_type)) {
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
+ __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
+ "Attachment %d of type `%s` does not match FS output type of `%s`", attachment,
+ string_VkFormat(color_formats[attachment]), fs_type)) {
pass = false;
}
}
@@ -1155,7 +1092,6 @@ validate_fs_outputs_against_render_pass(layer_data *my_data, VkDevice dev, shade
return pass;
}
-
/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
* entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
* for example.
@@ -1165,9 +1101,7 @@ validate_fs_outputs_against_render_pass(layer_data *my_data, VkDevice dev, shade
* TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
* converting parts of this to be generated from the machine-readable spec instead.
*/
-static void
-mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids)
-{
+static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
std::unordered_set<uint32_t> worklist;
worklist.insert(entrypoint.word(2));
@@ -1185,7 +1119,7 @@ mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::u
/* try to add to the output set */
if (!ids.insert(id).second) {
- continue; /* if we already saw this id, we don't want to walk it again. */
+ continue; /* if we already saw this id, we don't want to walk it again. */
}
switch (insn.opcode()) {
@@ -1209,15 +1143,15 @@ mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::u
case spv::OpAtomicAnd:
case spv::OpAtomicOr:
case spv::OpAtomicXor:
- worklist.insert(insn.word(3)); /* ptr */
+ worklist.insert(insn.word(3)); /* ptr */
break;
case spv::OpStore:
case spv::OpAtomicStore:
- worklist.insert(insn.word(1)); /* ptr */
+ worklist.insert(insn.word(1)); /* ptr */
break;
case spv::OpAccessChain:
case spv::OpInBoundsAccessChain:
- worklist.insert(insn.word(3)); /* base ptr */
+ worklist.insert(insn.word(3)); /* base ptr */
break;
case spv::OpSampledImage:
case spv::OpImageSampleImplicitLod:
@@ -1252,20 +1186,20 @@ mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::u
case spv::OpImageSparseGather:
case spv::OpImageSparseDrefGather:
case spv::OpImageTexelPointer:
- worklist.insert(insn.word(3)); /* image or sampled image */
+ worklist.insert(insn.word(3)); /* image or sampled image */
break;
case spv::OpImageWrite:
- worklist.insert(insn.word(1)); /* image -- different operand order to above */
+ worklist.insert(insn.word(1)); /* image -- different operand order to above */
break;
case spv::OpFunctionCall:
for (auto i = 3; i < insn.len(); i++) {
- worklist.insert(insn.word(i)); /* fn itself, and all args */
+ worklist.insert(insn.word(i)); /* fn itself, and all args */
}
break;
case spv::OpExtInst:
for (auto i = 5; i < insn.len(); i++) {
- worklist.insert(insn.word(i)); /* operands to ext inst */
+ worklist.insert(insn.word(i)); /* operands to ext inst */
}
break;
}
@@ -1275,123 +1209,101 @@ mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::u
}
}
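The function above is a plain worklist reachability pass over SPIR-V ids. A self-contained sketch of the same idea over a generic id graph, with the map type and names invented for illustration (not taken from draw_state.cpp):

    #include <cstdint>
    #include <unordered_map>
    #include <unordered_set>
    #include <vector>

    // Illustrative worklist reachability over a generic id -> referenced-ids graph.
    static void collect_reachable_ids(const std::unordered_map<uint32_t, std::vector<uint32_t>> &refs, uint32_t entry,
                                      std::unordered_set<uint32_t> &ids) {
        std::unordered_set<uint32_t> worklist;
        worklist.insert(entry);
        while (!worklist.empty()) {
            uint32_t id = *worklist.begin();
            worklist.erase(worklist.begin());
            if (!ids.insert(id).second)
                continue; // already saw this id; don't walk it again
            auto it = refs.find(id);
            if (it == refs.end())
                continue; // nothing referenced from this id
            for (uint32_t ref : it->second)
                worklist.insert(ref); // enqueue everything this id references
        }
    }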
-
struct shader_stage_attributes {
- char const * const name;
+ char const *const name;
bool arrayed_input;
};
-
-static shader_stage_attributes
-shader_stage_attribs[] = {
- { "vertex shader", false },
- { "tessellation control shader", true },
- { "tessellation evaluation shader", false },
- { "geometry shader", true },
- { "fragment shader", false },
+static shader_stage_attributes shader_stage_attribs[] = {
+ {"vertex shader", false},
+ {"tessellation control shader", true},
+ {"tessellation evaluation shader", false},
+ {"geometry shader", true},
+ {"fragment shader", false},
};
-static bool validate_push_constant_block_against_pipeline(
- layer_data* my_data, VkDevice dev,
- std::vector<VkPushConstantRange> const* pushConstantRanges,
- shader_module const* src, spirv_inst_iter type,
- VkShaderStageFlagBits stage) {
- bool pass = true;
-
- /* strip off ptrs etc */
- type = get_struct_type(src, type, false);
- assert(type != src->end());
-
- /* validate directly off the offsets. this isn't quite correct for arrays
- * and matrices, but is a good first step. TODO: arrays, matrices, weird
- * sizes */
- for (auto insn : *src) {
- if (insn.opcode() == spv::OpMemberDecorate &&
- insn.word(1) == type.word(1)) {
-
- if (insn.word(3) == spv::DecorationOffset) {
- unsigned offset = insn.word(4);
- auto size = 4; /* bytes; TODO: calculate this based on the type */
-
- bool found_range = false;
- for (auto const& range : *pushConstantRanges) {
- if (range.offset <= offset &&
- range.offset + range.size >= offset + size) {
- found_range = true;
-
- if ((range.stageFlags & stage) == 0) {
- if (log_msg(
- my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- /* dev */ 0, __LINE__,
- SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE,
- "SC",
- "Push constant range covering variable starting at "
- "offset %u not accessible from %s stage",
- offset,
- shader_stage_attribs[get_shader_stage_id(stage)].name)) {
- pass = false;
- }
- }
+static bool validate_push_constant_block_against_pipeline(layer_data *my_data, VkDevice dev,
+ std::vector<VkPushConstantRange> const *pushConstantRanges,
+ shader_module const *src, spirv_inst_iter type,
+ VkShaderStageFlagBits stage) {
+ bool pass = true;
- break;
- }
- }
+ /* strip off ptrs etc */
+ type = get_struct_type(src, type, false);
+ assert(type != src->end());
- if (!found_range) {
- if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- /* dev */ 0, __LINE__,
- SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
- "Push constant range covering variable starting at "
- "offset %u not declared in layout",
- offset)) {
- pass = false;
- }
+ /* validate directly off the offsets. this isn't quite correct for arrays
+ * and matrices, but is a good first step. TODO: arrays, matrices, weird
+ * sizes */
+ for (auto insn : *src) {
+ if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
+
+ if (insn.word(3) == spv::DecorationOffset) {
+ unsigned offset = insn.word(4);
+ auto size = 4; /* bytes; TODO: calculate this based on the type */
+
+ bool found_range = false;
+ for (auto const &range : *pushConstantRanges) {
+ if (range.offset <= offset && range.offset + range.size >= offset + size) {
+ found_range = true;
+
+ if ((range.stageFlags & stage) == 0) {
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
+ /* dev */ 0, __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
+ "Push constant range covering variable starting at "
+ "offset %u not accessible from %s stage",
+ offset, shader_stage_attribs[get_shader_stage_id(stage)].name)) {
+ pass = false;
+ }
+ }
+
+ break;
+ }
+ }
+
+ if (!found_range) {
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
+ /* dev */ 0, __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
+ "Push constant range covering variable starting at "
+ "offset %u not declared in layout",
+ offset)) {
+ pass = false;
+ }
+ }
+ }
}
- }
}
- }
- return pass;
+ return pass;
}
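The core test inside the loop above is an interval-containment check against the declared VkPushConstantRange entries, plus a stage-visibility check. A minimal stand-alone sketch of that check, assuming only the Vulkan headers (the helper name is made up):

    #include <vector>
    #include <vulkan/vulkan.h>

    // A member at [offset, offset + size) must be fully covered by some declared
    // range, and that range must include the shader stage in question.
    static bool member_covered(const std::vector<VkPushConstantRange> &ranges, uint32_t offset, uint32_t size,
                               VkShaderStageFlagBits stage) {
        for (const auto &range : ranges) {
            if (range.offset <= offset && range.offset + range.size >= offset + size)
                return (range.stageFlags & stage) != 0; // covering range found; stage must be permitted
        }
        return false; // no declared range covers this member
    }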
-static bool validate_push_constant_usage(
- layer_data* my_data, VkDevice dev,
- std::vector<VkPushConstantRange> const* pushConstantRanges,
- shader_module const* src, std::unordered_set<uint32_t> accessible_ids,
- VkShaderStageFlagBits stage) {
- bool pass = true;
+static bool validate_push_constant_usage(layer_data *my_data, VkDevice dev,
+ std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
+ std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
+ bool pass = true;
- for (auto id : accessible_ids) {
- auto def_insn = src->get_def(id);
- if (def_insn.opcode() == spv::OpVariable &&
- def_insn.word(3) == spv::StorageClassPushConstant) {
- pass = validate_push_constant_block_against_pipeline(
- my_data, dev, pushConstantRanges, src,
- src->get_def(def_insn.word(1)), stage) &&
- pass;
+ for (auto id : accessible_ids) {
+ auto def_insn = src->get_def(id);
+ if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
+ pass = validate_push_constant_block_against_pipeline(my_data, dev, pushConstantRanges, src,
+ src->get_def(def_insn.word(1)), stage) &&
+ pass;
+ }
}
- }
- return pass;
+ return pass;
}
// For given pipelineLayout verify that the setLayout at slot.first
// has the requested binding at slot.second
-static bool
-has_descriptor_binding(layer_data* my_data,
- vector<VkDescriptorSetLayout>* pipelineLayout,
- descriptor_slot_t slot)
-{
+static bool has_descriptor_binding(layer_data *my_data, vector<VkDescriptorSetLayout> *pipelineLayout, descriptor_slot_t slot) {
if (!pipelineLayout)
return false;
if (slot.first >= pipelineLayout->size())
return false;
- const auto &bindingMap = my_data->descriptorSetLayoutMap[(*pipelineLayout)[slot.first]]
- ->bindingToIndexMap;
+ const auto &bindingMap = my_data->descriptorSetLayoutMap[(*pipelineLayout)[slot.first]]->bindingToIndexMap;
return (bindingMap.find(slot.second) != bindingMap.end());
}
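descriptor_slot_t above is used as a (set index, binding number) pair. A minimal sketch of the same lookup against per-set binding maps, with the container names invented purely for illustration:

    #include <cstdint>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    typedef std::pair<uint32_t, uint32_t> slot_t; // (set index, binding number), mirroring descriptor_slot_t

    // True if the binding exists in the set layout selected by slot.first.
    static bool slot_present(const std::vector<std::unordered_map<uint32_t, uint32_t>> &binding_maps, slot_t slot) {
        if (slot.first >= binding_maps.size())
            return false; // set index is beyond the pipeline layout
        const auto &bindingMap = binding_maps[slot.first];
        return bindingMap.find(slot.second) != bindingMap.end();
    }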
@@ -1406,11 +1318,10 @@ static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
// Track the last cmd buffer touched by this thread
// prototype
-static GLOBAL_CB_NODE* getCBNode(layer_data*, const VkCommandBuffer);
+static GLOBAL_CB_NODE *getCBNode(layer_data *, const VkCommandBuffer);
-static VkBool32 hasDrawCmd(GLOBAL_CB_NODE* pCB)
-{
- for (uint32_t i=0; i<NUM_DRAW_TYPES; i++) {
+static VkBool32 hasDrawCmd(GLOBAL_CB_NODE *pCB) {
+ for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
if (pCB->drawCount[i])
return VK_TRUE;
}
@@ -1418,23 +1329,22 @@ static VkBool32 hasDrawCmd(GLOBAL_CB_NODE* pCB)
}
// Check object status for selected flag state
-static VkBool32 validate_status(layer_data* my_data, GLOBAL_CB_NODE* pNode, CBStatusFlags enable_mask, CBStatusFlags status_mask, CBStatusFlags status_flag, VkFlags msg_flags, DRAW_STATE_ERROR error_code, const char* fail_msg)
-{
+static VkBool32 validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags enable_mask, CBStatusFlags status_mask,
+ CBStatusFlags status_flag, VkFlags msg_flags, DRAW_STATE_ERROR error_code, const char *fail_msg) {
// If non-zero enable mask is present, check it against status but if enable_mask
// is 0 then no enable required so we should always just check status
if ((!enable_mask) || (enable_mask & pNode->status)) {
if ((pNode->status & status_mask) != status_flag) {
// TODO : How to pass dispatchable objects as srcObject? Here src obj should be cmd buffer
- return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, error_code, "DS",
- "CB object %#" PRIxLEAST64 ": %s", (uint64_t)(pNode->commandBuffer), fail_msg);
+ return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, error_code,
+ "DS", "CB object %#" PRIxLEAST64 ": %s", (uint64_t)(pNode->commandBuffer), fail_msg);
}
}
return VK_FALSE;
}
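The enable/status masking above is easier to see in isolation: a zero enable_mask makes the check unconditional, otherwise the check only applies when the enable bit is already set on the command buffer. A small sketch under that reading (names are illustrative):

    #include <cstdint>

    // Returns true when the tracked state passes the gated check.
    static bool status_ok(uint32_t cb_status, uint32_t enable_mask, uint32_t status_mask, uint32_t status_flag) {
        if (enable_mask && !(enable_mask & cb_status))
            return true; // gated check does not apply to this command buffer
        return (cb_status & status_mask) == status_flag; // required state bits must all be set
    }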
// Retrieve pipeline node ptr for given pipeline object
-static PIPELINE_NODE* getPipeline(layer_data* my_data, const VkPipeline pipeline)
-{
+static PIPELINE_NODE *getPipeline(layer_data *my_data, const VkPipeline pipeline) {
if (my_data->pipelineMap.find(pipeline) == my_data->pipelineMap.end()) {
return NULL;
}
@@ -1442,10 +1352,9 @@ static PIPELINE_NODE* getPipeline(layer_data* my_data, const VkPipeline pipeline
}
// Return VK_TRUE if for a given PSO, the given state enum is dynamic, else return VK_FALSE
-static VkBool32 isDynamic(const PIPELINE_NODE* pPipeline, const VkDynamicState state)
-{
+static VkBool32 isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
- for (uint32_t i=0; i<pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
+ for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
return VK_TRUE;
}
@@ -1454,19 +1363,39 @@ static VkBool32 isDynamic(const PIPELINE_NODE* pPipeline, const VkDynamicState s
}
// Validate state stored as flags at time of draw call
-static VkBool32 validate_draw_state_flags(layer_data* my_data, GLOBAL_CB_NODE* pCB, VkBool32 indexedDraw) {
+static VkBool32 validate_draw_state_flags(layer_data *my_data, GLOBAL_CB_NODE *pCB, VkBool32 indexedDraw) {
VkBool32 result;
- result = validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_VIEWPORT_SET, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND, "Dynamic viewport state not set for this command buffer");
- result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_SCISSOR_SET, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND, "Dynamic scissor state not set for this command buffer");
- result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_LINE_WIDTH_SET, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
- result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_DEPTH_BIAS_SET, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
- result |= validate_status(my_data, pCB, CBSTATUS_COLOR_BLEND_WRITE_ENABLE, CBSTATUS_BLEND_SET, CBSTATUS_BLEND_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend object state not set for this command buffer");
- result |= validate_status(my_data, pCB, CBSTATUS_DEPTH_WRITE_ENABLE, CBSTATUS_DEPTH_BOUNDS_SET, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
- result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_READ_MASK_SET, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
- result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_WRITE_MASK_SET, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
- result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_REFERENCE_SET, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
+ result =
+ validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_VIEWPORT_SET, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ DRAWSTATE_VIEWPORT_NOT_BOUND, "Dynamic viewport state not set for this command buffer");
+ result |=
+ validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_SCISSOR_SET, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ DRAWSTATE_SCISSOR_NOT_BOUND, "Dynamic scissor state not set for this command buffer");
+ result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_LINE_WIDTH_SET, CBSTATUS_LINE_WIDTH_SET,
+ VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_LINE_WIDTH_NOT_BOUND,
+ "Dynamic line width state not set for this command buffer");
+ result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_DEPTH_BIAS_SET, CBSTATUS_DEPTH_BIAS_SET,
+ VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_DEPTH_BIAS_NOT_BOUND,
+ "Dynamic depth bias state not set for this command buffer");
+ result |= validate_status(my_data, pCB, CBSTATUS_COLOR_BLEND_WRITE_ENABLE, CBSTATUS_BLEND_SET, CBSTATUS_BLEND_SET,
+ VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_BLEND_NOT_BOUND,
+ "Dynamic blend object state not set for this command buffer");
+ result |= validate_status(my_data, pCB, CBSTATUS_DEPTH_WRITE_ENABLE, CBSTATUS_DEPTH_BOUNDS_SET, CBSTATUS_DEPTH_BOUNDS_SET,
+ VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND,
+ "Dynamic depth bounds state not set for this command buffer");
+ result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_READ_MASK_SET,
+ CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND,
+ "Dynamic stencil read mask state not set for this command buffer");
+ result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_WRITE_MASK_SET,
+ CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND,
+ "Dynamic stencil write mask state not set for this command buffer");
+ result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_REFERENCE_SET,
+ CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND,
+ "Dynamic stencil reference state not set for this command buffer");
if (indexedDraw)
- result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_INDEX_BUFFER_BOUND, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_INDEX_BUFFER_NOT_BOUND, "Index buffer object not bound to this command buffer when Indexed Draw attempted");
+ result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_INDEX_BUFFER_BOUND, CBSTATUS_INDEX_BUFFER_BOUND,
+ VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
+ "Index buffer object not bound to this command buffer when Indexed Draw attempted");
return result;
}
@@ -1475,9 +1404,10 @@ static VkBool32 validate_draw_state_flags(layer_data* my_data, GLOBAL_CB_NODE* p
 // If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
 // to make sure that format and sample counts match.
// If not, they are not compatible.
-static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference* pPrimary, const uint32_t primaryCount, const VkAttachmentDescription* pPrimaryAttachments,
- const VkAttachmentReference* pSecondary, const uint32_t secondaryCount, const VkAttachmentDescription* pSecondaryAttachments)
-{
+static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
+ const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
+ const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
+ const VkAttachmentDescription *pSecondaryAttachments) {
if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
if (VK_ATTACHMENT_UNUSED != pSecondary[index].attachment)
return false;
@@ -1485,8 +1415,10 @@ static bool attachment_references_compatible(const uint32_t index, const VkAttac
if (VK_ATTACHMENT_UNUSED != pPrimary[index].attachment)
return false;
} else { // format and sample count must match
- if ((pPrimaryAttachments[pPrimary[index].attachment].format == pSecondaryAttachments[pSecondary[index].attachment].format) &&
- (pPrimaryAttachments[pPrimary[index].attachment].samples == pSecondaryAttachments[pSecondary[index].attachment].samples))
+ if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
+ pSecondaryAttachments[pSecondary[index].attachment].format) &&
+ (pPrimaryAttachments[pPrimary[index].attachment].samples ==
+ pSecondaryAttachments[pSecondary[index].attachment].samples))
return true;
}
// Format and sample counts didn't match
@@ -1494,8 +1426,8 @@ static bool attachment_references_compatible(const uint32_t index, const VkAttac
}
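A condensed reading of the rule the surrounding comments describe: an index past either reference array is treated as VK_ATTACHMENT_UNUSED on that side, and otherwise the referenced attachments must agree on format and sample count. This is a sketch of the intent, not a drop-in copy of the function above:

    #include <vulkan/vulkan.h>

    static bool refs_compatible(uint32_t index, const VkAttachmentReference *pPrimary, uint32_t primaryCount,
                                const VkAttachmentDescription *pPrimaryAttachments, const VkAttachmentReference *pSecondary,
                                uint32_t secondaryCount, const VkAttachmentDescription *pSecondaryAttachments) {
        if (index >= primaryCount) // primary side treated as unused at this index
            return pSecondary[index].attachment == VK_ATTACHMENT_UNUSED;
        if (index >= secondaryCount) // secondary side treated as unused at this index
            return pPrimary[index].attachment == VK_ATTACHMENT_UNUSED;
        const VkAttachmentDescription &a = pPrimaryAttachments[pPrimary[index].attachment];
        const VkAttachmentDescription &b = pSecondaryAttachments[pSecondary[index].attachment];
        return a.format == b.format && a.samples == b.samples; // format and sample count must match
    }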
 // For the given primary and secondary RenderPass objects, verify that they're compatible
-static bool verify_renderpass_compatibility(layer_data* my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP, string& errorMsg)
-{
+static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
+ string &errorMsg) {
stringstream errorStr;
if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) {
errorStr << "invalid VkRenderPass (" << primaryRP << ")";
@@ -1510,10 +1442,11 @@ static bool verify_renderpass_compatibility(layer_data* my_data, const VkRenderP
if (primaryRP == secondaryRP) {
return true;
}
- const VkRenderPassCreateInfo* primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo;
- const VkRenderPassCreateInfo* secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo;
+ const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo;
+ const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo;
if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
- errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
+ errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
+ << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
errorMsg = errorStr.str();
return false;
}
@@ -1524,19 +1457,25 @@ static bool verify_renderpass_compatibility(layer_data* my_data, const VkRenderP
uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
- if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount, primaryRPCI->pAttachments,
- secondaryRPCI->pSubpasses[spIndex].pColorAttachments, secondaryColorCount, secondaryRPCI->pAttachments)) {
+ if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
+ primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
+ secondaryColorCount, secondaryRPCI->pAttachments)) {
errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
errorMsg = errorStr.str();
return false;
- } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments, primaryColorCount, primaryRPCI->pAttachments,
- secondaryRPCI->pSubpasses[spIndex].pResolveAttachments, secondaryColorCount, secondaryRPCI->pAttachments)) {
+ } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
+ primaryColorCount, primaryRPCI->pAttachments,
+ secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
+ secondaryColorCount, secondaryRPCI->pAttachments)) {
errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
errorMsg = errorStr.str();
return false;
- } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, primaryColorCount, primaryRPCI->pAttachments,
- secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, secondaryColorCount, secondaryRPCI->pAttachments)) {
- errorStr << "depth/stencil attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
+ } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
+ primaryColorCount, primaryRPCI->pAttachments,
+ secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
+ secondaryColorCount, secondaryRPCI->pAttachments)) {
+ errorStr << "depth/stencil attachments at index " << cIdx << " of subpass index " << spIndex
+ << " are not compatible.";
errorMsg = errorStr.str();
return false;
}
@@ -1545,8 +1484,9 @@ static bool verify_renderpass_compatibility(layer_data* my_data, const VkRenderP
uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
for (uint32_t i = 0; i < inputMax; ++i) {
- if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryColorCount, primaryRPCI->pAttachments,
- secondaryRPCI->pSubpasses[spIndex].pInputAttachments, secondaryColorCount, secondaryRPCI->pAttachments)) {
+ if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryColorCount,
+ primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
+ secondaryColorCount, secondaryRPCI->pAttachments)) {
errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
errorMsg = errorStr.str();
return false;
@@ -1557,8 +1497,8 @@ static bool verify_renderpass_compatibility(layer_data* my_data, const VkRenderP
}
 // For a given SET_NODE, verify that its Set is compatible w/ the setLayout corresponding to pipelineLayout[layoutIndex]
-static bool verify_set_layout_compatibility(layer_data* my_data, const SET_NODE* pSet, const VkPipelineLayout layout, const uint32_t layoutIndex, string& errorMsg)
-{
+static bool verify_set_layout_compatibility(layer_data *my_data, const SET_NODE *pSet, const VkPipelineLayout layout,
+ const uint32_t layoutIndex, string &errorMsg) {
stringstream errorStr;
if (my_data->pipelineLayoutMap.find(layout) == my_data->pipelineLayoutMap.end()) {
errorStr << "invalid VkPipelineLayout (" << layout << ")";
@@ -1567,33 +1507,41 @@ static bool verify_set_layout_compatibility(layer_data* my_data, const SET_NODE*
}
PIPELINE_LAYOUT_NODE pl = my_data->pipelineLayoutMap[layout];
if (layoutIndex >= pl.descriptorSetLayouts.size()) {
- errorStr << "VkPipelineLayout (" << layout << ") only contains " << pl.descriptorSetLayouts.size() << " setLayouts corresponding to sets 0-" << pl.descriptorSetLayouts.size()-1 << ", but you're attempting to bind set to index " << layoutIndex;
+ errorStr << "VkPipelineLayout (" << layout << ") only contains " << pl.descriptorSetLayouts.size()
+ << " setLayouts corresponding to sets 0-" << pl.descriptorSetLayouts.size() - 1
+ << ", but you're attempting to bind set to index " << layoutIndex;
errorMsg = errorStr.str();
return false;
}
// Get the specific setLayout from PipelineLayout that overlaps this set
- LAYOUT_NODE* pLayoutNode = my_data->descriptorSetLayoutMap[pl.descriptorSetLayouts[layoutIndex]];
+ LAYOUT_NODE *pLayoutNode = my_data->descriptorSetLayoutMap[pl.descriptorSetLayouts[layoutIndex]];
if (pLayoutNode->layout == pSet->pLayout->layout) { // trivial pass case
return true;
}
size_t descriptorCount = pLayoutNode->descriptorTypes.size();
if (descriptorCount != pSet->pLayout->descriptorTypes.size()) {
- errorStr << "setLayout " << layoutIndex << " from pipelineLayout " << layout << " has " << descriptorCount << " descriptors, but corresponding set being bound has " << pSet->pLayout->descriptorTypes.size() << " descriptors.";
+ errorStr << "setLayout " << layoutIndex << " from pipelineLayout " << layout << " has " << descriptorCount
+ << " descriptors, but corresponding set being bound has " << pSet->pLayout->descriptorTypes.size()
+ << " descriptors.";
errorMsg = errorStr.str();
return false; // trivial fail case
}
// Now need to check set against corresponding pipelineLayout to verify compatibility
- for (size_t i=0; i<descriptorCount; ++i) {
+ for (size_t i = 0; i < descriptorCount; ++i) {
// Need to verify that layouts are identically defined
// TODO : Is below sufficient? Making sure that types & stageFlags match per descriptor
// do we also need to check immutable samplers?
if (pLayoutNode->descriptorTypes[i] != pSet->pLayout->descriptorTypes[i]) {
- errorStr << "descriptor " << i << " for descriptorSet being bound is type '" << string_VkDescriptorType(pSet->pLayout->descriptorTypes[i]) << "' but corresponding descriptor from pipelineLayout is type '" << string_VkDescriptorType(pLayoutNode->descriptorTypes[i]) << "'";
+ errorStr << "descriptor " << i << " for descriptorSet being bound is type '"
+ << string_VkDescriptorType(pSet->pLayout->descriptorTypes[i])
+ << "' but corresponding descriptor from pipelineLayout is type '"
+ << string_VkDescriptorType(pLayoutNode->descriptorTypes[i]) << "'";
errorMsg = errorStr.str();
return false;
}
if (pLayoutNode->stageFlags[i] != pSet->pLayout->stageFlags[i]) {
- errorStr << "stageFlags " << i << " for descriptorSet being bound is " << pSet->pLayout->stageFlags[i] << "' but corresponding descriptor from pipelineLayout has stageFlags " << pLayoutNode->stageFlags[i];
+ errorStr << "stageFlags " << i << " for descriptorSet being bound is " << pSet->pLayout->stageFlags[i]
+ << "' but corresponding descriptor from pipelineLayout has stageFlags " << pLayoutNode->stageFlags[i];
errorMsg = errorStr.str();
return false;
}
@@ -1601,11 +1549,8 @@ static bool verify_set_layout_compatibility(layer_data* my_data, const SET_NODE*
return true;
}
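The layout-compatibility loop above reduces to an element-wise comparison: same descriptor count, and matching type and stageFlags at every index. A minimal sketch with the per-binding data pulled into parallel vectors (an assumption made only for illustration):

    #include <cstddef>
    #include <vector>
    #include <vulkan/vulkan.h>

    static bool layouts_match(const std::vector<VkDescriptorType> &a_types, const std::vector<VkShaderStageFlags> &a_stages,
                              const std::vector<VkDescriptorType> &b_types, const std::vector<VkShaderStageFlags> &b_stages) {
        if (a_types.size() != b_types.size())
            return false; // descriptor counts differ, trivially incompatible
        for (size_t i = 0; i < a_types.size(); ++i) {
            if (a_types[i] != b_types[i] || a_stages[i] != b_stages[i])
                return false; // type or stage visibility mismatch at descriptor i
        }
        return true;
    }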
-
// Validate that data for each specialization entry is fully contained within the buffer.
-static VkBool32
-validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info)
-{
+static VkBool32 validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info) {
VkBool32 pass = VK_TRUE;
VkSpecializationInfo const *spec = info->pSpecializationInfo;
@@ -1614,14 +1559,12 @@ validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreate
for (auto i = 0u; i < spec->mapEntryCount; i++) {
if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- /*dev*/0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
- "Specialization entry %u (for constant id %u) references memory outside provided "
- "specialization data (bytes %u.."
- PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER " bytes provided)",
- i, spec->pMapEntries[i].constantID,
- spec->pMapEntries[i].offset,
- spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1,
- spec->dataSize)) {
+ /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
+ "Specialization entry %u (for constant id %u) references memory outside provided "
+ "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
+ " bytes provided)",
+ i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
+ spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
pass = VK_FALSE;
}
@@ -1632,12 +1575,9 @@ validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreate
return pass;
}
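The bounds rule enforced above, shown in isolation: every specialization map entry must end at or before dataSize. A small sketch using only the Vulkan structs (the helper name is made up):

    #include <vulkan/vulkan.h>

    // True when every map entry lies entirely inside the provided data blob.
    static bool spec_entries_in_bounds(const VkSpecializationInfo *spec) {
        if (!spec)
            return true; // no specialization info supplied, nothing to check
        for (uint32_t i = 0; i < spec->mapEntryCount; i++) {
            const VkSpecializationMapEntry &e = spec->pMapEntries[i];
            if (e.offset + e.size > spec->dataSize)
                return false; // entry reads past the end of pData
        }
        return true;
    }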
-
// Validate that the shaders used by the given pipeline
// As a side effect this function also records the sets that are actually used by the pipeline
-static VkBool32
-validate_pipeline_shaders(layer_data *my_data, VkDevice dev, PIPELINE_NODE* pPipeline)
-{
+static VkBool32 validate_pipeline_shaders(layer_data *my_data, VkDevice dev, PIPELINE_NODE *pPipeline) {
VkGraphicsPipelineCreateInfo const *pCreateInfo = &pPipeline->graphicsPipelineCI;
/* We seem to allow pipeline stages to be specified out of order, so collect and identify them
* before trying to do anything more: */
@@ -1656,14 +1596,13 @@ validate_pipeline_shaders(layer_data *my_data, VkDevice dev, PIPELINE_NODE* pPip
VkPipelineShaderStageCreateInfo const *pStage = &pCreateInfo->pStages[i];
if (pStage->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO) {
- if ((pStage->stage & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT
- | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) == 0) {
- if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/0, __LINE__, SHADER_CHECKER_UNKNOWN_STAGE, "SC",
- "Unknown shader stage %d", pStage->stage)) {
+ if ((pStage->stage & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT |
+ VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) == 0) {
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
+ /*dev*/ 0, __LINE__, SHADER_CHECKER_UNKNOWN_STAGE, "SC", "Unknown shader stage %d", pStage->stage)) {
pass = VK_FALSE;
}
- }
- else {
+ } else {
pass = validate_specialization_offsets(my_data, pStage) && pass;
auto stage_id = get_shader_stage_id(pStage->stage);
@@ -1673,8 +1612,9 @@ validate_pipeline_shaders(layer_data *my_data, VkDevice dev, PIPELINE_NODE* pPip
/* find the entrypoint */
entrypoints[stage_id] = find_entrypoint(module, pStage->pName, pStage->stage);
if (entrypoints[stage_id] == module->end()) {
- if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/0, __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
- "No entrypoint found named `%s` for stages %u", pStage->pName, pStage->stage)) {
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
+ /*dev*/ 0, __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
+ "No entrypoint found named `%s` for stages %u", pStage->pName, pStage->stage)) {
pass = VK_FALSE;
}
}
@@ -1685,12 +1625,11 @@ validate_pipeline_shaders(layer_data *my_data, VkDevice dev, PIPELINE_NODE* pPip
/* validate descriptor set layout against what the entrypoint actually uses */
std::map<descriptor_slot_t, interface_var> descriptor_uses;
- collect_interface_by_descriptor_slot(my_data, dev, module,
- accessible_ids,
- descriptor_uses);
+ collect_interface_by_descriptor_slot(my_data, dev, module, accessible_ids, descriptor_uses);
- auto layouts = pCreateInfo->layout != VK_NULL_HANDLE ?
- &(my_data->pipelineLayoutMap[pCreateInfo->layout].descriptorSetLayouts) : nullptr;
+ auto layouts = pCreateInfo->layout != VK_NULL_HANDLE
+ ? &(my_data->pipelineLayoutMap[pCreateInfo->layout].descriptorSetLayouts)
+ : nullptr;
for (auto it = descriptor_uses.begin(); it != descriptor_uses.end(); it++) {
// As a side-effect of this function, capture which sets are used by the pipeline
@@ -1702,22 +1641,20 @@ validate_pipeline_shaders(layer_data *my_data, VkDevice dev, PIPELINE_NODE* pPip
if (!found) {
char type_name[1024];
describe_type(type_name, module, it->second.type_id);
- if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/0, __LINE__,
- SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
- "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
- it->first.first, it->first.second, type_name)) {
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
+ /*dev*/ 0, __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
+ "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
+ it->first.first, it->first.second, type_name)) {
pass = VK_FALSE;
}
}
}
/* validate push constant usage */
- pass = validate_push_constant_usage(
- my_data, dev,
- &my_data->pipelineLayoutMap[pCreateInfo->layout]
- .pushConstantRanges,
- module, accessible_ids, pStage->stage) &&
- pass;
+ pass =
+ validate_push_constant_usage(my_data, dev, &my_data->pipelineLayoutMap[pCreateInfo->layout].pushConstantRanges,
+ module, accessible_ids, pStage->stage) &&
+ pass;
}
}
}
@@ -1747,28 +1684,27 @@ validate_pipeline_shaders(layer_data *my_data, VkDevice dev, PIPELINE_NODE* pPip
for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
assert(shaders[producer]);
if (shaders[consumer]) {
- pass = validate_interface_between_stages(my_data, dev,
- shaders[producer], entrypoints[producer],
- shader_stage_attribs[producer].name,
- shaders[consumer], entrypoints[consumer],
+ pass = validate_interface_between_stages(my_data, dev, shaders[producer], entrypoints[producer],
+ shader_stage_attribs[producer].name, shaders[consumer], entrypoints[consumer],
shader_stage_attribs[consumer].name,
- shader_stage_attribs[consumer].arrayed_input) && pass;
+ shader_stage_attribs[consumer].arrayed_input) &&
+ pass;
producer = consumer;
}
}
if (shaders[fragment_stage] && rp) {
- pass = validate_fs_outputs_against_render_pass(my_data, dev, shaders[fragment_stage],
- entrypoints[fragment_stage], rp, pCreateInfo->subpass) && pass;
+ pass = validate_fs_outputs_against_render_pass(my_data, dev, shaders[fragment_stage], entrypoints[fragment_stage], rp,
+ pCreateInfo->subpass) &&
+ pass;
}
return pass;
}
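get_shader_stage_id is not shown in this hunk; one plausible shape for it, consistent with the ordering of shader_stage_attribs above, is a simple stage-bit-to-index mapping. This is an assumption for illustration, not the layer's actual helper:

    #include <vulkan/vulkan.h>

    // Hypothetical mapping of graphics stage bits to compact indices, in the same
    // order as shader_stage_attribs (vertex, TC, TE, geometry, fragment).
    static int stage_index(VkShaderStageFlagBits stage) {
        switch (stage) {
        case VK_SHADER_STAGE_VERTEX_BIT:
            return 0;
        case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
            return 1;
        case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
            return 2;
        case VK_SHADER_STAGE_GEOMETRY_BIT:
            return 3;
        case VK_SHADER_STAGE_FRAGMENT_BIT:
            return 4;
        default:
            return -1; // not one of the graphics stages handled here
        }
    }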
// Return Set node ptr for specified set or else NULL
-static SET_NODE* getSetNode(layer_data* my_data, const VkDescriptorSet set)
-{
+static SET_NODE *getSetNode(layer_data *my_data, const VkDescriptorSet set) {
if (my_data->setMap.find(set) == my_data->setMap.end()) {
return NULL;
}
@@ -1778,11 +1714,10 @@ static SET_NODE* getSetNode(layer_data* my_data, const VkDescriptorSet set)
// that any dynamic descriptor in that set has a valid dynamic offset bound.
 // To be valid, the dynamic offset combined with the offset and range from its
// descriptor update must not overflow the size of its buffer being updated
-static VkBool32 validate_dynamic_offsets(layer_data* my_data, const GLOBAL_CB_NODE* pCB, const vector<SET_NODE*> activeSetNodes)
-{
+static VkBool32 validate_dynamic_offsets(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const vector<SET_NODE *> activeSetNodes) {
VkBool32 result = VK_FALSE;
- VkWriteDescriptorSet* pWDS = NULL;
+ VkWriteDescriptorSet *pWDS = NULL;
uint32_t dynOffsetIndex = 0;
VkDeviceSize bufferSize = 0;
for (auto set_node : activeSetNodes) {
@@ -1792,83 +1727,51 @@ static VkBool32 validate_dynamic_offsets(layer_data* my_data, const GLOBAL_CB_NO
switch (set_node->ppDescriptors[i]->sType) {
case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
pWDS = (VkWriteDescriptorSet *)set_node->ppDescriptors[i];
- if ((pWDS->descriptorType ==
- VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
- (pWDS->descriptorType ==
- VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
+ if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
+ (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
- bufferSize =
- my_data->bufferMap[pWDS->pBufferInfo[j].buffer]
- .create_info->size;
+ bufferSize = my_data->bufferMap[pWDS->pBufferInfo[j].buffer].create_info->size;
if (pWDS->pBufferInfo[j].range == VK_WHOLE_SIZE) {
- if ((pCB->dynamicOffsets[dynOffsetIndex] +
- pWDS->pBufferInfo[j].offset) > bufferSize) {
- result |= log_msg(
- my_data->report_data,
- VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
- (uint64_t)set_node->set, __LINE__,
- DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
- "VkDescriptorSet (%#" PRIxLEAST64
- ") bound as set #%u has range of "
- "VK_WHOLE_SIZE but dynamic offset %u "
- "combined with offet %#" PRIxLEAST64
- " oversteps its buffer (%#" PRIxLEAST64
- ") which has a size of %#" PRIxLEAST64 ".",
- (uint64_t)set_node->set, i,
- pCB->dynamicOffsets[dynOffsetIndex],
- pWDS->pBufferInfo[j].offset,
- (uint64_t)pWDS->pBufferInfo[j].buffer,
- bufferSize);
+ if ((pCB->dynamicOffsets[dynOffsetIndex] + pWDS->pBufferInfo[j].offset) > bufferSize) {
+ result |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)set_node->set, __LINE__,
+ DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
+ "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has range of "
+ "VK_WHOLE_SIZE but dynamic offset %u "
+                                        "combined with offset %#" PRIxLEAST64 " oversteps its buffer (%#" PRIxLEAST64
+ ") which has a size of %#" PRIxLEAST64 ".",
+ (uint64_t)set_node->set, i, pCB->dynamicOffsets[dynOffsetIndex],
+ pWDS->pBufferInfo[j].offset, (uint64_t)pWDS->pBufferInfo[j].buffer, bufferSize);
}
- } else if ((pCB->dynamicOffsets[dynOffsetIndex] +
- pWDS->pBufferInfo[j].offset +
+ } else if ((pCB->dynamicOffsets[dynOffsetIndex] + pWDS->pBufferInfo[j].offset +
pWDS->pBufferInfo[j].range) > bufferSize) {
- result |= log_msg(
- my_data->report_data,
- VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
- (uint64_t)set_node->set, __LINE__,
- DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
- "VkDescriptorSet (%#" PRIxLEAST64
- ") bound as set #%u has dynamic offset %u. "
- "Combined with offet %#" PRIxLEAST64
- " and range %#" PRIxLEAST64
- " from its update, this oversteps its buffer "
- "(%#" PRIxLEAST64
- ") which has a size of %#" PRIxLEAST64 ".",
- (uint64_t)set_node->set, i,
- pCB->dynamicOffsets[dynOffsetIndex],
- pWDS->pBufferInfo[j].offset,
- pWDS->pBufferInfo[j].range,
- (uint64_t)pWDS->pBufferInfo[j].buffer,
- bufferSize);
- } else if ((pCB->dynamicOffsets[dynOffsetIndex] +
- pWDS->pBufferInfo[j].offset +
- pWDS->pBufferInfo[j].range) >
- bufferSize) {
- result |= log_msg(
- my_data->report_data,
- VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
- (uint64_t)set_node->set, __LINE__,
- DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
- "VkDescriptorSet (%#" PRIxLEAST64
- ") bound as set #%u has dynamic offset %u. "
- "Combined with offet %#" PRIxLEAST64
- " and range %#" PRIxLEAST64
- " from its update, this oversteps its buffer "
- "(%#" PRIxLEAST64
- ") which has a size of %#" PRIxLEAST64 ".",
- (uint64_t)set_node->set, i,
- pCB->dynamicOffsets[dynOffsetIndex],
- pWDS->pBufferInfo[j].offset,
- pWDS->pBufferInfo[j].range,
- (uint64_t)pWDS->pBufferInfo[j].buffer,
- bufferSize);
+ result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)set_node->set, __LINE__,
+ DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
+ "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has dynamic offset %u. "
+                                          "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64
+ " from its update, this oversteps its buffer "
+ "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".",
+ (uint64_t)set_node->set, i, pCB->dynamicOffsets[dynOffsetIndex],
+ pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range,
+ (uint64_t)pWDS->pBufferInfo[j].buffer, bufferSize);
+ } else if ((pCB->dynamicOffsets[dynOffsetIndex] + pWDS->pBufferInfo[j].offset +
+ pWDS->pBufferInfo[j].range) > bufferSize) {
+ result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)set_node->set, __LINE__,
+ DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
+ "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has dynamic offset %u. "
+                                          "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64
+ " from its update, this oversteps its buffer "
+ "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".",
+ (uint64_t)set_node->set, i, pCB->dynamicOffsets[dynOffsetIndex],
+ pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range,
+ (uint64_t)pWDS->pBufferInfo[j].buffer, bufferSize);
}
dynOffsetIndex++;
- i += j; // Advance i to end of this set of descriptors (++i at end of for loop will move 1 index past last of these descriptors)
+ i += j; // Advance i to end of this set of descriptors (++i at end of for loop will move 1 index past
+ // last of these descriptors)
}
}
break;
@@ -1883,10 +1786,10 @@ static VkBool32 validate_dynamic_offsets(layer_data* my_data, const GLOBAL_CB_NO
}
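The overflow rule applied in each branch above, stripped of the logging: the dynamic offset plus the update's offset (and range, unless VK_WHOLE_SIZE) must stay within the buffer size. A minimal sketch (helper name invented):

    #include <vulkan/vulkan.h>

    // True when the dynamic offset keeps the descriptor's update inside the buffer.
    static bool dynamic_offset_fits(VkDeviceSize bufferSize, VkDeviceSize updateOffset, VkDeviceSize updateRange,
                                    uint32_t dynamicOffset) {
        if (updateRange == VK_WHOLE_SIZE)
            return dynamicOffset + updateOffset <= bufferSize; // range runs to the end of the buffer
        return dynamicOffset + updateOffset + updateRange <= bufferSize;
    }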
// Validate overall state at the time of a draw call
-static VkBool32 validate_draw_state(layer_data* my_data, GLOBAL_CB_NODE* pCB, VkBool32 indexedDraw) {
+static VkBool32 validate_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, VkBool32 indexedDraw) {
// First check flag states
VkBool32 result = validate_draw_state_flags(my_data, pCB, indexedDraw);
- PIPELINE_NODE* pPipe = getPipeline(my_data, pCB->lastBoundPipeline);
+ PIPELINE_NODE *pPipe = getPipeline(my_data, pCB->lastBoundPipeline);
// Now complete other state checks
// TODO : Currently only performing next check if *something* was bound (non-zero last bound)
// There is probably a better way to gate when this check happens, and to know if something *should* have been bound
@@ -1895,27 +1798,37 @@ static VkBool32 validate_draw_state(layer_data* my_data, GLOBAL_CB_NODE* pCB, Vk
if (pCB->lastBoundPipelineLayout) {
string errorString;
// Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
- vector<SET_NODE*> activeSetNodes;
+ vector<SET_NODE *> activeSetNodes;
for (auto setIndex : pPipe->active_sets) {
// If valid set is not bound throw an error
if ((pCB->boundDescriptorSets.size() <= setIndex) || (!pCB->boundDescriptorSets[setIndex])) {
- result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
- "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline, setIndex);
- } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[pCB->boundDescriptorSets[setIndex]], pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
+ result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
+ "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.",
+ (uint64_t)pPipe->pipeline, setIndex);
+ } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[pCB->boundDescriptorSets[setIndex]],
+ pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
// Set is bound but not compatible w/ overlapping pipelineLayout from PSO
VkDescriptorSet setHandle = my_data->setMap[pCB->boundDescriptorSets[setIndex]]->set;
- result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
- "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s",
- (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
+ result |= log_msg(
+ my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
+ "VkDescriptorSet (%#" PRIxLEAST64
+ ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s",
+ (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
} else { // Valid set is bound and layout compatible, validate that it's updated and verify any dynamic offsets
// Pull the set node
- SET_NODE* pSet = my_data->setMap[pCB->boundDescriptorSets[setIndex]];
+ SET_NODE *pSet = my_data->setMap[pCB->boundDescriptorSets[setIndex]];
// Save vector of all active sets to verify dynamicOffsets below
activeSetNodes.push_back(pSet);
// Make sure set has been updated
if (!pSet->pUpdateStructs) {
- result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t) pSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
- "DS %#" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so this will result in undefined behavior.", (uint64_t) pSet->set);
+ result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pSet->set, __LINE__,
+ DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
+ "DS %#" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
+ "this will result in undefined behavior.",
+ (uint64_t)pSet->set);
}
}
}
@@ -1927,37 +1840,47 @@ static VkBool32 validate_draw_state(layer_data* my_data, GLOBAL_CB_NODE* pCB, Vk
if (pPipe->vtxBindingCount > 0) {
VkPipelineVertexInputStateCreateInfo *vtxInCI = &pPipe->vertexInputCI;
for (uint32_t i = 0; i < vtxInCI->vertexBindingDescriptionCount; i++) {
- if ((pCB->currentDrawData.buffers.size() < (i+1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
- result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
- "The Pipeline State Object (%#" PRIxLEAST64 ") expects that this Command Buffer's vertex binding Index %d should be set via vkCmdBindVertexBuffers.",
+ if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
+ result |= log_msg(
+ my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
+ "The Pipeline State Object (%#" PRIxLEAST64
+ ") expects that this Command Buffer's vertex binding Index %d should be set via vkCmdBindVertexBuffers.",
(uint64_t)pCB->lastBoundPipeline, i);
-
}
}
} else {
if (!pCB->currentDrawData.buffers.empty()) {
- result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS,
- "DS", "Vertex buffers are bound to command buffer (%#" PRIxLEAST64 ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").",
- (uint64_t)pCB->commandBuffer, (uint64_t)pCB->lastBoundPipeline);
+ result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
+ 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
+ "Vertex buffers are bound to command buffer (%#" PRIxLEAST64
+ ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").",
+ (uint64_t)pCB->commandBuffer, (uint64_t)pCB->lastBoundPipeline);
}
}
// If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
// Skip check if rasterization is disabled or there is no viewport.
if ((!pPipe->graphicsPipelineCI.pRasterizationState ||
!pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) &&
- pPipe->graphicsPipelineCI.pViewportState) {
+ pPipe->graphicsPipelineCI.pViewportState) {
VkBool32 dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT);
VkBool32 dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR);
if (dynViewport) {
if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) {
- result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
- "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER ", but PSO viewportCount is %u. These counts must match.", pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
+ result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
+ "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
+ ", but PSO viewportCount is %u. These counts must match.",
+ pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
}
}
if (dynScissor) {
if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) {
- result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
- "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER ", but PSO scissorCount is %u. These counts must match.", pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
+ result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
+ "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
+ ", but PSO scissorCount is %u. These counts must match.",
+ pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
}
}
}
@@ -1966,8 +1889,8 @@ static VkBool32 validate_draw_state(layer_data* my_data, GLOBAL_CB_NODE* pCB, Vk
}
// Verify that create state for a pipeline is valid
-static VkBool32 verifyPipelineCreateState(layer_data* my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines, int pipelineIndex)
-{
+static VkBool32 verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
+ int pipelineIndex) {
VkBool32 skipCall = VK_FALSE;
PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
@@ -1978,28 +1901,26 @@ static VkBool32 verifyPipelineCreateState(layer_data* my_data, const VkDevice de
if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
PIPELINE_NODE *pBasePipeline = nullptr;
if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
- (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
+ (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
"Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
- }
- else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
+ } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
- "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
- }
- else {
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
+ "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
+ } else {
pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
}
- }
- else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
+ } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
}
if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
"Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
}
}
@@ -2007,7 +1928,7 @@ static VkBool32 verifyPipelineCreateState(layer_data* my_data, const VkDevice de
if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
if (!my_data->physDevProperties.features.independentBlend) {
VkPipelineColorBlendAttachmentState *pAttachments = pPipeline->pAttachments;
- for (uint32_t i = 1 ; i < pPipeline->attachmentCount ; i++) {
+ for (uint32_t i = 1; i < pPipeline->attachmentCount; i++) {
if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
(pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
(pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
@@ -2016,42 +1937,40 @@ static VkBool32 verifyPipelineCreateState(layer_data* my_data, const VkDevice de
(pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
(pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
(pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INDEPENDENT_BLEND, "DS",
- "Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of pAttachments must be identical");
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
+ "enabled, all elements of pAttachments must be identical");
}
}
}
if (!my_data->physDevProperties.features.logicOp &&
(pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_DISABLED_LOGIC_OP, "DS",
- "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_DISABLED_LOGIC_OP, "DS",
+ "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
}
if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
- (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_LOGIC_OP, "DS",
- "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
+ (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_LOGIC_OP, "DS",
+ "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
}
}
// Ensure the subpass index is valid. If not, then validate_pipeline_shaders
// produces nonsense errors that confuse users. Other layers should already
// emit errors for renderpass being invalid.
- auto rp_data =
- my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass);
+ auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass);
if (rp_data != my_data->renderPassMap.end() &&
- pPipeline->graphicsPipelineCI.subpass >=
- rp_data->second->pCreateInfo->subpassCount) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
- "Invalid Pipeline CreateInfo State: Subpass index %u "
- "is out of range for this renderpass (0..%u)",
- pPipeline->graphicsPipelineCI.subpass,
- rp_data->second->pCreateInfo->subpassCount - 1);
+ pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) {
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
+ "is out of range for this renderpass (0..%u)",
+ pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1);
}
if (!validate_pipeline_shaders(my_data, device, pPipeline)) {
@@ -2059,66 +1978,91 @@ static VkBool32 verifyPipelineCreateState(layer_data* my_data, const VkDevice de
}
// VS is required
if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
- "Invalid Pipeline CreateInfo State: Vtx Shader required");
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
}
// Either both or neither TC/TE shaders should be defined
if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
- ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0) ) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
- "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
+ ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
+ "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
}
// Compute shaders should be specified independent of Gfx shaders
if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
- (pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
- VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT | VK_SHADER_STAGE_GEOMETRY_BIT |
- VK_SHADER_STAGE_FRAGMENT_BIT))) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
- "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
+ (pPipeline->active_shaders &
+ (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
+ VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
+ "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
}
// VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
// Mismatching primitive topology and tessellation fails graphics pipeline creation.
if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
(pPipeline->iaStateCI.topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
- "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for tessellation pipelines");
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
+ "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
+ "topology for tessellation pipelines");
}
if (pPipeline->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
- "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines");
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
+ "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
+ "topology is only valid for tessellation pipelines");
}
if (!pPipeline->tessStateCI.patchControlPoints || (pPipeline->tessStateCI.patchControlPoints > 32)) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
- "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology used with patchControlPoints value %u."
- " patchControlPoints should be >0 and <=32.", pPipeline->tessStateCI.patchControlPoints);
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
+ "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
+ "topology used with patchControlPoints value %u."
+ " patchControlPoints should be >0 and <=32.",
+ pPipeline->tessStateCI.patchControlPoints);
}
}
// Viewport state must be included if rasterization is enabled.
// If the viewport state is included, the viewport and scissor counts should always match.
// NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
- !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
+ !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
if (!pPipeline->graphicsPipelineCI.pViewportState) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
- "Gfx Pipeline pViewportState is null. Even if viewport and scissors are dynamic PSO must include viewportCount and scissorCount in pViewportState.");
- } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount != pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
- "Gfx Pipeline viewport count (%u) must match scissor count (%u).", pPipeline->vpStateCI.viewportCount, pPipeline->vpStateCI.scissorCount);
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
+ "and scissors are dynamic PSO must include "
+ "viewportCount and scissorCount in pViewportState.");
+ } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
+ pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
+ "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
+ pPipeline->vpStateCI.viewportCount, pPipeline->vpStateCI.scissorCount);
} else {
// If viewport or scissor are not dynamic, then verify that data is appropriate for count
VkBool32 dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
VkBool32 dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
if (!dynViewport) {
- if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount && !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
- "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you must either include pViewports data, or include viewport in pDynamicState and set it with vkCmdSetViewport().", pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
+ if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
+ !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
+ "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
+ "must either include pViewports data, or include viewport in pDynamicState and set it with "
+ "vkCmdSetViewport().",
+ pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
}
}
if (!dynScissor) {
- if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount && !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
- "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you must either include pScissors data, or include scissor in pDynamicState and set it with vkCmdSetScissor().", pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
+ if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
+ !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
+ "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
+ "must either include pScissors data, or include scissor in pDynamicState and set it with "
+ "vkCmdSetScissor().",
+ pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
}
}
}
@@ -2129,116 +2073,115 @@ static VkBool32 verifyPipelineCreateState(layer_data* my_data, const VkDevice de
// Init the pipeline mapping info based on pipeline create info LL tree
// Threading note : Calls to this function should wrapped in mutex
// TODO : this should really just be in the constructor for PIPELINE_NODE
-static PIPELINE_NODE* initGraphicsPipeline(layer_data* dev_data, const VkGraphicsPipelineCreateInfo* pCreateInfo)
-{
- PIPELINE_NODE* pPipeline = new PIPELINE_NODE;
+static PIPELINE_NODE *initGraphicsPipeline(layer_data *dev_data, const VkGraphicsPipelineCreateInfo *pCreateInfo) {
+ PIPELINE_NODE *pPipeline = new PIPELINE_NODE;
// First init create info
memcpy(&pPipeline->graphicsPipelineCI, pCreateInfo, sizeof(VkGraphicsPipelineCreateInfo));
size_t bufferSize = 0;
- const VkPipelineVertexInputStateCreateInfo* pVICI = NULL;
- const VkPipelineColorBlendStateCreateInfo* pCBCI = NULL;
+ const VkPipelineVertexInputStateCreateInfo *pVICI = NULL;
+ const VkPipelineColorBlendStateCreateInfo *pCBCI = NULL;
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];
switch (pPSSCI->stage) {
- case VK_SHADER_STAGE_VERTEX_BIT:
- memcpy(&pPipeline->vsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
- pPipeline->active_shaders |= VK_SHADER_STAGE_VERTEX_BIT;
- break;
- case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
- memcpy(&pPipeline->tcsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
- pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
- break;
- case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
- memcpy(&pPipeline->tesCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
- pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
- break;
- case VK_SHADER_STAGE_GEOMETRY_BIT:
- memcpy(&pPipeline->gsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
- pPipeline->active_shaders |= VK_SHADER_STAGE_GEOMETRY_BIT;
- break;
- case VK_SHADER_STAGE_FRAGMENT_BIT:
- memcpy(&pPipeline->fsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
- pPipeline->active_shaders |= VK_SHADER_STAGE_FRAGMENT_BIT;
- break;
- case VK_SHADER_STAGE_COMPUTE_BIT:
- // TODO : Flag error, CS is specified through VkComputePipelineCreateInfo
- pPipeline->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
- break;
- default:
- // TODO : Flag error
- break;
+ case VK_SHADER_STAGE_VERTEX_BIT:
+ memcpy(&pPipeline->vsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
+ pPipeline->active_shaders |= VK_SHADER_STAGE_VERTEX_BIT;
+ break;
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
+ memcpy(&pPipeline->tcsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
+ pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
+ break;
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
+ memcpy(&pPipeline->tesCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
+ pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
+ break;
+ case VK_SHADER_STAGE_GEOMETRY_BIT:
+ memcpy(&pPipeline->gsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
+ pPipeline->active_shaders |= VK_SHADER_STAGE_GEOMETRY_BIT;
+ break;
+ case VK_SHADER_STAGE_FRAGMENT_BIT:
+ memcpy(&pPipeline->fsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
+ pPipeline->active_shaders |= VK_SHADER_STAGE_FRAGMENT_BIT;
+ break;
+ case VK_SHADER_STAGE_COMPUTE_BIT:
+ // TODO : Flag error, CS is specified through VkComputePipelineCreateInfo
+ pPipeline->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
+ break;
+ default:
+ // TODO : Flag error
+ break;
}
}
// Copy over GraphicsPipelineCreateInfo structure embedded pointers
if (pCreateInfo->stageCount != 0) {
pPipeline->graphicsPipelineCI.pStages = new VkPipelineShaderStageCreateInfo[pCreateInfo->stageCount];
- bufferSize = pCreateInfo->stageCount * sizeof(VkPipelineShaderStageCreateInfo);
- memcpy((void*)pPipeline->graphicsPipelineCI.pStages, pCreateInfo->pStages, bufferSize);
+ bufferSize = pCreateInfo->stageCount * sizeof(VkPipelineShaderStageCreateInfo);
+ memcpy((void *)pPipeline->graphicsPipelineCI.pStages, pCreateInfo->pStages, bufferSize);
}
if (pCreateInfo->pVertexInputState != NULL) {
- memcpy((void*)&pPipeline->vertexInputCI, pCreateInfo->pVertexInputState , sizeof(VkPipelineVertexInputStateCreateInfo));
+ memcpy((void *)&pPipeline->vertexInputCI, pCreateInfo->pVertexInputState, sizeof(VkPipelineVertexInputStateCreateInfo));
// Copy embedded ptrs
pVICI = pCreateInfo->pVertexInputState;
pPipeline->vtxBindingCount = pVICI->vertexBindingDescriptionCount;
if (pPipeline->vtxBindingCount) {
pPipeline->pVertexBindingDescriptions = new VkVertexInputBindingDescription[pPipeline->vtxBindingCount];
bufferSize = pPipeline->vtxBindingCount * sizeof(VkVertexInputBindingDescription);
- memcpy((void*)pPipeline->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions, bufferSize);
+ memcpy((void *)pPipeline->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions, bufferSize);
}
pPipeline->vtxAttributeCount = pVICI->vertexAttributeDescriptionCount;
if (pPipeline->vtxAttributeCount) {
pPipeline->pVertexAttributeDescriptions = new VkVertexInputAttributeDescription[pPipeline->vtxAttributeCount];
bufferSize = pPipeline->vtxAttributeCount * sizeof(VkVertexInputAttributeDescription);
- memcpy((void*)pPipeline->pVertexAttributeDescriptions, pVICI->pVertexAttributeDescriptions, bufferSize);
+ memcpy((void *)pPipeline->pVertexAttributeDescriptions, pVICI->pVertexAttributeDescriptions, bufferSize);
}
pPipeline->graphicsPipelineCI.pVertexInputState = &pPipeline->vertexInputCI;
}
if (pCreateInfo->pInputAssemblyState != NULL) {
- memcpy((void*)&pPipeline->iaStateCI, pCreateInfo->pInputAssemblyState, sizeof(VkPipelineInputAssemblyStateCreateInfo));
+ memcpy((void *)&pPipeline->iaStateCI, pCreateInfo->pInputAssemblyState, sizeof(VkPipelineInputAssemblyStateCreateInfo));
pPipeline->graphicsPipelineCI.pInputAssemblyState = &pPipeline->iaStateCI;
}
if (pCreateInfo->pTessellationState != NULL) {
- memcpy((void*)&pPipeline->tessStateCI, pCreateInfo->pTessellationState, sizeof(VkPipelineTessellationStateCreateInfo));
+ memcpy((void *)&pPipeline->tessStateCI, pCreateInfo->pTessellationState, sizeof(VkPipelineTessellationStateCreateInfo));
pPipeline->graphicsPipelineCI.pTessellationState = &pPipeline->tessStateCI;
}
if (pCreateInfo->pViewportState != NULL) {
- memcpy((void*)&pPipeline->vpStateCI, pCreateInfo->pViewportState, sizeof(VkPipelineViewportStateCreateInfo));
+ memcpy((void *)&pPipeline->vpStateCI, pCreateInfo->pViewportState, sizeof(VkPipelineViewportStateCreateInfo));
pPipeline->graphicsPipelineCI.pViewportState = &pPipeline->vpStateCI;
}
if (pCreateInfo->pRasterizationState != NULL) {
- memcpy((void*)&pPipeline->rsStateCI, pCreateInfo->pRasterizationState, sizeof(VkPipelineRasterizationStateCreateInfo));
+ memcpy((void *)&pPipeline->rsStateCI, pCreateInfo->pRasterizationState, sizeof(VkPipelineRasterizationStateCreateInfo));
pPipeline->graphicsPipelineCI.pRasterizationState = &pPipeline->rsStateCI;
}
if (pCreateInfo->pMultisampleState != NULL) {
- memcpy((void*)&pPipeline->msStateCI, pCreateInfo->pMultisampleState, sizeof(VkPipelineMultisampleStateCreateInfo));
+ memcpy((void *)&pPipeline->msStateCI, pCreateInfo->pMultisampleState, sizeof(VkPipelineMultisampleStateCreateInfo));
pPipeline->graphicsPipelineCI.pMultisampleState = &pPipeline->msStateCI;
}
if (pCreateInfo->pDepthStencilState != NULL) {
- memcpy((void*)&pPipeline->dsStateCI, pCreateInfo->pDepthStencilState, sizeof(VkPipelineDepthStencilStateCreateInfo));
+ memcpy((void *)&pPipeline->dsStateCI, pCreateInfo->pDepthStencilState, sizeof(VkPipelineDepthStencilStateCreateInfo));
pPipeline->graphicsPipelineCI.pDepthStencilState = &pPipeline->dsStateCI;
}
if (pCreateInfo->pColorBlendState != NULL) {
- memcpy((void*)&pPipeline->cbStateCI, pCreateInfo->pColorBlendState, sizeof(VkPipelineColorBlendStateCreateInfo));
+ memcpy((void *)&pPipeline->cbStateCI, pCreateInfo->pColorBlendState, sizeof(VkPipelineColorBlendStateCreateInfo));
// Copy embedded ptrs
pCBCI = pCreateInfo->pColorBlendState;
pPipeline->attachmentCount = pCBCI->attachmentCount;
if (pPipeline->attachmentCount) {
pPipeline->pAttachments = new VkPipelineColorBlendAttachmentState[pPipeline->attachmentCount];
bufferSize = pPipeline->attachmentCount * sizeof(VkPipelineColorBlendAttachmentState);
- memcpy((void*)pPipeline->pAttachments, pCBCI->pAttachments, bufferSize);
+ memcpy((void *)pPipeline->pAttachments, pCBCI->pAttachments, bufferSize);
}
pPipeline->graphicsPipelineCI.pColorBlendState = &pPipeline->cbStateCI;
}
if (pCreateInfo->pDynamicState != NULL) {
- memcpy((void*)&pPipeline->dynStateCI, pCreateInfo->pDynamicState, sizeof(VkPipelineDynamicStateCreateInfo));
+ memcpy((void *)&pPipeline->dynStateCI, pCreateInfo->pDynamicState, sizeof(VkPipelineDynamicStateCreateInfo));
if (pPipeline->dynStateCI.dynamicStateCount) {
pPipeline->dynStateCI.pDynamicStates = new VkDynamicState[pPipeline->dynStateCI.dynamicStateCount];
bufferSize = pPipeline->dynStateCI.dynamicStateCount * sizeof(VkDynamicState);
- memcpy((void*)pPipeline->dynStateCI.pDynamicStates, pCreateInfo->pDynamicState->pDynamicStates, bufferSize);
+ memcpy((void *)pPipeline->dynStateCI.pDynamicStates, pCreateInfo->pDynamicState->pDynamicStates, bufferSize);
}
pPipeline->graphicsPipelineCI.pDynamicState = &pPipeline->dynStateCI;
}
@@ -2247,19 +2190,18 @@ static PIPELINE_NODE* initGraphicsPipeline(layer_data* dev_data, const VkGraphic
}
// Free the Pipeline nodes
-static void deletePipelines(layer_data* my_data)
-{
+static void deletePipelines(layer_data *my_data) {
if (my_data->pipelineMap.size() <= 0)
return;
- for (auto ii=my_data->pipelineMap.begin(); ii!=my_data->pipelineMap.end(); ++ii) {
+ for (auto ii = my_data->pipelineMap.begin(); ii != my_data->pipelineMap.end(); ++ii) {
if ((*ii).second->graphicsPipelineCI.stageCount != 0) {
- delete[] (*ii).second->graphicsPipelineCI.pStages;
+ delete[](*ii).second->graphicsPipelineCI.pStages;
}
- delete[] (*ii).second->pVertexBindingDescriptions;
- delete[] (*ii).second->pVertexAttributeDescriptions;
- delete[] (*ii).second->pAttachments;
+ delete[](*ii).second->pVertexBindingDescriptions;
+ delete[](*ii).second->pVertexAttributeDescriptions;
+ delete[](*ii).second->pAttachments;
if ((*ii).second->dynStateCI.dynamicStateCount != 0) {
- delete[] (*ii).second->dynStateCI.pDynamicStates;
+ delete[](*ii).second->dynStateCI.pDynamicStates;
}
delete (*ii).second;
}
@@ -2267,9 +2209,8 @@ static void deletePipelines(layer_data* my_data)
}
// For given pipeline, return number of MSAA samples, or one if MSAA disabled
-static VkSampleCountFlagBits getNumSamples(layer_data* my_data, const VkPipeline pipeline)
-{
- PIPELINE_NODE* pPipe = my_data->pipelineMap[pipeline];
+static VkSampleCountFlagBits getNumSamples(layer_data *my_data, const VkPipeline pipeline) {
+ PIPELINE_NODE *pPipe = my_data->pipelineMap[pipeline];
if (VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pPipe->msStateCI.sType) {
return pPipe->msStateCI.rasterizationSamples;
}
@@ -2277,19 +2218,19 @@ static VkSampleCountFlagBits getNumSamples(layer_data* my_data, const VkPipeline
}
// Validate state related to the PSO
-static VkBool32 validatePipelineState(layer_data* my_data, const GLOBAL_CB_NODE* pCB, const VkPipelineBindPoint pipelineBindPoint, const VkPipeline pipeline)
-{
+static VkBool32 validatePipelineState(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const VkPipelineBindPoint pipelineBindPoint,
+ const VkPipeline pipeline) {
if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
// Verify that any MSAA request in PSO matches sample# in bound FB
// Skip the check if rasterization is disabled.
- PIPELINE_NODE* pPipeline = my_data->pipelineMap[pipeline];
+ PIPELINE_NODE *pPipeline = my_data->pipelineMap[pipeline];
if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
- !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
+ !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
VkSampleCountFlagBits psoNumSamples = getNumSamples(my_data, pipeline);
if (pCB->activeRenderPass) {
- const VkRenderPassCreateInfo* pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
- const VkSubpassDescription* pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
- VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits) 0;
+ const VkRenderPassCreateInfo *pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
+ const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
+ VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits)0;
uint32_t i;
for (i = 0; i < pSD->colorAttachmentCount; i++) {
@@ -2299,25 +2240,27 @@ static VkBool32 validatePipelineState(layer_data* my_data, const GLOBAL_CB_NODE*
continue;
samples = pRPCI->pAttachments[pSD->pColorAttachments[i].attachment].samples;
- if (subpassNumSamples == (VkSampleCountFlagBits) 0) {
+ if (subpassNumSamples == (VkSampleCountFlagBits)0) {
subpassNumSamples = samples;
} else if (subpassNumSamples != samples) {
- subpassNumSamples = (VkSampleCountFlagBits) -1;
+ subpassNumSamples = (VkSampleCountFlagBits)-1;
break;
}
}
if (pSD->pDepthStencilAttachment && pSD->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const VkSampleCountFlagBits samples = pRPCI->pAttachments[pSD->pDepthStencilAttachment->attachment].samples;
- if (subpassNumSamples == (VkSampleCountFlagBits) 0)
+ if (subpassNumSamples == (VkSampleCountFlagBits)0)
subpassNumSamples = samples;
else if (subpassNumSamples != samples)
- subpassNumSamples = (VkSampleCountFlagBits) -1;
+ subpassNumSamples = (VkSampleCountFlagBits)-1;
}
if (psoNumSamples != subpassNumSamples) {
- return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, (uint64_t) pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
- "Num samples mismatch! Binding PSO (%#" PRIxLEAST64 ") with %u samples while current RenderPass (%#" PRIxLEAST64 ") w/ %u samples!",
- (uint64_t) pipeline, psoNumSamples, (uint64_t) pCB->activeRenderPass, subpassNumSamples);
+ return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
+ (uint64_t)pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
+ "Num samples mismatch! Binding PSO (%#" PRIxLEAST64
+ ") with %u samples while current RenderPass (%#" PRIxLEAST64 ") w/ %u samples!",
+ (uint64_t)pipeline, psoNumSamples, (uint64_t)pCB->activeRenderPass, subpassNumSamples);
}
} else {
// TODO : I believe it's an error if we reach this point and don't have an activeRenderPass
@@ -2334,15 +2277,14 @@ static VkBool32 validatePipelineState(layer_data* my_data, const GLOBAL_CB_NODE*
// Block of code at start here specifically for managing/tracking DSs
// Return Pool node ptr for specified pool or else NULL
-static DESCRIPTOR_POOL_NODE* getPoolNode(layer_data* my_data, const VkDescriptorPool pool)
-{
+static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) {
if (my_data->descriptorPoolMap.find(pool) == my_data->descriptorPoolMap.end()) {
return NULL;
}
return my_data->descriptorPoolMap[pool];
}
-static LAYOUT_NODE* getLayoutNode(layer_data* my_data, const VkDescriptorSetLayout layout) {
+static LAYOUT_NODE *getLayoutNode(layer_data *my_data, const VkDescriptorSetLayout layout) {
if (my_data->descriptorSetLayoutMap.find(layout) == my_data->descriptorSetLayoutMap.end()) {
return NULL;
}
@@ -2350,39 +2292,36 @@ static LAYOUT_NODE* getLayoutNode(layer_data* my_data, const VkDescriptorSetLayo
}
// Return VK_FALSE if update struct is of valid type, otherwise flag error and return code from callback
-static VkBool32 validUpdateStruct(layer_data* my_data, const VkDevice device, const GENERIC_HEADER* pUpdateStruct)
-{
- switch (pUpdateStruct->sType)
- {
- case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
- case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
- return VK_FALSE;
- default:
- return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
- "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree", string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
+static VkBool32 validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
+ switch (pUpdateStruct->sType) {
+ case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
+ case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
+ return VK_FALSE;
+ default:
+ return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
+ "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
+ string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
}
}
// Set count for given update struct in the last parameter
// Return value of skipCall, which is only VK_TRUE if error occurs and callback signals execution to cease
-static uint32_t getUpdateCount(layer_data* my_data, const VkDevice device, const GENERIC_HEADER* pUpdateStruct)
-{
- switch (pUpdateStruct->sType)
- {
- case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
- return ((VkWriteDescriptorSet*)pUpdateStruct)->descriptorCount;
- case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
- // TODO : Need to understand this case better and make sure code is correct
- return ((VkCopyDescriptorSet*)pUpdateStruct)->descriptorCount;
- default:
- return 0;
+static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
+ switch (pUpdateStruct->sType) {
+ case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
+ return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
+ case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
+ // TODO : Need to understand this case better and make sure code is correct
+ return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
+ default:
+ return 0;
}
return 0;
}
// For given Layout Node and binding, return index where that binding begins
-static uint32_t getBindingStartIndex(const LAYOUT_NODE* pLayout, const uint32_t binding)
-{
+static uint32_t getBindingStartIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
uint32_t offsetIndex = 0;
for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
if (pLayout->createInfo.pBindings[i].binding == binding)
@@ -2393,61 +2332,65 @@ static uint32_t getBindingStartIndex(const LAYOUT_NODE* pLayout, const uint32_t
}
// For given layout node and binding, return last index that is updated
-static uint32_t getBindingEndIndex(const LAYOUT_NODE* pLayout, const uint32_t binding)
-{
+static uint32_t getBindingEndIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
uint32_t offsetIndex = 0;
- for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
+ for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
if (pLayout->createInfo.pBindings[i].binding == binding)
break;
}
- return offsetIndex-1;
+ return offsetIndex - 1;
}
// For given layout and update, return the first overall index of the layout that is updated
-static uint32_t getUpdateStartIndex(layer_data* my_data, const VkDevice device, const LAYOUT_NODE* pLayout, const uint32_t binding, const uint32_t arrayIndex, const GENERIC_HEADER* pUpdateStruct)
-{
- return getBindingStartIndex(pLayout, binding)+arrayIndex;
+static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
+ const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
+ return getBindingStartIndex(pLayout, binding) + arrayIndex;
}
// For given layout and update, return the last overall index of the layout that is updated
-static uint32_t getUpdateEndIndex(layer_data* my_data, const VkDevice device, const LAYOUT_NODE* pLayout, const uint32_t binding, const uint32_t arrayIndex, const GENERIC_HEADER* pUpdateStruct)
-{
+static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
+ const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
- return getBindingStartIndex(pLayout, binding)+arrayIndex+count-1;
+ return getBindingStartIndex(pLayout, binding) + arrayIndex + count - 1;
}
// Verify that the descriptor type in the update struct matches what's expected by the layout
-static VkBool32 validateUpdateConsistency(layer_data* my_data, const VkDevice device, const LAYOUT_NODE* pLayout, const GENERIC_HEADER* pUpdateStruct, uint32_t startIndex, uint32_t endIndex)
-{
+static VkBool32 validateUpdateConsistency(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout,
+ const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
// First get actual type of update
VkBool32 skipCall = VK_FALSE;
VkDescriptorType actualType;
uint32_t i = 0;
- switch (pUpdateStruct->sType)
- {
- case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
- actualType = ((VkWriteDescriptorSet*)pUpdateStruct)->descriptorType;
- break;
- case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
- /* no need to validate */
- return VK_FALSE;
- break;
- default:
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
- "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree", string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
+ switch (pUpdateStruct->sType) {
+ case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
+ actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
+ break;
+ case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
+ /* no need to validate */
+ return VK_FALSE;
+ break;
+ default:
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
+ "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
+ string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
}
if (VK_FALSE == skipCall) {
// Set first stageFlags as reference and verify that all other updates match it
VkShaderStageFlags refStageFlags = pLayout->stageFlags[startIndex];
for (i = startIndex; i <= endIndex; i++) {
if (pLayout->descriptorTypes[i] != actualType) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
+ skipCall |= log_msg(
+ my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
"Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
string_VkDescriptorType(actualType), string_VkDescriptorType(pLayout->descriptorTypes[i]));
}
if (pLayout->stageFlags[i] != refStageFlags) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, "DS",
+ skipCall |= log_msg(
+ my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, "DS",
"Write descriptor update has stageFlags %x that do not match overlapping binding descriptor stageFlags of %x!",
refStageFlags, pLayout->stageFlags[i]);
}
@@ -2459,61 +2402,55 @@ static VkBool32 validateUpdateConsistency(layer_data* my_data, const VkDevice de
// Determine the update type, allocate a new struct of that type, shadow the given pUpdate
// struct into the pNewNode param. Return VK_TRUE if error condition encountered and callback signals early exit.
// NOTE : Calls to this function should be wrapped in mutex
-static VkBool32 shadowUpdateNode(layer_data* my_data, const VkDevice device, GENERIC_HEADER* pUpdate, GENERIC_HEADER** pNewNode)
-{
+static VkBool32 shadowUpdateNode(layer_data *my_data, const VkDevice device, GENERIC_HEADER *pUpdate, GENERIC_HEADER **pNewNode) {
VkBool32 skipCall = VK_FALSE;
- VkWriteDescriptorSet* pWDS = NULL;
- VkCopyDescriptorSet* pCDS = NULL;
- switch (pUpdate->sType)
- {
- case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
- pWDS = new VkWriteDescriptorSet;
- *pNewNode = (GENERIC_HEADER*)pWDS;
- memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet));
-
- switch (pWDS->descriptorType) {
- case VK_DESCRIPTOR_TYPE_SAMPLER:
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
- case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
- {
- VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount];
- memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo));
- pWDS->pImageInfo = info;
- }
- break;
- case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- {
- VkBufferView *info = new VkBufferView[pWDS->descriptorCount];
- memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView));
- pWDS->pTexelBufferView = info;
- }
- break;
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- {
- VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount];
- memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo));
- pWDS->pBufferInfo = info;
- }
- break;
- default:
- return VK_ERROR_VALIDATION_FAILED_EXT;
- break;
- }
- break;
- case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
- pCDS = new VkCopyDescriptorSet;
- *pNewNode = (GENERIC_HEADER*)pCDS;
- memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet));
- break;
+ VkWriteDescriptorSet *pWDS = NULL;
+ VkCopyDescriptorSet *pCDS = NULL;
+ switch (pUpdate->sType) {
+ case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
+ pWDS = new VkWriteDescriptorSet;
+ *pNewNode = (GENERIC_HEADER *)pWDS;
+ memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet));
+
+ switch (pWDS->descriptorType) {
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
+ VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount];
+ memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo));
+ pWDS->pImageInfo = info;
+ } break;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
+ VkBufferView *info = new VkBufferView[pWDS->descriptorCount];
+ memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView));
+ pWDS->pTexelBufferView = info;
+ } break;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
+ VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount];
+ memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo));
+ pWDS->pBufferInfo = info;
+ } break;
default:
- if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
- "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree", string_VkStructureType(pUpdate->sType), pUpdate->sType))
- return VK_TRUE;
+ return VK_ERROR_VALIDATION_FAILED_EXT;
+ break;
+ }
+ break;
+ case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
+ pCDS = new VkCopyDescriptorSet;
+ *pNewNode = (GENERIC_HEADER *)pCDS;
+ memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet));
+ break;
+ default:
+ if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
+ "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
+ string_VkStructureType(pUpdate->sType), pUpdate->sType))
+ return VK_TRUE;
}
// Make sure that pNext for the end of shadow copy is NULL
(*pNewNode)->pNext = NULL;
@@ -2521,17 +2458,21 @@ static VkBool32 shadowUpdateNode(layer_data* my_data, const VkDevice device, GEN
}
// Verify that given sampler is valid
-static VkBool32 validateSampler(const layer_data* my_data, const VkSampler* pSampler, const VkBool32 immutable)
-{
+static VkBool32 validateSampler(const layer_data *my_data, const VkSampler *pSampler, const VkBool32 immutable) {
VkBool32 skipCall = VK_FALSE;
auto sampIt = my_data->sampleMap.find(*pSampler);
if (sampIt == my_data->sampleMap.end()) {
if (!immutable) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, (uint64_t) *pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
- "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64, (uint64_t) *pSampler);
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
+ (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
+ "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64,
+ (uint64_t)*pSampler);
} else { // immutable
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, (uint64_t) *pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
- "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable sampler %#" PRIxLEAST64, (uint64_t) *pSampler);
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
+ (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
+ "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable "
+ "sampler %#" PRIxLEAST64,
+ (uint64_t)*pSampler);
}
} else {
// TODO : Any further checks we want to do on the sampler?
@@ -2540,8 +2481,7 @@ static VkBool32 validateSampler(const layer_data* my_data, const VkSampler* pSam
}
// find layout(s) on the cmd buf level
-bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image,
- VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
+bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
ImageSubresourcePair imgpair = {image, true, range};
auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
if (imgsubIt == pCB->imageLayoutMap.end()) {
@@ -2555,26 +2495,24 @@ bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image,
}
// find layout(s) on the global level
-bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair,
- VkImageLayout &layout) {
+bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
if (imgsubIt == my_data->imageLayoutMap.end()) {
imgpair = {imgpair.image, false, VkImageSubresource()};
imgsubIt = my_data->imageLayoutMap.find(imgpair);
- if(imgsubIt == my_data->imageLayoutMap.end()) return false;
+ if (imgsubIt == my_data->imageLayoutMap.end())
+ return false;
}
layout = imgsubIt->second.layout;
return true;
}
-bool FindLayout(const layer_data *my_data, VkImage image,
- VkImageSubresource range, VkImageLayout &layout) {
+bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
ImageSubresourcePair imgpair = {image, true, range};
return FindLayout(my_data, imgpair, layout);
}
-bool FindLayouts(const layer_data *my_data, VkImage image,
- std::vector<VkImageLayout> &layouts) {
+bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
auto sub_data = my_data->imageSubresourceMap.find(image);
if (sub_data == my_data->imageSubresourceMap.end())
return false;
@@ -2584,8 +2522,7 @@ bool FindLayouts(const layer_data *my_data, VkImage image,
bool ignoreGlobal = false;
// TODO: Make this robust for >1 aspect mask. Now it will just say ignore
// potential errors in this case.
- if (sub_data->second.size() >=
- (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
+ if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
ignoreGlobal = true;
}
for (auto imgsubpair : sub_data->second) {
@@ -2599,53 +2536,42 @@ bool FindLayouts(const layer_data *my_data, VkImage image,
return true;
}
-
// Set the layout on the global level
-void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair,
- const VkImageLayout &layout) {
+void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
VkImage &image = imgpair.image;
// TODO (mlentine): Maybe set format if new? Not used atm.
my_data->imageLayoutMap[imgpair].layout = layout;
// TODO (mlentine): Maybe make vector a set?
- auto subresource =
- std::find(my_data->imageSubresourceMap[image].begin(),
- my_data->imageSubresourceMap[image].end(), imgpair);
+ auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
if (subresource == my_data->imageSubresourceMap[image].end()) {
my_data->imageSubresourceMap[image].push_back(imgpair);
}
}
-void SetLayout(layer_data *my_data, VkImage image,
- const VkImageLayout &layout) {
+void SetLayout(layer_data *my_data, VkImage image, const VkImageLayout &layout) {
ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
SetLayout(my_data, imgpair, layout);
}
-void SetLayout(layer_data *my_data, VkImage image, VkImageSubresource range,
- const VkImageLayout &layout) {
+void SetLayout(layer_data *my_data, VkImage image, VkImageSubresource range, const VkImageLayout &layout) {
ImageSubresourcePair imgpair = {image, true, range};
SetLayout(my_data, imgpair, layout);
}
// Set the layout on the cmdbuf level
-void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, ImageSubresourcePair imgpair,
- const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
+void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
pCB->imageLayoutMap[imgpair] = node;
// TODO (mlentine): Maybe make vector a set?
- auto subresource =
- std::find(pCB->imageSubresourceMap[image].begin(),
- pCB->imageSubresourceMap[image].end(), imgpair);
+ auto subresource = std::find(pCB->imageSubresourceMap[image].begin(), pCB->imageSubresourceMap[image].end(), imgpair);
if (subresource == pCB->imageSubresourceMap[image].end()) {
pCB->imageSubresourceMap[image].push_back(imgpair);
}
}
-void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, ImageSubresourcePair imgpair,
- const VkImageLayout &layout) {
+void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
// TODO (mlentine): Maybe make vector a set?
- if(std::find(pCB->imageSubresourceMap[image].begin(),
- pCB->imageSubresourceMap[image].end(),
- imgpair) != pCB->imageSubresourceMap[image].end()) {
+ if (std::find(pCB->imageSubresourceMap[image].begin(), pCB->imageSubresourceMap[image].end(), imgpair) !=
+ pCB->imageSubresourceMap[image].end()) {
pCB->imageLayoutMap[imgpair].layout = layout;
} else {
// TODO (mlentine): Could be expensive and might need to be removed.
@@ -2656,37 +2582,31 @@ void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, ImageSubresourcePair imgpair,
}
}
-void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image,
- const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
+void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
SetLayout(pCB, image, imgpair, node);
}
-void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range,
- const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
+void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
ImageSubresourcePair imgpair = {image, true, range};
SetLayout(pCB, image, imgpair, node);
}
-void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image,
- const VkImageLayout &layout) {
+void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, const VkImageLayout &layout) {
ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
SetLayout(pCB, image, imgpair, layout);
}
-void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range,
- const VkImageLayout &layout) {
+void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, const VkImageLayout &layout) {
ImageSubresourcePair imgpair = {image, true, range};
SetLayout(pCB, image, imgpair, layout);
}
-void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB,
- VkImageView imageView, const VkImageLayout &layout) {
+void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
auto image_view_data = dev_data->imageViewMap.find(imageView);
assert(image_view_data != dev_data->imageViewMap.end());
const VkImage &image = image_view_data->second->image;
- const VkImageSubresourceRange &subRange =
- image_view_data->second->subresourceRange;
+ const VkImageSubresourceRange &subRange = image_view_data->second->subresourceRange;
// TODO: Do not iterate over every possibility - consolidate where possible
for (uint32_t j = 0; j < subRange.levelCount; j++) {
uint32_t level = subRange.baseMipLevel + j;
@@ -2699,13 +2619,14 @@ void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB,
}
// Verify that given imageView is valid
-static VkBool32 validateImageView(const layer_data* my_data, const VkImageView* pImageView, const VkImageLayout imageLayout)
-{
+static VkBool32 validateImageView(const layer_data *my_data, const VkImageView *pImageView, const VkImageLayout imageLayout) {
VkBool32 skipCall = VK_FALSE;
auto ivIt = my_data->imageViewMap.find(*pImageView);
if (ivIt == my_data->imageViewMap.end()) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, (uint64_t) *pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
- "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64, (uint64_t) *pImageView);
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
+ (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
+ "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64,
+ (uint64_t)*pImageView);
} else {
// Validate that imageLayout is compatible with aspectMask and image format
VkImageAspectFlags aspectMask = ivIt->second->subresourceRange.aspectMask;
@@ -2715,65 +2636,84 @@ static VkBool32 validateImageView(const layer_data* my_data, const VkImageView*
auto imgIt = my_data->imageMap.find(image);
if (imgIt != my_data->imageMap.end()) {
format = (*imgIt).second.createInfo.format;
- }
- else {
+ } else {
// Also need to check the swapchains.
auto swapchainIt = my_data->device_extensions.imageToSwapchainMap.find(image);
if (swapchainIt != my_data->device_extensions.imageToSwapchainMap.end()) {
VkSwapchainKHR swapchain = swapchainIt->second;
auto swapchain_nodeIt = my_data->device_extensions.swapchainMap.find(swapchain);
if (swapchain_nodeIt != my_data->device_extensions.swapchainMap.end()) {
- SWAPCHAIN_NODE* pswapchain_node = swapchain_nodeIt->second;
+ SWAPCHAIN_NODE *pswapchain_node = swapchain_nodeIt->second;
format = pswapchain_node->createInfo.imageFormat;
}
}
}
if (format == VK_FORMAT_MAX_ENUM) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t) image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
- "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64 " in imageView %#" PRIxLEAST64, (uint64_t) image, (uint64_t) *pImageView);
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ (uint64_t)image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
+ "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64
+ " in imageView %#" PRIxLEAST64,
+ (uint64_t)image, (uint64_t)*pImageView);
} else {
VkBool32 ds = vk_format_is_depth_or_stencil(format);
switch (imageLayout) {
- case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
- // Only Color bit must be set
- if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, (uint64_t) *pImageView, __LINE__,
- DRAWSTATE_INVALID_IMAGE_ASPECT, "DS", "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL and imageView %#" PRIxLEAST64 ""
- " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.", (uint64_t) *pImageView);
- }
- // format must NOT be DS
- if (ds) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, (uint64_t) *pImageView, __LINE__,
- DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS", "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL and imageView %#" PRIxLEAST64 ""
- " but the image format is %s which is not a color format.", (uint64_t) *pImageView, string_VkFormat(format));
- }
- break;
- case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
- case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
- // Depth or stencil bit must be set, but both must NOT be set
- if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
- if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
- // both must NOT be set
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, (uint64_t) *pImageView, __LINE__,
- DRAWSTATE_INVALID_IMAGE_ASPECT, "DS", "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64 ""
- " that has both STENCIL and DEPTH aspects set", (uint64_t) *pImageView);
- }
- } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
- // Neither were set
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, (uint64_t) *pImageView, __LINE__,
- DRAWSTATE_INVALID_IMAGE_ASPECT, "DS", "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
- " that does not have STENCIL or DEPTH aspect set.", string_VkImageLayout(imageLayout), (uint64_t) *pImageView);
- }
- // format must be DS
- if (!ds) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, (uint64_t) *pImageView, __LINE__,
- DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS", "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
- " but the image format is %s which is not a depth/stencil format.", string_VkImageLayout(imageLayout), (uint64_t) *pImageView, string_VkFormat(format));
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+ // Only Color bit must be set
+ if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
+ (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
+ "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
+ "and imageView %#" PRIxLEAST64 ""
+ " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.",
+ (uint64_t)*pImageView);
+ }
+ // format must NOT be DS
+ if (ds) {
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
+ (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
+ "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
+ "and imageView %#" PRIxLEAST64 ""
+ " but the image format is %s which is not a color format.",
+ (uint64_t)*pImageView, string_VkFormat(format));
+ }
+ break;
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
+ // Depth or stencil bit must be set, but both must NOT be set
+ if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
+ if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
+ // both must NOT be set
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
+ (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
+ "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64 ""
+ " that has both STENCIL and DEPTH aspects set",
+ (uint64_t)*pImageView);
}
- break;
- default:
- // anything to check for other layouts?
- break;
+ } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
+ // Neither were set
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
+ (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
+ "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
+ " that does not have STENCIL or DEPTH aspect set.",
+ string_VkImageLayout(imageLayout), (uint64_t)*pImageView);
+ }
+ // format must be DS
+ if (!ds) {
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
+ (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
+ "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
+ " but the image format is %s which is not a depth/stencil format.",
+ string_VkImageLayout(imageLayout), (uint64_t)*pImageView, string_VkFormat(format));
+ }
+ break;
+ default:
+ // anything to check for other layouts?
+ break;
}
}
}
@@ -2781,13 +2721,14 @@ static VkBool32 validateImageView(const layer_data* my_data, const VkImageView*
}
// Verify that given bufferView is valid
-static VkBool32 validateBufferView(const layer_data* my_data, const VkBufferView* pBufferView)
-{
+static VkBool32 validateBufferView(const layer_data *my_data, const VkBufferView *pBufferView) {
VkBool32 skipCall = VK_FALSE;
auto sampIt = my_data->bufferViewMap.find(*pBufferView);
if (sampIt == my_data->bufferViewMap.end()) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT, (uint64_t) *pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS",
- "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64, (uint64_t) *pBufferView);
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
+ (uint64_t)*pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS",
+ "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64,
+ (uint64_t)*pBufferView);
} else {
// TODO : Any further checks we want to do on the bufferView?
}
@@ -2795,79 +2736,88 @@ static VkBool32 validateBufferView(const layer_data* my_data, const VkBufferView
}
// Verify that given bufferInfo is valid
-static VkBool32 validateBufferInfo(const layer_data* my_data, const VkDescriptorBufferInfo* pBufferInfo)
-{
+static VkBool32 validateBufferInfo(const layer_data *my_data, const VkDescriptorBufferInfo *pBufferInfo) {
VkBool32 skipCall = VK_FALSE;
auto sampIt = my_data->bufferMap.find(pBufferInfo->buffer);
if (sampIt == my_data->bufferMap.end()) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, (uint64_t) pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS",
- "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64, (uint64_t) pBufferInfo->buffer);
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
+ (uint64_t)pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS",
+ "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64,
+ (uint64_t)pBufferInfo->buffer);
} else {
// TODO : Any further checks we want to do on the bufferView?
}
return skipCall;
}
-static VkBool32 validateUpdateContents(const layer_data* my_data, const VkWriteDescriptorSet *pWDS, const VkDescriptorSetLayoutBinding* pLayoutBinding)
-{
+static VkBool32 validateUpdateContents(const layer_data *my_data, const VkWriteDescriptorSet *pWDS,
+ const VkDescriptorSetLayoutBinding *pLayoutBinding) {
VkBool32 skipCall = VK_FALSE;
// First verify that for the given Descriptor type, the correct DescriptorInfo data is supplied
- const VkSampler* pSampler = NULL;
+ const VkSampler *pSampler = NULL;
VkBool32 immutable = VK_FALSE;
uint32_t i = 0;
// For given update type, verify that update contents are correct
switch (pWDS->descriptorType) {
- case VK_DESCRIPTOR_TYPE_SAMPLER:
- for (i=0; i<pWDS->descriptorCount; ++i) {
- skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable);
- }
- break;
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- for (i=0; i<pWDS->descriptorCount; ++i) {
- if (NULL == pLayoutBinding->pImmutableSamplers) {
- pSampler = &(pWDS->pImageInfo[i].sampler);
- if (immutable) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, (uint64_t) *pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
- "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64 ", but previous update(s) from this "
- "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either "
- "use immutable or non-immutable samplers.", i, (uint64_t) *pSampler);
- }
- } else {
- if (i>0 && !immutable) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, (uint64_t) *pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
- "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this "
- "VkWriteDescriptorSet struct used a non-immutable sampler. All updates from a single struct must either "
- "use immutable or non-immutable samplers.", i);
- }
- immutable = VK_TRUE;
- pSampler = &(pLayoutBinding->pImmutableSamplers[i]);
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ for (i = 0; i < pWDS->descriptorCount; ++i) {
+ skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable);
+ }
+ break;
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ for (i = 0; i < pWDS->descriptorCount; ++i) {
+ if (NULL == pLayoutBinding->pImmutableSamplers) {
+ pSampler = &(pWDS->pImageInfo[i].sampler);
+ if (immutable) {
+ skipCall |= log_msg(
+ my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
+ (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
+ "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64
+ ", but previous update(s) from this "
+ "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either "
+ "use immutable or non-immutable samplers.",
+ i, (uint64_t)*pSampler);
}
- skipCall |= validateSampler(my_data, pSampler, immutable);
- }
- // Intentionally fall through here to also validate image stuff
- case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
- case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
- case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- for (i=0; i<pWDS->descriptorCount; ++i) {
- skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout);
- }
- break;
- case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- for (i=0; i<pWDS->descriptorCount; ++i) {
- skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i]));
- }
- break;
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- for (i=0; i<pWDS->descriptorCount; ++i) {
- skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i]));
- }
- break;
- default:
- break;
+ } else {
+ if (i > 0 && !immutable) {
+ skipCall |= log_msg(
+ my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
+ (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
+ "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this "
+ "VkWriteDescriptorSet struct used a non-immutable sampler. All updates from a single struct must either "
+ "use immutable or non-immutable samplers.",
+ i);
+ }
+ immutable = VK_TRUE;
+ pSampler = &(pLayoutBinding->pImmutableSamplers[i]);
+ }
+ skipCall |= validateSampler(my_data, pSampler, immutable);
+ }
+ // Intentionally fall through here to also validate image stuff
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ for (i = 0; i < pWDS->descriptorCount; ++i) {
+ skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout);
+ }
+ break;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ for (i = 0; i < pWDS->descriptorCount; ++i) {
+ skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i]));
+ }
+ break;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ for (i = 0; i < pWDS->descriptorCount; ++i) {
+ skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i]));
+ }
+ break;
+ default:
+ break;
}
return skipCall;
}
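(Aside on the dispatch above: the switch relies on an intentional case fall-through — VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER first validates the sampler half of each write, then falls into the image cases so the imageView/imageLayout pair is checked by the same path as the plain image descriptor types. A minimal standalone sketch of that pattern follows; the enum values and validator stubs are hypothetical, not the layer's real types.)

#include <cstdio>

enum DescriptorType { SAMPLER, COMBINED_IMAGE_SAMPLER, SAMPLED_IMAGE, STORAGE_BUFFER };

static bool validateSamplerStub(int sampler) { return sampler != 0; }  // hypothetical check
static bool validateImageViewStub(int view) { return view != 0; }      // hypothetical check

// Returns true if any check failed, mirroring the skipCall |= ... accumulation style.
static bool validateWrite(DescriptorType type, int sampler, int view, int buffer) {
    bool fail = false;
    switch (type) {
    case SAMPLER:
        fail |= !validateSamplerStub(sampler);
        break;
    case COMBINED_IMAGE_SAMPLER:
        fail |= !validateSamplerStub(sampler);
        // Intentionally fall through: the image half of the combined descriptor still needs checking.
    case SAMPLED_IMAGE:
        fail |= !validateImageViewStub(view);
        break;
    case STORAGE_BUFFER:
        fail |= (buffer == 0);
        break;
    }
    return fail;
}

int main() {
    // A combined image sampler with a valid sampler but a null view still fails the image check.
    std::printf("%d\n", validateWrite(COMBINED_IMAGE_SAMPLER, /*sampler*/ 1, /*view*/ 0, /*buffer*/ 0));
    return 0;
}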
@@ -2875,22 +2825,25 @@ static VkBool32 validateUpdateContents(const layer_data* my_data, const VkWriteD
// func_str is the name of the calling function
// Return VK_FALSE if no errors occur
// Return VK_TRUE if validation error occurs and callback returns VK_TRUE (to skip upcoming API call down the chain)
-VkBool32 validateIdleDescriptorSet(const layer_data* my_data, VkDescriptorSet set, std::string func_str) {
+VkBool32 validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
VkBool32 skip_call = VK_FALSE;
auto set_node = my_data->setMap.find(set);
if (set_node == my_data->setMap.end()) {
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
- "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(), (uint64_t)(set));
+ skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
+ "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
+ (uint64_t)(set));
} else {
if (set_node->second->in_use.load()) {
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
- "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.", func_str.c_str(), (uint64_t)(set));
+ skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
+ "DS", "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.",
+ func_str.c_str(), (uint64_t)(set));
}
}
return skip_call;
}
-static void invalidateBoundCmdBuffers(layer_data* dev_data, const SET_NODE* pSet)
-{
+static void invalidateBoundCmdBuffers(layer_data *dev_data, const SET_NODE *pSet) {
// Flag any CBs this set is bound to as INVALID
for (auto cb : pSet->boundCmdBuffers) {
auto cb_node = dev_data->commandBufferMap.find(cb);
@@ -2900,23 +2853,23 @@ static void invalidateBoundCmdBuffers(layer_data* dev_data, const SET_NODE* pSet
}
}
// update DS mappings based on write and copy update arrays
-static VkBool32 dsUpdate(layer_data* my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pWDS, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pCDS)
-{
+static VkBool32 dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS,
+ uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) {
VkBool32 skipCall = VK_FALSE;
- LAYOUT_NODE* pLayout = NULL;
- VkDescriptorSetLayoutCreateInfo* pLayoutCI = NULL;
+ LAYOUT_NODE *pLayout = NULL;
+ VkDescriptorSetLayoutCreateInfo *pLayoutCI = NULL;
// Validate Write updates
uint32_t i = 0;
- for (i=0; i < descriptorWriteCount; i++) {
+ for (i = 0; i < descriptorWriteCount; i++) {
VkDescriptorSet ds = pWDS[i].dstSet;
- SET_NODE* pSet = my_data->setMap[ds];
+ SET_NODE *pSet = my_data->setMap[ds];
// Set being updated cannot be in-flight
if ((skipCall = validateIdleDescriptorSet(my_data, ds, "VkUpdateDescriptorSets")) == VK_TRUE)
return skipCall;
// If set is bound to any cmdBuffers, mark them invalid
invalidateBoundCmdBuffers(my_data, pSet);
- GENERIC_HEADER* pUpdate = (GENERIC_HEADER*) &pWDS[i];
+ GENERIC_HEADER *pUpdate = (GENERIC_HEADER *)&pWDS[i];
pLayout = pSet->pLayout;
// First verify valid update struct
if ((skipCall = validUpdateStruct(my_data, device, pUpdate)) == VK_TRUE) {
@@ -2927,53 +2880,49 @@ static VkBool32 dsUpdate(layer_data* my_data, VkDevice device, uint32_t descript
auto bindingToIndex = pLayout->bindingToIndexMap.find(binding);
// Make sure that layout being updated has the binding being updated
if (bindingToIndex == pLayout->bindingToIndexMap.end()) {
- skipCall |= log_msg(
- my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(ds),
- __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
- "Descriptor Set %" PRIu64 " does not have binding to match "
- "update binding %u for update type "
- "%s!",
- (uint64_t)(ds), binding,
- string_VkStructureType(pUpdate->sType));
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ (uint64_t)(ds), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
+ "Descriptor Set %" PRIu64 " does not have binding to match "
+ "update binding %u for update type "
+ "%s!",
+ (uint64_t)(ds), binding, string_VkStructureType(pUpdate->sType));
} else {
// Next verify that update falls within size of given binding
endIndex = getUpdateEndIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
if (getBindingEndIndex(pLayout, binding) < endIndex) {
pLayoutCI = &pLayout->createInfo;
string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS} ");
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
- "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!", string_VkStructureType(pUpdate->sType), binding, DSstr.c_str());
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ (uint64_t)(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
+ "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
+ string_VkStructureType(pUpdate->sType), binding, DSstr.c_str());
} else { // TODO : should we skip update on a type mismatch or force it?
uint32_t startIndex;
- startIndex =
- getUpdateStartIndex(my_data, device, pLayout, binding,
- pWDS[i].dstArrayElement, pUpdate);
+ startIndex = getUpdateStartIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
// Layout bindings match w/ update, now verify that update type
// & stageFlags are the same for entire update
- if ((skipCall = validateUpdateConsistency(
- my_data, device, pLayout, pUpdate, startIndex,
- endIndex)) == VK_FALSE) {
+ if ((skipCall = validateUpdateConsistency(my_data, device, pLayout, pUpdate, startIndex, endIndex)) == VK_FALSE) {
// The update is within bounds and consistent, but need to
// make sure contents make sense as well
- if ((skipCall = validateUpdateContents(
- my_data, &pWDS[i],
- &pLayout->createInfo.pBindings[bindingToIndex->second])) ==
- VK_FALSE) {
+ if ((skipCall = validateUpdateContents(my_data, &pWDS[i],
+ &pLayout->createInfo.pBindings[bindingToIndex->second])) == VK_FALSE) {
// Update is good. Save the update info
// Create new update struct for this set's shadow copy
- GENERIC_HEADER* pNewNode = NULL;
+ GENERIC_HEADER *pNewNode = NULL;
skipCall |= shadowUpdateNode(my_data, device, pUpdate, &pNewNode);
if (NULL == pNewNode) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
- "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptors()");
+ skipCall |= log_msg(
+ my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
+ "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptors()");
} else {
// Insert shadow node into LL of updates for this set
pNewNode->pNext = pSet->pUpdateStructs;
pSet->pUpdateStructs = pNewNode;
// Now update appropriate descriptor(s) to point to new Update node
for (uint32_t j = startIndex; j <= endIndex; j++) {
- assert(j<pSet->descriptorCount);
+ assert(j < pSet->descriptorCount);
pSet->ppDescriptors[j] = pNewNode;
}
}
@@ -2983,7 +2932,7 @@ static VkBool32 dsUpdate(layer_data* my_data, VkDevice device, uint32_t descript
}
}
// Now validate copy updates
- for (i=0; i < descriptorCopyCount; ++i) {
+ for (i = 0; i < descriptorCopyCount; ++i) {
SET_NODE *pSrcSet = NULL, *pDstSet = NULL;
LAYOUT_NODE *pSrcLayout = NULL, *pDstLayout = NULL;
uint32_t srcStartIndex = 0, srcEndIndex = 0, dstStartIndex = 0, dstEndIndex = 0;
@@ -2997,58 +2946,61 @@ static VkBool32 dsUpdate(layer_data* my_data, VkDevice device, uint32_t descript
pSrcLayout = pSrcSet->pLayout;
pDstLayout = pDstSet->pLayout;
// Validate that src binding is valid for src set layout
- if (pSrcLayout->bindingToIndexMap.find(pCDS[i].srcBinding) ==
- pSrcLayout->bindingToIndexMap.end()) {
- skipCall |=
- log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
- (uint64_t)pSrcSet->set, __LINE__,
- DRAWSTATE_INVALID_UPDATE_INDEX,
- "DS", "Copy descriptor update %u has srcBinding %u "
- "which is out of bounds for underlying SetLayout "
- "%#" PRIxLEAST64 " which only has bindings 0-%u.",
- i, pCDS[i].srcBinding, (uint64_t)pSrcLayout->layout,
- pSrcLayout->createInfo.bindingCount - 1);
- } else if (pDstLayout->bindingToIndexMap.find(pCDS[i].dstBinding) ==
- pDstLayout->bindingToIndexMap.end()) {
- skipCall |=
- log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
- (uint64_t)pDstSet->set, __LINE__,
- DRAWSTATE_INVALID_UPDATE_INDEX,
- "DS", "Copy descriptor update %u has dstBinding %u "
- "which is out of bounds for underlying SetLayout "
- "%#" PRIxLEAST64 " which only has bindings 0-%u.",
- i, pCDS[i].dstBinding, (uint64_t)pDstLayout->layout,
- pDstLayout->createInfo.bindingCount - 1);
+ if (pSrcLayout->bindingToIndexMap.find(pCDS[i].srcBinding) == pSrcLayout->bindingToIndexMap.end()) {
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
+ "Copy descriptor update %u has srcBinding %u "
+ "which is out of bounds for underlying SetLayout "
+ "%#" PRIxLEAST64 " which only has bindings 0-%u.",
+ i, pCDS[i].srcBinding, (uint64_t)pSrcLayout->layout, pSrcLayout->createInfo.bindingCount - 1);
+ } else if (pDstLayout->bindingToIndexMap.find(pCDS[i].dstBinding) == pDstLayout->bindingToIndexMap.end()) {
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
+ "Copy descriptor update %u has dstBinding %u "
+ "which is out of bounds for underlying SetLayout "
+ "%#" PRIxLEAST64 " which only has bindings 0-%u.",
+ i, pCDS[i].dstBinding, (uint64_t)pDstLayout->layout, pDstLayout->createInfo.bindingCount - 1);
} else {
// Proceed with validation. Bindings are ok, but make sure update is within bounds of given layout
- srcEndIndex = getUpdateEndIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement, (const GENERIC_HEADER*)&(pCDS[i]));
- dstEndIndex = getUpdateEndIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement, (const GENERIC_HEADER*)&(pCDS[i]));
+ srcEndIndex = getUpdateEndIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
+ (const GENERIC_HEADER *)&(pCDS[i]));
+ dstEndIndex = getUpdateEndIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
+ (const GENERIC_HEADER *)&(pCDS[i]));
if (getBindingEndIndex(pSrcLayout, pCDS[i].srcBinding) < srcEndIndex) {
pLayoutCI = &pSrcLayout->createInfo;
string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS} ");
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t) pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
- "Copy descriptor src update is out of bounds for matching binding %u in Layout w/ CI:\n%s!", pCDS[i].srcBinding, DSstr.c_str());
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
+ "Copy descriptor src update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
+ pCDS[i].srcBinding, DSstr.c_str());
} else if (getBindingEndIndex(pDstLayout, pCDS[i].dstBinding) < dstEndIndex) {
pLayoutCI = &pDstLayout->createInfo;
string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS} ");
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t) pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
- "Copy descriptor dest update is out of bounds for matching binding %u in Layout w/ CI:\n%s!", pCDS[i].dstBinding, DSstr.c_str());
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
+ "Copy descriptor dest update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
+ pCDS[i].dstBinding, DSstr.c_str());
} else {
- srcStartIndex = getUpdateStartIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement, (const GENERIC_HEADER*)&(pCDS[i]));
- dstStartIndex = getUpdateStartIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement, (const GENERIC_HEADER*)&(pCDS[i]));
- for (uint32_t j=0; j<pCDS[i].descriptorCount; ++j) {
+ srcStartIndex = getUpdateStartIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
+ (const GENERIC_HEADER *)&(pCDS[i]));
+ dstStartIndex = getUpdateStartIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
+ (const GENERIC_HEADER *)&(pCDS[i]));
+ for (uint32_t j = 0; j < pCDS[i].descriptorCount; ++j) {
// For copy just make sure that the types match and then perform the update
- if (pSrcLayout->descriptorTypes[srcStartIndex+j] != pDstLayout->descriptorTypes[dstStartIndex+j]) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
- "Copy descriptor update index %u, update count #%u, has src update descriptor type %s that does not match overlapping dest descriptor type of %s!",
- i, j+1, string_VkDescriptorType(pSrcLayout->descriptorTypes[srcStartIndex+j]), string_VkDescriptorType(pDstLayout->descriptorTypes[dstStartIndex+j]));
+ if (pSrcLayout->descriptorTypes[srcStartIndex + j] != pDstLayout->descriptorTypes[dstStartIndex + j]) {
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
+ "Copy descriptor update index %u, update count #%u, has src update descriptor type %s "
+ "that does not match overlapping dest descriptor type of %s!",
+ i, j + 1, string_VkDescriptorType(pSrcLayout->descriptorTypes[srcStartIndex + j]),
+ string_VkDescriptorType(pDstLayout->descriptorTypes[dstStartIndex + j]));
} else {
// point dst descriptor at corresponding src descriptor
// TODO : This may be a hole. I believe copy should be its own copy,
// otherwise a subsequent write update to src will incorrectly affect the copy
- pDstSet->ppDescriptors[j+dstStartIndex] = pSrcSet->ppDescriptors[j+srcStartIndex];
+ pDstSet->ppDescriptors[j + dstStartIndex] = pSrcSet->ppDescriptors[j + srcStartIndex];
pDstSet->pUpdateStructs = pSrcSet->pUpdateStructs;
}
}
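(Aside on the copy path above: the TODO points at a real aliasing hazard — the copy stores the same shadow-update pointer in both sets, so a later change that rewrites or frees the source's node silently alters what the destination descriptor points at. A small sketch of shallow versus deep copy follows; UpdateNode is a hypothetical stand-in, not the layer's GENERIC_HEADER chain.)

#include <cstdio>
#include <memory>

struct UpdateNode { int payload; };

int main() {
    auto src = std::make_shared<UpdateNode>();
    src->payload = 1;

    // Shallow copy, as in the loop above: both descriptor sets end up aliasing one node.
    std::shared_ptr<UpdateNode> dstShallow = src;
    // Deep copy: the destination keeps its own snapshot of the update.
    auto dstDeep = std::make_shared<UpdateNode>(*src);

    src->payload = 2;  // a subsequent write update to the source...
    std::printf("shallow=%d deep=%d\n", dstShallow->payload, dstDeep->payload);  // prints shallow=2 deep=1
    return 0;
}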
@@ -3059,24 +3011,31 @@ static VkBool32 dsUpdate(layer_data* my_data, VkDevice device, uint32_t descript
}
// Verify that given pool has descriptors that are being requested for allocation
-static VkBool32 validate_descriptor_availability_in_pool(layer_data* dev_data, DESCRIPTOR_POOL_NODE* pPoolNode, uint32_t count, const VkDescriptorSetLayout* pSetLayouts)
-{
+static VkBool32 validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count,
+ const VkDescriptorSetLayout *pSetLayouts) {
VkBool32 skipCall = VK_FALSE;
uint32_t i = 0, j = 0;
- for (i=0; i<count; ++i) {
- LAYOUT_NODE* pLayout = getLayoutNode(dev_data, pSetLayouts[i]);
+ for (i = 0; i < count; ++i) {
+ LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pSetLayouts[i]);
if (NULL == pLayout) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t) pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
- "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call", (uint64_t) pSetLayouts[i]);
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
+ (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
+ "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
+ (uint64_t)pSetLayouts[i]);
} else {
uint32_t typeIndex = 0, poolSizeCount = 0;
- for (j=0; j<pLayout->createInfo.bindingCount; ++j) {
+ for (j = 0; j < pLayout->createInfo.bindingCount; ++j) {
typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t) pLayout->layout, __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
- "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64 ". This pool only has %u descriptors of this type remaining.",
- poolSizeCount, string_VkDescriptorType(pLayout->createInfo.pBindings[j].descriptorType), (uint64_t) pPoolNode->pool, pPoolNode->availableDescriptorTypeCount[typeIndex]);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pLayout->layout, __LINE__,
+ DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
+ "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64
+ ". This pool only has %u descriptors of this type remaining.",
+ poolSizeCount, string_VkDescriptorType(pLayout->createInfo.pBindings[j].descriptorType),
+ (uint64_t)pPoolNode->pool, pPoolNode->availableDescriptorTypeCount[typeIndex]);
} else { // Decrement available descriptors of this type
pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount;
}
@@ -3088,53 +3047,45 @@ static VkBool32 validate_descriptor_availability_in_pool(layer_data* dev_data, D
// Free the shadowed update node for this Set
// NOTE : Calls to this function should be wrapped in mutex
-static void freeShadowUpdateTree(SET_NODE* pSet)
-{
- GENERIC_HEADER* pShadowUpdate = pSet->pUpdateStructs;
+static void freeShadowUpdateTree(SET_NODE *pSet) {
+ GENERIC_HEADER *pShadowUpdate = pSet->pUpdateStructs;
pSet->pUpdateStructs = NULL;
- GENERIC_HEADER* pFreeUpdate = pShadowUpdate;
+ GENERIC_HEADER *pFreeUpdate = pShadowUpdate;
// Clear the descriptor mappings as they will now be invalid
- memset(pSet->ppDescriptors, 0, pSet->descriptorCount*sizeof(GENERIC_HEADER*));
- while(pShadowUpdate) {
+ memset(pSet->ppDescriptors, 0, pSet->descriptorCount * sizeof(GENERIC_HEADER *));
+ while (pShadowUpdate) {
pFreeUpdate = pShadowUpdate;
- pShadowUpdate = (GENERIC_HEADER*)pShadowUpdate->pNext;
- VkWriteDescriptorSet * pWDS = NULL;
- switch (pFreeUpdate->sType)
- {
- case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
- pWDS = (VkWriteDescriptorSet*)pFreeUpdate;
- switch (pWDS->descriptorType) {
- case VK_DESCRIPTOR_TYPE_SAMPLER:
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
- case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
- {
- delete[] pWDS->pImageInfo;
- }
- break;
- case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- {
- delete[] pWDS->pTexelBufferView;
- }
- break;
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- {
- delete[] pWDS->pBufferInfo;
- }
- break;
- default:
- break;
- }
- break;
- case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
- break;
+ pShadowUpdate = (GENERIC_HEADER *)pShadowUpdate->pNext;
+ VkWriteDescriptorSet *pWDS = NULL;
+ switch (pFreeUpdate->sType) {
+ case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
+ pWDS = (VkWriteDescriptorSet *)pFreeUpdate;
+ switch (pWDS->descriptorType) {
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
+ delete[] pWDS->pImageInfo;
+ } break;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
+ delete[] pWDS->pTexelBufferView;
+ } break;
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
+ delete[] pWDS->pBufferInfo;
+ } break;
default:
- assert(0);
break;
+ }
+ break;
+ case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
+ break;
+ default:
+ assert(0);
+ break;
}
delete pFreeUpdate;
}
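(Aside on freeShadowUpdateTree above: it walks a singly linked list of shadow update structs, reading pNext through a second pointer so each node can be freed after its successor is known, and switching on sType to release the per-type arrays before deleting the node itself. A compact standalone sketch of that traversal follows; the Node type is hypothetical.)

#include <cstdio>

struct Node {
    int *payload;  // stands in for the pImageInfo / pBufferInfo / pTexelBufferView arrays
    Node *pNext;
};

// Free every node: capture pNext before deleting, and release owned arrays first.
static void freeChain(Node *head) {
    while (head) {
        Node *toFree = head;
        head = head->pNext;
        delete[] toFree->payload;
        delete toFree;
    }
}

int main() {
    Node *second = new Node{new int[2]{3, 4}, nullptr};
    Node *first = new Node{new int[1]{7}, second};
    freeChain(first);
    std::printf("chain freed\n");
    return 0;
}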
@@ -3142,13 +3093,12 @@ static void freeShadowUpdateTree(SET_NODE* pSet)
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
-static void deletePools(layer_data* my_data)
-{
+static void deletePools(layer_data *my_data) {
if (my_data->descriptorPoolMap.size() <= 0)
return;
- for (auto ii=my_data->descriptorPoolMap.begin(); ii!=my_data->descriptorPoolMap.end(); ++ii) {
- SET_NODE* pSet = (*ii).second->pSets;
- SET_NODE* pFreeSet = pSet;
+ for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
+ SET_NODE *pSet = (*ii).second->pSets;
+ SET_NODE *pFreeSet = pSet;
while (pSet) {
pFreeSet = pSet;
pSet = pSet->pNext;
@@ -3165,14 +3115,13 @@ static void deletePools(layer_data* my_data)
// WARN : Once deleteLayouts() called, any layout ptrs in Pool/Set data structure will be invalid
// NOTE : Calls to this function should be wrapped in mutex
-static void deleteLayouts(layer_data* my_data)
-{
+static void deleteLayouts(layer_data *my_data) {
if (my_data->descriptorSetLayoutMap.size() <= 0)
return;
- for (auto ii=my_data->descriptorSetLayoutMap.begin(); ii!=my_data->descriptorSetLayoutMap.end(); ++ii) {
- LAYOUT_NODE* pLayout = (*ii).second;
+ for (auto ii = my_data->descriptorSetLayoutMap.begin(); ii != my_data->descriptorSetLayoutMap.end(); ++ii) {
+ LAYOUT_NODE *pLayout = (*ii).second;
if (pLayout->createInfo.pBindings) {
- for (uint32_t i=0; i<pLayout->createInfo.bindingCount; i++) {
+ for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
delete[] pLayout->createInfo.pBindings[i].pImmutableSamplers;
}
delete[] pLayout->createInfo.pBindings;
@@ -3184,9 +3133,8 @@ static void deleteLayouts(layer_data* my_data)
// Currently clearing a set is removing all previous updates to that set
// TODO : Validate if this is correct clearing behavior
-static void clearDescriptorSet(layer_data* my_data, VkDescriptorSet set)
-{
- SET_NODE* pSet = getSetNode(my_data, set);
+static void clearDescriptorSet(layer_data *my_data, VkDescriptorSet set) {
+ SET_NODE *pSet = getSetNode(my_data, set);
if (!pSet) {
// TODO : Return error
} else {
@@ -3194,34 +3142,35 @@ static void clearDescriptorSet(layer_data* my_data, VkDescriptorSet set)
}
}
-static void clearDescriptorPool(layer_data* my_data, const VkDevice device, const VkDescriptorPool pool, VkDescriptorPoolResetFlags flags)
-{
- DESCRIPTOR_POOL_NODE* pPool = getPoolNode(my_data, pool);
+static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
+ VkDescriptorPoolResetFlags flags) {
+ DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
if (!pPool) {
- log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, (uint64_t) pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
- "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t) pool);
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
+ (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
+ "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
} else {
// TODO: validate flags
// For every set off of this pool, clear it
- SET_NODE* pSet = pPool->pSets;
+ SET_NODE *pSet = pPool->pSets;
while (pSet) {
clearDescriptorSet(my_data, pSet->set);
pSet = pSet->pNext;
}
// Reset available count to max count for this pool
- for (uint32_t i=0; i<pPool->availableDescriptorTypeCount.size(); ++i) {
+ for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
}
}
}
// For given CB object, fetch associated CB Node from map
-static GLOBAL_CB_NODE* getCBNode(layer_data* my_data, const VkCommandBuffer cb)
-{
+static GLOBAL_CB_NODE *getCBNode(layer_data *my_data, const VkCommandBuffer cb) {
if (my_data->commandBufferMap.count(cb) == 0) {
// TODO : How to pass cb as srcObj here?
- log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
- "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__,
+ DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!",
+ (uint64_t)(cb));
return NULL;
}
return my_data->commandBufferMap[cb];
@@ -3229,122 +3178,124 @@ static GLOBAL_CB_NODE* getCBNode(layer_data* my_data, const VkCommandBuffer cb)
// Free all CB Nodes
// NOTE : Calls to this function should be wrapped in mutex
-static void deleteCommandBuffers(layer_data* my_data)
-{
+static void deleteCommandBuffers(layer_data *my_data) {
if (my_data->commandBufferMap.size() <= 0) {
return;
}
- for (auto ii=my_data->commandBufferMap.begin(); ii!=my_data->commandBufferMap.end(); ++ii) {
+ for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
delete (*ii).second;
}
my_data->commandBufferMap.clear();
}
-static VkBool32 report_error_no_cb_begin(const layer_data* dev_data, const VkCommandBuffer cb, const char* caller_name)
-{
+static VkBool32 report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
- "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
+ (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
+ "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
}
-VkBool32 validateCmdsInCmdBuffer(const layer_data* dev_data, const GLOBAL_CB_NODE* pCB, const CMD_TYPE cmd_type) {
- if (!pCB->activeRenderPass) return VK_FALSE;
+VkBool32 validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
+ if (!pCB->activeRenderPass)
+ return VK_FALSE;
VkBool32 skip_call = VK_FALSE;
if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && cmd_type != CMD_EXECUTECOMMANDS) {
skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Commands cannot be called in a subpass using secondary command buffers.");
+ DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
+ "Commands cannot be called in a subpass using secondary command buffers.");
} else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
+ DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
+ "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
}
return skip_call;
}
-static bool checkGraphicsBit(const layer_data* my_data, VkQueueFlags flags, const char* name) {
+static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
if (!(flags & VK_QUEUE_GRAPHICS_BIT))
return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
+ DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
+ "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
return false;
}
-static bool checkComputeBit(const layer_data* my_data, VkQueueFlags flags, const char* name) {
+static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
if (!(flags & VK_QUEUE_COMPUTE_BIT))
return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
+ DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
+ "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
return false;
}
-static bool checkGraphicsOrComputeBit(const layer_data* my_data, VkQueueFlags flags, const char* name) {
+static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
+ DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
+ "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
return false;
}
// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
// in the recording state or if there's an issue with the Cmd ordering
-static VkBool32 addCmd(const layer_data* my_data, GLOBAL_CB_NODE* pCB, const CMD_TYPE cmd, const char* caller_name)
-{
+static VkBool32 addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
VkBool32 skipCall = VK_FALSE;
auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
if (pool_data != my_data->commandPoolMap.end()) {
VkQueueFlags flags = my_data->physDevProperties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
- switch (cmd)
- {
- case CMD_BINDPIPELINE:
- case CMD_BINDPIPELINEDELTA:
- case CMD_BINDDESCRIPTORSETS:
- case CMD_FILLBUFFER:
- case CMD_CLEARCOLORIMAGE:
- case CMD_SETEVENT:
- case CMD_RESETEVENT:
- case CMD_WAITEVENTS:
- case CMD_BEGINQUERY:
- case CMD_ENDQUERY:
- case CMD_RESETQUERYPOOL:
- case CMD_COPYQUERYPOOLRESULTS:
- case CMD_WRITETIMESTAMP:
- skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
- break;
- case CMD_SETVIEWPORTSTATE:
- case CMD_SETSCISSORSTATE:
- case CMD_SETLINEWIDTHSTATE:
- case CMD_SETDEPTHBIASSTATE:
- case CMD_SETBLENDSTATE:
- case CMD_SETDEPTHBOUNDSSTATE:
- case CMD_SETSTENCILREADMASKSTATE:
- case CMD_SETSTENCILWRITEMASKSTATE:
- case CMD_SETSTENCILREFERENCESTATE:
- case CMD_BINDINDEXBUFFER:
- case CMD_BINDVERTEXBUFFER:
- case CMD_DRAW:
- case CMD_DRAWINDEXED:
- case CMD_DRAWINDIRECT:
- case CMD_DRAWINDEXEDINDIRECT:
- case CMD_BLITIMAGE:
- case CMD_CLEARATTACHMENTS:
- case CMD_CLEARDEPTHSTENCILIMAGE:
- case CMD_RESOLVEIMAGE:
- case CMD_BEGINRENDERPASS:
- case CMD_NEXTSUBPASS:
- case CMD_ENDRENDERPASS:
- skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
- break;
- case CMD_DISPATCH:
- case CMD_DISPATCHINDIRECT:
- skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
- break;
- case CMD_COPYBUFFER:
- case CMD_COPYIMAGE:
- case CMD_COPYBUFFERTOIMAGE:
- case CMD_COPYIMAGETOBUFFER:
- case CMD_CLONEIMAGEDATA:
- case CMD_UPDATEBUFFER:
- case CMD_PIPELINEBARRIER:
- case CMD_EXECUTECOMMANDS:
- break;
- default:
- break;
+ switch (cmd) {
+ case CMD_BINDPIPELINE:
+ case CMD_BINDPIPELINEDELTA:
+ case CMD_BINDDESCRIPTORSETS:
+ case CMD_FILLBUFFER:
+ case CMD_CLEARCOLORIMAGE:
+ case CMD_SETEVENT:
+ case CMD_RESETEVENT:
+ case CMD_WAITEVENTS:
+ case CMD_BEGINQUERY:
+ case CMD_ENDQUERY:
+ case CMD_RESETQUERYPOOL:
+ case CMD_COPYQUERYPOOLRESULTS:
+ case CMD_WRITETIMESTAMP:
+ skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
+ break;
+ case CMD_SETVIEWPORTSTATE:
+ case CMD_SETSCISSORSTATE:
+ case CMD_SETLINEWIDTHSTATE:
+ case CMD_SETDEPTHBIASSTATE:
+ case CMD_SETBLENDSTATE:
+ case CMD_SETDEPTHBOUNDSSTATE:
+ case CMD_SETSTENCILREADMASKSTATE:
+ case CMD_SETSTENCILWRITEMASKSTATE:
+ case CMD_SETSTENCILREFERENCESTATE:
+ case CMD_BINDINDEXBUFFER:
+ case CMD_BINDVERTEXBUFFER:
+ case CMD_DRAW:
+ case CMD_DRAWINDEXED:
+ case CMD_DRAWINDIRECT:
+ case CMD_DRAWINDEXEDINDIRECT:
+ case CMD_BLITIMAGE:
+ case CMD_CLEARATTACHMENTS:
+ case CMD_CLEARDEPTHSTENCILIMAGE:
+ case CMD_RESOLVEIMAGE:
+ case CMD_BEGINRENDERPASS:
+ case CMD_NEXTSUBPASS:
+ case CMD_ENDRENDERPASS:
+ skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
+ break;
+ case CMD_DISPATCH:
+ case CMD_DISPATCHINDIRECT:
+ skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
+ break;
+ case CMD_COPYBUFFER:
+ case CMD_COPYIMAGE:
+ case CMD_COPYBUFFERTOIMAGE:
+ case CMD_COPYIMAGETOBUFFER:
+ case CMD_CLONEIMAGEDATA:
+ case CMD_UPDATEBUFFER:
+ case CMD_PIPELINEBARRIER:
+ case CMD_EXECUTECOMMANDS:
+ break;
+ default:
+ break;
}
}
if (pCB->state != CB_RECORDING) {
@@ -3360,9 +3311,8 @@ static VkBool32 addCmd(const layer_data* my_data, GLOBAL_CB_NODE* pCB, const CMD
}
// Reset the command buffer state
// Maintain the createInfo and set state to CB_NEW, but clear all other state
-static void resetCB(layer_data* my_data, const VkCommandBuffer cb)
-{
- GLOBAL_CB_NODE* pCB = my_data->commandBufferMap[cb];
+static void resetCB(layer_data *my_data, const VkCommandBuffer cb) {
+ GLOBAL_CB_NODE *pCB = my_data->commandBufferMap[cb];
if (pCB) {
pCB->cmds.clear();
// Reset CB state (note that createInfo is not cleared)
@@ -3426,8 +3376,7 @@ static void resetCB(layer_data* my_data, const VkCommandBuffer cb)
}
// Set PSO-related status bits for CB, including dynamic state set via PSO
-static void set_cb_pso_status(GLOBAL_CB_NODE* pCB, const PIPELINE_NODE* pPipe)
-{
+static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
for (uint32_t i = 0; i < pPipe->cbStateCI.attachmentCount; i++) {
if (0 != pPipe->pAttachments[i].colorWriteMask) {
pCB->status |= CBSTATUS_COLOR_BLEND_WRITE_ENABLE;
@@ -3447,38 +3396,38 @@ static void set_cb_pso_status(GLOBAL_CB_NODE* pCB, const PIPELINE_NODE* pPipe)
// Then unset any state that's noted as dynamic in PSO
// Finally OR that into CB statemask
CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
- for (uint32_t i=0; i < pPipe->dynStateCI.dynamicStateCount; i++) {
+ for (uint32_t i = 0; i < pPipe->dynStateCI.dynamicStateCount; i++) {
switch (pPipe->dynStateCI.pDynamicStates[i]) {
- case VK_DYNAMIC_STATE_VIEWPORT:
- psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
- break;
- case VK_DYNAMIC_STATE_SCISSOR:
- psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
- break;
- case VK_DYNAMIC_STATE_LINE_WIDTH:
- psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
- break;
- case VK_DYNAMIC_STATE_DEPTH_BIAS:
- psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
- break;
- case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
- psoDynStateMask &= ~CBSTATUS_BLEND_SET;
- break;
- case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
- psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
- break;
- case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
- psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
- break;
- case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
- psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
- break;
- case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
- psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
- break;
- default:
- // TODO : Flag error here
- break;
+ case VK_DYNAMIC_STATE_VIEWPORT:
+ psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
+ break;
+ case VK_DYNAMIC_STATE_SCISSOR:
+ psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
+ break;
+ case VK_DYNAMIC_STATE_LINE_WIDTH:
+ psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
+ break;
+ case VK_DYNAMIC_STATE_DEPTH_BIAS:
+ psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
+ break;
+ case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
+ psoDynStateMask &= ~CBSTATUS_BLEND_SET;
+ break;
+ case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
+ psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
+ break;
+ case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
+ psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
+ break;
+ case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
+ psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
+ break;
+ case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
+ psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
+ break;
+ default:
+ // TODO : Flag error here
+ break;
}
}
pCB->status |= psoDynStateMask;
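(Aside on the status tracking above: it starts from a mask with every state bit set, clears the bits the pipeline declares as dynamic — those must instead be satisfied by explicit vkCmdSet* calls — and ORs the remainder into the command buffer's status. A small sketch of that mask arithmetic follows, using made-up flag values analogous to the CBSTATUS_* bits.)

#include <cstdio>
#include <cstdint>

// Hypothetical status bits, analogous in spirit to CBSTATUS_* in the layer.
enum : uint32_t {
    STATUS_VIEWPORT_SET = 0x1,
    STATUS_SCISSOR_SET = 0x2,
    STATUS_LINE_WIDTH_SET = 0x4,
    STATUS_ALL = 0x7,
};

int main() {
    uint32_t cbStatus = 0;
    uint32_t psoMask = STATUS_ALL;

    // The pipeline declares viewport as dynamic, so binding the PSO does not satisfy it.
    psoMask &= ~STATUS_VIEWPORT_SET;
    cbStatus |= psoMask;  // scissor and line width now count as set by the PSO

    // Later, a vkCmdSetViewport-style call would set the remaining bit explicitly.
    cbStatus |= STATUS_VIEWPORT_SET;

    std::printf("status=0x%x\n", cbStatus);  // prints status=0x7
    return 0;
}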
@@ -3486,90 +3435,93 @@ static void set_cb_pso_status(GLOBAL_CB_NODE* pCB, const PIPELINE_NODE* pPipe)
}
// Print the last bound Gfx Pipeline
-static VkBool32 printPipeline(layer_data* my_data, const VkCommandBuffer cb)
-{
+static VkBool32 printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
VkBool32 skipCall = VK_FALSE;
- GLOBAL_CB_NODE* pCB = getCBNode(my_data, cb);
+ GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
if (pCB) {
PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBoundPipeline);
if (!pPipeTrav) {
// nothing to print
} else {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_NONE, "DS",
- "%s", vk_print_vkgraphicspipelinecreateinfo(&pPipeTrav->graphicsPipelineCI, "{DS}").c_str());
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_NONE, "DS", "%s",
+ vk_print_vkgraphicspipelinecreateinfo(&pPipeTrav->graphicsPipelineCI, "{DS}").c_str());
}
}
return skipCall;
}
// Print details of DS config to stdout
-static VkBool32 printDSConfig(layer_data* my_data, const VkCommandBuffer cb)
-{
+static VkBool32 printDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
VkBool32 skipCall = VK_FALSE;
- GLOBAL_CB_NODE* pCB = getCBNode(my_data, cb);
+ GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
if (pCB && pCB->lastBoundDescriptorSet) {
- SET_NODE* pSet = getSetNode(my_data, pCB->lastBoundDescriptorSet);
- DESCRIPTOR_POOL_NODE* pPool = getPoolNode(my_data, pSet->pool);
+ SET_NODE *pSet = getSetNode(my_data, pCB->lastBoundDescriptorSet);
+ DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pSet->pool);
// Print out pool details
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_NONE, "DS",
- "Details for pool %#" PRIxLEAST64 ".", (uint64_t) pPool->pool);
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_NONE, "DS", "Details for pool %#" PRIxLEAST64 ".", (uint64_t)pPool->pool);
string poolStr = vk_print_vkdescriptorpoolcreateinfo(&pPool->createInfo, " ");
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_NONE, "DS",
- "%s", poolStr.c_str());
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_NONE, "DS", "%s", poolStr.c_str());
// Print out set details
char prefix[10];
uint32_t index = 0;
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_NONE, "DS",
- "Details for descriptor set %#" PRIxLEAST64 ".", (uint64_t) pSet->set);
- LAYOUT_NODE* pLayout = pSet->pLayout;
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_NONE, "DS", "Details for descriptor set %#" PRIxLEAST64 ".", (uint64_t)pSet->set);
+ LAYOUT_NODE *pLayout = pSet->pLayout;
// Print layout details
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_NONE, "DS",
- "Layout #%u, (object %#" PRIxLEAST64 ") for DS %#" PRIxLEAST64 ".", index+1, (uint64_t)(pLayout->layout), (uint64_t)(pSet->set));
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_NONE, "DS", "Layout #%u, (object %#" PRIxLEAST64 ") for DS %#" PRIxLEAST64 ".", index + 1,
+ (uint64_t)(pLayout->layout), (uint64_t)(pSet->set));
sprintf(prefix, " [L%u] ", index);
string DSLstr = vk_print_vkdescriptorsetlayoutcreateinfo(&pLayout->createInfo, prefix).c_str();
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_NONE, "DS",
- "%s", DSLstr.c_str());
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_NONE, "DS", "%s", DSLstr.c_str());
index++;
- GENERIC_HEADER* pUpdate = pSet->pUpdateStructs;
+ GENERIC_HEADER *pUpdate = pSet->pUpdateStructs;
if (pUpdate) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_NONE, "DS",
- "Update Chain [UC] for descriptor set %#" PRIxLEAST64 ":", (uint64_t) pSet->set);
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_NONE, "DS", "Update Chain [UC] for descriptor set %#" PRIxLEAST64 ":", (uint64_t)pSet->set);
sprintf(prefix, " [UC] ");
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_NONE, "DS",
- "%s", dynamic_display(pUpdate, prefix).c_str());
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_NONE, "DS", "%s", dynamic_display(pUpdate, prefix).c_str());
// TODO : If there is a "view" associated with this update, print CI for that view
} else {
if (0 != pSet->descriptorCount) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_NONE, "DS",
- "No Update Chain for descriptor set %#" PRIxLEAST64 " which has %u descriptors (vkUpdateDescriptors has not been called)", (uint64_t) pSet->set, pSet->descriptorCount);
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_NONE, "DS", "No Update Chain for descriptor set %#" PRIxLEAST64
+ " which has %u descriptors (vkUpdateDescriptors has not been called)",
+ (uint64_t)pSet->set, pSet->descriptorCount);
} else {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_NONE, "DS",
- "FYI: No descriptors in descriptor set %#" PRIxLEAST64 ".", (uint64_t) pSet->set);
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_NONE, "DS", "FYI: No descriptors in descriptor set %#" PRIxLEAST64 ".", (uint64_t)pSet->set);
}
}
}
return skipCall;
}
-static void printCB(layer_data* my_data, const VkCommandBuffer cb)
-{
- GLOBAL_CB_NODE* pCB = getCBNode(my_data, cb);
+static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
+ GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
if (pCB && pCB->cmds.size() > 0) {
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_NONE, "DS",
- "Cmds in CB %p", (void*)cb);
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_NONE, "DS", "Cmds in CB %p", (void *)cb);
vector<CMD_NODE> cmds = pCB->cmds;
- for (auto ii=cmds.begin(); ii!=cmds.end(); ++ii) {
+ for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
// TODO : Need to pass cb as srcObj here
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
- " CMD#%" PRIu64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
+ __LINE__, DRAWSTATE_NONE, "DS", " CMD#%" PRIu64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
}
} else {
// Nothing to print
}
}
-static VkBool32 synchAndPrintDSConfig(layer_data* my_data, const VkCommandBuffer cb)
-{
+static VkBool32 synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
VkBool32 skipCall = VK_FALSE;
if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
return skipCall;
@@ -3581,27 +3533,23 @@ static VkBool32 synchAndPrintDSConfig(layer_data* my_data, const VkCommandBuffer
// Flags validation error if the associated call is made inside a render pass. The apiName
// routine should ONLY be called outside a render pass.
-static VkBool32 insideRenderPass(const layer_data* my_data, GLOBAL_CB_NODE *pCB, const char *apiName)
-{
+static VkBool32 insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
VkBool32 inside = VK_FALSE;
if (pCB->activeRenderPass) {
inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
(uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
- "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")",
- apiName, (uint64_t) pCB->activeRenderPass);
+ "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")", apiName,
+ (uint64_t)pCB->activeRenderPass);
}
return inside;
}
// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
-static VkBool32 outsideRenderPass(const layer_data* my_data, GLOBAL_CB_NODE *pCB, const char *apiName)
-{
+static VkBool32 outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
VkBool32 outside = VK_FALSE;
- if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
- (!pCB->activeRenderPass)) ||
- ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) &&
- (!pCB->activeRenderPass) &&
+ if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
+ ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
!(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
(uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
@@ -3610,8 +3558,7 @@ static VkBool32 outsideRenderPass(const layer_data* my_data, GLOBAL_CB_NODE *pCB
return outside;
}
-static void init_draw_state(layer_data *my_data, const VkAllocationCallbacks *pAllocator)
-{
+static void init_draw_state(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
uint32_t report_flags = 0;
uint32_t debug_action = 0;
FILE *log_output = NULL;
@@ -3619,10 +3566,9 @@ static void init_draw_state(layer_data *my_data, const VkAllocationCallbacks *pA
VkDebugReportCallbackEXT callback;
// initialize draw_state options
report_flags = getLayerOptionFlags("lunarg_draw_state.report_flags", 0);
- getLayerOptionEnum("lunarg_draw_state.debug_action", (uint32_t *) &debug_action);
+ getLayerOptionEnum("lunarg_draw_state.debug_action", (uint32_t *)&debug_action);
- if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
- {
+ if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG) {
option_str = getLayerOption("lunarg_draw_state.log_filename");
log_output = getLayerLogOutput(option_str, "lunarg_draw_state");
VkDebugReportCallbackCreateInfoEXT dbgInfo;
@@ -3646,20 +3592,19 @@ static void init_draw_state(layer_data *my_data, const VkAllocationCallbacks *pA
my_data->logging_callback.push_back(callback);
}
- if (!globalLockInitialized)
- {
+ if (!globalLockInitialized) {
loader_platform_thread_create_mutex(&globalLock);
globalLockInitialized = 1;
}
}
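
init_draw_state() pulls its configuration through the standard layer settings mechanism; an illustrative vk_layer_settings.txt fragment using the keys queried above (the values shown are examples only and depend on the layer utilities):

    # lunarg_draw_state settings consumed by init_draw_state()
    lunarg_draw_state.report_flags = warn,error
    lunarg_draw_state.debug_action = VK_DBG_LAYER_ACTION_LOG_MSG
    lunarg_draw_state.log_filename = stdout
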
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
- PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance) fpGetInstanceProcAddr(NULL, "vkCreateInstance");
+ PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
if (fpCreateInstance == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -3677,11 +3622,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstance
my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);
- my_data->report_data = debug_report_create_instance(
- my_data->instance_dispatch_table,
- *pInstance,
- pCreateInfo->enabledExtensionCount,
- pCreateInfo->ppEnabledExtensionNames);
+ my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
+ pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
init_draw_state(my_data, pAllocator);
@@ -3689,8 +3631,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstance
}
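
The part of vkCreateInstance() elided between the two hunks above follows the usual loader/layer bootstrap: advance the chain link before calling down so the next layer sees its own info, then bail out on failure. A reconstructed sketch of that step (based on the standard layer interface, not quoted from this diff):

    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext; // advance the layer chain before calling down
    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS)
        return result;
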
/* hook DestroyInstance to remove tableInstanceMap entry */
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
// TODOSC : Shouldn't need any customization here
dispatch_key key = get_dispatch_key(instance);
// TBD: Need any locking this early, in case this function is called at the
@@ -3722,23 +3663,21 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance
}
}
-static void createDeviceRegisterExtensions(const VkDeviceCreateInfo* pCreateInfo, VkDevice device)
-{
+static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
uint32_t i;
// TBD: Need any locking, in case this function is called at the same time
// by more than one thread?
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
dev_data->device_extensions.wsi_enabled = false;
+ VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
+ PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
- VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
- PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
-
- pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR) gpa(device, "vkCreateSwapchainKHR");
- pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR) gpa(device, "vkDestroySwapchainKHR");
- pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR) gpa(device, "vkGetSwapchainImagesKHR");
- pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR) gpa(device, "vkAcquireNextImageKHR");
- pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR) gpa(device, "vkQueuePresentKHR");
+ pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
+ pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
+ pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
+ pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
+ pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0) {
@@ -3747,14 +3686,14 @@ static void createDeviceRegisterExtensions(const VkDeviceCreateInfo* pCreateInfo
}
}
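
wsi_enabled only becomes true when the application actually requests the swapchain extension at device-creation time; an application-side illustration (fragment only, queue setup and the rest of the create info omitted):

    const char *deviceExtensions[] = {VK_KHR_SWAPCHAIN_EXTENSION_NAME};
    VkDeviceCreateInfo createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    createInfo.enabledExtensionCount = 1;
    createInfo.ppEnabledExtensionNames = deviceExtensions; // this is what makes createDeviceRegisterExtensions set wsi_enabled
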
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
- PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice) fpGetInstanceProcAddr(NULL, "vkCreateDevice");
+ PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
if (fpCreateDevice == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -3782,11 +3721,11 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice g
uint32_t count;
my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
my_device_data->physDevProperties.queue_family_properties.resize(count);
- my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, &my_device_data->physDevProperties.queue_family_properties[0]);
+ my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
+ gpu, &count, &my_device_data->physDevProperties.queue_family_properties[0]);
// TODO: device limits should make sure these are compatible
if (pCreateInfo->pEnabledFeatures) {
- my_device_data->physDevProperties.features =
- *pCreateInfo->pEnabledFeatures;
+ my_device_data->physDevProperties.features = *pCreateInfo->pEnabledFeatures;
} else {
memset(&my_device_data->physDevProperties.features, 0, sizeof(VkPhysicalDeviceFeatures));
}
@@ -3795,12 +3734,11 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice g
}
// prototype
-static void deleteRenderPasses(layer_data*);
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator)
-{
+static void deleteRenderPasses(layer_data *);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
// TODOSC : Shouldn't need any customization here
dispatch_key key = get_dispatch_key(device);
- layer_data* dev_data = get_my_data_ptr(key, layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
// Free all the memory
loader_platform_thread_lock_mutex(&globalLock);
deletePipelines(dev_data);
@@ -3819,76 +3757,42 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, cons
layer_data_map.erase(key);
}
-static const VkExtensionProperties instance_extensions[] = {
- {
- VK_EXT_DEBUG_REPORT_EXTENSION_NAME,
- VK_EXT_DEBUG_REPORT_SPEC_VERSION
- }
-};
+static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(
- const char *pLayerName,
- uint32_t *pCount,
- VkExtensionProperties* pProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
}
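
Callers query this entry point with the usual Vulkan two-call idiom; an application-side sketch (illustrative only):

    uint32_t count = 0;
    vkEnumerateInstanceExtensionProperties("VK_LAYER_LUNARG_draw_state", &count, NULL);        // first call: get count
    std::vector<VkExtensionProperties> props(count);
    vkEnumerateInstanceExtensionProperties("VK_LAYER_LUNARG_draw_state", &count, props.data()); // second call: fill array
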
-static const VkLayerProperties ds_global_layers[] = {
- {
- "VK_LAYER_LUNARG_draw_state",
- VK_API_VERSION,
- 1,
- "LunarG Validation Layer",
- }
-};
+static const VkLayerProperties ds_global_layers[] = {{
+ "VK_LAYER_LUNARG_draw_state", VK_API_VERSION, 1, "LunarG Validation Layer",
+}};
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(
- uint32_t *pCount,
- VkLayerProperties* pProperties)
-{
- return util_GetLayerProperties(ARRAY_SIZE(ds_global_layers),
- ds_global_layers,
- pCount, pProperties);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
+ return util_GetLayerProperties(ARRAY_SIZE(ds_global_layers), ds_global_layers, pCount, pProperties);
}
+static const VkLayerProperties ds_device_layers[] = {{
+ "VK_LAYER_LUNARG_draw_state", VK_API_VERSION, 1, "LunarG Validation Layer",
+}};
-static const VkLayerProperties ds_device_layers[] = {
- {
- "VK_LAYER_LUNARG_draw_state",
- VK_API_VERSION,
- 1,
- "LunarG Validation Layer",
- }
-};
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(
- VkPhysicalDevice physicalDevice,
- const char* pLayerName,
- uint32_t* pCount,
- VkExtensionProperties* pProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
+ const char *pLayerName, uint32_t *pCount,
+ VkExtensionProperties *pProperties) {
if (pLayerName == NULL) {
dispatch_key key = get_dispatch_key(physicalDevice);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
- return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(
- physicalDevice,
- NULL,
- pCount,
- pProperties);
+ return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
} else {
return util_GetExtensionProperties(0, NULL, pCount, pProperties);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(
- VkPhysicalDevice physicalDevice,
- uint32_t* pCount,
- VkLayerProperties* pProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
/* draw_state physical device layers are the same as global */
- return util_GetLayerProperties(ARRAY_SIZE(ds_device_layers), ds_device_layers,
- pCount, pProperties);
+ return util_GetLayerProperties(ARRAY_SIZE(ds_device_layers), ds_device_layers, pCount, pProperties);
}
// This validates that the initial layout specified in the command buffer for
@@ -3896,30 +3800,24 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(
// as the global IMAGE layout
VkBool32 ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) {
VkBool32 skip_call = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, cmdBuffer);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
for (auto cb_image_data : pCB->imageLayoutMap) {
VkImageLayout imageLayout;
if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
- skip_call |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__,
- DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
- "Cannot submit cmd buffer using deleted image %" PRIu64 ".",
- reinterpret_cast<const uint64_t &>(cb_image_data.first));
+ skip_call |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
+ __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image %" PRIu64 ".",
+ reinterpret_cast<const uint64_t &>(cb_image_data.first));
} else {
if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
// TODO: Set memory invalid which is in mem_tracker currently
- }
- else if (imageLayout != cb_image_data.second.initialLayout) {
- skip_call |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__,
- DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
- "Cannot submit cmd buffer using image with layout %s when "
- "first use is %s.",
- string_VkImageLayout(imageLayout),
- string_VkImageLayout(cb_image_data.second.initialLayout));
+ } else if (imageLayout != cb_image_data.second.initialLayout) {
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT,
+ "DS", "Cannot submit cmd buffer using image with layout %s when "
+ "first use is %s.",
+ string_VkImageLayout(imageLayout), string_VkImageLayout(cb_image_data.second.initialLayout));
}
SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
}
@@ -3927,13 +3825,14 @@ VkBool32 ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) {
return skip_call;
}
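
ValidateCmdBufImageLayouts() compares the layout each command buffer expects at first use against the globally tracked layout, so the application-side fix for the errors above is an explicit transition before first use. Illustrative barrier (application code, not part of this layer; image and cmdBuffer are assumed handles):

    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image; // the image whose first use follows
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    vkCmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1,
                         &barrier);
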
// Track which resources are in-flight by atomically incrementing their "in_use" count
-VkBool32 validateAndIncrementResources(layer_data* my_data, GLOBAL_CB_NODE* pCB) {
+VkBool32 validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
VkBool32 skip_call = VK_FALSE;
for (auto drawDataElement : pCB->drawData) {
for (auto buffer : drawDataElement.buffers) {
auto buffer_data = my_data->bufferMap.find(buffer);
if (buffer_data == my_data->bufferMap.end()) {
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
+ skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
+ (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
"Cannot submit cmd buffer using deleted buffer %" PRIu64 ".", (uint64_t)(buffer));
} else {
buffer_data->second.in_use.fetch_add(1);
@@ -3943,8 +3842,10 @@ VkBool32 validateAndIncrementResources(layer_data* my_data, GLOBAL_CB_NODE* pCB)
for (auto set : pCB->uniqueBoundSets) {
auto setNode = my_data->setMap.find(set);
if (setNode == my_data->setMap.end()) {
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
- "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set));
+ skip_call |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
+ "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set));
} else {
setNode->second->in_use.fetch_add(1);
}
@@ -3952,13 +3853,10 @@ VkBool32 validateAndIncrementResources(layer_data* my_data, GLOBAL_CB_NODE* pCB)
for (auto semaphore : pCB->semaphores) {
auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
if (semaphoreNode == my_data->semaphoreMap.end()) {
- skip_call |= log_msg(
- my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
- reinterpret_cast<uint64_t &>(semaphore), __LINE__,
- DRAWSTATE_INVALID_SEMAPHORE, "DS",
- "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".",
- reinterpret_cast<uint64_t &>(semaphore));
+ skip_call |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
+ "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".", reinterpret_cast<uint64_t &>(semaphore));
} else {
semaphoreNode->second.in_use.fetch_add(1);
}
@@ -3966,13 +3864,10 @@ VkBool32 validateAndIncrementResources(layer_data* my_data, GLOBAL_CB_NODE* pCB)
for (auto event : pCB->events) {
auto eventNode = my_data->eventMap.find(event);
if (eventNode == my_data->eventMap.end()) {
- skip_call |= log_msg(
- my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
- reinterpret_cast<uint64_t &>(event), __LINE__,
- DRAWSTATE_INVALID_EVENT, "DS",
- "Cannot submit cmd buffer using deleted event %" PRIu64 ".",
- reinterpret_cast<uint64_t &>(event));
+ skip_call |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
+ "Cannot submit cmd buffer using deleted event %" PRIu64 ".", reinterpret_cast<uint64_t &>(event));
} else {
eventNode->second.in_use.fetch_add(1);
}
@@ -3980,8 +3875,8 @@ VkBool32 validateAndIncrementResources(layer_data* my_data, GLOBAL_CB_NODE* pCB)
return skip_call;
}
-void decrementResources(layer_data* my_data, VkCommandBuffer cmdBuffer) {
- GLOBAL_CB_NODE* pCB = getCBNode(my_data, cmdBuffer);
+void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) {
+ GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
for (auto drawDataElement : pCB->drawData) {
for (auto buffer : drawDataElement.buffers) {
auto buffer_data = my_data->bufferMap.find(buffer);
@@ -4012,15 +3907,15 @@ void decrementResources(layer_data* my_data, VkCommandBuffer cmdBuffer) {
my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
}
for (auto eventStagePair : pCB->eventToStageMap) {
- my_data->eventMap[eventStagePair.first].stageMask =
- eventStagePair.second;
+ my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
}
}
-void decrementResources(layer_data* my_data, uint32_t fenceCount, const VkFence* pFences) {
+void decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
for (uint32_t i = 0; i < fenceCount; ++i) {
auto fence_data = my_data->fenceMap.find(pFences[i]);
- if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled) return;
+ if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
+ return;
fence_data->second.needsSignaled = false;
fence_data->second.in_use.fetch_sub(1);
if (fence_data->second.priorFence != VK_NULL_HANDLE) {
@@ -4032,7 +3927,7 @@ void decrementResources(layer_data* my_data, uint32_t fenceCount, const VkFence*
}
}
-void decrementResources(layer_data* my_data, VkQueue queue) {
+void decrementResources(layer_data *my_data, VkQueue queue) {
auto queue_data = my_data->queueMap.find(queue);
if (queue_data != my_data->queueMap.end()) {
for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) {
@@ -4043,7 +3938,8 @@ void decrementResources(layer_data* my_data, VkQueue queue) {
}
}
-void trackCommandBuffers(layer_data* my_data, VkQueue queue, uint32_t cmdBufferCount, const VkCommandBuffer* pCmdBuffers, VkFence fence) {
+void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t cmdBufferCount, const VkCommandBuffer *pCmdBuffers,
+ VkFence fence) {
auto queue_data = my_data->queueMap.find(queue);
if (fence != VK_NULL_HANDLE) {
VkFence priorFence = VK_NULL_HANDLE;
@@ -4065,11 +3961,8 @@ void trackCommandBuffers(layer_data* my_data, VkQueue queue, uint32_t cmdBufferC
fence_data->second.queue = queue;
fence_data->second.in_use.fetch_add(1);
for (uint32_t i = 0; i < cmdBufferCount; ++i) {
- for (auto secondaryCmdBuffer :
- my_data->commandBufferMap[pCmdBuffers[i]]
- ->secondaryCommandBuffers) {
- fence_data->second.cmdBuffers.push_back(
- secondaryCmdBuffer);
+ for (auto secondaryCmdBuffer : my_data->commandBufferMap[pCmdBuffers[i]]->secondaryCommandBuffers) {
+ fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer);
}
fence_data->second.cmdBuffers.push_back(pCmdBuffers[i]);
}
@@ -4077,7 +3970,7 @@ void trackCommandBuffers(layer_data* my_data, VkQueue queue, uint32_t cmdBufferC
if (queue_data != my_data->queueMap.end()) {
for (uint32_t i = 0; i < cmdBufferCount; ++i) {
for (auto secondaryCmdBuffer : my_data->commandBufferMap[pCmdBuffers[i]]->secondaryCommandBuffers) {
- queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
+ queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
}
queue_data->second.untrackedCmdBuffers.push_back(pCmdBuffers[i]);
}
@@ -4096,25 +3989,20 @@ void trackCommandBuffers(layer_data* my_data, VkQueue queue, uint32_t cmdBufferC
}
}
-bool validateCommandBufferSimultaneousUse(layer_data *dev_data,
- GLOBAL_CB_NODE *pCB) {
+bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
bool skip_call = false;
if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
- !(pCB->beginInfo.flags &
- VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
- skip_call |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__,
- DRAWSTATE_INVALID_FENCE, "DS",
- "Command Buffer %#" PRIx64 " is already in use and is not marked "
- "for simultaneous use.",
- reinterpret_cast<uint64_t>(pCB->commandBuffer));
+ !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
+ skip_call |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
+ __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Command Buffer %#" PRIx64 " is already in use and is not marked "
+ "for simultaneous use.",
+ reinterpret_cast<uint64_t>(pCB->commandBuffer));
}
return skip_call;
}
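
The error above is suppressed only when the command buffer was begun with the simultaneous-use flag; application-side illustration:

    VkCommandBufferBeginInfo beginInfo = {};
    beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    beginInfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT; // allow re-submission while still pending
    vkBeginCommandBuffer(commandBuffer, &beginInfo);
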
-static bool validateCommandBufferState(layer_data *dev_data,
- GLOBAL_CB_NODE *pCB) {
+static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
bool skipCall = false;
// Validate that cmd buffers have been updated
if (CB_RECORDED != pCB->state) {
@@ -4126,8 +4014,12 @@ static bool validateCommandBufferState(layer_data *dev_data,
for (auto set : pCB->destroyedSets)
set_string << " " << set;
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
- "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following bound descriptor set(s) destroyed: %s", (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
+ "You are submitting command buffer %#" PRIxLEAST64
+ " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
+ (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
causeReported = true;
}
if (!pCB->updatedSets.empty()) {
@@ -4135,8 +4027,12 @@ static bool validateCommandBufferState(layer_data *dev_data,
for (auto set : pCB->updatedSets)
set_string << " " << set;
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
- "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following bound descriptor set(s) updated: %s", (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
+ "You are submitting command buffer %#" PRIxLEAST64
+ " that is invalid because it had the following bound descriptor set(s) updated: %s",
+ (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
causeReported = true;
}
if (!pCB->destroyedFramebuffers.empty()) {
@@ -4144,16 +4040,12 @@ static bool validateCommandBufferState(layer_data *dev_data,
for (auto fb : pCB->destroyedFramebuffers)
fb_string << " " << fb;
- skipCall |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__,
- DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
- "You are submitting command buffer %#" PRIxLEAST64
- " that is invalid because it had the following "
- "referenced framebuffers destroyed: %s",
- reinterpret_cast<uint64_t &>(pCB->commandBuffer),
- fb_string.str().c_str());
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
+ "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following "
+ "referenced framebuffers destroyed: %s",
+ reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
causeReported = true;
}
// TODO : This is defensive programming to make sure an error is
@@ -4162,62 +4054,51 @@ static bool validateCommandBufferState(layer_data *dev_data,
        // code should be updated to seamlessly handle all the cases.
if (!causeReported) {
skipCall |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__,
- DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
- "You are submitting command buffer %#" PRIxLEAST64
- " that is invalid due to an unknown cause. Validation "
+ dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
+ "You are submitting command buffer %#" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
"should "
"be improved to report the exact cause.",
reinterpret_cast<uint64_t &>(pCB->commandBuffer));
}
} else { // Flag error for using CB w/o vkEndCommandBuffer() called
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
- "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!", (uint64_t)(pCB->commandBuffer));
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
+ "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!",
+ (uint64_t)(pCB->commandBuffer));
}
}
return skipCall;
}
-static VkBool32 validatePrimaryCommandBufferState(layer_data *dev_data,
- GLOBAL_CB_NODE *pCB) {
+static VkBool32 validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
// Track in-use for resources off of primary and any secondary CBs
VkBool32 skipCall = validateAndIncrementResources(dev_data, pCB);
if (!pCB->secondaryCommandBuffers.empty()) {
for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
- skipCall |= validateAndIncrementResources(
- dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
- GLOBAL_CB_NODE* pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
+ skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
+ GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
if (pSubCB->primaryCommandBuffer != pCB->commandBuffer) {
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
- __LINE__,
- DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
- "CB %#" PRIxLEAST64
- " was submitted with secondary buffer %#" PRIxLEAST64
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
+ __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
+ "CB %#" PRIxLEAST64 " was submitted with secondary buffer %#" PRIxLEAST64
" but that buffer has subsequently been bound to "
"primary cmd buffer %#" PRIxLEAST64 ".",
- reinterpret_cast<uint64_t>(pCB->commandBuffer),
- reinterpret_cast<uint64_t>(secondaryCmdBuffer),
- reinterpret_cast<uint64_t>(
- pSubCB->primaryCommandBuffer));
+ reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
+ reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
}
}
}
// TODO : Verify if this also needs to be checked for secondary command
// buffers. If so, this block of code can move to
// validateCommandBufferState() function. vulkan GL106 filed to clarify
- if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
- (pCB->submitCount > 1)) {
- skipCall |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__,
- DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
- "CB %#" PRIxLEAST64
- " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
- "set, but has been submitted %#" PRIxLEAST64 " times.",
- (uint64_t)(pCB->commandBuffer), pCB->submitCount);
+ if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
+ __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
+ "CB %#" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
+ "set, but has been submitted %#" PRIxLEAST64 " times.",
+ (uint64_t)(pCB->commandBuffer), pCB->submitCount);
}
skipCall |= validateCommandBufferState(dev_data, pCB);
// If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
@@ -4226,30 +4107,31 @@ static VkBool32 validatePrimaryCommandBufferState(layer_data *dev_data,
return skipCall;
}
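
The secondary-buffer check above relies on pSubCB->primaryCommandBuffer, which this layer records when the application nests the buffers; application-side illustration of the call that creates that association:

    vkCmdExecuteCommands(primaryCmdBuffer, 1, &secondaryCmdBuffer); // binds the secondary buffer to this primary
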
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
VkBool32 skipCall = VK_FALSE;
- GLOBAL_CB_NODE* pCB = NULL;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
+ GLOBAL_CB_NODE *pCB = NULL;
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo *submit = &pSubmits[submit_idx];
vector<VkSemaphore> semaphoreList;
- for (uint32_t i=0; i < submit->waitSemaphoreCount; ++i) {
+ for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
semaphoreList.push_back(submit->pWaitSemaphores[i]);
if (dev_data->semaphoreMap[submit->pWaitSemaphores[i]].signaled) {
dev_data->semaphoreMap[submit->pWaitSemaphores[i]].signaled = 0;
} else {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
- "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
- (uint64_t)(queue), (uint64_t)(submit->pWaitSemaphores[i]));
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
+ "DS", "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
+ (uint64_t)(queue), (uint64_t)(submit->pWaitSemaphores[i]));
}
}
- for (uint32_t i=0; i < submit->signalSemaphoreCount; ++i) {
+ for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
semaphoreList.push_back(submit->pSignalSemaphores[i]);
dev_data->semaphoreMap[submit->pSignalSemaphores[i]].signaled = 1;
}
- for (uint32_t i=0; i < submit->commandBufferCount; i++) {
+ for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]);
pCB = getCBNode(dev_data, submit->pCommandBuffers[i]);
pCB->semaphores = semaphoreList;
@@ -4257,16 +4139,11 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(VkQueue queue, uint
skipCall |= validatePrimaryCommandBufferState(dev_data, pCB);
}
if ((fence != VK_NULL_HANDLE) && dev_data->fenceMap[fence].in_use.load()) {
- skipCall |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
- (uint64_t)(fence), __LINE__,
- DRAWSTATE_INVALID_FENCE, "DS",
- "Fence %#" PRIx64 " is already in use by another submission.",
- (uint64_t)(fence));
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
+ (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
+ "Fence %#" PRIx64 " is already in use by another submission.", (uint64_t)(fence));
}
- trackCommandBuffers(dev_data, queue, submit->commandBufferCount,
- submit->pCommandBuffers, fence);
+ trackCommandBuffers(dev_data, queue, submit->commandBufferCount, submit->pCommandBuffers, fence);
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall)
@@ -4276,16 +4153,18 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(VkQueue queue, uint
// Note: This function assumes that the global lock is held by the calling
// thread.
-VkBool32 cleanInFlightCmdBuffer(layer_data* my_data, VkCommandBuffer cmdBuffer) {
+VkBool32 cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
VkBool32 skip_call = VK_FALSE;
- GLOBAL_CB_NODE* pCB = getCBNode(my_data, cmdBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
if (pCB) {
for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
for (auto event : queryEventsPair.second) {
if (my_data->eventMap[event].needsSignaled) {
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
- "Cannot get query results on queryPool %" PRIu64 " with index %d which was guarded by unsignaled event %" PRIu64 ".",
- (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
+ skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
+ "Cannot get query results on queryPool %" PRIu64
+ " with index %d which was guarded by unsignaled event %" PRIu64 ".",
+ (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
}
}
}
@@ -4298,14 +4177,14 @@ VkBool32 cleanInFlightCmdBuffer(layer_data* my_data, VkCommandBuffer cmdBuffer)
// is still in flight on another queue, add it back into the global set.
// Note: This function assumes that the global lock is held by the calling
// thread.
-static inline void removeInFlightCmdBuffer(layer_data* dev_data, VkCommandBuffer cmd_buffer, VkQueue queue)
-{
+static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkQueue queue) {
// Pull it off of global list initially, but if we find it in any other queue list, add it back in
dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
dev_data->queueMap[queue].inFlightCmdBuffers.erase(cmd_buffer);
for (auto q : dev_data->queues) {
- if ((q != queue) && (dev_data->queueMap[q].inFlightCmdBuffers.find(cmd_buffer) != dev_data->queueMap[q].inFlightCmdBuffers.end())) {
+ if ((q != queue) &&
+ (dev_data->queueMap[q].inFlightCmdBuffers.find(cmd_buffer) != dev_data->queueMap[q].inFlightCmdBuffers.end())) {
dev_data->globalInFlightCmdBuffers.insert(cmd_buffer);
break;
}
@@ -4313,9 +4192,9 @@ static inline void removeInFlightCmdBuffer(layer_data* dev_data, VkCommandBuffer
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);
VkBool32 skip_call = VK_FALSE;
loader_platform_thread_lock_mutex(&globalLock);
@@ -4341,10 +4220,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences(VkDevice device,
return result;
}
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
VkBool32 skip_call = VK_FALSE;
loader_platform_thread_lock_mutex(&globalLock);
@@ -4362,9 +4239,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device,
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
dev_data->queues.push_back(*pQueue);
@@ -4372,9 +4249,8 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uin
loader_platform_thread_unlock_mutex(&globalLock);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
decrementResources(dev_data, queue);
VkBool32 skip_call = VK_FALSE;
loader_platform_thread_lock_mutex(&globalLock);
@@ -4391,10 +4267,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue)
return dev_data->device_dispatch_table->QueueWaitIdle(queue);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) {
VkBool32 skip_call = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
for (auto queue : dev_data->queues) {
decrementResources(dev_data, queue);
@@ -4413,83 +4288,69 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device)
return dev_data->device_dispatch_table->DeviceWaitIdle(device);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator)
-{
- layer_data *dev_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
bool skipCall = false;
loader_platform_thread_lock_mutex(&globalLock);
if (dev_data->fenceMap[fence].in_use.load()) {
- skipCall |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, (uint64_t)(fence),
- __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
- "Fence %#" PRIx64 " is in use by a command buffer.",
- (uint64_t)(fence));
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
+ (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
+ "Fence %#" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
}
loader_platform_thread_unlock_mutex(&globalLock);
if (!skipCall)
- dev_data->device_dispatch_table->DestroyFence(device, fence,
- pAllocator);
+ dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
// TODO : Clean up any internal data structures using this obj.
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
loader_platform_thread_lock_mutex(&globalLock);
if (dev_data->semaphoreMap[semaphore].in_use.load()) {
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
- reinterpret_cast<uint64_t &>(semaphore), __LINE__,
- DRAWSTATE_INVALID_SEMAPHORE, "DS",
- "Cannot delete semaphore %" PRIx64 " which is in use.",
- reinterpret_cast<uint64_t &>(semaphore));
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
+ reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
+ "Cannot delete semaphore %" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
}
dev_data->semaphoreMap.erase(semaphore);
loader_platform_thread_unlock_mutex(&globalLock);
// TODO : Clean up any internal data structures using this obj.
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator)
-{
- layer_data *dev_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
bool skip_call = false;
loader_platform_thread_lock_mutex(&globalLock);
auto event_data = dev_data->eventMap.find(event);
if (event_data != dev_data->eventMap.end()) {
if (event_data->second.in_use.load()) {
- skip_call |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
- reinterpret_cast<uint64_t &>(event), __LINE__,
- DRAWSTATE_INVALID_EVENT, "DS",
- "Cannot delete event %" PRIu64
- " which is in use by a command buffer.",
- reinterpret_cast<uint64_t &>(event));
+ skip_call |= log_msg(
+ dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
+ "Cannot delete event %" PRIu64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
}
dev_data->eventMap.erase(event_data);
}
loader_platform_thread_unlock_mutex(&globalLock);
if (!skip_call)
- dev_data->device_dispatch_table->DestroyEvent(device, event,
- pAllocator);
+ dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
// TODO : Clean up any internal data structures using this obj.
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator)
-{
- get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
+ get_my_data_ptr(get_dispatch_key(device), layer_data_map)
+ ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
// TODO : Clean up any internal data structures using this obj.
}
-VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
- size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags) {
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
+ uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
+ VkQueryResultFlags flags) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
- GLOBAL_CB_NODE* pCB = nullptr;
+ GLOBAL_CB_NODE *pCB = nullptr;
loader_platform_thread_lock_mutex(&globalLock);
for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
pCB = getCBNode(dev_data, cmdBuffer);
@@ -4505,13 +4366,15 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPoo
if (queryToStateElement != dev_data->queryToStateMap.end()) {
}
// Available and in flight
- if(queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() && queryToStateElement->second) {
+ if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
+ queryToStateElement->second) {
for (auto cmdBuffer : queryElement->second) {
pCB = getCBNode(dev_data, cmdBuffer);
auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
- skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__,
- DRAWSTATE_INVALID_QUERY, "DS", "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.",
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
+ "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.",
(uint64_t)(queryPool), firstQuery + i);
} else {
for (auto event : queryEventElement->second) {
@@ -4519,8 +4382,9 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPoo
}
}
}
- // Unavailable and in flight
- } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
+ // Unavailable and in flight
+ } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
+ !queryToStateElement->second) {
// TODO : Can there be the same query in use by multiple command buffers in flight?
bool make_available = false;
for (auto cmdBuffer : queryElement->second) {
@@ -4528,18 +4392,21 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPoo
make_available |= pCB->queryToStateMap[query];
}
if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
- skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
"Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
(uint64_t)(queryPool), firstQuery + i);
}
- // Unavailable
+ // Unavailable
} else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
- skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
+ 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
"Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
(uint64_t)(queryPool), firstQuery + i);
-            // Uninitialized
+        // Uninitialized
} else if (queryToStateElement == dev_data->queryToStateMap.end()) {
- skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
+ 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
"Cannot get query results on queryPool %" PRIu64 " with index %d which is uninitialized.",
(uint64_t)(queryPool), firstQuery + i);
}
@@ -4547,27 +4414,30 @@ VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPoo
loader_platform_thread_unlock_mutex(&globalLock);
if (skip_call)
return VK_ERROR_VALIDATION_FAILED_EXT;
- return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
+ return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
+ flags);
}
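
The in-flight/unavailable cases above hinge on the flags the caller passes: waiting for (or accepting partial) results is what makes reading back a still-pending query legal. Application-side sketch with illustrative sizes:

    uint64_t results[2] = {};
    vkGetQueryPoolResults(device, queryPool, 0, 2, sizeof(results), results, sizeof(uint64_t),
                          VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
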
-VkBool32 validateIdleBuffer(const layer_data* my_data, VkBuffer buffer) {
+VkBool32 validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
VkBool32 skip_call = VK_FALSE;
auto buffer_data = my_data->bufferMap.find(buffer);
if (buffer_data == my_data->bufferMap.end()) {
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
+ skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
+ (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
"Cannot free buffer %" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
} else {
if (buffer_data->second.in_use.load()) {
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
+ skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
+ (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
"Cannot free buffer %" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
}
}
return skip_call;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
if (!validateIdleBuffer(dev_data, buffer)) {
loader_platform_thread_unlock_mutex(&globalLock);
@@ -4578,77 +4448,83 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer(VkDevice device, VkBu
loader_platform_thread_unlock_mutex(&globalLock);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
loader_platform_thread_lock_mutex(&globalLock);
dev_data->bufferViewMap.erase(bufferView);
loader_platform_thread_unlock_mutex(&globalLock);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
loader_platform_thread_lock_mutex(&globalLock);
dev_data->imageMap.erase(image);
loader_platform_thread_unlock_mutex(&globalLock);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator)
-{
- get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
+ get_my_data_ptr(get_dispatch_key(device), layer_data_map)
+ ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
// TODO : Clean up any internal data structures using this obj.
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator)
-{
- get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
+ get_my_data_ptr(get_dispatch_key(device), layer_data_map)
+ ->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
// TODO : Clean up any internal data structures using this obj.
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
// TODO : Clean up any internal data structures using this obj.
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator)
-{
- get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
+ get_my_data_ptr(get_dispatch_key(device), layer_data_map)
+ ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
// TODO : Clean up any internal data structures using this obj.
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
// TODO : Clean up any internal data structures using this obj.
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator)
-{
- get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
+ get_my_data_ptr(get_dispatch_key(device), layer_data_map)
+ ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
// TODO : Clean up any internal data structures using this obj.
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator)
-{
- get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
+ get_my_data_ptr(get_dispatch_key(device), layer_data_map)
+ ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
// TODO : Clean up any internal data structures using this obj.
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t count, const VkCommandBuffer *pCommandBuffers)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t count, const VkCommandBuffer *pCommandBuffers) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
bool skip_call = false;
loader_platform_thread_lock_mutex(&globalLock);
for (uint32_t i = 0; i < count; i++) {
if (dev_data->globalInFlightCmdBuffers.count(pCommandBuffers[i])) {
- skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
- "Attempt to free command buffer (%#" PRIxLEAST64 ") which is in use.", reinterpret_cast<uint64_t>(pCommandBuffers[i]));
+ skip_call |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
+ "Attempt to free command buffer (%#" PRIxLEAST64 ") which is in use.",
+ reinterpret_cast<uint64_t>(pCommandBuffers[i]));
}
// Delete CB information structure, and remove from commandBufferMap
auto cb = dev_data->commandBufferMap.find(pCommandBuffers[i]);
@@ -4668,9 +4544,10 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(VkDevice device,
dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, count, pCommandBuffers);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkCommandPool *pCommandPool) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
@@ -4683,14 +4560,11 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice devi
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(
- VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
- layer_data *dev_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkResult result = dev_data->device_dispatch_table->CreateQueryPool(
- device, pCreateInfo, pAllocator, pQueryPool);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
@@ -4699,15 +4573,17 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(
return result;
}
-VkBool32 validateCommandBuffersNotInUse(const layer_data* dev_data, VkCommandPool commandPool) {
+VkBool32 validateCommandBuffersNotInUse(const layer_data *dev_data, VkCommandPool commandPool) {
VkBool32 skipCall = VK_FALSE;
auto pool_data = dev_data->commandPoolMap.find(commandPool);
if (pool_data != dev_data->commandPoolMap.end()) {
for (auto cmdBuffer : pool_data->second.commandBuffers) {
if (dev_data->globalInFlightCmdBuffers.count(cmdBuffer)) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, (uint64_t)(commandPool),
- __LINE__, DRAWSTATE_OBJECT_INUSE, "DS", "Cannot reset command pool %" PRIx64 " when allocated command buffer %" PRIx64 " is in use.",
- (uint64_t)(commandPool), (uint64_t)(cmdBuffer));
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT,
+ (uint64_t)(commandPool), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
+ "Cannot reset command pool %" PRIx64 " when allocated command buffer %" PRIx64 " is in use.",
+ (uint64_t)(commandPool), (uint64_t)(cmdBuffer));
}
}
}
@@ -4715,18 +4591,20 @@ VkBool32 validateCommandBuffersNotInUse(const layer_data* dev_data, VkCommandPoo
}
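As a hedged sketch of what the in-use check above amounts to (the container contents here are hypothetical, not taken from this change): a command pool cannot be reset or destroyed while any command buffer allocated from it is still tracked as in flight.

#include <vulkan/vulkan.h>
#include <unordered_set>
#include <vector>

// Returns true when any buffer allocated from the pool is still in flight;
// the layer would then log DRAWSTATE_OBJECT_INUSE and skip the reset/destroy.
static bool PoolHasInFlightBuffers(const std::unordered_set<VkCommandBuffer> &inFlight,
                                   const std::vector<VkCommandBuffer> &poolBuffers) {
    for (VkCommandBuffer cb : poolBuffers) {
        if (inFlight.count(cb))
            return true;
    }
    return false;
}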
// Destroy commandPool along with all of the commandBuffers allocated from that pool
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
    // Must remove cmdpool from commandPoolMap after removing all of its cmdbuffers from the commandBufferMap
if (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end()) {
- for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin(); poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) {
+ for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
+ poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) {
auto del_cb = dev_data->commandBufferMap.find(*poolCb);
- delete (*del_cb).second; // delete CB info structure
- dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer from cbMap
- poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase(poolCb); // Remove CB reference from commandPoolMap's list
+ delete (*del_cb).second; // delete CB info structure
+ dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer from cbMap
+ poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase(
+ poolCb); // Remove CB reference from commandPoolMap's list
}
}
dev_data->commandPoolMap.erase(commandPool);
@@ -4739,13 +4617,10 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool(VkDevice device,
dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(
- VkDevice device,
- VkCommandPool commandPool,
- VkCommandPoolResetFlags flags)
-{
- layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool))
return VK_ERROR_VALIDATION_FAILED_EXT;
@@ -4764,35 +4639,28 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
-vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
- layer_data *dev_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
bool skipCall = false;
loader_platform_thread_lock_mutex(&globalLock);
for (uint32_t i = 0; i < fenceCount; ++i) {
if (dev_data->fenceMap[pFences[i]].in_use.load()) {
skipCall |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
- reinterpret_cast<const uint64_t &>(pFences[i]),
- __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
- "Fence %#" PRIx64 " is in use by a command buffer.",
- reinterpret_cast<const uint64_t &>(pFences[i]));
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
+ reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
+ "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
}
}
loader_platform_thread_unlock_mutex(&globalLock);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
if (!skipCall)
- result = dev_data->device_dispatch_table->ResetFences(
- device, fenceCount, pFences);
+ result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator)
-{
- layer_data *dev_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
auto fbNode = dev_data->frameBufferMap.find(framebuffer);
if (fbNode != dev_data->frameBufferMap.end()) {
for (auto cb : fbNode->second.referencingCmdBuffers) {
@@ -4805,40 +4673,37 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer(VkDevice device,
}
dev_data->frameBufferMap.erase(framebuffer);
}
- dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer,
- pAllocator);
+ dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator)
-{
- get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
+ get_my_data_ptr(get_dispatch_key(device), layer_data_map)
+ ->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
// TODO : Clean up any internal data structures using this obj.
}
-VkBool32 validate_queue_family_indices(layer_data* dev_data, const char *function_name, const uint32_t count, const uint32_t* indices) {
+VkBool32 validate_queue_family_indices(layer_data *dev_data, const char *function_name, const uint32_t count,
+ const uint32_t *indices) {
VkBool32 skipCall = VK_FALSE;
for (auto i = 0; i < count; i++) {
if (indices[i] >= dev_data->physDevProperties.queue_family_properties.size()) {
- skipCall |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
- "%s has QueueFamilyIndex greater than the number of QueueFamilies ("
- PRINTF_SIZE_T_SPECIFIER ") for this device.",
- function_name,
- dev_data->physDevProperties.queue_family_properties.size());
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
+ "%s has QueueFamilyIndex greater than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER
+ ") for this device.",
+ function_name, dev_data->physDevProperties.queue_family_properties.size());
}
}
return skipCall;
}
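A minimal illustration of the check above, with hypothetical counts and indices that are not part of this change: validate_queue_family_indices() rejects any requested index that is not below the number of queue families reported for the physical device.

#include <cstdint>
#include <cstdio>

// Sketch assuming a device that reports 3 queue families.
static void QueueFamilyIndexCheckExample() {
    const uint32_t queueFamilyCount = 3;  // hypothetical value from vkGetPhysicalDeviceQueueFamilyProperties
    const uint32_t indices[] = {0, 2, 5}; // 5 is out of range and would trigger DRAWSTATE_INVALID_QUEUE_INDEX
    for (uint32_t idx : indices) {
        if (idx >= queueFamilyCount)
            std::printf("queueFamilyIndex %u exceeds the %u available families\n", idx, queueFamilyCount);
    }
}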
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- bool skipCall = validate_queue_family_indices(dev_data, "vkCreateBuffer",
- pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ bool skipCall = validate_queue_family_indices(dev_data, "vkCreateBuffer", pCreateInfo->queueFamilyIndexCount,
+ pCreateInfo->pQueueFamilyIndices);
if (!skipCall) {
result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
}
@@ -4853,9 +4718,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, c
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
@@ -4865,12 +4730,12 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice devic
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- bool skipCall = validate_queue_family_indices(dev_data, "vkCreateImage",
- pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ bool skipCall = validate_queue_family_indices(dev_data, "vkCreateImage", pCreateInfo->queueFamilyIndexCount,
+ pCreateInfo->pQueueFamilyIndices);
if (!skipCall) {
result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
}
@@ -4889,39 +4754,33 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, co
return result;
}
-static void ResolveRemainingLevelsLayers(layer_data* dev_data,
- VkImageSubresourceRange* range,
- VkImage image) {
- /* expects globalLock to be held by caller */
+static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
+ /* expects globalLock to be held by caller */
- auto image_node_it = dev_data->imageMap.find(image);
- if (image_node_it != dev_data->imageMap.end()) {
- /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
- * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
- * the actual values.
- */
- if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
- range->levelCount =
- image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
- }
+ auto image_node_it = dev_data->imageMap.find(image);
+ if (image_node_it != dev_data->imageMap.end()) {
+ /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
+ * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
+ * the actual values.
+ */
+ if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
+ range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
+ }
- if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
- range->layerCount =
- image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
+ if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
+ range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
+ }
}
- }
}
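For readers skimming the reformatted helper above, a small standalone sketch (image dimensions here are hypothetical, not taken from this change) of how the VK_REMAINING_MIP_LEVELS / VK_REMAINING_ARRAY_LAYERS special values get resolved against the image's create info:

#include <vulkan/vulkan.h>

// Assumes an image created with mipLevels = 10 and arrayLayers = 6.
static void ResolveRemainingExample() {
    const uint32_t imageMipLevels = 10, imageArrayLayers = 6; // hypothetical create-info values
    VkImageSubresourceRange range{};
    range.baseMipLevel   = 2;
    range.levelCount     = VK_REMAINING_MIP_LEVELS;
    range.baseArrayLayer = 0;
    range.layerCount     = VK_REMAINING_ARRAY_LAYERS;
    if (range.levelCount == VK_REMAINING_MIP_LEVELS)
        range.levelCount = imageMipLevels - range.baseMipLevel;     // resolves to 8
    if (range.layerCount == VK_REMAINING_ARRAY_LAYERS)
        range.layerCount = imageArrayLayers - range.baseArrayLayer; // resolves to 6
}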
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
- auto image_view = unique_ptr<VkImageViewCreateInfo>(
- new VkImageViewCreateInfo(*pCreateInfo));
- ResolveRemainingLevelsLayers(dev_data, &image_view->subresourceRange,
- pCreateInfo->image);
+ auto image_view = unique_ptr<VkImageViewCreateInfo>(new VkImageViewCreateInfo(*pCreateInfo));
+ ResolveRemainingLevelsLayers(dev_data, &image_view->subresourceRange, pCreateInfo->image);
dev_data->imageViewMap[*pView] = std::move(image_view);
loader_platform_thread_unlock_mutex(&globalLock);
}
@@ -4929,12 +4788,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
- vkCreateFence(VkDevice device, const VkFenceCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkFence* pFence) {
- layer_data *dev_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkResult result = dev_data->device_dispatch_table->CreateFence(
- device, pCreateInfo, pAllocator, pFence);
+vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
dev_data->fenceMap[*pFence].in_use.store(0);
@@ -4944,88 +4800,68 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
}
// TODO handle pipeline caches
-VKAPI_ATTR VkResult VKAPI_CALL
- vkCreatePipelineCache(VkDevice device,
- const VkPipelineCacheCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkPipelineCache *pPipelineCache) {
- layer_data *dev_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(
- device, pCreateInfo, pAllocator, pPipelineCache);
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
return result;
}
-VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache(
- VkDevice device,
- VkPipelineCache pipelineCache,
- const VkAllocationCallbacks* pAllocator)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VKAPI_ATTR void VKAPI_CALL
+vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
}
-VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData(
- VkDevice device,
- VkPipelineCache pipelineCache,
- size_t* pDataSize,
- void* pData)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VKAPI_ATTR VkResult VKAPI_CALL
+vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
return result;
}
-VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches(
- VkDevice device,
- VkPipelineCache dstCache,
- uint32_t srcCacheCount,
- const VkPipelineCache* pSrcCaches)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VKAPI_ATTR VkResult VKAPI_CALL
+vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines(
- VkDevice device,
- VkPipelineCache pipelineCache,
- uint32_t count,
- const VkGraphicsPipelineCreateInfo *pCreateInfos,
- const VkAllocationCallbacks *pAllocator,
- VkPipeline *pPipelines)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipelines) {
VkResult result = VK_SUCCESS;
- //TODO What to do with pipelineCache?
+ // TODO What to do with pipelineCache?
// The order of operations here is a little convoluted but gets the job done
// 1. Pipeline create state is first shadowed into PIPELINE_NODE struct
// 2. Create state is then validated (which uses flags setup during shadowing)
// 3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
VkBool32 skipCall = VK_FALSE;
// TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
- vector<PIPELINE_NODE*> pPipeNode(count);
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ vector<PIPELINE_NODE *> pPipeNode(count);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- uint32_t i=0;
+ uint32_t i = 0;
loader_platform_thread_lock_mutex(&globalLock);
- for (i=0; i<count; i++) {
+ for (i = 0; i < count; i++) {
pPipeNode[i] = initGraphicsPipeline(dev_data, &pCreateInfos[i]);
skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
}
if (VK_FALSE == skipCall) {
loader_platform_thread_unlock_mutex(&globalLock);
- result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device,
- pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
+ result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
+ pPipelines);
loader_platform_thread_lock_mutex(&globalLock);
- for (i=0; i<count; i++) {
+ for (i = 0; i < count; i++) {
pPipeNode[i]->pipeline = pPipelines[i];
dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
}
loader_platform_thread_unlock_mutex(&globalLock);
} else {
- for (i=0; i<count; i++) {
+ for (i = 0; i < count; i++) {
if (pPipeNode[i]) {
// If we allocated a pipeNode, need to clean it up here
delete[] pPipeNode[i]->pVertexBindingDescriptions;
@@ -5040,29 +4876,25 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines(
- VkDevice device,
- VkPipelineCache pipelineCache,
- uint32_t count,
- const VkComputePipelineCreateInfo *pCreateInfos,
- const VkAllocationCallbacks *pAllocator,
- VkPipeline *pPipelines)
-{
- VkResult result = VK_SUCCESS;
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipelines) {
+ VkResult result = VK_SUCCESS;
VkBool32 skipCall = VK_FALSE;
// TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
- vector<PIPELINE_NODE*> pPipeNode(count);
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ vector<PIPELINE_NODE *> pPipeNode(count);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- uint32_t i=0;
+ uint32_t i = 0;
loader_platform_thread_lock_mutex(&globalLock);
- for (i=0; i<count; i++) {
+ for (i = 0; i < count; i++) {
// TODO: Verify compute stage bits
// Create and initialize internal tracking data structure
pPipeNode[i] = new PIPELINE_NODE;
- memcpy(&pPipeNode[i]->computePipelineCI, (const void*)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
+ memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
// TODO: Add Compute Pipeline Verification
// skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
@@ -5070,15 +4902,16 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines(
if (VK_FALSE == skipCall) {
loader_platform_thread_unlock_mutex(&globalLock);
- result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
+ result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
+ pPipelines);
loader_platform_thread_lock_mutex(&globalLock);
- for (i=0; i<count; i++) {
+ for (i = 0; i < count; i++) {
pPipeNode[i]->pipeline = pPipelines[i];
dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
}
loader_platform_thread_unlock_mutex(&globalLock);
} else {
- for (i=0; i<count; i++) {
+ for (i = 0; i < count; i++) {
// Clean up any locally allocated data structures
delete pPipeNode[i];
}
@@ -5088,9 +4921,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
@@ -5100,43 +4933,44 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device,
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
if (VK_SUCCESS == result) {
// TODOSC : Capture layout bindings set
- LAYOUT_NODE* pNewNode = new LAYOUT_NODE;
+ LAYOUT_NODE *pNewNode = new LAYOUT_NODE;
if (NULL == pNewNode) {
- if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t) *pSetLayout, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
- "Out of memory while attempting to allocate LAYOUT_NODE in vkCreateDescriptorSetLayout()"))
+ if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
+ (uint64_t)*pSetLayout, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
+ "Out of memory while attempting to allocate LAYOUT_NODE in vkCreateDescriptorSetLayout()"))
return VK_ERROR_VALIDATION_FAILED_EXT;
}
- memcpy((void*)&pNewNode->createInfo, pCreateInfo, sizeof(VkDescriptorSetLayoutCreateInfo));
+ memcpy((void *)&pNewNode->createInfo, pCreateInfo, sizeof(VkDescriptorSetLayoutCreateInfo));
pNewNode->createInfo.pBindings = new VkDescriptorSetLayoutBinding[pCreateInfo->bindingCount];
- memcpy((void*)pNewNode->createInfo.pBindings, pCreateInfo->pBindings, sizeof(VkDescriptorSetLayoutBinding)*pCreateInfo->bindingCount);
+ memcpy((void *)pNewNode->createInfo.pBindings, pCreateInfo->pBindings,
+ sizeof(VkDescriptorSetLayoutBinding) * pCreateInfo->bindingCount);
// g++ does not like reserve with size 0
if (pCreateInfo->bindingCount)
pNewNode->bindingToIndexMap.reserve(pCreateInfo->bindingCount);
uint32_t totalCount = 0;
for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
if (!pNewNode->bindingToIndexMap.emplace(pCreateInfo->pBindings[i].binding, i).second) {
- if (log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
- (uint64_t)*pSetLayout, __LINE__,
- DRAWSTATE_INVALID_LAYOUT, "DS",
- "duplicated binding number in "
- "VkDescriptorSetLayoutBinding"))
+ if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)*pSetLayout, __LINE__,
+ DRAWSTATE_INVALID_LAYOUT, "DS", "duplicated binding number in "
+ "VkDescriptorSetLayoutBinding"))
return VK_ERROR_VALIDATION_FAILED_EXT;
} else {
pNewNode->bindingToIndexMap[pCreateInfo->pBindings[i].binding] = i;
}
totalCount += pCreateInfo->pBindings[i].descriptorCount;
if (pCreateInfo->pBindings[i].pImmutableSamplers) {
- VkSampler** ppIS = (VkSampler**)&pNewNode->createInfo.pBindings[i].pImmutableSamplers;
+ VkSampler **ppIS = (VkSampler **)&pNewNode->createInfo.pBindings[i].pImmutableSamplers;
*ppIS = new VkSampler[pCreateInfo->pBindings[i].descriptorCount];
- memcpy(*ppIS, pCreateInfo->pBindings[i].pImmutableSamplers, pCreateInfo->pBindings[i].descriptorCount*sizeof(VkSampler));
+ memcpy(*ppIS, pCreateInfo->pBindings[i].pImmutableSamplers,
+ pCreateInfo->pBindings[i].descriptorCount * sizeof(VkSampler));
}
}
pNewNode->layout = *pSetLayout;
@@ -5147,7 +4981,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout(VkDev
uint32_t offset = 0;
uint32_t j = 0;
VkDescriptorType dType;
- for (uint32_t i=0; i<pCreateInfo->bindingCount; i++) {
+ for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
dType = pCreateInfo->pBindings[i].descriptorType;
for (j = 0; j < pCreateInfo->pBindings[i].descriptorCount; j++) {
pNewNode->descriptorTypes[offset + j] = dType;
@@ -5171,43 +5005,32 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout(VkDev
return result;
}
-static bool validatePushConstantSize(const layer_data *dev_data,
- const uint32_t offset, const uint32_t size,
+static bool validatePushConstantSize(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
const char *caller_name) {
bool skipCall = false;
- if ((offset + size) >
- dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
- skipCall = log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
- "%s call has push constants with offset %u and size %u that "
- "exceeds this device's maxPushConstantSize of %u.",
- caller_name, offset, size,
- dev_data->physDevProperties.properties.limits.maxPushConstantsSize);
+ if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
+ skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
+                           "exceeds this device's maxPushConstantsSize of %u.",
+ caller_name, offset, size, dev_data->physDevProperties.properties.limits.maxPushConstantsSize);
}
return skipCall;
}
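A hedged worked example of the push-constant checks applied above and in vkCreatePipelineLayout() below (the 128-byte limit is a hypothetical VkPhysicalDeviceLimits value): each range must fit within maxPushConstantsSize, and its size must be non-zero and a multiple of 4.

#include <vulkan/vulkan.h>

static void PushConstantRangeCheckExample() {
    const uint32_t maxPushConstantsSize = 128;                               // hypothetical device limit
    const VkPushConstantRange range = {VK_SHADER_STAGE_VERTEX_BIT, 16, 120}; // stageFlags, offset, size
    const bool exceedsLimit = (range.offset + range.size) > maxPushConstantsSize; // 136 > 128: reported
    const bool badSize = (range.size == 0) || ((range.size & 0x3) != 0);          // 120 is non-zero and 4-aligned
    (void)exceedsLimit;
    (void)badSize;
}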
-VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout)
-{
+VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
bool skipCall = false;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
uint32_t i = 0;
for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
- skipCall |= validatePushConstantSize(
- dev_data, pCreateInfo->pPushConstantRanges[i].offset,
- pCreateInfo->pPushConstantRanges[i].size,
- "vkCreatePipelineLayout()");
- if ((pCreateInfo->pPushConstantRanges[i].size == 0) ||
- ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) {
- skipCall |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
- "vkCreatePipelineLayout() call has push constant index %u with "
- "size %u. Size must be greater than zero and a multiple of 4.",
- i, pCreateInfo->pPushConstantRanges[i].size);
+ skipCall |= validatePushConstantSize(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
+ pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()");
+ if ((pCreateInfo->pPushConstantRanges[i].size == 0) || ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) {
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constant index %u with "
+ "size %u. Size must be greater than zero and a multiple of 4.",
+ i, pCreateInfo->pPushConstantRanges[i].size);
}
// TODO : Add warning if ranges overlap
}
@@ -5215,13 +5038,13 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkP
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
// TODOSC : Merge capture of the setLayouts per pipeline
- PIPELINE_LAYOUT_NODE& plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
+ PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
- for (i=0; i<pCreateInfo->setLayoutCount; ++i) {
+ for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
}
plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
- for (i=0; i<pCreateInfo->pushConstantRangeCount; ++i) {
+ for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
}
loader_platform_thread_unlock_mutex(&globalLock);
@@ -5229,19 +5052,22 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkP
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
+ VkDescriptorPool *pDescriptorPool) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
if (VK_SUCCESS == result) {
// Insert this pool into Global Pool LL at head
- if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, (uint64_t) *pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
- "Created Descriptor Pool %#" PRIxLEAST64, (uint64_t) *pDescriptorPool))
+ if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
+ (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool %#" PRIxLEAST64,
+ (uint64_t)*pDescriptorPool))
return VK_ERROR_VALIDATION_FAILED_EXT;
- DESCRIPTOR_POOL_NODE* pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
+ DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
if (NULL == pNewNode) {
- if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, (uint64_t) *pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
- "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
+ if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
+ (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
+ "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
return VK_ERROR_VALIDATION_FAILED_EXT;
} else {
loader_platform_thread_lock_mutex(&globalLock);
@@ -5254,9 +5080,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool(VkDevice d
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
@@ -5266,19 +5092,22 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool(VkDevice de
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
// Verify that requested descriptorSets are available in pool
DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
if (!pPoolNode) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, (uint64_t) pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
- "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call", (uint64_t) pAllocateInfo->descriptorPool);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
+ (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
+ "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
+ (uint64_t)pAllocateInfo->descriptorPool);
} else { // Make sure pool has all the available descriptors before calling down chain
- skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount, pAllocateInfo->pSetLayouts);
+ skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
+ pAllocateInfo->pSetLayouts);
}
loader_platform_thread_unlock_mutex(&globalLock);
if (skipCall)
@@ -5289,17 +5118,21 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets(VkDevice
DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
if (pPoolNode) {
if (pAllocateInfo->descriptorSetCount == 0) {
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
"AllocateDescriptorSets called with 0 count");
}
for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t) pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
- "Created Descriptor Set %#" PRIxLEAST64, (uint64_t) pDescriptorSets[i]);
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
+ (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set %#" PRIxLEAST64,
+ (uint64_t)pDescriptorSets[i]);
// Create new set node and add to head of pool nodes
- SET_NODE* pNewNode = new SET_NODE;
+ SET_NODE *pNewNode = new SET_NODE;
if (NULL == pNewNode) {
- if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t) pDescriptorSets[i], __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
- "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()"))
+ if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
+ DRAWSTATE_OUT_OF_MEMORY, "DS",
+ "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()"))
return VK_ERROR_VALIDATION_FAILED_EXT;
} else {
// TODO : Pool should store a total count of each type of Descriptor available
@@ -5309,10 +5142,14 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets(VkDevice
pNewNode->pNext = pPoolNode->pSets;
pNewNode->in_use.store(0);
pPoolNode->pSets = pNewNode;
- LAYOUT_NODE* pLayout = getLayoutNode(dev_data, pAllocateInfo->pSetLayouts[i]);
+ LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pAllocateInfo->pSetLayouts[i]);
if (NULL == pLayout) {
- if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t) pAllocateInfo->pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
- "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call", (uint64_t) pAllocateInfo->pSetLayouts[i]))
+ if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i],
+ __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
+ "Unable to find set layout node for layout %#" PRIxLEAST64
+ " specified in vkAllocateDescriptorSets() call",
+ (uint64_t)pAllocateInfo->pSetLayouts[i]))
return VK_ERROR_VALIDATION_FAILED_EXT;
}
pNewNode->pLayout = pLayout;
@@ -5320,8 +5157,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets(VkDevice
pNewNode->set = pDescriptorSets[i];
pNewNode->descriptorCount = (pLayout->createInfo.bindingCount != 0) ? pLayout->endIndex + 1 : 0;
if (pNewNode->descriptorCount) {
- size_t descriptorArraySize = sizeof(GENERIC_HEADER*)*pNewNode->descriptorCount;
- pNewNode->ppDescriptors = new GENERIC_HEADER*[descriptorArraySize];
+ size_t descriptorArraySize = sizeof(GENERIC_HEADER *) * pNewNode->descriptorCount;
+ pNewNode->ppDescriptors = new GENERIC_HEADER *[descriptorArraySize];
memset(pNewNode->ppDescriptors, 0, descriptorArraySize);
}
dev_data->setMap[pDescriptorSets[i]] = pNewNode;
@@ -5333,19 +5170,21 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets(VkDevice
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet* pDescriptorSets)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
// Make sure that no sets being destroyed are in-flight
loader_platform_thread_lock_mutex(&globalLock);
- for (uint32_t i=0; i<count; ++i)
+ for (uint32_t i = 0; i < count; ++i)
        skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool);
if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) {
// Can't Free from a NON_FREE pool
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
- "It is invalid to call vkFreeDescriptorSets() with a pool created without setting VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
+ (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
+ "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
+ "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE != skipCall)
@@ -5354,12 +5193,12 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets(VkDevice dev
if (VK_SUCCESS == result) {
// For each freed descriptor add it back into the pool as available
loader_platform_thread_lock_mutex(&globalLock);
- for (uint32_t i=0; i<count; ++i) {
- SET_NODE* pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
+ for (uint32_t i = 0; i < count; ++i) {
+ SET_NODE *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
invalidateBoundCmdBuffers(dev_data, pSet);
- LAYOUT_NODE* pLayout = pSet->pLayout;
+ LAYOUT_NODE *pLayout = pSet->pLayout;
uint32_t typeIndex = 0, poolSizeCount = 0;
- for (uint32_t j=0; j<pLayout->createInfo.bindingCount; ++j) {
+ for (uint32_t j = 0; j < pLayout->createInfo.bindingCount; ++j) {
typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount;
@@ -5371,26 +5210,23 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets(VkDevice dev
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
+ uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
// dsUpdate will return VK_TRUE only if a bailout error occurs, so we want to call down tree when update returns VK_FALSE
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- VkBool32 rtn = dsUpdate(dev_data,
- device,
- descriptorWriteCount,
- pDescriptorWrites,
- descriptorCopyCount,
- pDescriptorCopies);
+ VkBool32 rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
loader_platform_thread_unlock_mutex(&globalLock);
if (!rtn) {
- dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
+ dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
+ pDescriptorCopies);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo* pCreateInfo, VkCommandBuffer* pCommandBuffer)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
@@ -5399,12 +5235,12 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(VkDevice
if (dev_data->commandPoolMap.find(pCreateInfo->commandPool) != dev_data->commandPoolMap.end()) {
// Add command buffer to its commandPool map
dev_data->commandPoolMap[pCreateInfo->commandPool].commandBuffers.push_back(pCommandBuffer[i]);
- GLOBAL_CB_NODE* pCB = new GLOBAL_CB_NODE;
+ GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
// Add command buffer to map
dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
resetCB(dev_data, pCommandBuffer[i]);
- pCB->createInfo = *pCreateInfo;
- pCB->device = device;
+ pCB->createInfo = *pCreateInfo;
+ pCB->device = device;
}
}
loader_platform_thread_unlock_mutex(&globalLock);
@@ -5412,99 +5248,106 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(VkDevice
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
// Validate command buffer level
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
// Secondary Command Buffer
const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
if (!pInfo) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer), __LINE__,
- DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS", "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.",
- reinterpret_cast<void*>(commandBuffer));
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
+ "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.",
+ reinterpret_cast<void *>(commandBuffer));
} else {
if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
- __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
- "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must specify a valid renderpass parameter.", reinterpret_cast<void*>(commandBuffer));
+ skipCall |= log_msg(
+ dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
+ "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must specify a valid renderpass parameter.",
+ reinterpret_cast<void *>(commandBuffer));
}
                if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
- __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
- "vkBeginCommandBuffer(): Secondary Command Buffers (%p) may perform better if a valid framebuffer parameter is specified.",
- reinterpret_cast<void*>(commandBuffer));
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
+ "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (%p) may perform better if a "
+ "valid framebuffer parameter is specified.",
+ reinterpret_cast<void *>(commandBuffer));
} else {
string errorString = "";
- auto fbNode =
- dev_data->frameBufferMap.find(pInfo->framebuffer);
+ auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer);
if (fbNode != dev_data->frameBufferMap.end()) {
- VkRenderPass fbRP =
- fbNode->second.createInfo.renderPass;
- if (!verify_renderpass_compatibility(
- dev_data, fbRP, pInfo->renderPass,
- errorString)) {
+ VkRenderPass fbRP = fbNode->second.createInfo.renderPass;
+ if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
                            // renderPass that framebuffer was created with must
                            // be compatible with local renderPass
- skipCall |= log_msg(
- dev_data->report_data,
- VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- reinterpret_cast<uint64_t>(commandBuffer),
- __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
- "DS",
- "vkBeginCommandBuffer(): Secondary Command "
- "Buffer (%p) renderPass (%#" PRIxLEAST64
- ") is incompatible w/ framebuffer "
- "(%#" PRIxLEAST64
- ") w/ render pass (%#" PRIxLEAST64
- ") due to: %s",
- reinterpret_cast<void *>(commandBuffer),
- (uint64_t)(pInfo->renderPass),
- (uint64_t)(pInfo->framebuffer),
- (uint64_t)(fbRP), errorString.c_str());
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
+ "DS", "vkBeginCommandBuffer(): Secondary Command "
+ "Buffer (%p) renderPass (%#" PRIxLEAST64 ") is incompatible w/ framebuffer "
+ "(%#" PRIxLEAST64 ") w/ render pass (%#" PRIxLEAST64 ") due to: %s",
+ reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
+ (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
}
// Connect this framebuffer to this cmdBuffer
- fbNode->second.referencingCmdBuffers.insert(
- pCB->commandBuffer);
+ fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer);
}
}
}
- if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->physDevProperties.features.occlusionQueryPrecise == VK_FALSE) && (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
+ if ((pInfo->occlusionQueryEnable == VK_FALSE ||
+ dev_data->physDevProperties.features.occlusionQueryPrecise == VK_FALSE) &&
+ (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
__LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
- "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have VK_QUERY_CONTROL_PRECISE_BIT if occulusionQuery is disabled or the device does not "
- "support precise occlusion queries.", reinterpret_cast<void*>(commandBuffer));
+ "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have "
+                            "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
+ "support precise occlusion queries.",
+ reinterpret_cast<void *>(commandBuffer));
}
}
if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass);
if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) {
if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer,
- __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
- "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must has a subpass index (%d) that is less than the number of subpasses (%d).",
- (void*)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
+ DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
+                                "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must have a subpass index (%d) "
+ "that is less than the number of subpasses (%d).",
+ (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount);
}
}
}
}
if (CB_RECORDING == pCB->state) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
- "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64 ") in the RECORDING state. Must first call vkEndCommandBuffer().", (uint64_t)commandBuffer);
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
+ "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64
+ ") in the RECORDING state. Must first call vkEndCommandBuffer().",
+ (uint64_t)commandBuffer);
} else if (CB_RECORDED == pCB->state) {
VkCommandPool cmdPool = pCB->createInfo.commandPool;
if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer,
- __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
- "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64 ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64 ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
- (uint64_t) commandBuffer, (uint64_t) cmdPool);
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
+ "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64
+ ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64
+ ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
+ (uint64_t)commandBuffer, (uint64_t)cmdPool);
}
resetCB(dev_data, commandBuffer);
}
@@ -5516,8 +5359,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(VkCommandBuf
pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
}
} else {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
- "In vkBeginCommandBuffer() and unable to find CommandBuffer Node for CB %p!", (void*)commandBuffer);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
+ "In vkBeginCommandBuffer() and unable to find CommandBuffer Node for CB %p!", (void *)commandBuffer);
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE != skipCall) {
@@ -5527,20 +5371,21 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(VkCommandBuf
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
VkBool32 skipCall = VK_FALSE;
VkResult result = VK_SUCCESS;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state != CB_RECORDING) {
skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkEndCommandBuffer()");
}
for (auto query : pCB->activeQueries) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
- "Ending command buffer with in progress query: queryPool %" PRIu64 ", index %d", (uint64_t)(query.pool), query.index);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_QUERY, "DS",
+ "Ending command buffer with in progress query: queryPool %" PRIu64 ", index %d",
+ (uint64_t)(query.pool), query.index);
}
}
if (VK_FALSE == skipCall) {
@@ -5560,23 +5405,25 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffe
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
VkCommandPool cmdPool = pCB->createInfo.commandPool;
if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t) commandBuffer,
- __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
- "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64 ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
- (uint64_t) commandBuffer, (uint64_t) cmdPool);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
+ "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64
+ ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
+ (uint64_t)commandBuffer, (uint64_t)cmdPool);
}
if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t) commandBuffer,
- __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
- "Attempt to reset command buffer (%#" PRIxLEAST64 ") which is in use.", reinterpret_cast<uint64_t>(commandBuffer));
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
+ "Attempt to reset command buffer (%#" PRIxLEAST64 ") which is in use.",
+ reinterpret_cast<uint64_t>(commandBuffer));
}
loader_platform_thread_unlock_mutex(&globalLock);
if (skipCall != VK_FALSE)
@@ -5590,29 +5437,30 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer(VkCommandBuf
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, (uint64_t) pipeline,
- __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
- "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")",
- (uint64_t) pipeline, (uint64_t) pCB->activeRenderPass);
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
+ (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
+ "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")",
+ (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass);
}
- PIPELINE_NODE* pPN = getPipeline(dev_data, pipeline);
+ PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
if (pPN) {
pCB->lastBoundPipeline = pipeline;
set_cb_pso_status(pCB, pPN);
skipCall |= validatePipelineState(dev_data, pCB, pipelineBindPoint, pipeline);
} else {
skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
- (uint64_t) pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
+ (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
"Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
}
}
@@ -5621,16 +5469,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline(VkCommandBuffer com
dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport(
- VkCommandBuffer commandBuffer,
- uint32_t firstViewport,
- uint32_t viewportCount,
- const VkViewport* pViewports)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
pCB->status |= CBSTATUS_VIEWPORT_SET;
@@ -5642,16 +5486,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport(
dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor(
- VkCommandBuffer commandBuffer,
- uint32_t firstScissor,
- uint32_t scissorCount,
- const VkRect2D* pScissors)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
pCB->status |= CBSTATUS_SCISSOR_SET;
@@ -5663,12 +5503,11 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor(
dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
pCB->status |= CBSTATUS_LINE_WIDTH_SET;
@@ -5679,16 +5518,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer com
dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias(
- VkCommandBuffer commandBuffer,
- float depthBiasConstantFactor,
- float depthBiasClamp,
- float depthBiasSlopeFactor)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
@@ -5698,15 +5533,15 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias(
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall)
- dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
+ dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
+ depthBiasSlopeFactor);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4])
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
pCB->status |= CBSTATUS_BLEND_SET;
@@ -5717,15 +5552,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffe
dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds(
- VkCommandBuffer commandBuffer,
- float minDepthBounds,
- float maxDepthBounds)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
@@ -5737,15 +5569,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds(
dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask(
- VkCommandBuffer commandBuffer,
- VkStencilFaceFlags faceMask,
- uint32_t compareMask)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
@@ -5763,15 +5592,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask(
dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask(
- VkCommandBuffer commandBuffer,
- VkStencilFaceFlags faceMask,
- uint32_t writeMask)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
@@ -5787,15 +5613,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask(
dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference(
- VkCommandBuffer commandBuffer,
- VkStencilFaceFlags faceMask,
- uint32_t reference)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
@@ -5811,17 +5634,21 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference(
dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
+ uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
+ const uint32_t *pDynamicOffsets) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_RECORDING) {
if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
- "Incorrectly binding compute DescriptorSets during active RenderPass (%#" PRIxLEAST64 ")", (uint64_t) pCB->activeRenderPass);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
+ "Incorrectly binding compute DescriptorSets during active RenderPass (%#" PRIxLEAST64 ")",
+ (uint64_t)pCB->activeRenderPass);
} else if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdBindDescriptorSets");
}
@@ -5829,53 +5656,83 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(VkCommandBuff
// Track total count of dynamic descriptor types to make sure we have an offset for each one
uint32_t totalDynamicDescriptors = 0;
string errorString = "";
- uint32_t lastSetIndex = firstSet+setCount-1;
+ uint32_t lastSetIndex = firstSet + setCount - 1;
if (lastSetIndex >= pCB->boundDescriptorSets.size())
- pCB->boundDescriptorSets.resize(lastSetIndex+1);
+ pCB->boundDescriptorSets.resize(lastSetIndex + 1);
VkDescriptorSet oldFinalBoundSet = pCB->boundDescriptorSets[lastSetIndex];
- for (uint32_t i=0; i<setCount; i++) {
- SET_NODE* pSet = getSetNode(dev_data, pDescriptorSets[i]);
+ for (uint32_t i = 0; i < setCount; i++) {
+ SET_NODE *pSet = getSetNode(dev_data, pDescriptorSets[i]);
if (pSet) {
pCB->uniqueBoundSets.insert(pDescriptorSets[i]);
pSet->boundCmdBuffers.insert(commandBuffer);
pCB->lastBoundDescriptorSet = pDescriptorSets[i];
pCB->lastBoundPipelineLayout = layout;
- pCB->boundDescriptorSets[i+firstSet] = pDescriptorSets[i];
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t) pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
- "DS %#" PRIxLEAST64 " bound on pipeline %s", (uint64_t) pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
+ pCB->boundDescriptorSets[i + firstSet] = pDescriptorSets[i];
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
+ DRAWSTATE_NONE, "DS", "DS %#" PRIxLEAST64 " bound on pipeline %s",
+ (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
if (!pSet->pUpdateStructs && (pSet->descriptorCount != 0)) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t) pDescriptorSets[i], __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
- "DS %#" PRIxLEAST64 " bound but it was never updated. You may want to either update it or not bind it.", (uint64_t) pDescriptorSets[i]);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
+ __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
+ "DS %#" PRIxLEAST64
+ " bound but it was never updated. You may want to either update it or not bind it.",
+ (uint64_t)pDescriptorSets[i]);
}
// Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
- if (!verify_set_layout_compatibility(dev_data, pSet, layout, i+firstSet, errorString)) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t) pDescriptorSets[i], __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
- "descriptorSet #%u being bound is not compatible with overlapping layout in pipelineLayout due to: %s", i, errorString.c_str());
+ if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
+ __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
+ "descriptorSet #%u being bound is not compatible with overlapping layout in "
+ "pipelineLayout due to: %s",
+ i, errorString.c_str());
}
if (pSet->pLayout->dynamicDescriptorCount) {
// First make sure we won't overstep bounds of pDynamicOffsets array
if ((totalDynamicDescriptors + pSet->pLayout->dynamicDescriptorCount) > dynamicOffsetCount) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t) pDescriptorSets[i], __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
- "descriptorSet #%u (%#" PRIxLEAST64 ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets array. There must be one dynamic offset for each dynamic descriptor being bound.",
- i, (uint64_t) pDescriptorSets[i], pSet->pLayout->dynamicDescriptorCount, (dynamicOffsetCount - totalDynamicDescriptors));
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
+ DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
+ "descriptorSet #%u (%#" PRIxLEAST64
+ ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
+ "array. There must be one dynamic offset for each dynamic descriptor being bound.",
+ i, (uint64_t)pDescriptorSets[i], pSet->pLayout->dynamicDescriptorCount,
+ (dynamicOffsetCount - totalDynamicDescriptors));
} else { // Validate and store dynamic offsets with the set
// Validate Dynamic Offset Minimums
uint32_t cur_dyn_offset = totalDynamicDescriptors;
for (uint32_t d = 0; d < pSet->descriptorCount; d++) {
if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
- if (vk_safe_modulo(pDynamicOffsets[cur_dyn_offset], dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
- __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
- "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
- cur_dyn_offset, pDynamicOffsets[cur_dyn_offset], dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment);
+ if (vk_safe_modulo(
+ pDynamicOffsets[cur_dyn_offset],
+ dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment) !=
+ 0) {
+ skipCall |= log_msg(
+ dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
+ DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
+ "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
+ "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
+ cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
+ dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment);
}
cur_dyn_offset++;
} else if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
- if (vk_safe_modulo(pDynamicOffsets[cur_dyn_offset], dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
- __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
- "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
- cur_dyn_offset, pDynamicOffsets[cur_dyn_offset], dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment);
+ if (vk_safe_modulo(
+ pDynamicOffsets[cur_dyn_offset],
+ dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment) !=
+ 0) {
+ skipCall |= log_msg(
+ dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
+ DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
+ "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
+ "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
+ cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
+ dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment);
}
cur_dyn_offset++;
}
@@ -5885,33 +5742,55 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(VkCommandBuff
}
}
} else {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t) pDescriptorSets[i], __LINE__, DRAWSTATE_INVALID_SET, "DS",
- "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!", (uint64_t) pDescriptorSets[i]);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
+ DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!",
+ (uint64_t)pDescriptorSets[i]);
}
}
            skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
// For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
if (firstSet > 0) { // Check set #s below the first bound set
- for (uint32_t i=0; i<firstSet; ++i) {
- if (pCB->boundDescriptorSets[i] && !verify_set_layout_compatibility(dev_data, dev_data->setMap[pCB->boundDescriptorSets[i]], layout, i, errorString)) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t) pCB->boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
- "DescriptorSetDS %#" PRIxLEAST64 " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")", (uint64_t) pCB->boundDescriptorSets[i], i, (uint64_t) layout);
+ for (uint32_t i = 0; i < firstSet; ++i) {
+ if (pCB->boundDescriptorSets[i] &&
+ !verify_set_layout_compatibility(dev_data, dev_data->setMap[pCB->boundDescriptorSets[i]], layout, i,
+ errorString)) {
+ skipCall |= log_msg(
+ dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pCB->boundDescriptorSets[i], __LINE__,
+ DRAWSTATE_NONE, "DS",
+                            "DescriptorSet DS %#" PRIxLEAST64
+ " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
+ (uint64_t)pCB->boundDescriptorSets[i], i, (uint64_t)layout);
pCB->boundDescriptorSets[i] = VK_NULL_HANDLE;
}
}
}
// Check if newly last bound set invalidates any remaining bound sets
- if ((pCB->boundDescriptorSets.size()-1) > (lastSetIndex)) {
- if (oldFinalBoundSet && !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex, errorString)) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t) oldFinalBoundSet, __LINE__, DRAWSTATE_NONE, "DS",
- "DescriptorSetDS %#" PRIxLEAST64 " previously bound as set #%u is incompatible with set %#" PRIxLEAST64 " newly bound as set #%u so set #%u and any subsequent sets were disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")", (uint64_t) oldFinalBoundSet, lastSetIndex, (uint64_t) pCB->boundDescriptorSets[lastSetIndex], lastSetIndex, lastSetIndex+1, (uint64_t) layout);
- pCB->boundDescriptorSets.resize(lastSetIndex+1);
+ if ((pCB->boundDescriptorSets.size() - 1) > (lastSetIndex)) {
+ if (oldFinalBoundSet &&
+ !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex,
+ errorString)) {
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)oldFinalBoundSet, __LINE__,
+                                DRAWSTATE_NONE, "DS", "DescriptorSet DS %#" PRIxLEAST64
+ " previously bound as set #%u is incompatible with set %#" PRIxLEAST64
+ " newly bound as set #%u so set #%u and any subsequent sets were "
+ "disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
+ (uint64_t)oldFinalBoundSet, lastSetIndex, (uint64_t)pCB->boundDescriptorSets[lastSetIndex],
+ lastSetIndex, lastSetIndex + 1, (uint64_t)layout);
+ pCB->boundDescriptorSets.resize(lastSetIndex + 1);
}
}
// dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
if (totalDynamicDescriptors != dynamicOffsetCount) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t) commandBuffer, __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
- "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount is %u. It should exactly match the number of dynamic descriptors.", setCount, totalDynamicDescriptors, dynamicOffsetCount);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
+ DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
+ "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
+ "is %u. It should exactly match the number of dynamic descriptors.",
+ setCount, totalDynamicDescriptors, dynamicOffsetCount);
}
// Save dynamicOffsets bound to this CB
for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
@@ -5924,32 +5803,35 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(VkCommandBuff
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall)
- dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
+ dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
+ pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
VkDeviceSize offset_align = 0;
switch (indexType) {
- case VK_INDEX_TYPE_UINT16:
- offset_align = 2;
- break;
- case VK_INDEX_TYPE_UINT32:
- offset_align = 4;
- break;
- default:
- // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
- break;
+ case VK_INDEX_TYPE_UINT16:
+ offset_align = 2;
+ break;
+ case VK_INDEX_TYPE_UINT32:
+ offset_align = 4;
+ break;
+ default:
+ // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
+ break;
}
if (!offset_align || (offset % offset_align)) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
- "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset, string_VkIndexType(indexType));
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
+ "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
+ offset, string_VkIndexType(indexType));
}
pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND;
}
@@ -5958,7 +5840,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(VkCommandBuffer
dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}
-void updateResourceTracking(GLOBAL_CB_NODE* pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers) {
+void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
uint32_t end = firstBinding + bindingCount;
if (pCB->currentDrawData.buffers.size() < end) {
pCB->currentDrawData.buffers.resize(end);
@@ -5968,21 +5850,15 @@ void updateResourceTracking(GLOBAL_CB_NODE* pCB, uint32_t firstBinding, uint32_t
}
}
-void updateResourceTrackingOnDraw(GLOBAL_CB_NODE* pCB) {
- pCB->drawData.push_back(pCB->currentDrawData);
-}
+void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(
- VkCommandBuffer commandBuffer,
- uint32_t firstBinding,
- uint32_t bindingCount,
- const VkBuffer *pBuffers,
- const VkDeviceSize *pOffsets)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
+ uint32_t bindingCount, const VkBuffer *pBuffers,
+ const VkDeviceSize *pOffsets) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
        addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
updateResourceTracking(pCB, firstBinding, bindingCount, pBuffers);
@@ -5994,19 +5870,20 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(
dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
+ uint32_t firstVertex, uint32_t firstInstance) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
pCB->drawCount[DRAW]++;
skipCall |= validate_draw_state(dev_data, pCB, VK_FALSE);
// TODO : Need to pass commandBuffer as srcObj here
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
- "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
+ __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
if (VK_FALSE == skipCall) {
updateResourceTrackingOnDraw(pCB);
@@ -6018,19 +5895,21 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuff
dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
+ uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
+ uint32_t firstInstance) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
VkBool32 skipCall = VK_FALSE;
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
pCB->drawCount[DRAW_INDEXED]++;
skipCall |= validate_draw_state(dev_data, pCB, VK_TRUE);
// TODO : Need to pass commandBuffer as srcObj here
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
- "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
+ "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
if (VK_FALSE == skipCall) {
updateResourceTrackingOnDraw(pCB);
@@ -6039,22 +5918,24 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer comm
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall)
- dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
+ dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
+ firstInstance);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
VkBool32 skipCall = VK_FALSE;
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
pCB->drawCount[DRAW_INDIRECT]++;
skipCall |= validate_draw_state(dev_data, pCB, VK_FALSE);
// TODO : Need to pass commandBuffer as srcObj here
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
- "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
+ "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
if (VK_FALSE == skipCall) {
updateResourceTrackingOnDraw(pCB);
@@ -6066,12 +5947,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(VkCommandBuffer com
dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
@@ -6079,8 +5960,10 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(VkCommandBuf
skipCall |= validate_draw_state(dev_data, pCB, VK_TRUE);
loader_platform_thread_lock_mutex(&globalLock);
// TODO : Need to pass commandBuffer as srcObj here
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
- "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED_INDIRECT]++);
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
+ __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:",
+ g_drawCount[DRAW_INDEXED_INDIRECT]++);
skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
if (VK_FALSE == skipCall) {
updateResourceTrackingOnDraw(pCB);
@@ -6092,12 +5975,11 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(VkCommandBuf
dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
@@ -6107,12 +5989,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer command
dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
@@ -6122,12 +6004,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect(VkCommandBuffer
dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
+ uint32_t regionCount, const VkBufferCopy *pRegions) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");
@@ -6137,11 +6019,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer comma
dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}
-VkBool32 VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers, VkImageLayout srcImageLayout) {
+VkBool32 VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
+ VkImageLayout srcImageLayout) {
VkBool32 skip_call = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, cmdBuffer);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
uint32_t layer = i + subLayers.baseArrayLayer;
VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
@@ -6153,38 +6036,34 @@ VkBool32 VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, Vk
if (node.layout != srcImageLayout) {
// TODO: Improve log message in the next pass
skip_call |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
- __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
- "Cannot copy from an image whose source layout is %s "
- "and doesn't match the current layout %s.",
- string_VkImageLayout(srcImageLayout),
- string_VkImageLayout(node.layout));
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
+ __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
+ "and doesn't match the current layout %s.",
+ string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
}
}
if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
// LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
- skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
+ 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
"Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
} else {
- skip_call |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
- "Layout for input image is %s but can only be "
- "TRANSFER_SRC_OPTIMAL or GENERAL.",
- string_VkImageLayout(srcImageLayout));
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
+ "TRANSFER_SRC_OPTIMAL or GENERAL.",
+ string_VkImageLayout(srcImageLayout));
}
}
return skip_call;
}
-VkBool32 VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers, VkImageLayout destImageLayout) {
+VkBool32 VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
+ VkImageLayout destImageLayout) {
VkBool32 skip_call = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, cmdBuffer);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
uint32_t layer = i + subLayers.baseArrayLayer;
VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
@@ -6195,44 +6074,35 @@ VkBool32 VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkI
}
if (node.layout != destImageLayout) {
skip_call |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
- __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
- "Cannot copy from an image whose dest layout is %s and "
- "doesn't match the current layout %s.",
- string_VkImageLayout(destImageLayout),
- string_VkImageLayout(node.layout));
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
+                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose dest layout is %s and "
+ "doesn't match the current layout %s.",
+ string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
}
}
if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
// LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
- skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
+ 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
"Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
} else {
- skip_call |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
- "Layout for output image is %s but can only be "
- "TRANSFER_DST_OPTIMAL or GENERAL.",
- string_VkImageLayout(destImageLayout));
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
+ "TRANSFER_DST_OPTIMAL or GENERAL.",
+ string_VkImageLayout(destImageLayout));
}
}
return skip_call;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount, const VkImageCopy* pRegions)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE, "vkCmdCopyImage()");
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImage");
@@ -6243,80 +6113,73 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(VkCommandBuffer comman
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall)
- dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
+ dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
+ regionCount, pRegions);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(VkCommandBuffer commandBuffer,
- VkImage srcImage, VkImageLayout srcImageLayout,
- VkImage dstImage, VkImageLayout dstImageLayout,
- uint32_t regionCount, const VkImageBlit* pRegions,
- VkFilter filter)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()");
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage");
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall)
- dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter);
+ dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
+ regionCount, pRegions, filter);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer,
- VkBuffer srcBuffer,
- VkImage dstImage, VkImageLayout dstImageLayout,
- uint32_t regionCount, const VkBufferImageCopy* pRegions)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
+ VkImage dstImage, VkImageLayout dstImageLayout,
+ uint32_t regionCount, const VkBufferImageCopy *pRegions) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBufferToImage");
for (uint32_t i = 0; i < regionCount; ++i) {
- skipCall |= VerifyDestImageLayout(commandBuffer, dstImage,
- pRegions[i].imageSubresource,
- dstImageLayout);
+ skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
}
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall)
- dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
+ dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
+ pRegions);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer,
- VkImage srcImage, VkImageLayout srcImageLayout,
- VkBuffer dstBuffer,
- uint32_t regionCount, const VkBufferImageCopy* pRegions)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
+ VkImageLayout srcImageLayout, VkBuffer dstBuffer,
+ uint32_t regionCount, const VkBufferImageCopy *pRegions) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImageToBuffer");
for (uint32_t i = 0; i < regionCount; ++i) {
- skipCall |= VerifySourceImageLayout(commandBuffer, srcImage,
- pRegions[i].imageSubresource,
- srcImageLayout);
+ skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
}
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall)
- dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
+ dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
+ pRegions);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t* pData)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
+ VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyUpdateBuffer");
@@ -6326,12 +6189,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer com
dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()");
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyFillBuffer");
@@ -6341,30 +6204,28 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(VkCommandBuffer comma
dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(
- VkCommandBuffer commandBuffer,
- uint32_t attachmentCount,
- const VkClearAttachment* pAttachments,
- uint32_t rectCount,
- const VkClearRect* pRects)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
+ const VkClearAttachment *pAttachments, uint32_t rectCount,
+ const VkClearRect *pRects) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
// Warn if this is issued prior to Draw Cmd and clearing the entire attachment
- if (!hasDrawCmd(pCB) &&
- (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
+ if (!hasDrawCmd(pCB) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
(pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
// TODO : commandBuffer should be srcObj
// There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
- // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must call CmdClearAttachments
+ // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
+ // call CmdClearAttachments
// Otherwise this seems more like a performance warning.
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 0, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
- "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
- " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.", (uint64_t)(commandBuffer));
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 0, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
+ "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
+ " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
+ (uint64_t)(commandBuffer));
}
skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
}
@@ -6372,7 +6233,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(
// Validate that attachment is in reference list of active subpass
if (pCB->activeRenderPass) {
const VkRenderPassCreateInfo *pRPCI = dev_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
- const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
+ const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
const VkClearAttachment *attachment = &pAttachments[attachment_idx];
@@ -6385,18 +6246,22 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(
}
}
if (VK_FALSE == found) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
- "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
- attachment->colorAttachment, pCB->activeSubpass);
+ skipCall |= log_msg(
+ dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
+ "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
+ attachment->colorAttachment, pCB->activeSubpass);
}
} else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
-                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
-                    (pSD->pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass
-                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
+                    (pSD->pDepthStencilAttachment->attachment ==
+                     VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass
+                    skipCall |= log_msg(
+                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
(uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
- "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found in active subpass %d",
+ "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
+ "in active subpass %d",
attachment->colorAttachment,
(pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
pCB->activeSubpass);
@@ -6409,16 +6274,13 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(
dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}
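As a point of reference (not part of this patch), the alternative the performance warning above recommends is to clear through the render pass itself rather than with an early vkCmdClearAttachments(); the format and clear color below are illustrative.

VkAttachmentDescription color = {};
color.format = VK_FORMAT_B8G8R8A8_UNORM;
color.samples = VK_SAMPLE_COUNT_1_BIT;
color.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;            // cleared when the render pass begins
color.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
color.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
color.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
color.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
color.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;

VkClearValue clear_value = {};
clear_value.color = {{0.0f, 0.0f, 0.0f, 1.0f}};
// Hand clear_value to VkRenderPassBeginInfo::pClearValues; the attachment is then
// cleared without any vkCmdClearAttachments() call before the first draw.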
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(
- VkCommandBuffer commandBuffer,
- VkImage image, VkImageLayout imageLayout,
- const VkClearColorValue *pColor,
- uint32_t rangeCount, const VkImageSubresourceRange* pRanges)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
+ VkImageLayout imageLayout, const VkClearColorValue *pColor,
+ uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage");
@@ -6428,50 +6290,47 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(
dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage(
- VkCommandBuffer commandBuffer,
- VkImage image, VkImageLayout imageLayout,
- const VkClearDepthStencilValue *pDepthStencil,
- uint32_t rangeCount,
- const VkImageSubresourceRange* pRanges)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
+ const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
+ const VkImageSubresourceRange *pRanges) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage");
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall)
- dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
+ dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
+ pRanges);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage(VkCommandBuffer commandBuffer,
- VkImage srcImage, VkImageLayout srcImageLayout,
- VkImage dstImage, VkImageLayout dstImageLayout,
- uint32_t regionCount, const VkImageResolve* pRegions)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage");
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall)
- dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
+ dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
+ regionCount, pRegions);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
@@ -6483,12 +6342,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent(VkCommandBuffer command
dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
@@ -6499,9 +6358,9 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent(VkCommandBuffer comma
dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
}
-VkBool32 TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount, const VkImageMemoryBarrier* pImgMemBarriers) {
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, cmdBuffer);
+VkBool32 TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount, const VkImageMemoryBarrier *pImgMemBarriers) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
VkBool32 skip = VK_FALSE;
for (uint32_t i = 0; i < memBarrierCount; ++i) {
@@ -6510,33 +6369,23 @@ VkBool32 TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCo
continue;
// TODO: Do not iterate over every possibility - consolidate where
// possible
- for (uint32_t j = 0; j < mem_barrier->subresourceRange.levelCount;
- j++) {
+ for (uint32_t j = 0; j < mem_barrier->subresourceRange.levelCount; j++) {
uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
- for (uint32_t k = 0; k < mem_barrier->subresourceRange.layerCount;
- k++) {
- uint32_t layer =
- mem_barrier->subresourceRange.baseArrayLayer + k;
- VkImageSubresource sub = {
- mem_barrier->subresourceRange.aspectMask, level, layer};
+ for (uint32_t k = 0; k < mem_barrier->subresourceRange.layerCount; k++) {
+ uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
+ VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
IMAGE_CMD_BUF_LAYOUT_NODE node;
if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
- SetLayout(pCB, mem_barrier->image, sub,
- {mem_barrier->oldLayout, mem_barrier->newLayout});
+ SetLayout(pCB, mem_barrier->image, sub, {mem_barrier->oldLayout, mem_barrier->newLayout});
continue;
}
if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
// TODO: Set memory invalid which is in mem_tracker currently
- }
- else if (node.layout != mem_barrier->oldLayout) {
- skip |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
- "You cannot transition the layout from %s "
- "when current layout is %s.",
- string_VkImageLayout(mem_barrier->oldLayout),
- string_VkImageLayout(node.layout));
+ } else if (node.layout != mem_barrier->oldLayout) {
+ skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
+ "when current layout is %s.",
+ string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
}
SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
}
@@ -6546,8 +6395,7 @@ VkBool32 TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCo
}
// Print readable FlagBits in FlagMask
-std::string string_VkAccessFlags(VkAccessFlags accessMask)
-{
+std::string string_VkAccessFlags(VkAccessFlags accessMask) {
std::string result;
std::string separator;
@@ -6569,23 +6417,26 @@ std::string string_VkAccessFlags(VkAccessFlags accessMask)
// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
-VkBool32 ValidateMaskBits(const layer_data* my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags& accessMask, const VkImageLayout& layout,
- VkAccessFlags required_bit, VkAccessFlags optional_bits, const char* type) {
+VkBool32 ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
+ const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits, const char *type) {
VkBool32 skip_call = VK_FALSE;
if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
if (accessMask & !(required_bit | optional_bits)) {
// TODO: Verify against Valid Use
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
- "Additional bits in %s accessMask %d %s are specified when layout is %s.",
- type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
+ skip_call |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
+ type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
}
} else {
if (!required_bit) {
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
- "%s AccessMask %d %s must contain at least one of access bits %d %s when layout is %s, unless the app has previously added a barrier for this transition.",
- type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
- string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
+ skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
+ "%s when layout is %s, unless the app has previously added a "
+ "barrier for this transition.",
+ type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
+ string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
} else {
std::string opt_bits;
if (optional_bits != 0) {
@@ -6593,82 +6444,82 @@ VkBool32 ValidateMaskBits(const layer_data* my_data, VkCommandBuffer cmdBuffer,
ss << optional_bits;
opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
}
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
- "%s AccessMask %d %s must have required access bit %d %s %s when layout is %s, unless the app has previously added a barrier for this transition.",
- type, accessMask, string_VkAccessFlags(accessMask).c_str(),
- required_bit, string_VkAccessFlags(required_bit).c_str(),
- opt_bits.c_str(), string_VkImageLayout(layout));
+ skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
+ "layout is %s, unless the app has previously added a barrier for "
+ "this transition.",
+ type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
+ string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
}
}
return skip_call;
}
-VkBool32 ValidateMaskBitsFromLayouts(const layer_data* my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags& accessMask, const VkImageLayout& layout, const char* type) {
+VkBool32 ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
+ const VkImageLayout &layout, const char *type) {
VkBool32 skip_call = VK_FALSE;
switch (layout) {
- case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
- skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
- break;
- }
- case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
- skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
- break;
- }
- case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
- skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
- break;
- }
- case VK_IMAGE_LAYOUT_PREINITIALIZED: {
- skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
- break;
- }
- case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
- skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
- break;
- }
- case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
- skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0, VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
- break;
- }
- case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
- skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
- break;
- }
- case VK_IMAGE_LAYOUT_UNDEFINED: {
- if (accessMask != 0) {
- // TODO: Verify against Valid Use section spec
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
- "Additional bits in %s accessMask %d %s are specified when layout is %s.", type, accessMask, string_VkAccessFlags(accessMask).c_str(),
- string_VkImageLayout(layout));
- }
- break;
- }
- case VK_IMAGE_LAYOUT_GENERAL:
- default: {
- break;
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
+ skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
+ break;
+ }
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
+ skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
+ break;
+ }
+ case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
+ skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
+ break;
+ }
+ case VK_IMAGE_LAYOUT_PREINITIALIZED: {
+ skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
+ break;
+ }
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
+ skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
+ break;
+ }
+ case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
+ skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
+ VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
+ break;
+ }
+ case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
+ skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
+ break;
+ }
+ case VK_IMAGE_LAYOUT_UNDEFINED: {
+ if (accessMask != 0) {
+ // TODO: Verify against Valid Use section spec
+ skip_call |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
+ type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
}
+ break;
+ }
+ case VK_IMAGE_LAYOUT_GENERAL:
+ default: { break; }
}
return skip_call;
}
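For illustration only (not part of this patch), a barrier whose access masks satisfy the per-layout rules checked above, here moving a color attachment into a transfer-read layout; img is a hypothetical handle.

VkImageMemoryBarrier to_src = {};
to_src.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
to_src.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
to_src.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; // required bit for the old layout
to_src.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
to_src.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;          // required bit for the new layout
to_src.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
to_src.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
to_src.image = img;
to_src.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
// Omitting VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT or VK_ACCESS_TRANSFER_READ_BIT here
// would trigger the DRAWSTATE_INVALID_BARRIER messages formatted above.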
-VkBool32 ValidateBarriers(const char* funcName,
- VkCommandBuffer cmdBuffer,
- uint32_t memBarrierCount,
- const VkMemoryBarrier *pMemBarriers,
- uint32_t bufferBarrierCount,
- const VkBufferMemoryBarrier *pBufferMemBarriers,
- uint32_t imageMemBarrierCount,
+VkBool32 ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
+ const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
+ const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) {
VkBool32 skip_call = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, cmdBuffer);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
if (pCB->activeRenderPass && memBarrierCount) {
if (!dev_data->renderPassMap[pCB->activeRenderPass]->hasSelfDependency[pCB->activeSubpass]) {
- skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
- "%s: Barriers cannot be set during subpass %d "
- "with no self dependency specified.",
- funcName, pCB->activeSubpass);
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
+ "with no self dependency specified.",
+ funcName, pCB->activeSubpass);
}
}
for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
@@ -6680,65 +6531,52 @@ VkBool32 ValidateBarriers(const char* funcName,
if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
// srcQueueFamilyIndex and dstQueueFamilyIndex must both
// be VK_QUEUE_FAMILY_IGNORED
- if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) ||
- (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
- skip_call |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
- "%s: Image Barrier for image 0x%" PRIx64
- " was created with sharingMode of "
- "VK_SHARING_MODE_CONCURRENT. Src and dst "
- " queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
- funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
+ if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
+ "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
+ "VK_SHARING_MODE_CONCURRENT. Src and dst "
+ " queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
+ funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
}
} else {
// Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
// dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
// or both be a valid queue family
- if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) ||
- (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
+ if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
(src_q_f_index != dst_q_f_index)) {
skip_call |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
- "%s: Image 0x%" PRIx64 " was created with sharingMode "
- "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
- "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
- "must be.", funcName,
- reinterpret_cast<const uint64_t &>(mem_barrier->image));
- } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) &&
- (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
- ((src_q_f_index >=
- dev_data->physDevProperties.queue_family_properties.size()) ||
- (dst_q_f_index >=
- dev_data->physDevProperties.queue_family_properties.size()))) {
- skip_call |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
- "%s: Image 0x%" PRIx64 " was created with sharingMode "
- "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
- " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
- "queueFamilies crated for this device.", funcName,
- reinterpret_cast<const uint64_t &>(mem_barrier->image),
- src_q_f_index, dst_q_f_index,
- dev_data->physDevProperties.queue_family_properties.size());
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
+ "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
+ "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
+ "must be.",
+ funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
+ } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
+ ((src_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()) ||
+ (dst_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()))) {
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
+ "%s: Image 0x%" PRIx64 " was created with sharingMode "
+ "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
+ " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
+                                 " queueFamilies created for this device.",
+ funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
+ dst_q_f_index, dev_data->physDevProperties.queue_family_properties.size());
}
}
}
if (mem_barrier) {
- skip_call |= ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
- skip_call |= ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
- if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED ||
- mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_BARRIER, "DS",
- "%s: Image Layout cannot be transitioned to UNDEFINED or "
- "PREINITIALIZED.", funcName);
+ skip_call |=
+ ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
+ skip_call |=
+ ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
+ if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
+ "PREINITIALIZED.",
+ funcName);
}
auto image_data = dev_data->imageMap.find(mem_barrier->image);
VkFormat format;
@@ -6750,19 +6588,12 @@ VkBool32 ValidateBarriers(const char* funcName,
mipLevels = image_data->second.createInfo.mipLevels;
imageFound = true;
} else if (dev_data->device_extensions.wsi_enabled) {
- auto imageswap_data =
- dev_data->device_extensions.imageToSwapchainMap.find(
- mem_barrier->image);
- if (imageswap_data !=
- dev_data->device_extensions.imageToSwapchainMap.end()) {
- auto swapchain_data =
- dev_data->device_extensions.swapchainMap.find(
- imageswap_data->second);
- if (swapchain_data !=
- dev_data->device_extensions.swapchainMap.end()) {
+ auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
+ if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
+ auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
+ if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
format = swapchain_data->second->createInfo.imageFormat;
- arrayLayers =
- swapchain_data->second->createInfo.imageArrayLayers;
+ arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
mipLevels = 1;
imageFound = true;
}
@@ -6770,42 +6601,29 @@ VkBool32 ValidateBarriers(const char* funcName,
}
if (imageFound) {
if (vk_format_is_depth_and_stencil(format) &&
- (!(mem_barrier->subresourceRange.aspectMask &
- VK_IMAGE_ASPECT_DEPTH_BIT) ||
- !(mem_barrier->subresourceRange.aspectMask &
- VK_IMAGE_ASPECT_STENCIL_BIT))) {
- log_msg(dev_data->report_data,
- VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_BARRIER, "DS",
- "%s: Image is a depth and stencil format and thus must "
- "have both VK_IMAGE_ASPECT_DEPTH_BIT and "
- "VK_IMAGE_ASPECT_STENCIL_BIT set.", funcName);
+ (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
+ !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a depth and stencil format and thus must "
+ "have both VK_IMAGE_ASPECT_DEPTH_BIT and "
+ "VK_IMAGE_ASPECT_STENCIL_BIT set.",
+ funcName);
}
- if ((mem_barrier->subresourceRange.baseArrayLayer +
- mem_barrier->subresourceRange.layerCount) > arrayLayers) {
- log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_BARRIER, "DS",
- "%s: Subresource must have the sum of the "
- "baseArrayLayer (%d) and layerCount (%d) be less "
- "than or equal to the total number of layers (%d).",
- funcName,
- mem_barrier->subresourceRange.baseArrayLayer,
- mem_barrier->subresourceRange.layerCount, arrayLayers);
+ if ((mem_barrier->subresourceRange.baseArrayLayer + mem_barrier->subresourceRange.layerCount) > arrayLayers) {
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the "
+ "baseArrayLayer (%d) and layerCount (%d) be less "
+ "than or equal to the total number of layers (%d).",
+ funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
+ arrayLayers);
}
- if ((mem_barrier->subresourceRange.baseMipLevel +
- mem_barrier->subresourceRange.levelCount) > mipLevels) {
- log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_BARRIER, "DS",
- "%s: Subresource must have the sum of the baseMipLevel "
- "(%d) and levelCount (%d) be less than or equal to "
- "the total number of levels (%d).", funcName,
- mem_barrier->subresourceRange.baseMipLevel,
- mem_barrier->subresourceRange.levelCount, mipLevels);
+ if ((mem_barrier->subresourceRange.baseMipLevel + mem_barrier->subresourceRange.levelCount) > mipLevels) {
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the baseMipLevel "
+ "(%d) and levelCount (%d) be less than or equal to "
+ "the total number of levels (%d).",
+ funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
+ mipLevels);
}
}
}
@@ -6814,81 +6632,59 @@ VkBool32 ValidateBarriers(const char* funcName,
auto mem_barrier = &pBufferMemBarriers[i];
if (pCB->activeRenderPass) {
skip_call |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_BARRIER, "DS",
- "%s: Buffer Barriers cannot be used during a render pass.",
- funcName);
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
}
if (!mem_barrier)
continue;
// Validate buffer barrier queue family indices
if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
- mem_barrier->srcQueueFamilyIndex >=
- dev_data->physDevProperties.queue_family_properties.size()) ||
+ mem_barrier->srcQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size()) ||
(mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
- mem_barrier->dstQueueFamilyIndex >=
- dev_data->physDevProperties.queue_family_properties.size())) {
- skip_call |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
- "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
- "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER
- ") for this device.", funcName,
- reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
- dev_data->physDevProperties.queue_family_properties.size());
+ mem_barrier->dstQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size())) {
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
+ "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
+ "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
+ funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
+ dev_data->physDevProperties.queue_family_properties.size());
}
auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
- uint64_t buffer_size = buffer_data->second.create_info
- ? reinterpret_cast<uint64_t &>(
- buffer_data->second.create_info->size)
- : 0;
+ uint64_t buffer_size =
+ buffer_data->second.create_info ? reinterpret_cast<uint64_t &>(buffer_data->second.create_info->size) : 0;
if (buffer_data != dev_data->bufferMap.end()) {
if (mem_barrier->offset >= buffer_size) {
skip_call |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_BARRIER, "DS",
- "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64
- " whose sum is not less than total size %" PRIu64 ".",
- funcName,
- reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
- reinterpret_cast<const uint64_t &>(mem_barrier->offset),
- buffer_size);
- } else if (mem_barrier->size != VK_WHOLE_SIZE &&
- (mem_barrier->offset + mem_barrier->size > buffer_size)) {
- skip_call |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_BARRIER, "DS",
- "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64
- " and size %" PRIu64
- " whose sum is greater than total size %" PRIu64 ".",
- funcName,
- reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
- reinterpret_cast<const uint64_t &>(mem_barrier->offset),
- reinterpret_cast<const uint64_t &>(mem_barrier->size),
- buffer_size);
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64
+ " whose sum is not less than total size %" PRIu64 ".",
+ funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
+ reinterpret_cast<const uint64_t &>(mem_barrier->offset), buffer_size);
+ } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
+ "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " and size %" PRIu64
+ " whose sum is greater than total size %" PRIu64 ".",
+ funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
+ reinterpret_cast<const uint64_t &>(mem_barrier->offset),
+ reinterpret_cast<const uint64_t &>(mem_barrier->size), buffer_size);
}
}
}
return skip_call;
}
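A minimal sketch (not part of this patch) of a buffer barrier that passes the queue-family and range checks above; buf is a hypothetical VkBuffer.

VkBufferMemoryBarrier buf_barrier = {};
buf_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
buf_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
buf_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
buf_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; // both IGNORED: no ownership transfer
buf_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
buf_barrier.buffer = buf;
buf_barrier.offset = 0;           // must be less than the buffer's size
buf_barrier.size = VK_WHOLE_SIZE; // or an explicit size with offset + size <= buffer size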
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents(
- VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents,
- VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
- uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
+ VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
VkPipelineStageFlags stageMask = 0;
for (uint32_t i = 0; i < eventCount; ++i) {
@@ -6900,27 +6696,20 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents(
} else {
auto global_event_data = dev_data->eventMap.find(pEvents[i]);
if (global_event_data == dev_data->eventMap.end()) {
- skipCall |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
- reinterpret_cast<const uint64_t &>(pEvents[i]),
- __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
- "Fence 0x%" PRIx64
- " cannot be waited on if it has never been set.",
- reinterpret_cast<const uint64_t &>(pEvents[i]));
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
+ reinterpret_cast<const uint64_t &>(pEvents[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
+                                    "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
+ reinterpret_cast<const uint64_t &>(pEvents[i]));
} else {
stageMask |= global_event_data->second.stageMask;
}
}
}
if (sourceStageMask != stageMask) {
- skipCall |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_FENCE, "DS",
- "srcStageMask in vkCmdWaitEvents must be the bitwise OR of the "
- "stageMask parameters used in calls to vkCmdSetEvent and "
- "VK_PIPELINE_STAGE_HOST_BIT if used with vkSetEvent.");
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_FENCE, "DS", "srcStageMask in vkCmdWaitEvents must be the bitwise OR of the "
+ "stageMask parameters used in calls to vkCmdSetEvent and "
+ "VK_PIPELINE_STAGE_HOST_BIT if used with vkSetEvent.");
}
if (pCB->state == CB_RECORDING) {
skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
@@ -6929,57 +6718,50 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents(
}
skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
skipCall |=
- ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount,
- pMemoryBarriers, bufferMemoryBarrierCount,
- pBufferMemoryBarriers, imageMemoryBarrierCount,
- pImageMemoryBarriers);
+ ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
+ pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall)
dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
- memoryBarrierCount, pMemoryBarriers,
- bufferMemoryBarrierCount, pBufferMemoryBarriers,
- imageMemoryBarrierCount, pImageMemoryBarriers);
-}
-
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier(
- VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
- uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers)
-{
+ memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
+ pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
+}
+
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
+ VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
skipCall |=
- ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers,
- bufferMemoryBarrierCount, pBufferMemoryBarriers,
- imageMemoryBarrierCount, pImageMemoryBarriers);
+ ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
+ pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall)
dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
- memoryBarrierCount, pMemoryBarriers,
- bufferMemoryBarrierCount, pBufferMemoryBarriers,
- imageMemoryBarrierCount, pImageMemoryBarriers);
+ memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
+ pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}
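For reference (not part of this patch), the event/stage-mask pairing the srcStageMask check in vkCmdWaitEvents above expects; cmd, ev0 and ev1 are hypothetical handles.

vkCmdSetEvent(cmd, ev0, VK_PIPELINE_STAGE_TRANSFER_BIT);
vkCmdSetEvent(cmd, ev1, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);

VkEvent events[] = {ev0, ev1};
// sourceStageMask must be the OR of the stage masks used in the vkCmdSetEvent calls.
vkCmdWaitEvents(cmd, 2, events,
                VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                0, nullptr, 0, nullptr, 0, nullptr);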
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
QueryObject query = {queryPool, slot};
pCB->activeQueries.insert(query);
if (!pCB->startedQueries.count(query)) {
- pCB->startedQueries.insert(query);
+ pCB->startedQueries.insert(query);
}
skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
}
@@ -6988,17 +6770,18 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery(VkCommandBuffer comma
dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
QueryObject query = {queryPool, slot};
if (!pCB->activeQueries.count(query)) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
- "Ending a query before it was started: queryPool %" PRIu64 ", index %d", (uint64_t)(queryPool), slot);
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool %" PRIu64 ", index %d",
+ (uint64_t)(queryPool), slot);
} else {
pCB->activeQueries.erase(query);
}
@@ -7014,12 +6797,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer command
dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
for (uint32_t i = 0; i < queryCount; i++) {
QueryObject query = {queryPool, firstQuery + i};
@@ -7038,20 +6821,21 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(VkCommandBuffer c
dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
}
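A minimal sketch (not part of this patch) of the query lifecycle the checks above assume -- reset, begin and end a query before its result is copied; cmd, pool and buf are hypothetical handles.

vkCmdResetQueryPool(cmd, pool, 0, 1);
vkCmdBeginQuery(cmd, pool, 0, 0);
// ... draws being measured ...
vkCmdEndQuery(cmd, pool, 0);
// Copying an unreset or never-begun query here would raise DRAWSTATE_INVALID_QUERY.
vkCmdCopyQueryPoolResults(cmd, pool, 0, 1, buf, 0, sizeof(uint64_t),
                          VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);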
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
- uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
- VkDeviceSize stride, VkQueryResultFlags flags)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
+ VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
for (uint32_t i = 0; i < queryCount; i++) {
QueryObject query = {queryPool, firstQuery + i};
- if(!pCB->queryToStateMap[query]) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
- "Requesting a copy from query to buffer with invalid query: queryPool %" PRIu64 ", index %d", (uint64_t)(queryPool), firstQuery + i);
+ if (!pCB->queryToStateMap[query]) {
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
+ "Requesting a copy from query to buffer with invalid query: queryPool %" PRIu64 ", index %d",
+ (uint64_t)(queryPool), firstQuery + i);
}
}
if (pCB->state == CB_RECORDING) {
@@ -7063,46 +6847,39 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(VkCommandBu
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall)
- dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool,
- firstQuery, queryCount, dstBuffer, dstOffset, stride, flags);
+ dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
+ dstOffset, stride, flags);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
- VkShaderStageFlags stageFlags, uint32_t offset,
- uint32_t size, const void *pValues) {
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
+ VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
+ const void *pValues) {
bool skipCall = false;
- layer_data *dev_data =
- get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pCB->state == CB_RECORDING) {
- skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS,
- "vkCmdPushConstants()");
+ skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
} else {
- skipCall |= report_error_no_cb_begin(dev_data, commandBuffer,
- "vkCmdPushConstants()");
+ skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
}
}
- if ((offset + size) >
- dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
- skipCall |= validatePushConstantSize(dev_data, offset, size,
- "vkCmdPushConstants()");
+ if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
+ skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()");
}
// TODO : Add warning if push constant update doesn't align with range
loader_platform_thread_unlock_mutex(&globalLock);
if (!skipCall)
- dev_data->device_dispatch_table->CmdPushConstants(
- commandBuffer, layout, stageFlags, offset, size, pValues);
+ dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
QueryObject query = {queryPool, slot};
pCB->queryToStateMap[query] = 1;
@@ -7117,21 +6894,21 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp(VkCommandBuffer c
dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkFramebuffer *pFramebuffer) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
if (VK_SUCCESS == result) {
// Shadow create info and store in map
- VkFramebufferCreateInfo* localFBCI = new VkFramebufferCreateInfo(*pCreateInfo);
+ VkFramebufferCreateInfo *localFBCI = new VkFramebufferCreateInfo(*pCreateInfo);
if (pCreateInfo->pAttachments) {
localFBCI->pAttachments = new VkImageView[localFBCI->attachmentCount];
- memcpy((void*)localFBCI->pAttachments, pCreateInfo->pAttachments, localFBCI->attachmentCount*sizeof(VkImageView));
+ memcpy((void *)localFBCI->pAttachments, pCreateInfo->pAttachments, localFBCI->attachmentCount * sizeof(VkImageView));
}
FRAMEBUFFER_NODE fbNode = {};
fbNode.createInfo = *localFBCI;
- std::pair<VkFramebuffer, FRAMEBUFFER_NODE> fbPair(*pFramebuffer,
- fbNode);
+ std::pair<VkFramebuffer, FRAMEBUFFER_NODE> fbPair(*pFramebuffer, fbNode);
loader_platform_thread_lock_mutex(&globalLock);
dev_data->frameBufferMap.insert(fbPair);
loader_platform_thread_unlock_mutex(&globalLock);
@@ -7139,12 +6916,13 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice devi
return result;
}
-VkBool32 FindDependency(const int index, const int dependent, const std::vector<DAGNode>& subpass_to_node, std::unordered_set<uint32_t>& processed_nodes) {
+VkBool32 FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
+ std::unordered_set<uint32_t> &processed_nodes) {
// If we have already checked this node we have not found a dependency path so return false.
if (processed_nodes.count(index))
return VK_FALSE;
processed_nodes.insert(index);
- const DAGNode& node = subpass_to_node[index];
+ const DAGNode &node = subpass_to_node[index];
// Look for a dependency path. If one exists return true else recurse on the previous nodes.
if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
for (auto elem : node.prev) {
@@ -7157,16 +6935,14 @@ VkBool32 FindDependency(const int index, const int dependent, const std::vector<
return VK_FALSE;
}
-VkBool32 CheckDependencyExists(const layer_data *my_data, const int subpass,
- const std::vector<uint32_t> &dependent_subpasses,
- const std::vector<DAGNode> &subpass_to_node,
- VkBool32 &skip_call) {
+VkBool32 CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
+ const std::vector<DAGNode> &subpass_to_node, VkBool32 &skip_call) {
VkBool32 result = VK_TRUE;
// Loop through all subpasses that share the same attachment and make sure a dependency exists
for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
if (subpass == dependent_subpasses[k])
continue;
- const DAGNode& node = subpass_to_node[subpass];
+ const DAGNode &node = subpass_to_node[subpass];
// Check for a specified dependency between the two nodes. If one exists we are done.
auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
@@ -7176,13 +6952,15 @@ VkBool32 CheckDependencyExists(const layer_data *my_data, const int subpass,
if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
// TODO: Verify against Valid Use section of spec
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
+ skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
"A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
subpass, dependent_subpasses[k]);
} else {
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
- "A dependency between subpasses %d and %d must exist but one is not specified.",
- subpass, dependent_subpasses[k]);
+ skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
+ "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
+ dependent_subpasses[k]);
result = VK_FALSE;
}
}
@@ -7190,32 +6968,27 @@ VkBool32 CheckDependencyExists(const layer_data *my_data, const int subpass,
return result;
}
-VkBool32 CheckPreserved(const layer_data *my_data,
- const VkRenderPassCreateInfo *pCreateInfo,
- const int index, const uint32_t attachment,
- const std::vector<DAGNode> &subpass_to_node, int depth,
- VkBool32 &skip_call) {
- const DAGNode& node = subpass_to_node[index];
+VkBool32 CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
+ const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, VkBool32 &skip_call) {
+ const DAGNode &node = subpass_to_node[index];
// If this node writes to the attachment return true as next nodes need to preserve the attachment.
- const VkSubpassDescription& subpass = pCreateInfo->pSubpasses[index];
+ const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
if (attachment == subpass.pColorAttachments[j].attachment)
return VK_TRUE;
}
- if (subpass.pDepthStencilAttachment &&
- subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
+ if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
if (attachment == subpass.pDepthStencilAttachment->attachment)
return VK_TRUE;
}
VkBool32 result = VK_FALSE;
// Loop through previous nodes and see if any of them write to the attachment.
for (auto elem : node.prev) {
- result |= CheckPreserved(my_data, pCreateInfo, elem, attachment,
- subpass_to_node, depth + 1, skip_call);
+ result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
}
    // If the attachment was written to by a previous node then this node needs to preserve it.
if (result && depth > 0) {
- const VkSubpassDescription& subpass = pCreateInfo->pSubpasses[index];
+ const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
VkBool32 has_preserved = VK_FALSE;
for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
if (subpass.pPreserveAttachments[j] == attachment) {
@@ -7224,40 +6997,33 @@ VkBool32 CheckPreserved(const layer_data *my_data,
}
}
if (has_preserved == VK_FALSE) {
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
- "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
+ skip_call |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_RENDERPASS, "DS",
+ "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
}
}
return result;
}
-template <class T>
-bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
- return (((offset1 + size1) > offset2) &&
- ((offset1 + size1) < (offset2 + size2))) ||
+template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
+ return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) ||
((offset1 > offset2) && (offset1 < (offset2 + size2)));
}
-bool isRegionOverlapping(VkImageSubresourceRange range1,
- VkImageSubresourceRange range2) {
- return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount,
- range2.baseMipLevel, range2.levelCount) &&
- isRangeOverlapping(range1.baseArrayLayer, range1.layerCount,
- range2.baseArrayLayer, range2.layerCount));
+bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
+ return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
+ isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}
-VkBool32 ValidateDependencies(const layer_data *my_data,
- const VkRenderPassBeginInfo *pRenderPassBegin,
+VkBool32 ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin,
const std::vector<DAGNode> &subpass_to_node) {
VkBool32 skip_call = VK_FALSE;
- const VkFramebufferCreateInfo *pFramebufferInfo =
- &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
- const VkRenderPassCreateInfo *pCreateInfo =
- my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
+ const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
+ const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
- std::vector<std::vector<uint32_t>> overlapping_attachments(
- pCreateInfo->attachmentCount);
+ std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
VkImageView viewi = pFramebufferInfo->pAttachments[i];
@@ -7269,29 +7035,22 @@ VkBool32 ValidateDependencies(const layer_data *my_data,
}
auto view_data_i = my_data->imageViewMap.find(viewi);
auto view_data_j = my_data->imageViewMap.find(viewj);
- if (view_data_i == my_data->imageViewMap.end() ||
- view_data_j == my_data->imageViewMap.end()) {
+ if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
continue;
}
if (view_data_i->second->image == view_data_j->second->image &&
- isRegionOverlapping(view_data_i->second->subresourceRange,
- view_data_j->second->subresourceRange)) {
+ isRegionOverlapping(view_data_i->second->subresourceRange, view_data_j->second->subresourceRange)) {
overlapping_attachments[i].push_back(j);
overlapping_attachments[j].push_back(i);
continue;
}
- auto image_data_i =
- my_data->imageMap.find(view_data_i->second->image);
- auto image_data_j =
- my_data->imageMap.find(view_data_j->second->image);
- if (image_data_i == my_data->imageMap.end() ||
- image_data_j == my_data->imageMap.end()) {
+ auto image_data_i = my_data->imageMap.find(view_data_i->second->image);
+ auto image_data_j = my_data->imageMap.find(view_data_j->second->image);
+ if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
continue;
}
if (image_data_i->second.mem == image_data_j->second.mem &&
- isRangeOverlapping(image_data_i->second.memOffset,
- image_data_i->second.memSize,
- image_data_j->second.memOffset,
+ isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset,
image_data_j->second.memSize)) {
overlapping_attachments[i].push_back(j);
overlapping_attachments[j].push_back(i);
@@ -7300,92 +7059,78 @@ VkBool32 ValidateDependencies(const layer_data *my_data,
}
// Find for each attachment the subpasses that use them.
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
- const VkSubpassDescription& subpass = pCreateInfo->pSubpasses[i];
+ const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
uint32_t attachment = subpass.pInputAttachments[j].attachment;
input_attachment_to_subpass[attachment].push_back(i);
- for (auto overlapping_attachment :
- overlapping_attachments[attachment]) {
- input_attachment_to_subpass[attachment].push_back(
- overlapping_attachment);
+ for (auto overlapping_attachment : overlapping_attachments[attachment]) {
+ input_attachment_to_subpass[attachment].push_back(overlapping_attachment);
}
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
uint32_t attachment = subpass.pColorAttachments[j].attachment;
output_attachment_to_subpass[attachment].push_back(i);
- for (auto overlapping_attachment :
- overlapping_attachments[attachment]) {
- output_attachment_to_subpass[attachment].push_back(
- overlapping_attachment);
+ for (auto overlapping_attachment : overlapping_attachments[attachment]) {
+ output_attachment_to_subpass[attachment].push_back(overlapping_attachment);
}
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
output_attachment_to_subpass[attachment].push_back(i);
- for (auto overlapping_attachment :
- overlapping_attachments[attachment]) {
- output_attachment_to_subpass[attachment].push_back(
- overlapping_attachment);
+ for (auto overlapping_attachment : overlapping_attachments[attachment]) {
+ output_attachment_to_subpass[attachment].push_back(overlapping_attachment);
}
}
}
// If there is a dependency needed make sure one exists
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
- const VkSubpassDescription& subpass = pCreateInfo->pSubpasses[i];
+ const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
// If the attachment is an input then all subpasses that output must have a dependency relationship
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
- const uint32_t& attachment = subpass.pInputAttachments[j].attachment;
- CheckDependencyExists(my_data, i,
- output_attachment_to_subpass[attachment],
- subpass_to_node, skip_call);
+ const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
+ CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
}
// If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
- const uint32_t& attachment = subpass.pColorAttachments[j].attachment;
- CheckDependencyExists(my_data, i,
- output_attachment_to_subpass[attachment],
- subpass_to_node, skip_call);
- CheckDependencyExists(my_data, i,
- input_attachment_to_subpass[attachment],
- subpass_to_node, skip_call);
+ const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
+ CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
+ CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
- const uint32_t& attachment = subpass.pDepthStencilAttachment->attachment;
- CheckDependencyExists(my_data, i,
- output_attachment_to_subpass[attachment],
- subpass_to_node, skip_call);
- CheckDependencyExists(my_data, i,
- input_attachment_to_subpass[attachment],
- subpass_to_node, skip_call);
+ const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
+ CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
+ CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
}
}
- // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was written.
+ // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was
+ // written.
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
- const VkSubpassDescription& subpass = pCreateInfo->pSubpasses[i];
+ const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
- CheckPreserved(my_data, pCreateInfo, i,
- subpass.pInputAttachments[j].attachment,
- subpass_to_node, 0, skip_call);
+ CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
}
}
return skip_call;
}
-VkBool32 ValidateLayouts(const layer_data* my_data, VkDevice device, const VkRenderPassCreateInfo* pCreateInfo) {
+VkBool32 ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
VkBool32 skip = VK_FALSE;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
- const VkSubpassDescription& subpass = pCreateInfo->pSubpasses[i];
+ const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
// TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
- skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
+ skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
+ (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
"Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
} else {
- skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
- "Layout for input attachment is %d but can only be READ_ONLY_OPTIMAL or GENERAL.", subpass.pInputAttachments[j].attachment);
+ skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
+ "Layout for input attachment is %d but can only be READ_ONLY_OPTIMAL or GENERAL.",
+ subpass.pInputAttachments[j].attachment);
}
}
}
@@ -7393,24 +7138,30 @@ VkBool32 ValidateLayouts(const layer_data* my_data, VkDevice device, const VkRen
if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
// TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
- skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
+ skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
+ (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
"Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
} else {
- skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
- "Layout for color attachment is %d but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.", subpass.pColorAttachments[j].attachment);
+ skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
+ "Layout for color attachment is %d but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
+ subpass.pColorAttachments[j].attachment);
}
}
}
- if ((subpass.pDepthStencilAttachment != NULL) &&
- (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
+ if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
// TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
- skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
+ skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
+ (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
"Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
} else {
- skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
- "Layout for depth attachment is %d but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.", subpass.pDepthStencilAttachment->attachment);
+ skip |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
+ "Layout for depth attachment is %d but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
+ subpass.pDepthStencilAttachment->attachment);
}
}
}
@@ -7418,20 +7169,23 @@ VkBool32 ValidateLayouts(const layer_data* my_data, VkDevice device, const VkRen
return skip;
}
-VkBool32 CreatePassDAG(const layer_data* my_data, VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, std::vector<DAGNode>& subpass_to_node, std::vector<bool>& has_self_dependency) {
+VkBool32 CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
+ std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
VkBool32 skip_call = VK_FALSE;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
- DAGNode& subpass_node = subpass_to_node[i];
+ DAGNode &subpass_node = subpass_to_node[i];
subpass_node.pass = i;
}
for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
- const VkSubpassDependency& dependency = pCreateInfo->pDependencies[i];
- if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL && dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
+ const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
+ if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
+ dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
+ skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_RENDERPASS, "DS",
"Depedency graph must be specified such that an earlier pass cannot depend on a later pass.");
} else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
- "The src and dest subpasses cannot both be external.");
+ skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
} else if (dependency.srcSubpass == dependency.dstSubpass) {
has_self_dependency[dependency.srcSubpass] = true;
}
@@ -7446,18 +7200,14 @@ VkBool32 CreatePassDAG(const layer_data* my_data, VkDevice device, const VkRende
}
// TODOSC : Add intercept of vkCreateShaderModule
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(
- VkDevice device,
- const VkShaderModuleCreateInfo *pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkShaderModule *pShaderModule)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkShaderModule *pShaderModule) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkBool32 skip_call = VK_FALSE;
if (!shader_is_spirv(pCreateInfo)) {
skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- /* dev */ 0, __LINE__, SHADER_CHECKER_NON_SPIRV_SHADER, "SC",
- "Shader is not SPIR-V");
+ /* dev */ 0, __LINE__, SHADER_CHECKER_NON_SPIRV_SHADER, "SC", "Shader is not SPIR-V");
}
if (VK_FALSE != skip_call)
@@ -7473,10 +7223,11 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(
return res;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkRenderPass *pRenderPass) {
VkBool32 skip_call = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
// Create DAG
std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
@@ -7493,54 +7244,51 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice devic
loader_platform_thread_lock_mutex(&globalLock);
// TODOSC : Merge in tracking of renderpass from shader_checker
// Shadow create info and store in map
- VkRenderPassCreateInfo* localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
+ VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
if (pCreateInfo->pAttachments) {
localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
- memcpy((void*)localRPCI->pAttachments, pCreateInfo->pAttachments, localRPCI->attachmentCount*sizeof(VkAttachmentDescription));
+ memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
+ localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
}
if (pCreateInfo->pSubpasses) {
localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
- memcpy((void*)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount*sizeof(VkSubpassDescription));
+ memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
- VkSubpassDescription *subpass = (VkSubpassDescription *) &localRPCI->pSubpasses[i];
+ VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
const uint32_t attachmentCount = subpass->inputAttachmentCount +
- subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments?1:0)) +
- ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
+ subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
+ ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
- memcpy(attachments, subpass->pInputAttachments,
- sizeof(attachments[0]) * subpass->inputAttachmentCount);
+ memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
subpass->pInputAttachments = attachments;
attachments += subpass->inputAttachmentCount;
- memcpy(attachments, subpass->pColorAttachments,
- sizeof(attachments[0]) * subpass->colorAttachmentCount);
+ memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
subpass->pColorAttachments = attachments;
attachments += subpass->colorAttachmentCount;
if (subpass->pResolveAttachments) {
- memcpy(attachments, subpass->pResolveAttachments,
- sizeof(attachments[0]) * subpass->colorAttachmentCount);
+ memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
subpass->pResolveAttachments = attachments;
attachments += subpass->colorAttachmentCount;
}
if (subpass->pDepthStencilAttachment) {
- memcpy(attachments, subpass->pDepthStencilAttachment,
- sizeof(attachments[0]) * 1);
+ memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
subpass->pDepthStencilAttachment = attachments;
attachments += 1;
}
- memcpy(attachments, subpass->pPreserveAttachments,
- sizeof(attachments[0]) * subpass->preserveAttachmentCount);
+ memcpy(attachments, subpass->pPreserveAttachments, sizeof(attachments[0]) * subpass->preserveAttachmentCount);
subpass->pPreserveAttachments = &attachments->attachment;
}
}
if (pCreateInfo->pDependencies) {
localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
- memcpy((void*)localRPCI->pDependencies, pCreateInfo->pDependencies, localRPCI->dependencyCount*sizeof(VkSubpassDependency));
+ memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
+ localRPCI->dependencyCount * sizeof(VkSubpassDependency));
}
dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE(localRPCI);
dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency;
@@ -7550,15 +7298,14 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice devic
return result;
}
// Free the renderpass shadow
-static void deleteRenderPasses(layer_data* my_data)
-{
+static void deleteRenderPasses(layer_data *my_data) {
if (my_data->renderPassMap.size() <= 0)
return;
- for (auto ii=my_data->renderPassMap.begin(); ii!=my_data->renderPassMap.end(); ++ii) {
- const VkRenderPassCreateInfo* pRenderPassInfo = (*ii).second->pCreateInfo;
+ for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
+ const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
delete[] pRenderPassInfo->pAttachments;
if (pRenderPassInfo->pSubpasses) {
- for (uint32_t i=0; i<pRenderPassInfo->subpassCount; ++i) {
+ for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
            // Attachments are all allocated in a block, so just need to
// find the first non-null one to delete
if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
@@ -7580,31 +7327,25 @@ static void deleteRenderPasses(layer_data* my_data)
my_data->renderPassMap.clear();
}
-VkBool32 VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo* pRenderPassBegin) {
+VkBool32 VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
VkBool32 skip_call = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, cmdBuffer);
- const VkRenderPassCreateInfo* pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
- const VkFramebufferCreateInfo framebufferInfo =
- dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
+ const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
+ const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
- skip_call |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_RENDERPASS, "DS",
- "You cannot start a render pass using a framebuffer "
- "with a different number of attachments.");
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
+ "with a different number of attachments.");
}
for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
const VkImageView &image_view = framebufferInfo.pAttachments[i];
auto image_data = dev_data->imageViewMap.find(image_view);
assert(image_data != dev_data->imageViewMap.end());
- const VkImage& image = image_data->second->image;
- const VkImageSubresourceRange& subRange =
- image_data->second->subresourceRange;
- IMAGE_CMD_BUF_LAYOUT_NODE newNode = {
- pRenderPassInfo->pAttachments[i].initialLayout,
- pRenderPassInfo->pAttachments[i].initialLayout};
+ const VkImage &image = image_data->second->image;
+ const VkImageSubresourceRange &subRange = image_data->second->subresourceRange;
+ IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
+ pRenderPassInfo->pAttachments[i].initialLayout};
// TODO: Do not iterate over every possibility - consolidate where
// possible
for (uint32_t j = 0; j < subRange.levelCount; j++) {
@@ -7618,14 +7359,12 @@ VkBool32 VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const
continue;
}
if (newNode.layout != node.layout) {
- skip_call |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_RENDERPASS, "DS",
- "You cannot start a render pass using attachment %i "
- "where the "
- "intial layout differs from the starting layout.",
- i);
+ skip_call |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
+ "where the "
+                                                            "initial layout differs from the starting layout.",
+ i);
}
}
}
@@ -7633,90 +7372,75 @@ VkBool32 VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const
return skip_call;
}
-void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const int subpass_index) {
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, cmdBuffer);
+void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const int subpass_index) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
if (render_pass_data == dev_data->renderPassMap.end()) {
return;
}
- const VkRenderPassCreateInfo* pRenderPassInfo = render_pass_data->second->pCreateInfo;
+ const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
if (framebuffer_data == dev_data->frameBufferMap.end()) {
return;
}
- const VkFramebufferCreateInfo framebufferInfo =
- framebuffer_data->second.createInfo;
- const VkSubpassDescription& subpass = pRenderPassInfo->pSubpasses[subpass_index];
+ const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
+ const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
- const VkImageView &image_view =
- framebufferInfo
- .pAttachments[subpass.pInputAttachments[j].attachment];
- SetLayout(dev_data, pCB, image_view,
- subpass.pInputAttachments[j].layout);
+ const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
+ SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
- const VkImageView &image_view =
- framebufferInfo
- .pAttachments[subpass.pColorAttachments[j].attachment];
- SetLayout(dev_data, pCB, image_view,
- subpass.pColorAttachments[j].layout);
+ const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
+ SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
}
- if ((subpass.pDepthStencilAttachment != NULL) &&
- (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
- const VkImageView &image_view =
- framebufferInfo
- .pAttachments[subpass.pDepthStencilAttachment->attachment];
- SetLayout(dev_data, pCB, image_view,
- subpass.pDepthStencilAttachment->layout);
+ if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
+ const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
+ SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
}
}
-VkBool32 validatePrimaryCommandBuffer(const layer_data* my_data, const GLOBAL_CB_NODE* pCB, const std::string& cmd_name) {
+VkBool32 validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
VkBool32 skip_call = VK_FALSE;
if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
- "Cannot execute command %s on a secondary command buffer.", cmd_name.c_str());
+ skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
+ cmd_name.c_str());
}
return skip_call;
}
-void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo* pRenderPassBegin) {
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, cmdBuffer);
+void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
if (render_pass_data == dev_data->renderPassMap.end()) {
return;
}
- const VkRenderPassCreateInfo* pRenderPassInfo = render_pass_data->second->pCreateInfo;
+ const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
if (framebuffer_data == dev_data->frameBufferMap.end()) {
return;
}
- const VkFramebufferCreateInfo framebufferInfo =
- framebuffer_data->second.createInfo;
+ const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
const VkImageView &image_view = framebufferInfo.pAttachments[i];
- SetLayout(dev_data, pCB, image_view,
- pRenderPassInfo->pAttachments[i].finalLayout);
+ SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
if (pRenderPassBegin && pRenderPassBegin->renderPass) {
skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
- auto render_pass_data =
- dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
+ auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
if (render_pass_data != dev_data->renderPassMap.end()) {
- skipCall |= ValidateDependencies(
- dev_data, pRenderPassBegin,
- render_pass_data->second->subpassToNode);
+ skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode);
}
skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
@@ -7728,11 +7452,11 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(VkCommandBuffer
pCB->activeSubpassContents = contents;
pCB->framebuffer = pRenderPassBegin->framebuffer;
// Connect this framebuffer to this cmdBuffer
- dev_data->frameBufferMap[pCB->framebuffer]
- .referencingCmdBuffers.insert(pCB->commandBuffer);
+ dev_data->frameBufferMap[pCB->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer);
} else {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
- "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
}
}
loader_platform_thread_unlock_mutex(&globalLock);
@@ -7746,12 +7470,11 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(VkCommandBuffer
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
TransitionSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo, ++dev_data->currentSubpass);
if (pCB) {
skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
@@ -7769,12 +7492,11 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer comm
dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
TransitionFinalSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo);
if (pCB) {
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
@@ -7789,14 +7511,20 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer co
dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
}
-bool logInvalidAttachmentMessage(layer_data* dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach, const char* msg) {
- return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
- "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64 " that is not compatible with the current render pass %" PRIx64 "."
- "Attachment %" PRIu32 " is not compatable with %" PRIu32 ". %s",
- (void*)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach, msg);
+bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
+ VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach, const char *msg) {
+ return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
+ "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
+                   " that is not compatible with the current render pass %" PRIx64 ". "
+                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
+ (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach,
+ msg);
}
-bool validateAttachmentCompatibility(layer_data* dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass, uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, uint32_t secondaryAttach, bool is_multi) {
+bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
+ uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
+ uint32_t secondaryAttach, bool is_multi) {
bool skip_call = false;
auto primary_data = dev_data->renderPassMap.find(primaryPass);
auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
@@ -7810,31 +7538,41 @@ bool validateAttachmentCompatibility(layer_data* dev_data, VkCommandBuffer prima
return skip_call;
}
if (primaryAttach == VK_ATTACHMENT_UNUSED) {
- skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach, secondaryAttach, "The first is unused while the second is not.");
+ skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
+ secondaryAttach, "The first is unused while the second is not.");
return skip_call;
}
if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
- skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach, secondaryAttach, "The second is unused while the first is not.");
+ skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
+ secondaryAttach, "The second is unused while the first is not.");
return skip_call;
}
- if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format != secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) {
- skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach, secondaryAttach, "They have different formats.");
+ if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format !=
+ secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) {
+ skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
+ secondaryAttach, "They have different formats.");
}
- if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples != secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) {
- skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach, secondaryAttach, "They have different samples.");
+ if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples !=
+ secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) {
+ skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
+ secondaryAttach, "They have different samples.");
}
- if (is_multi && primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags != secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) {
- skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach, secondaryAttach, "They have different flags.");
+ if (is_multi &&
+ primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags !=
+ secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) {
+ skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
+ secondaryAttach, "They have different flags.");
}
return skip_call;
}
-bool validateSubpassCompatibility(layer_data* dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass, bool is_multi) {
+bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
+ VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass, bool is_multi) {
bool skip_call = false;
auto primary_data = dev_data->renderPassMap.find(primaryPass);
auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
- const VkSubpassDescription& primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
- const VkSubpassDescription& secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
+ const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
+ const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
@@ -7844,7 +7582,8 @@ bool validateSubpassCompatibility(layer_data* dev_data, VkCommandBuffer primaryB
if (i < secondary_desc.inputAttachmentCount) {
secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
}
- skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer, secondaryPass, secondary_input_attach, is_multi);
+ skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
+ secondaryPass, secondary_input_attach, is_multi);
}
uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
@@ -7855,7 +7594,8 @@ bool validateSubpassCompatibility(layer_data* dev_data, VkCommandBuffer primaryB
if (i < secondary_desc.colorAttachmentCount) {
secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
}
- skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer, secondaryPass, secondary_color_attach, is_multi);
+ skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
+ secondaryPass, secondary_color_attach, is_multi);
uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
@@ -7863,7 +7603,8 @@ bool validateSubpassCompatibility(layer_data* dev_data, VkCommandBuffer primaryB
if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
}
- skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer, secondaryPass, secondary_resolve_attach, is_multi);
+ skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
+ secondaryPass, secondary_resolve_attach, is_multi);
}
uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
if (primary_desc.pDepthStencilAttachment) {
@@ -7872,11 +7613,13 @@ bool validateSubpassCompatibility(layer_data* dev_data, VkCommandBuffer primaryB
if (secondary_desc.pDepthStencilAttachment) {
secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
}
- skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer, secondaryPass, secondary_depthstencil_attach, is_multi);
+ skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
+ secondaryPass, secondary_depthstencil_attach, is_multi);
return skip_call;
}
-bool validateRenderPassCompatibility(layer_data* dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
+bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
+ VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
bool skip_call = false;
// Early exit if renderPass objects are identical (and therefore compatible)
if (primaryPass == secondaryPass)
@@ -7884,32 +7627,40 @@ bool validateRenderPassCompatibility(layer_data* dev_data, VkCommandBuffer prima
auto primary_data = dev_data->renderPassMap.find(primaryPass);
auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
- skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
- "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
- (void*)primaryBuffer, (uint64_t)(primaryPass));
+ skip_call |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
+ "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
+ (void *)primaryBuffer, (uint64_t)(primaryPass));
return skip_call;
}
if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
- skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
- "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
- (void*)secondaryBuffer, (uint64_t)(secondaryPass));
+ skip_call |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
+ "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
+ (void *)secondaryBuffer, (uint64_t)(secondaryPass));
return skip_call;
}
if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
- skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
- "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64 " that is not compatible with the current render pass %" PRIx64 "."
- "They have a different number of subpasses.",
- (void*)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
+ "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
+ " that is not compatible with the current render pass %" PRIx64 "."
+ "They have a different number of subpasses.",
+ (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
return skip_call;
}
bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
- skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
+ skip_call |=
+ validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
}
return skip_call;
}
-bool validateFramebuffer(layer_data* dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE* pCB, VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE* pSubCB) {
+bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
+ VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
bool skip_call = false;
if (!pSubCB->beginInfo.pInheritanceInfo) {
return skip_call;
@@ -7918,162 +7669,162 @@ bool validateFramebuffer(layer_data* dev_data, VkCommandBuffer primaryBuffer, co
VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
if (secondary_fb != VK_NULL_HANDLE) {
if (primary_fb != secondary_fb) {
- skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
- "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64 " that is not compatible with the current framebuffer %" PRIx64 ".",
- (void*)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
+ "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64
+ " that is not compatible with the current framebuffer %" PRIx64 ".",
+ (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
}
auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
if (fb_data == dev_data->frameBufferMap.end()) {
- skip_call |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
- "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
- "which has invalid framebuffer %" PRIx64 ".",
- (void *)secondaryBuffer, (uint64_t)(secondary_fb));
+ skip_call |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
+ "which has invalid framebuffer %" PRIx64 ".",
+ (void *)secondaryBuffer, (uint64_t)(secondary_fb));
return skip_call;
}
- skip_call |= validateRenderPassCompatibility(
- dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
- secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
+ skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
+ secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
}
return skip_call;
}
-bool validateSecondaryCommandBufferState(layer_data *dev_data,
- GLOBAL_CB_NODE *pCB,
- GLOBAL_CB_NODE *pSubCB) {
+bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
bool skipCall = false;
unordered_set<int> activeTypes;
for (auto queryObject : pCB->activeQueries) {
auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
if (queryPoolData != dev_data->queryPoolMap.end()) {
- if (queryPoolData->second.createInfo.queryType ==
- VK_QUERY_TYPE_PIPELINE_STATISTICS &&
- pSubCB->beginInfo.pInheritanceInfo) {
- VkQueryPipelineStatisticFlags cmdBufStatistics =
- pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
- if ((cmdBufStatistics &
- queryPoolData->second.createInfo.pipelineStatistics) !=
- cmdBufStatistics) {
- skipCall |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
- "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
- "which has invalid active query pool %" PRIx64
- ". Pipeline statistics is being queried so the command "
- "buffer must have all bits set on the queryPool.",
- reinterpret_cast<void *>(pCB->commandBuffer),
- reinterpret_cast<const uint64_t&>(queryPoolData->first));
- }
+ if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
+ pSubCB->beginInfo.pInheritanceInfo) {
+ VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
+ if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
+ skipCall |= log_msg(
+ dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
+ "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
+ "which has invalid active query pool %" PRIx64 ". Pipeline statistics is being queried so the command "
+ "buffer must have all bits set on the queryPool.",
+ reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
+ }
}
activeTypes.insert(queryPoolData->second.createInfo.queryType);
}
}
for (auto queryObject : pSubCB->startedQueries) {
- auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
- if (queryPoolData != dev_data->queryPoolMap.end() &&
- activeTypes.count(queryPoolData->second.createInfo.queryType)) {
- skipCall |=
- log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
- "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
- "which has invalid active query pool %" PRIx64
- "of type %d but a query of that type has been started on "
- "secondary Cmd Buffer %p.",
- reinterpret_cast<void *>(pCB->commandBuffer),
- reinterpret_cast<const uint64_t&>(queryPoolData->first),
- queryPoolData->second.createInfo.queryType,
- reinterpret_cast<void *>(pSubCB->commandBuffer));
+ auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
+ if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
+ "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
+ "which has invalid active query pool %" PRIx64 "of type %d but a query of that type has been started on "
+ "secondary Cmd Buffer %p.",
+ reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
+ queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
}
}
return skipCall;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer* pCommandBuffers)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
VkBool32 skipCall = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- GLOBAL_CB_NODE* pCB = getCBNode(dev_data, commandBuffer);
+ GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
if (pCB) {
- GLOBAL_CB_NODE* pSubCB = NULL;
- for (uint32_t i=0; i<commandBuffersCount; i++) {
+ GLOBAL_CB_NODE *pSubCB = NULL;
+ for (uint32_t i = 0; i < commandBuffersCount; i++) {
pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
if (!pSubCB) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
- "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.", (void*)pCommandBuffers[i], i);
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
+ "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.",
+ (void *)pCommandBuffers[i], i);
} else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, 0, __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
- "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers array. All cmd buffers in pCommandBuffers array must be secondary.", (void*)pCommandBuffers[i], i);
+ skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
+ "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers "
+ "array. All cmd buffers in pCommandBuffers array must be secondary.",
+ (void *)pCommandBuffers[i], i);
} else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
- "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64 ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.", (void*)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
+ skipCall |= log_msg(
+ dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
+ "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64
+ ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
+ (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
} else {
                    // Make sure render pass is compatible with parent command buffer pass if it has the RENDER_PASS_CONTINUE_BIT set
- skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass, pCommandBuffers[i], pSubCB->beginInfo.pInheritanceInfo->renderPass);
+ skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass, pCommandBuffers[i],
+ pSubCB->beginInfo.pInheritanceInfo->renderPass);
skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
}
string errorString = "";
- if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass, pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
- "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64 ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
- (void*)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void*)commandBuffer, (uint64_t)pCB->activeRenderPass, errorString.c_str());
+ if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass,
+ pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
+ skipCall |= log_msg(
+ dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
+ "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64
+ ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
+ (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
+ (uint64_t)pCB->activeRenderPass, errorString.c_str());
}
// If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
// that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
- "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64 ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
- (void*)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer, (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
+ skipCall |= log_msg(
+ dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
+ "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64
+ ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
+ (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
+ (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
}
}
}
// TODO(mlentine): Move more logic into this method
- skipCall |=
- validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
+ skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
skipCall |= validateCommandBufferState(dev_data, pSubCB);
// Secondary cmdBuffers are considered pending execution starting w/
// being recorded
- if (!(pSubCB->beginInfo.flags &
- VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
- if (dev_data->globalInFlightCmdBuffers.find(
- pSubCB->commandBuffer) !=
- dev_data->globalInFlightCmdBuffers.end()) {
+ if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
+ if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
skipCall |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)(pCB->commandBuffer), __LINE__,
- DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
- "Attempt to simultaneously execute CB %#" PRIxLEAST64
- " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
+ dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
+ "Attempt to simultaneously execute CB %#" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
"set!",
(uint64_t)(pCB->commandBuffer));
}
if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                // Warn that a non-simultaneous secondary cmd buffer will cause the primary to be treated as non-simultaneous
- skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
- "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64 ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer (%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even though it does.",
- (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
+ skipCall |= log_msg(
+ dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
+ "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
+ ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
+ "(%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
+ "set, even though it does.",
+ (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
}
}
- if (!pCB->activeQueries.empty() &&
- !dev_data->physDevProperties.features.inheritedQueries) {
- skipCall |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__,
- DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
- "vkCmdExecuteCommands(): Secondary Command Buffer "
- "(%#" PRIxLEAST64 ") cannot be submitted with a query in "
- "flight and inherited queries not "
- "supported on this device.",
- reinterpret_cast<uint64_t>(pCommandBuffers[i]));
+ if (!pCB->activeQueries.empty() && !dev_data->physDevProperties.features.inheritedQueries) {
+ skipCall |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
+ "vkCmdExecuteCommands(): Secondary Command Buffer "
+ "(%#" PRIxLEAST64 ") cannot be submitted with a query in "
+ "flight and inherited queries not "
+ "supported on this device.",
+ reinterpret_cast<uint64_t>(pCommandBuffers[i]));
}
pSubCB->primaryCommandBuffer = pCB->commandBuffer;
pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
@@ -8089,21 +7840,17 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(VkCommandBuffer
VkBool32 ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
VkBool32 skip_call = VK_FALSE;
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
auto mem_data = dev_data->memImageMap.find(mem);
if (mem_data != dev_data->memImageMap.end()) {
std::vector<VkImageLayout> layouts;
if (FindLayouts(dev_data, mem_data->second, layouts)) {
for (auto layout : layouts) {
- if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED &&
- layout != VK_IMAGE_LAYOUT_GENERAL) {
- skip_call |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
- "Cannot map an image with layout %s. Only "
- "GENERAL or PREINITIALIZED are supported.",
- string_VkImageLayout(layout));
+ if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
+ skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
+ "GENERAL or PREINITIALIZED are supported.",
+ string_VkImageLayout(layout));
}
}
}
@@ -8111,15 +7858,9 @@ VkBool32 ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
return skip_call;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory(
- VkDevice device,
- VkDeviceMemory mem,
- VkDeviceSize offset,
- VkDeviceSize size,
- VkFlags flags,
- void **ppData)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkBool32 skip_call = VK_FALSE;
loader_platform_thread_lock_mutex(&globalLock);
@@ -8132,17 +7873,11 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory(
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(
- VkDevice device,
- VkImage image,
- VkDeviceMemory mem,
- VkDeviceSize memOffset)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memOffset) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memOffset);
VkMemoryRequirements memRequirements;
- dev_data->device_dispatch_table->GetImageMemoryRequirements(
- device, image, &memRequirements);
+ dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
loader_platform_thread_lock_mutex(&globalLock);
dev_data->memImageMap[mem] = image;
dev_data->imageMap[image].mem = mem;
@@ -8152,9 +7887,8 @@ VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(
return result;
}
-
VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
dev_data->eventMap[event].needsSignaled = false;
dev_data->eventMap[event].stageMask = VK_PIPELINE_STAGE_HOST_BIT;
@@ -8163,29 +7897,26 @@ VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
return result;
}
-VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(
- VkQueue queue,
- uint32_t bindInfoCount,
- const VkBindSparseInfo* pBindInfo,
- VkFence fence)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
+VKAPI_ATTR VkResult VKAPI_CALL
+vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
VkBool32 skip_call = VK_FALSE;
loader_platform_thread_lock_mutex(&globalLock);
- for (uint32_t bindIdx=0; bindIdx < bindInfoCount; ++bindIdx) {
- const VkBindSparseInfo& bindInfo = pBindInfo[bindIdx];
- for (uint32_t i=0; i < bindInfo.waitSemaphoreCount; ++i) {
+ for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
+ const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
+ for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
if (dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled) {
- dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled =
- 0;
+ dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled = 0;
} else {
- skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
- "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
- (uint64_t)(queue), (uint64_t)(bindInfo.pWaitSemaphores[i]));
+ skip_call |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
+ __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
+ "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
+ (uint64_t)(queue), (uint64_t)(bindInfo.pWaitSemaphores[i]));
}
}
- for (uint32_t i=0; i < bindInfo.signalSemaphoreCount; ++i) {
+ for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
dev_data->semaphoreMap[bindInfo.pSignalSemaphores[i]].signaled = 1;
}
}
@@ -8197,13 +7928,9 @@ VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(
- VkDevice device,
- const VkSemaphoreCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSemaphore* pSemaphore)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
@@ -8214,12 +7941,9 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(
return result;
}
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent(
- VkDevice device,
- const VkEventCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkEvent* pEvent) {
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
if (result == VK_SUCCESS) {
loader_platform_thread_lock_mutex(&globalLock);
@@ -8231,13 +7955,10 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(
- VkDevice device,
- const VkSwapchainCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkSwapchainKHR *pSwapchain)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSwapchainKHR *pSwapchain) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
if (VK_SUCCESS == result) {
@@ -8250,24 +7971,19 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(
- VkDevice device,
- VkSwapchainKHR swapchain,
- const VkAllocationCallbacks *pAllocator)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
if (swapchain_data->second->images.size() > 0) {
for (auto swapchain_image : swapchain_data->second->images) {
- auto image_sub =
- dev_data->imageSubresourceMap.find(swapchain_image);
+ auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
if (image_sub != dev_data->imageSubresourceMap.end()) {
for (auto imgsubpair : image_sub->second) {
- auto image_item =
- dev_data->imageLayoutMap.find(imgsubpair);
+ auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
if (image_item != dev_data->imageLayoutMap.end()) {
dev_data->imageLayoutMap.erase(image_item);
}
@@ -8283,18 +7999,15 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(
dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(
- VkDevice device,
- VkSwapchainKHR swapchain,
- uint32_t* pCount,
- VkImage* pSwapchainImages)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
if (result == VK_SUCCESS && pSwapchainImages != NULL) {
// This should never happen and is checked by param checker.
- if (!pCount) return result;
+ if (!pCount)
+ return result;
loader_platform_thread_lock_mutex(&globalLock);
for (uint32_t i = 0; i < *pCount; ++i) {
IMAGE_LAYOUT_NODE image_layout_node;
@@ -8302,62 +8015,50 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(
auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
image_layout_node.format = swapchain_node->createInfo.imageFormat;
dev_data->imageMap[pSwapchainImages[i]].createInfo.mipLevels = 1;
- dev_data->imageMap[pSwapchainImages[i]].createInfo.arrayLayers =
- swapchain_node->createInfo.imageArrayLayers;
+ dev_data->imageMap[pSwapchainImages[i]].createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
swapchain_node->images.push_back(pSwapchainImages[i]);
- ImageSubresourcePair subpair = {pSwapchainImages[i], false,
- VkImageSubresource()};
- dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(
- subpair);
+ ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
+ dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
dev_data->imageLayoutMap[subpair] = image_layout_node;
- dev_data->device_extensions
- .imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
+ dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
}
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR* pPresentInfo)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
VkBool32 skip_call = VK_FALSE;
if (pPresentInfo) {
loader_platform_thread_lock_mutex(&globalLock);
- for (uint32_t i=0; i < pPresentInfo->waitSemaphoreCount; ++i) {
- if (dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]]
- .signaled) {
- dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]]
- .signaled = 0;
+ for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
+ if (dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled) {
+ dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled = 0;
} else {
- skip_call |= log_msg(
- dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__,
- DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
- "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64
- " that has no way to be signaled.",
- (uint64_t)(queue),
- (uint64_t)(pPresentInfo->pWaitSemaphores[i]));
+ skip_call |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
+ __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
+ "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
+ (uint64_t)(queue), (uint64_t)(pPresentInfo->pWaitSemaphores[i]));
}
}
for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
- if (swapchain_data != dev_data->device_extensions.swapchainMap.end() && pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
+ if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
+ pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
vector<VkImageLayout> layouts;
if (FindLayouts(dev_data, image, layouts)) {
for (auto layout : layouts) {
if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
- skip_call |= log_msg(
- dev_data->report_data,
- VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
- reinterpret_cast<uint64_t &>(queue), __LINE__,
- DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
- "Images passed to present must be in layout "
- "PRESENT_SOURCE_KHR but is in %s",
- string_VkImageLayout(layout));
+ skip_call |=
+ log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
+ reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
+ "Images passed to present must be in layout "
+ "PRESENT_SOURCE_KHR but is in %s",
+ string_VkImageLayout(layout));
}
}
}
@@ -8371,16 +8072,11 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue,
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(
- VkDevice device,
- VkSwapchainKHR swapchain,
- uint64_t timeout,
- VkSemaphore semaphore,
- VkFence fence,
- uint32_t* pImageIndex)
-{
- layer_data* dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkResult result = dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
+ VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
+ layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ VkResult result =
+ dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
loader_platform_thread_lock_mutex(&globalLock);
    // FIXME/TODO: Need to add some code to handle the "fence" parameter
dev_data->semaphoreMap[semaphore].signaled = 1;
@@ -8388,13 +8084,10 @@ VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
- VkInstance instance,
- const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDebugReportCallbackEXT* pMsgCallback)
-{
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
if (VK_SUCCESS == res) {
@@ -8405,12 +8098,10 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
return res;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
- VkInstance instance,
- VkDebugReportCallbackEXT msgCallback,
- const VkAllocationCallbacks* pAllocator)
-{
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
+ VkDebugReportCallbackEXT msgCallback,
+ const VkAllocationCallbacks *pAllocator) {
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
loader_platform_thread_lock_mutex(&globalLock);
@@ -8418,237 +8109,229 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
loader_platform_thread_unlock_mutex(&globalLock);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(
- VkInstance instance,
- VkDebugReportFlagsEXT flags,
- VkDebugReportObjectTypeEXT objType,
- uint64_t object,
- size_t location,
- int32_t msgCode,
- const char* pLayerPrefix,
- const char* pMsg)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
+ size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
- my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
+ my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
+ pMsg);
}
-
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char* funcName)
-{
+VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
if (!strcmp(funcName, "vkGetDeviceProcAddr"))
- return (PFN_vkVoidFunction) vkGetDeviceProcAddr;
+ return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
if (!strcmp(funcName, "vkDestroyDevice"))
- return (PFN_vkVoidFunction) vkDestroyDevice;
+ return (PFN_vkVoidFunction)vkDestroyDevice;
if (!strcmp(funcName, "vkQueueSubmit"))
- return (PFN_vkVoidFunction) vkQueueSubmit;
+ return (PFN_vkVoidFunction)vkQueueSubmit;
if (!strcmp(funcName, "vkWaitForFences"))
- return (PFN_vkVoidFunction) vkWaitForFences;
+ return (PFN_vkVoidFunction)vkWaitForFences;
if (!strcmp(funcName, "vkGetFenceStatus"))
- return (PFN_vkVoidFunction) vkGetFenceStatus;
+ return (PFN_vkVoidFunction)vkGetFenceStatus;
if (!strcmp(funcName, "vkQueueWaitIdle"))
- return (PFN_vkVoidFunction) vkQueueWaitIdle;
+ return (PFN_vkVoidFunction)vkQueueWaitIdle;
if (!strcmp(funcName, "vkDeviceWaitIdle"))
- return (PFN_vkVoidFunction) vkDeviceWaitIdle;
+ return (PFN_vkVoidFunction)vkDeviceWaitIdle;
if (!strcmp(funcName, "vkGetDeviceQueue"))
- return (PFN_vkVoidFunction) vkGetDeviceQueue;
+ return (PFN_vkVoidFunction)vkGetDeviceQueue;
if (!strcmp(funcName, "vkDestroyInstance"))
- return (PFN_vkVoidFunction) vkDestroyInstance;
+ return (PFN_vkVoidFunction)vkDestroyInstance;
if (!strcmp(funcName, "vkDestroyDevice"))
- return (PFN_vkVoidFunction) vkDestroyDevice;
+ return (PFN_vkVoidFunction)vkDestroyDevice;
if (!strcmp(funcName, "vkDestroyFence"))
- return (PFN_vkVoidFunction) vkDestroyFence;
+ return (PFN_vkVoidFunction)vkDestroyFence;
if (!strcmp(funcName, "vkResetFences"))
return (PFN_vkVoidFunction)vkResetFences;
if (!strcmp(funcName, "vkDestroySemaphore"))
- return (PFN_vkVoidFunction) vkDestroySemaphore;
+ return (PFN_vkVoidFunction)vkDestroySemaphore;
if (!strcmp(funcName, "vkDestroyEvent"))
- return (PFN_vkVoidFunction) vkDestroyEvent;
+ return (PFN_vkVoidFunction)vkDestroyEvent;
if (!strcmp(funcName, "vkDestroyQueryPool"))
- return (PFN_vkVoidFunction) vkDestroyQueryPool;
+ return (PFN_vkVoidFunction)vkDestroyQueryPool;
if (!strcmp(funcName, "vkDestroyBuffer"))
- return (PFN_vkVoidFunction) vkDestroyBuffer;
+ return (PFN_vkVoidFunction)vkDestroyBuffer;
if (!strcmp(funcName, "vkDestroyBufferView"))
- return (PFN_vkVoidFunction) vkDestroyBufferView;
+ return (PFN_vkVoidFunction)vkDestroyBufferView;
if (!strcmp(funcName, "vkDestroyImage"))
- return (PFN_vkVoidFunction) vkDestroyImage;
+ return (PFN_vkVoidFunction)vkDestroyImage;
if (!strcmp(funcName, "vkDestroyImageView"))
- return (PFN_vkVoidFunction) vkDestroyImageView;
+ return (PFN_vkVoidFunction)vkDestroyImageView;
if (!strcmp(funcName, "vkDestroyShaderModule"))
- return (PFN_vkVoidFunction) vkDestroyShaderModule;
+ return (PFN_vkVoidFunction)vkDestroyShaderModule;
if (!strcmp(funcName, "vkDestroyPipeline"))
- return (PFN_vkVoidFunction) vkDestroyPipeline;
+ return (PFN_vkVoidFunction)vkDestroyPipeline;
if (!strcmp(funcName, "vkDestroyPipelineLayout"))
- return (PFN_vkVoidFunction) vkDestroyPipelineLayout;
+ return (PFN_vkVoidFunction)vkDestroyPipelineLayout;
if (!strcmp(funcName, "vkDestroySampler"))
- return (PFN_vkVoidFunction) vkDestroySampler;
+ return (PFN_vkVoidFunction)vkDestroySampler;
if (!strcmp(funcName, "vkDestroyDescriptorSetLayout"))
- return (PFN_vkVoidFunction) vkDestroyDescriptorSetLayout;
+ return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout;
if (!strcmp(funcName, "vkDestroyDescriptorPool"))
- return (PFN_vkVoidFunction) vkDestroyDescriptorPool;
+ return (PFN_vkVoidFunction)vkDestroyDescriptorPool;
if (!strcmp(funcName, "vkDestroyFramebuffer"))
- return (PFN_vkVoidFunction) vkDestroyFramebuffer;
+ return (PFN_vkVoidFunction)vkDestroyFramebuffer;
if (!strcmp(funcName, "vkDestroyRenderPass"))
- return (PFN_vkVoidFunction) vkDestroyRenderPass;
+ return (PFN_vkVoidFunction)vkDestroyRenderPass;
if (!strcmp(funcName, "vkCreateBuffer"))
- return (PFN_vkVoidFunction) vkCreateBuffer;
+ return (PFN_vkVoidFunction)vkCreateBuffer;
if (!strcmp(funcName, "vkCreateBufferView"))
- return (PFN_vkVoidFunction) vkCreateBufferView;
+ return (PFN_vkVoidFunction)vkCreateBufferView;
if (!strcmp(funcName, "vkCreateImage"))
- return (PFN_vkVoidFunction) vkCreateImage;
+ return (PFN_vkVoidFunction)vkCreateImage;
if (!strcmp(funcName, "vkCreateImageView"))
- return (PFN_vkVoidFunction) vkCreateImageView;
+ return (PFN_vkVoidFunction)vkCreateImageView;
if (!strcmp(funcName, "vkCreateFence"))
- return (PFN_vkVoidFunction) vkCreateFence;
+ return (PFN_vkVoidFunction)vkCreateFence;
if (!strcmp(funcName, "CreatePipelineCache"))
- return (PFN_vkVoidFunction) vkCreatePipelineCache;
+ return (PFN_vkVoidFunction)vkCreatePipelineCache;
if (!strcmp(funcName, "DestroyPipelineCache"))
- return (PFN_vkVoidFunction) vkDestroyPipelineCache;
+ return (PFN_vkVoidFunction)vkDestroyPipelineCache;
if (!strcmp(funcName, "GetPipelineCacheData"))
- return (PFN_vkVoidFunction) vkGetPipelineCacheData;
+ return (PFN_vkVoidFunction)vkGetPipelineCacheData;
if (!strcmp(funcName, "MergePipelineCaches"))
- return (PFN_vkVoidFunction) vkMergePipelineCaches;
+ return (PFN_vkVoidFunction)vkMergePipelineCaches;
if (!strcmp(funcName, "vkCreateGraphicsPipelines"))
- return (PFN_vkVoidFunction) vkCreateGraphicsPipelines;
+ return (PFN_vkVoidFunction)vkCreateGraphicsPipelines;
if (!strcmp(funcName, "vkCreateComputePipelines"))
- return (PFN_vkVoidFunction) vkCreateComputePipelines;
+ return (PFN_vkVoidFunction)vkCreateComputePipelines;
if (!strcmp(funcName, "vkCreateSampler"))
- return (PFN_vkVoidFunction) vkCreateSampler;
+ return (PFN_vkVoidFunction)vkCreateSampler;
if (!strcmp(funcName, "vkCreateDescriptorSetLayout"))
- return (PFN_vkVoidFunction) vkCreateDescriptorSetLayout;
+ return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout;
if (!strcmp(funcName, "vkCreatePipelineLayout"))
- return (PFN_vkVoidFunction) vkCreatePipelineLayout;
+ return (PFN_vkVoidFunction)vkCreatePipelineLayout;
if (!strcmp(funcName, "vkCreateDescriptorPool"))
- return (PFN_vkVoidFunction) vkCreateDescriptorPool;
+ return (PFN_vkVoidFunction)vkCreateDescriptorPool;
if (!strcmp(funcName, "vkResetDescriptorPool"))
- return (PFN_vkVoidFunction) vkResetDescriptorPool;
+ return (PFN_vkVoidFunction)vkResetDescriptorPool;
if (!strcmp(funcName, "vkAllocateDescriptorSets"))
- return (PFN_vkVoidFunction) vkAllocateDescriptorSets;
+ return (PFN_vkVoidFunction)vkAllocateDescriptorSets;
if (!strcmp(funcName, "vkFreeDescriptorSets"))
- return (PFN_vkVoidFunction) vkFreeDescriptorSets;
+ return (PFN_vkVoidFunction)vkFreeDescriptorSets;
if (!strcmp(funcName, "vkUpdateDescriptorSets"))
- return (PFN_vkVoidFunction) vkUpdateDescriptorSets;
+ return (PFN_vkVoidFunction)vkUpdateDescriptorSets;
if (!strcmp(funcName, "vkCreateCommandPool"))
- return (PFN_vkVoidFunction) vkCreateCommandPool;
+ return (PFN_vkVoidFunction)vkCreateCommandPool;
if (!strcmp(funcName, "vkDestroyCommandPool"))
- return (PFN_vkVoidFunction) vkDestroyCommandPool;
+ return (PFN_vkVoidFunction)vkDestroyCommandPool;
if (!strcmp(funcName, "vkResetCommandPool"))
- return (PFN_vkVoidFunction) vkResetCommandPool;
+ return (PFN_vkVoidFunction)vkResetCommandPool;
if (!strcmp(funcName, "vkCreateQueryPool"))
return (PFN_vkVoidFunction)vkCreateQueryPool;
if (!strcmp(funcName, "vkAllocateCommandBuffers"))
- return (PFN_vkVoidFunction) vkAllocateCommandBuffers;
+ return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
if (!strcmp(funcName, "vkFreeCommandBuffers"))
- return (PFN_vkVoidFunction) vkFreeCommandBuffers;
+ return (PFN_vkVoidFunction)vkFreeCommandBuffers;
if (!strcmp(funcName, "vkBeginCommandBuffer"))
- return (PFN_vkVoidFunction) vkBeginCommandBuffer;
+ return (PFN_vkVoidFunction)vkBeginCommandBuffer;
if (!strcmp(funcName, "vkEndCommandBuffer"))
- return (PFN_vkVoidFunction) vkEndCommandBuffer;
+ return (PFN_vkVoidFunction)vkEndCommandBuffer;
if (!strcmp(funcName, "vkResetCommandBuffer"))
- return (PFN_vkVoidFunction) vkResetCommandBuffer;
+ return (PFN_vkVoidFunction)vkResetCommandBuffer;
if (!strcmp(funcName, "vkCmdBindPipeline"))
- return (PFN_vkVoidFunction) vkCmdBindPipeline;
+ return (PFN_vkVoidFunction)vkCmdBindPipeline;
if (!strcmp(funcName, "vkCmdSetViewport"))
- return (PFN_vkVoidFunction) vkCmdSetViewport;
+ return (PFN_vkVoidFunction)vkCmdSetViewport;
if (!strcmp(funcName, "vkCmdSetScissor"))
- return (PFN_vkVoidFunction) vkCmdSetScissor;
+ return (PFN_vkVoidFunction)vkCmdSetScissor;
if (!strcmp(funcName, "vkCmdSetLineWidth"))
- return (PFN_vkVoidFunction) vkCmdSetLineWidth;
+ return (PFN_vkVoidFunction)vkCmdSetLineWidth;
if (!strcmp(funcName, "vkCmdSetDepthBias"))
- return (PFN_vkVoidFunction) vkCmdSetDepthBias;
+ return (PFN_vkVoidFunction)vkCmdSetDepthBias;
if (!strcmp(funcName, "vkCmdSetBlendConstants"))
- return (PFN_vkVoidFunction) vkCmdSetBlendConstants;
+ return (PFN_vkVoidFunction)vkCmdSetBlendConstants;
if (!strcmp(funcName, "vkCmdSetDepthBounds"))
- return (PFN_vkVoidFunction) vkCmdSetDepthBounds;
+ return (PFN_vkVoidFunction)vkCmdSetDepthBounds;
if (!strcmp(funcName, "vkCmdSetStencilCompareMask"))
- return (PFN_vkVoidFunction) vkCmdSetStencilCompareMask;
+ return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask;
if (!strcmp(funcName, "vkCmdSetStencilWriteMask"))
- return (PFN_vkVoidFunction) vkCmdSetStencilWriteMask;
+ return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask;
if (!strcmp(funcName, "vkCmdSetStencilReference"))
- return (PFN_vkVoidFunction) vkCmdSetStencilReference;
+ return (PFN_vkVoidFunction)vkCmdSetStencilReference;
if (!strcmp(funcName, "vkCmdBindDescriptorSets"))
- return (PFN_vkVoidFunction) vkCmdBindDescriptorSets;
+ return (PFN_vkVoidFunction)vkCmdBindDescriptorSets;
if (!strcmp(funcName, "vkCmdBindVertexBuffers"))
- return (PFN_vkVoidFunction) vkCmdBindVertexBuffers;
+ return (PFN_vkVoidFunction)vkCmdBindVertexBuffers;
if (!strcmp(funcName, "vkCmdBindIndexBuffer"))
- return (PFN_vkVoidFunction) vkCmdBindIndexBuffer;
+ return (PFN_vkVoidFunction)vkCmdBindIndexBuffer;
if (!strcmp(funcName, "vkCmdDraw"))
- return (PFN_vkVoidFunction) vkCmdDraw;
+ return (PFN_vkVoidFunction)vkCmdDraw;
if (!strcmp(funcName, "vkCmdDrawIndexed"))
- return (PFN_vkVoidFunction) vkCmdDrawIndexed;
+ return (PFN_vkVoidFunction)vkCmdDrawIndexed;
if (!strcmp(funcName, "vkCmdDrawIndirect"))
- return (PFN_vkVoidFunction) vkCmdDrawIndirect;
+ return (PFN_vkVoidFunction)vkCmdDrawIndirect;
if (!strcmp(funcName, "vkCmdDrawIndexedIndirect"))
- return (PFN_vkVoidFunction) vkCmdDrawIndexedIndirect;
+ return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect;
if (!strcmp(funcName, "vkCmdDispatch"))
- return (PFN_vkVoidFunction) vkCmdDispatch;
+ return (PFN_vkVoidFunction)vkCmdDispatch;
if (!strcmp(funcName, "vkCmdDispatchIndirect"))
- return (PFN_vkVoidFunction) vkCmdDispatchIndirect;
+ return (PFN_vkVoidFunction)vkCmdDispatchIndirect;
if (!strcmp(funcName, "vkCmdCopyBuffer"))
- return (PFN_vkVoidFunction) vkCmdCopyBuffer;
+ return (PFN_vkVoidFunction)vkCmdCopyBuffer;
if (!strcmp(funcName, "vkCmdCopyImage"))
- return (PFN_vkVoidFunction) vkCmdCopyImage;
+ return (PFN_vkVoidFunction)vkCmdCopyImage;
if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
- return (PFN_vkVoidFunction) vkCmdCopyBufferToImage;
+ return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
- return (PFN_vkVoidFunction) vkCmdCopyImageToBuffer;
+ return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
if (!strcmp(funcName, "vkCmdUpdateBuffer"))
- return (PFN_vkVoidFunction) vkCmdUpdateBuffer;
+ return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
if (!strcmp(funcName, "vkCmdFillBuffer"))
- return (PFN_vkVoidFunction) vkCmdFillBuffer;
+ return (PFN_vkVoidFunction)vkCmdFillBuffer;
if (!strcmp(funcName, "vkCmdClearColorImage"))
- return (PFN_vkVoidFunction) vkCmdClearColorImage;
+ return (PFN_vkVoidFunction)vkCmdClearColorImage;
if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
- return (PFN_vkVoidFunction) vkCmdClearDepthStencilImage;
+ return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
if (!strcmp(funcName, "vkCmdClearAttachments"))
- return (PFN_vkVoidFunction) vkCmdClearAttachments;
+ return (PFN_vkVoidFunction)vkCmdClearAttachments;
if (!strcmp(funcName, "vkCmdResolveImage"))
- return (PFN_vkVoidFunction) vkCmdResolveImage;
+ return (PFN_vkVoidFunction)vkCmdResolveImage;
if (!strcmp(funcName, "vkCmdSetEvent"))
- return (PFN_vkVoidFunction) vkCmdSetEvent;
+ return (PFN_vkVoidFunction)vkCmdSetEvent;
if (!strcmp(funcName, "vkCmdResetEvent"))
- return (PFN_vkVoidFunction) vkCmdResetEvent;
+ return (PFN_vkVoidFunction)vkCmdResetEvent;
if (!strcmp(funcName, "vkCmdWaitEvents"))
- return (PFN_vkVoidFunction) vkCmdWaitEvents;
+ return (PFN_vkVoidFunction)vkCmdWaitEvents;
if (!strcmp(funcName, "vkCmdPipelineBarrier"))
- return (PFN_vkVoidFunction) vkCmdPipelineBarrier;
+ return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
if (!strcmp(funcName, "vkCmdBeginQuery"))
- return (PFN_vkVoidFunction) vkCmdBeginQuery;
+ return (PFN_vkVoidFunction)vkCmdBeginQuery;
if (!strcmp(funcName, "vkCmdEndQuery"))
- return (PFN_vkVoidFunction) vkCmdEndQuery;
+ return (PFN_vkVoidFunction)vkCmdEndQuery;
if (!strcmp(funcName, "vkCmdResetQueryPool"))
- return (PFN_vkVoidFunction) vkCmdResetQueryPool;
+ return (PFN_vkVoidFunction)vkCmdResetQueryPool;
if (!strcmp(funcName, "vkCmdPushConstants"))
return (PFN_vkVoidFunction)vkCmdPushConstants;
if (!strcmp(funcName, "vkCmdWriteTimestamp"))
- return (PFN_vkVoidFunction) vkCmdWriteTimestamp;
+ return (PFN_vkVoidFunction)vkCmdWriteTimestamp;
if (!strcmp(funcName, "vkCreateFramebuffer"))
- return (PFN_vkVoidFunction) vkCreateFramebuffer;
+ return (PFN_vkVoidFunction)vkCreateFramebuffer;
if (!strcmp(funcName, "vkCreateShaderModule"))
- return (PFN_vkVoidFunction) vkCreateShaderModule;
+ return (PFN_vkVoidFunction)vkCreateShaderModule;
if (!strcmp(funcName, "vkCreateRenderPass"))
- return (PFN_vkVoidFunction) vkCreateRenderPass;
+ return (PFN_vkVoidFunction)vkCreateRenderPass;
if (!strcmp(funcName, "vkCmdBeginRenderPass"))
- return (PFN_vkVoidFunction) vkCmdBeginRenderPass;
+ return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
if (!strcmp(funcName, "vkCmdNextSubpass"))
- return (PFN_vkVoidFunction) vkCmdNextSubpass;
+ return (PFN_vkVoidFunction)vkCmdNextSubpass;
if (!strcmp(funcName, "vkCmdEndRenderPass"))
- return (PFN_vkVoidFunction) vkCmdEndRenderPass;
+ return (PFN_vkVoidFunction)vkCmdEndRenderPass;
if (!strcmp(funcName, "vkCmdExecuteCommands"))
- return (PFN_vkVoidFunction) vkCmdExecuteCommands;
+ return (PFN_vkVoidFunction)vkCmdExecuteCommands;
if (!strcmp(funcName, "vkSetEvent"))
- return (PFN_vkVoidFunction) vkSetEvent;
+ return (PFN_vkVoidFunction)vkSetEvent;
if (!strcmp(funcName, "vkMapMemory"))
- return (PFN_vkVoidFunction) vkMapMemory;
+ return (PFN_vkVoidFunction)vkMapMemory;
if (!strcmp(funcName, "vkGetQueryPoolResults"))
- return (PFN_vkVoidFunction) vkGetQueryPoolResults;
+ return (PFN_vkVoidFunction)vkGetQueryPoolResults;
if (!strcmp(funcName, "vkBindImageMemory"))
- return (PFN_vkVoidFunction) vkBindImageMemory;
+ return (PFN_vkVoidFunction)vkBindImageMemory;
if (!strcmp(funcName, "vkQueueBindSparse"))
- return (PFN_vkVoidFunction) vkQueueBindSparse;
+ return (PFN_vkVoidFunction)vkQueueBindSparse;
if (!strcmp(funcName, "vkCreateSemaphore"))
- return (PFN_vkVoidFunction) vkCreateSemaphore;
+ return (PFN_vkVoidFunction)vkCreateSemaphore;
if (!strcmp(funcName, "vkCreateEvent"))
- return (PFN_vkVoidFunction) vkCreateEvent;
+ return (PFN_vkVoidFunction)vkCreateEvent;
if (dev == NULL)
return NULL;
@@ -8656,21 +8339,20 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkD
layer_data *dev_data;
dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
- if (dev_data->device_extensions.wsi_enabled)
- {
+ if (dev_data->device_extensions.wsi_enabled) {
if (!strcmp(funcName, "vkCreateSwapchainKHR"))
- return (PFN_vkVoidFunction) vkCreateSwapchainKHR;
+ return (PFN_vkVoidFunction)vkCreateSwapchainKHR;
if (!strcmp(funcName, "vkDestroySwapchainKHR"))
- return (PFN_vkVoidFunction) vkDestroySwapchainKHR;
+ return (PFN_vkVoidFunction)vkDestroySwapchainKHR;
if (!strcmp(funcName, "vkGetSwapchainImagesKHR"))
- return (PFN_vkVoidFunction) vkGetSwapchainImagesKHR;
+ return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR;
if (!strcmp(funcName, "vkAcquireNextImageKHR"))
- return (PFN_vkVoidFunction) vkAcquireNextImageKHR;
+ return (PFN_vkVoidFunction)vkAcquireNextImageKHR;
if (!strcmp(funcName, "vkQueuePresentKHR"))
- return (PFN_vkVoidFunction) vkQueuePresentKHR;
+ return (PFN_vkVoidFunction)vkQueuePresentKHR;
}
- VkLayerDispatchTable* pTable = dev_data->device_dispatch_table;
+ VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
{
if (pTable->GetDeviceProcAddr == NULL)
return NULL;
@@ -8678,39 +8360,38 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkD
}
}
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char* funcName)
-{
+VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
if (!strcmp(funcName, "vkGetInstanceProcAddr"))
- return (PFN_vkVoidFunction) vkGetInstanceProcAddr;
+ return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
if (!strcmp(funcName, "vkGetDeviceProcAddr"))
- return (PFN_vkVoidFunction) vkGetDeviceProcAddr;
+ return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
if (!strcmp(funcName, "vkCreateInstance"))
- return (PFN_vkVoidFunction) vkCreateInstance;
+ return (PFN_vkVoidFunction)vkCreateInstance;
if (!strcmp(funcName, "vkCreateDevice"))
- return (PFN_vkVoidFunction) vkCreateDevice;
+ return (PFN_vkVoidFunction)vkCreateDevice;
if (!strcmp(funcName, "vkDestroyInstance"))
- return (PFN_vkVoidFunction) vkDestroyInstance;
+ return (PFN_vkVoidFunction)vkDestroyInstance;
if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
- return (PFN_vkVoidFunction) vkEnumerateInstanceLayerProperties;
+ return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
- return (PFN_vkVoidFunction) vkEnumerateInstanceExtensionProperties;
+ return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
- return (PFN_vkVoidFunction) vkEnumerateDeviceLayerProperties;
+ return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
- return (PFN_vkVoidFunction) vkEnumerateDeviceExtensionProperties;
+ return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;
if (instance == NULL)
return NULL;
PFN_vkVoidFunction fptr;
- layer_data* my_data;
+ layer_data *my_data;
my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
if (fptr)
return fptr;
- VkLayerInstanceDispatchTable* pTable = my_data->instance_dispatch_table;
+ VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
if (pTable->GetInstanceProcAddr == NULL)
return NULL;
return pTable->GetInstanceProcAddr(instance, funcName);
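The two GetProcAddr functions above resolve a name to a pointer in a fixed order: the layer's own intercepts are checked first, then (where applicable) the WSI entry points or debug-report helpers, and anything unrecognized is forwarded to the next layer through the stored dispatch table, with an early NULL return when no dispatchable handle is available. A minimal standalone sketch of that lookup order follows; the names are hypothetical and this is not the layer's code:

#include <cstdio>
#include <cstring>

using VoidFn = void (*)();

static void intercepted_create_device() { std::puts("layer intercept called"); }

// Stand-in for pTable->GetInstanceProcAddr on the next layer in the chain.
static VoidFn next_layer_lookup(const char *) { return nullptr; }

static VoidFn lookup(const char *name) {
    if (!std::strcmp(name, "vkCreateDevice"))        // 1) the layer's own intercepts win
        return (VoidFn)intercepted_create_device;
    // 2) debug-report / WSI entry points would be consulted here
    return next_layer_lookup(name);                  // 3) otherwise forward down the chain
}

int main() {
    if (VoidFn f = lookup("vkCreateDevice"))
        f();
}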
diff --git a/layers/draw_state.h b/layers/draw_state.h
index 93c84ea51..ec4e90563 100644
--- a/layers/draw_state.h
+++ b/layers/draw_state.h
@@ -37,15 +37,15 @@ using std::vector;
// Draw State ERROR codes
typedef enum _DRAW_STATE_ERROR {
- DRAWSTATE_NONE, // Used for INFO & other non-error messages
- DRAWSTATE_INTERNAL_ERROR, // Error with DrawState internal data structures
- DRAWSTATE_NO_PIPELINE_BOUND, // Unable to identify a bound pipeline
- DRAWSTATE_INVALID_POOL, // Invalid DS pool
- DRAWSTATE_INVALID_SET, // Invalid DS
- DRAWSTATE_INVALID_LAYOUT, // Invalid DS layout
- DRAWSTATE_INVALID_IMAGE_LAYOUT, // Invalid Image layout
- DRAWSTATE_INVALID_PIPELINE, // Invalid Pipeline handle referenced
- DRAWSTATE_INVALID_PIPELINE_LAYOUT, // Invalid PipelineLayout
+ DRAWSTATE_NONE, // Used for INFO & other non-error messages
+ DRAWSTATE_INTERNAL_ERROR, // Error with DrawState internal data structures
+ DRAWSTATE_NO_PIPELINE_BOUND, // Unable to identify a bound pipeline
+ DRAWSTATE_INVALID_POOL, // Invalid DS pool
+ DRAWSTATE_INVALID_SET, // Invalid DS
+ DRAWSTATE_INVALID_LAYOUT, // Invalid DS layout
+ DRAWSTATE_INVALID_IMAGE_LAYOUT, // Invalid Image layout
+ DRAWSTATE_INVALID_PIPELINE, // Invalid Pipeline handle referenced
+ DRAWSTATE_INVALID_PIPELINE_LAYOUT, // Invalid PipelineLayout
DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, // Attempt to create a pipeline
// with invalid state
DRAWSTATE_INVALID_COMMAND_BUFFER, // Invalid CommandBuffer referenced
@@ -55,41 +55,41 @@ typedef enum _DRAW_STATE_ERROR {
DRAWSTATE_INVALID_FENCE, // Invalid Fence
DRAWSTATE_INVALID_SEMAPHORE, // Invalid Semaphore
DRAWSTATE_INVALID_EVENT, // Invalid Event
- DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, // binding in vkCmdBindVertexData() too
- // large for PSO's
- // pVertexBindingDescriptions array
- DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, // binding offset in
- // vkCmdBindIndexBuffer() out of
- // alignment based on indexType
+ DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, // binding in vkCmdBindVertexData() too
+ // large for PSO's
+ // pVertexBindingDescriptions array
+ DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, // binding offset in
+ // vkCmdBindIndexBuffer() out of
+ // alignment based on indexType
// DRAWSTATE_MISSING_DOT_PROGRAM, // No "dot" program in order
// to generate png image
- DRAWSTATE_OUT_OF_MEMORY, // malloc failed
- DRAWSTATE_INVALID_DESCRIPTOR_SET, // Descriptor Set handle is unknown
- DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, // Type in layout vs. update are not the
- // same
- DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, // StageFlags in layout are not
- // the same throughout a single
- // VkWriteDescriptorSet update
- DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, // Descriptors set for update out
- // of bounds for corresponding
- // layout section
- DRAWSTATE_DESCRIPTOR_POOL_EMPTY, // Attempt to allocate descriptor from a
- // pool with no more descriptors of that
- // type available
- DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, // Invalid to call
- // vkFreeDescriptorSets on Sets
- // allocated from a NON_FREE Pool
- DRAWSTATE_INVALID_UPDATE_INDEX, // Index of requested update is invalid for
- // specified descriptors set
- DRAWSTATE_INVALID_UPDATE_STRUCT, // Struct in DS Update tree is of invalid
- // type
- DRAWSTATE_NUM_SAMPLES_MISMATCH, // Number of samples in bound PSO does not
- // match number in FB of current RenderPass
- DRAWSTATE_NO_END_COMMAND_BUFFER, // Must call vkEndCommandBuffer() before
- // QueueSubmit on that commandBuffer
- DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, // Binding cmds or calling End on CB that
- // never had vkBeginCommandBuffer()
- // called on it
+ DRAWSTATE_OUT_OF_MEMORY, // malloc failed
+ DRAWSTATE_INVALID_DESCRIPTOR_SET, // Descriptor Set handle is unknown
+ DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, // Type in layout vs. update are not the
+ // same
+ DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, // StageFlags in layout are not
+ // the same throughout a single
+ // VkWriteDescriptorSet update
+ DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, // Descriptors set for update out
+ // of bounds for corresponding
+ // layout section
+ DRAWSTATE_DESCRIPTOR_POOL_EMPTY, // Attempt to allocate descriptor from a
+ // pool with no more descriptors of that
+ // type available
+ DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, // Invalid to call
+ // vkFreeDescriptorSets on Sets
+ // allocated from a NON_FREE Pool
+ DRAWSTATE_INVALID_UPDATE_INDEX, // Index of requested update is invalid for
+ // specified descriptors set
+ DRAWSTATE_INVALID_UPDATE_STRUCT, // Struct in DS Update tree is of invalid
+ // type
+ DRAWSTATE_NUM_SAMPLES_MISMATCH, // Number of samples in bound PSO does not
+ // match number in FB of current RenderPass
+ DRAWSTATE_NO_END_COMMAND_BUFFER, // Must call vkEndCommandBuffer() before
+ // QueueSubmit on that commandBuffer
+ DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, // Binding cmds or calling End on CB that
+ // never had vkBeginCommandBuffer()
+ // called on it
DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, // Cmd Buffer created with
// VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT
// flag is submitted
@@ -97,56 +97,56 @@ typedef enum _DRAW_STATE_ERROR {
DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, // vkCmdExecuteCommands() called
// with a primary commandBuffer
// in pCommandBuffers array
- DRAWSTATE_VIEWPORT_NOT_BOUND, // Draw submitted with no viewport state bound
- DRAWSTATE_SCISSOR_NOT_BOUND, // Draw submitted with no scissor state bound
- DRAWSTATE_LINE_WIDTH_NOT_BOUND, // Draw submitted with no line width state
- // bound
- DRAWSTATE_DEPTH_BIAS_NOT_BOUND, // Draw submitted with no depth bias state
- // bound
- DRAWSTATE_BLEND_NOT_BOUND, // Draw submitted with no blend state bound when
- // color write enabled
- DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, // Draw submitted with no depth bounds
- // state bound when depth enabled
- DRAWSTATE_STENCIL_NOT_BOUND, // Draw submitted with no stencil state bound
- // when stencil enabled
- DRAWSTATE_INDEX_BUFFER_NOT_BOUND, // Draw submitted with no depth-stencil
- // state bound when depth write enabled
- DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, // Draw submitted PSO Pipeline
- // layout that's not compatible
- // with layout from
- // BindDescriptorSets
- DRAWSTATE_RENDERPASS_INCOMPATIBLE, // Incompatible renderpasses between
- // secondary cmdBuffer and primary
- // cmdBuffer or framebuffer
- DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, // Incompatible framebuffer between
- // secondary cmdBuffer and active
- // renderPass
- DRAWSTATE_INVALID_RENDERPASS, // Use of a NULL or otherwise invalid
- // RenderPass object
- DRAWSTATE_INVALID_RENDERPASS_CMD, // Invalid cmd submitted while a
- // RenderPass is active
- DRAWSTATE_NO_ACTIVE_RENDERPASS, // Rendering cmd submitted without an active
- // RenderPass
- DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, // DescriptorSet bound but it was
- // never updated. This is a warning
- // code.
- DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, // DescriptorSet used by pipeline at
- // draw time is not bound, or has been
- // disturbed (which would have flagged
- // previous warning)
- DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, // DescriptorSets bound with
- // different number of dynamic
- // descriptors that were included in
- // dynamicOffsetCount
- DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, // Clear cmd issued before any Draw in
- // CommandBuffer, should use RenderPass Ops
- // instead
- DRAWSTATE_BEGIN_CB_INVALID_STATE, // CB state at Begin call is bad. Can be
- // Primary/Secondary CB created with
- // mismatched FB/RP information or CB in
- // RECORDING state
- DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, // CmdBuffer is being used in
- // violation of
+ DRAWSTATE_VIEWPORT_NOT_BOUND, // Draw submitted with no viewport state bound
+ DRAWSTATE_SCISSOR_NOT_BOUND, // Draw submitted with no scissor state bound
+ DRAWSTATE_LINE_WIDTH_NOT_BOUND, // Draw submitted with no line width state
+ // bound
+ DRAWSTATE_DEPTH_BIAS_NOT_BOUND, // Draw submitted with no depth bias state
+ // bound
+ DRAWSTATE_BLEND_NOT_BOUND, // Draw submitted with no blend state bound when
+ // color write enabled
+ DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, // Draw submitted with no depth bounds
+ // state bound when depth enabled
+ DRAWSTATE_STENCIL_NOT_BOUND, // Draw submitted with no stencil state bound
+ // when stencil enabled
+ DRAWSTATE_INDEX_BUFFER_NOT_BOUND, // Draw submitted with no depth-stencil
+ // state bound when depth write enabled
+ DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, // Draw submitted PSO Pipeline
+ // layout that's not compatible
+ // with layout from
+ // BindDescriptorSets
+ DRAWSTATE_RENDERPASS_INCOMPATIBLE, // Incompatible renderpasses between
+ // secondary cmdBuffer and primary
+ // cmdBuffer or framebuffer
+ DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, // Incompatible framebuffer between
+ // secondary cmdBuffer and active
+ // renderPass
+ DRAWSTATE_INVALID_RENDERPASS, // Use of a NULL or otherwise invalid
+ // RenderPass object
+ DRAWSTATE_INVALID_RENDERPASS_CMD, // Invalid cmd submitted while a
+ // RenderPass is active
+ DRAWSTATE_NO_ACTIVE_RENDERPASS, // Rendering cmd submitted without an active
+ // RenderPass
+ DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, // DescriptorSet bound but it was
+ // never updated. This is a warning
+ // code.
+ DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, // DescriptorSet used by pipeline at
+ // draw time is not bound, or has been
+ // disturbed (which would have flagged
+ // previous warning)
+ DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, // DescriptorSets bound with
+ // different number of dynamic
+ // descriptors that were included in
+ // dynamicOffsetCount
+ DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, // Clear cmd issued before any Draw in
+ // CommandBuffer, should use RenderPass Ops
+ // instead
+ DRAWSTATE_BEGIN_CB_INVALID_STATE, // CB state at Begin call is bad. Can be
+ // Primary/Secondary CB created with
+ // mismatched FB/RP information or CB in
+ // RECORDING state
+ DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, // CmdBuffer is being used in
+ // violation of
// VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT
// rules (i.e. simultaneous use w/o
// that bit set)
@@ -155,148 +155,127 @@ typedef enum _DRAW_STATE_ERROR {
// was allocated from Pool w/o
// VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT
// bit set
- DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, // Count for viewports and scissors
- // mismatch and/or state doesn't match
- // count
- DRAWSTATE_INVALID_IMAGE_ASPECT, // Image aspect is invalid for the current
- // operation
- DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, // Attachment reference must be
- // present in active subpass
- DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, // A Descriptor of *_SAMPLER type is
- // being updated with an invalid or bad
- // Sampler
+ DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, // Count for viewports and scissors
+ // mismatch and/or state doesn't match
+ // count
+ DRAWSTATE_INVALID_IMAGE_ASPECT, // Image aspect is invalid for the current
+ // operation
+ DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, // Attachment reference must be
+ // present in active subpass
+ DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, // A Descriptor of *_SAMPLER type is
+ // being updated with an invalid or bad
+ // Sampler
DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, // Descriptors of
// *COMBINED_IMAGE_SAMPLER
// type are being updated
// where some, but not all,
// of the updates use
// immutable samplers
- DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, // A Descriptor of *_IMAGE or
- // *_ATTACHMENT type is being updated
- // with an invalid or bad ImageView
- DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, // A Descriptor of *_TEXEL_BUFFER
- // type is being updated with an
- // invalid or bad BufferView
- DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, // A Descriptor of
+ DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, // A Descriptor of *_IMAGE or
+ // *_ATTACHMENT type is being updated
+ // with an invalid or bad ImageView
+ DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, // A Descriptor of *_TEXEL_BUFFER
+ // type is being updated with an
+ // invalid or bad BufferView
+ DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, // A Descriptor of
// *_[UNIFORM|STORAGE]_BUFFER_[DYNAMIC]
// type is being updated with an
// invalid or bad BufferView
- DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, // At draw time the dynamic offset
- // combined with buffer offset and range
- // oversteps size of buffer
- DRAWSTATE_DOUBLE_DESTROY, // Destroying an object twice
- DRAWSTATE_OBJECT_INUSE, // Destroying or modifying an object in use by a
- // command buffer
- DRAWSTATE_QUEUE_FORWARD_PROGRESS, // Queue cannot guarantee forward progress
+ DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, // At draw time the dynamic offset
+ // combined with buffer offset and range
+ // oversteps size of buffer
+ DRAWSTATE_DOUBLE_DESTROY, // Destroying an object twice
+ DRAWSTATE_OBJECT_INUSE, // Destroying or modifying an object in use by a
+ // command buffer
+ DRAWSTATE_QUEUE_FORWARD_PROGRESS, // Queue cannot guarantee forward progress
DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, // Dynamic Uniform Buffer Offsets
// violate device limit
DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, // Dynamic Storage Buffer Offsets
// violate device limit
- DRAWSTATE_INDEPENDENT_BLEND, // If independent blending is not enabled, all
- // elements of pAttachmentsMustBeIdentical
- DRAWSTATE_DISABLED_LOGIC_OP, // If logic operations is not enabled, logicOpEnable
- // must be VK_FALSE
- DRAWSTATE_INVALID_LOGIC_OP, // If logicOpEnable is VK_TRUE, logicOp must
- // must be a valid VkLogicOp value
+ DRAWSTATE_INDEPENDENT_BLEND, // If independent blending is not enabled, all
+ // elements of pAttachmentsMustBeIdentical
+ DRAWSTATE_DISABLED_LOGIC_OP, // If logic operations is not enabled, logicOpEnable
+ // must be VK_FALSE
+ DRAWSTATE_INVALID_LOGIC_OP, // If logicOpEnable is VK_TRUE, logicOp must
+ // must be a valid VkLogicOp value
DRAWSTATE_INVALID_QUEUE_INDEX, // Specified queue index exceeds number
// of queried queue families
- DRAWSTATE_PUSH_CONSTANTS_ERROR, // Push constants exceed maxPushConstantSize
+ DRAWSTATE_PUSH_CONSTANTS_ERROR, // Push constants exceed maxPushConstantSize
} DRAW_STATE_ERROR;
typedef enum _SHADER_CHECKER_ERROR {
SHADER_CHECKER_NONE,
- SHADER_CHECKER_FS_MIXED_BROADCAST, /* FS writes broadcast output AND custom outputs -- DEFUNCT */
- SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, /* Type mismatch between shader stages or shader and pipeline */
- SHADER_CHECKER_OUTPUT_NOT_CONSUMED, /* Entry appears in output interface, but missing in input */
- SHADER_CHECKER_INPUT_NOT_PRODUCED, /* Entry appears in input interface, but missing in output */
- SHADER_CHECKER_NON_SPIRV_SHADER, /* Shader image is not SPIR-V */
- SHADER_CHECKER_INCONSISTENT_SPIRV, /* General inconsistency within a SPIR-V module */
- SHADER_CHECKER_UNKNOWN_STAGE, /* Stage is not supported by analysis */
- SHADER_CHECKER_INCONSISTENT_VI, /* VI state contains conflicting binding or attrib descriptions */
- SHADER_CHECKER_MISSING_DESCRIPTOR, /* Shader attempts to use a descriptor binding not declared in the layout */
- SHADER_CHECKER_BAD_SPECIALIZATION, /* Specialization map entry points outside specialization data block */
- SHADER_CHECKER_MISSING_ENTRYPOINT, /* Shader module does not contain the requested entrypoint */
- SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, /* Push constant variable is not in a push constant range */
+ SHADER_CHECKER_FS_MIXED_BROADCAST, /* FS writes broadcast output AND custom outputs -- DEFUNCT */
+ SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, /* Type mismatch between shader stages or shader and pipeline */
+ SHADER_CHECKER_OUTPUT_NOT_CONSUMED, /* Entry appears in output interface, but missing in input */
+ SHADER_CHECKER_INPUT_NOT_PRODUCED, /* Entry appears in input interface, but missing in output */
+ SHADER_CHECKER_NON_SPIRV_SHADER, /* Shader image is not SPIR-V */
+ SHADER_CHECKER_INCONSISTENT_SPIRV, /* General inconsistency within a SPIR-V module */
+ SHADER_CHECKER_UNKNOWN_STAGE, /* Stage is not supported by analysis */
+ SHADER_CHECKER_INCONSISTENT_VI, /* VI state contains conflicting binding or attrib descriptions */
+ SHADER_CHECKER_MISSING_DESCRIPTOR, /* Shader attempts to use a descriptor binding not declared in the layout */
+ SHADER_CHECKER_BAD_SPECIALIZATION, /* Specialization map entry points outside specialization data block */
+ SHADER_CHECKER_MISSING_ENTRYPOINT, /* Shader module does not contain the requested entrypoint */
+ SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, /* Push constant variable is not in a push constant range */
SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, /* Push constant range exists, but not accessible from stage */
} SHADER_CHECKER_ERROR;
-typedef enum _DRAW_TYPE
-{
- DRAW = 0,
- DRAW_INDEXED = 1,
- DRAW_INDIRECT = 2,
+typedef enum _DRAW_TYPE {
+ DRAW = 0,
+ DRAW_INDEXED = 1,
+ DRAW_INDIRECT = 2,
DRAW_INDEXED_INDIRECT = 3,
- DRAW_BEGIN_RANGE = DRAW,
- DRAW_END_RANGE = DRAW_INDEXED_INDIRECT,
- NUM_DRAW_TYPES = (DRAW_END_RANGE - DRAW_BEGIN_RANGE + 1),
+ DRAW_BEGIN_RANGE = DRAW,
+ DRAW_END_RANGE = DRAW_INDEXED_INDIRECT,
+ NUM_DRAW_TYPES = (DRAW_END_RANGE - DRAW_BEGIN_RANGE + 1),
} DRAW_TYPE;
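The *_BEGIN_RANGE/*_END_RANGE/NUM_* members of DRAW_TYPE follow the Vulkan enum convention and let the layer size and walk a dense per-type counter array (see drawCount[NUM_DRAW_TYPES] in GLOBAL_CB_NODE further down). A small standalone sketch of that use, with a local copy of the enum rather than the layer's own:

#include <cstdint>
#include <cstdio>

enum DRAW_TYPE_SKETCH {
    S_DRAW = 0,
    S_DRAW_INDEXED = 1,
    S_DRAW_INDIRECT = 2,
    S_DRAW_INDEXED_INDIRECT = 3,
    S_DRAW_BEGIN_RANGE = S_DRAW,
    S_DRAW_END_RANGE = S_DRAW_INDEXED_INDIRECT,
    S_NUM_DRAW_TYPES = (S_DRAW_END_RANGE - S_DRAW_BEGIN_RANGE + 1),
};

int main() {
    uint64_t drawCount[S_NUM_DRAW_TYPES] = {};  // mirrors GLOBAL_CB_NODE::drawCount
    drawCount[S_DRAW_INDEXED]++;                // e.g. a vkCmdDrawIndexed was recorded
    for (int t = S_DRAW_BEGIN_RANGE; t <= S_DRAW_END_RANGE; ++t)
        std::printf("draw type %d: %llu draws\n", t, (unsigned long long)drawCount[t]);
    return 0;
}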
typedef struct _SHADER_DS_MAPPING {
uint32_t slotCount;
- VkDescriptorSetLayoutCreateInfo* pShaderMappingSlot;
+ VkDescriptorSetLayoutCreateInfo *pShaderMappingSlot;
} SHADER_DS_MAPPING;
typedef struct _GENERIC_HEADER {
VkStructureType sType;
- const void* pNext;
+ const void *pNext;
} GENERIC_HEADER;
typedef struct _PIPELINE_NODE {
- VkPipeline pipeline;
- VkGraphicsPipelineCreateInfo graphicsPipelineCI;
- VkPipelineVertexInputStateCreateInfo vertexInputCI;
- VkPipelineInputAssemblyStateCreateInfo iaStateCI;
- VkPipelineTessellationStateCreateInfo tessStateCI;
- VkPipelineViewportStateCreateInfo vpStateCI;
- VkPipelineRasterizationStateCreateInfo rsStateCI;
- VkPipelineMultisampleStateCreateInfo msStateCI;
- VkPipelineColorBlendStateCreateInfo cbStateCI;
- VkPipelineDepthStencilStateCreateInfo dsStateCI;
- VkPipelineDynamicStateCreateInfo dynStateCI;
- VkPipelineShaderStageCreateInfo vsCI;
- VkPipelineShaderStageCreateInfo tcsCI;
- VkPipelineShaderStageCreateInfo tesCI;
- VkPipelineShaderStageCreateInfo gsCI;
- VkPipelineShaderStageCreateInfo fsCI;
+ VkPipeline pipeline;
+ VkGraphicsPipelineCreateInfo graphicsPipelineCI;
+ VkPipelineVertexInputStateCreateInfo vertexInputCI;
+ VkPipelineInputAssemblyStateCreateInfo iaStateCI;
+ VkPipelineTessellationStateCreateInfo tessStateCI;
+ VkPipelineViewportStateCreateInfo vpStateCI;
+ VkPipelineRasterizationStateCreateInfo rsStateCI;
+ VkPipelineMultisampleStateCreateInfo msStateCI;
+ VkPipelineColorBlendStateCreateInfo cbStateCI;
+ VkPipelineDepthStencilStateCreateInfo dsStateCI;
+ VkPipelineDynamicStateCreateInfo dynStateCI;
+ VkPipelineShaderStageCreateInfo vsCI;
+ VkPipelineShaderStageCreateInfo tcsCI;
+ VkPipelineShaderStageCreateInfo tesCI;
+ VkPipelineShaderStageCreateInfo gsCI;
+ VkPipelineShaderStageCreateInfo fsCI;
// Compute shader is include in VkComputePipelineCreateInfo
- VkComputePipelineCreateInfo computePipelineCI;
+ VkComputePipelineCreateInfo computePipelineCI;
// Flag of which shader stages are active for this pipeline
- uint32_t active_shaders;
+ uint32_t active_shaders;
// Capture which sets are actually used by the shaders of this pipeline
- std::set<unsigned> active_sets;
+ std::set<unsigned> active_sets;
// Vtx input info (if any)
- uint32_t vtxBindingCount; // number of bindings
- VkVertexInputBindingDescription* pVertexBindingDescriptions;
- uint32_t vtxAttributeCount; // number of attributes
- VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
- uint32_t attachmentCount; // number of CB attachments
- VkPipelineColorBlendAttachmentState* pAttachments;
+ uint32_t vtxBindingCount; // number of bindings
+ VkVertexInputBindingDescription *pVertexBindingDescriptions;
+ uint32_t vtxAttributeCount; // number of attributes
+ VkVertexInputAttributeDescription *pVertexAttributeDescriptions;
+ uint32_t attachmentCount; // number of CB attachments
+ VkPipelineColorBlendAttachmentState *pAttachments;
// Default constructor
- _PIPELINE_NODE():pipeline{},
- graphicsPipelineCI{},
- vertexInputCI{},
- iaStateCI{},
- tessStateCI{},
- vpStateCI{},
- rsStateCI{},
- msStateCI{},
- cbStateCI{},
- dsStateCI{},
- dynStateCI{},
- vsCI{},
- tcsCI{},
- tesCI{},
- gsCI{},
- fsCI{},
- computePipelineCI{},
- active_shaders(0),
- vtxBindingCount(0),
- pVertexBindingDescriptions(0),
- vtxAttributeCount(0),
- pVertexAttributeDescriptions(0),
- attachmentCount(0),
- pAttachments(0)
- {};
+ _PIPELINE_NODE()
+ : pipeline{}, graphicsPipelineCI{}, vertexInputCI{}, iaStateCI{}, tessStateCI{}, vpStateCI{}, rsStateCI{}, msStateCI{},
+ cbStateCI{}, dsStateCI{}, dynStateCI{}, vsCI{}, tcsCI{}, tesCI{}, gsCI{}, fsCI{}, computePipelineCI{}, active_shaders(0),
+ vtxBindingCount(0), pVertexBindingDescriptions(0), vtxAttributeCount(0), pVertexAttributeDescriptions(0),
+ attachmentCount(0), pAttachments(0){};
} PIPELINE_NODE;
class BASE_NODE {
@@ -305,10 +284,10 @@ class BASE_NODE {
};
typedef struct _SAMPLER_NODE {
- VkSampler sampler;
+ VkSampler sampler;
VkSamplerCreateInfo createInfo;
- _SAMPLER_NODE(const VkSampler* ps, const VkSamplerCreateInfo* pci) : sampler(*ps), createInfo(*pci) {};
+ _SAMPLER_NODE(const VkSampler *ps, const VkSamplerCreateInfo *pci) : sampler(*ps), createInfo(*pci){};
} SAMPLER_NODE;
class IMAGE_NODE : public BASE_NODE {
@@ -321,7 +300,7 @@ class IMAGE_NODE : public BASE_NODE {
typedef struct _IMAGE_LAYOUT_NODE {
VkImageLayout layout;
- VkFormat format;
+ VkFormat format;
} IMAGE_LAYOUT_NODE;
typedef struct _IMAGE_CMD_BUF_LAYOUT_NODE {
@@ -343,13 +322,12 @@ struct DAGNode {
};
struct RENDER_PASS_NODE {
- VkRenderPassCreateInfo const* pCreateInfo;
+ VkRenderPassCreateInfo const *pCreateInfo;
std::vector<bool> hasSelfDependency;
std::vector<DAGNode> subpassToNode;
vector<std::vector<VkFormat>> subpassColorFormats;
- RENDER_PASS_NODE(VkRenderPassCreateInfo const *pCreateInfo) : pCreateInfo(pCreateInfo)
- {
+ RENDER_PASS_NODE(VkRenderPassCreateInfo const *pCreateInfo) : pCreateInfo(pCreateInfo) {
uint32_t i;
subpassColorFormats.reserve(pCreateInfo->subpassCount);
@@ -387,9 +365,7 @@ class FENCE_NODE : public BASE_NODE {
VkFence priorFence;
// Default constructor
- FENCE_NODE() : queue(NULL), needsSignaled(VK_FALSE),
- priorFence(static_cast<VkFence>(NULL)) {};
-
+ FENCE_NODE() : queue(NULL), needsSignaled(VK_FALSE), priorFence(static_cast<VkFence>(NULL)){};
};
class SEMAPHORE_NODE : public BASE_NODE {
@@ -427,75 +403,68 @@ class FRAMEBUFFER_NODE {
// Descriptor Data structures
// Layout Node has the core layout data
typedef struct _LAYOUT_NODE {
- VkDescriptorSetLayout layout;
+ VkDescriptorSetLayout layout;
VkDescriptorSetLayoutCreateInfo createInfo;
- uint32_t startIndex; // 1st index of this layout
- uint32_t endIndex; // last index of this layout
- uint32_t dynamicDescriptorCount; // Total count of dynamic descriptors used
- // by this layout
- vector<VkDescriptorType> descriptorTypes; // Type per descriptor in this
- // layout to verify correct
- // updates
- vector<VkShaderStageFlags> stageFlags; // stageFlags per descriptor in this
- // layout to verify correct updates
+ uint32_t startIndex; // 1st index of this layout
+ uint32_t endIndex; // last index of this layout
+ uint32_t dynamicDescriptorCount; // Total count of dynamic descriptors used
+ // by this layout
+ vector<VkDescriptorType> descriptorTypes; // Type per descriptor in this
+ // layout to verify correct
+ // updates
+ vector<VkShaderStageFlags> stageFlags; // stageFlags per descriptor in this
+ // layout to verify correct updates
unordered_map<uint32_t, uint32_t> bindingToIndexMap; // map set binding # to
// pBindings index
// Default constructor
- _LAYOUT_NODE():layout{},
- createInfo{},
- startIndex(0),
- endIndex(0),
- dynamicDescriptorCount(0)
- {};
+ _LAYOUT_NODE() : layout{}, createInfo{}, startIndex(0), endIndex(0), dynamicDescriptorCount(0){};
} LAYOUT_NODE;
// Store layouts and pushconstants for PipelineLayout
struct PIPELINE_LAYOUT_NODE {
- vector<VkDescriptorSetLayout> descriptorSetLayouts;
- vector<VkPushConstantRange> pushConstantRanges;
+ vector<VkDescriptorSetLayout> descriptorSetLayouts;
+ vector<VkPushConstantRange> pushConstantRanges;
};
class SET_NODE : public BASE_NODE {
public:
using BASE_NODE::in_use;
- VkDescriptorSet set;
- VkDescriptorPool pool;
+ VkDescriptorSet set;
+ VkDescriptorPool pool;
// Head of LL of all Update structs for this set
- GENERIC_HEADER* pUpdateStructs;
+ GENERIC_HEADER *pUpdateStructs;
// Total num of descriptors in this set (count of its layout plus all prior layouts)
- uint32_t descriptorCount;
- GENERIC_HEADER** ppDescriptors; // Array where each index points to update node for its slot
- LAYOUT_NODE* pLayout; // Layout for this set
- SET_NODE* pNext;
+ uint32_t descriptorCount;
+ GENERIC_HEADER **ppDescriptors; // Array where each index points to update node for its slot
+ LAYOUT_NODE *pLayout; // Layout for this set
+ SET_NODE *pNext;
unordered_set<VkCommandBuffer> boundCmdBuffers; // Cmd buffers that this set has been bound to
- SET_NODE() : pUpdateStructs(NULL), ppDescriptors(NULL), pLayout(NULL), pNext(NULL) {};
+ SET_NODE() : pUpdateStructs(NULL), ppDescriptors(NULL), pLayout(NULL), pNext(NULL){};
};
typedef struct _DESCRIPTOR_POOL_NODE {
- VkDescriptorPool pool;
- uint32_t maxSets;
+ VkDescriptorPool pool;
+ uint32_t maxSets;
VkDescriptorPoolCreateInfo createInfo;
- SET_NODE* pSets; // Head of LL of sets for this Pool
- vector<uint32_t> maxDescriptorTypeCount; // max # of descriptors of each type in this pool
- vector<uint32_t> availableDescriptorTypeCount; // available # of descriptors of each type in this pool
-
- _DESCRIPTOR_POOL_NODE(const VkDescriptorPool pool,
- const VkDescriptorPoolCreateInfo *pCreateInfo)
- : pool(pool), maxSets(pCreateInfo->maxSets), createInfo(*pCreateInfo),
- pSets(NULL), maxDescriptorTypeCount(VK_DESCRIPTOR_TYPE_RANGE_SIZE),
- availableDescriptorTypeCount(VK_DESCRIPTOR_TYPE_RANGE_SIZE) {
+ SET_NODE *pSets; // Head of LL of sets for this Pool
+ vector<uint32_t> maxDescriptorTypeCount; // max # of descriptors of each type in this pool
+ vector<uint32_t> availableDescriptorTypeCount; // available # of descriptors of each type in this pool
+
+ _DESCRIPTOR_POOL_NODE(const VkDescriptorPool pool, const VkDescriptorPoolCreateInfo *pCreateInfo)
+ : pool(pool), maxSets(pCreateInfo->maxSets), createInfo(*pCreateInfo), pSets(NULL),
+ maxDescriptorTypeCount(VK_DESCRIPTOR_TYPE_RANGE_SIZE), availableDescriptorTypeCount(VK_DESCRIPTOR_TYPE_RANGE_SIZE) {
if (createInfo.poolSizeCount) { // Shadow type struct from ptr into local struct
size_t poolSizeCountSize = createInfo.poolSizeCount * sizeof(VkDescriptorPoolSize);
createInfo.pPoolSizes = new VkDescriptorPoolSize[poolSizeCountSize];
- memcpy((void*)createInfo.pPoolSizes, pCreateInfo->pPoolSizes, poolSizeCountSize);
+ memcpy((void *)createInfo.pPoolSizes, pCreateInfo->pPoolSizes, poolSizeCountSize);
// Now set max counts for each descriptor type based on count of that type times maxSets
- uint32_t i=0;
- for (i=0; i<createInfo.poolSizeCount; ++i) {
+ uint32_t i = 0;
+ for (i = 0; i < createInfo.poolSizeCount; ++i) {
uint32_t typeIndex = static_cast<uint32_t>(createInfo.pPoolSizes[i].type);
uint32_t poolSizeCount = createInfo.pPoolSizes[i].descriptorCount;
maxDescriptorTypeCount[typeIndex] += poolSizeCount;
}
- for (i=0; i<maxDescriptorTypeCount.size(); ++i) {
+ for (i = 0; i < maxDescriptorTypeCount.size(); ++i) {
maxDescriptorTypeCount[i] *= createInfo.maxSets;
// Initially the available counts are equal to the max counts
availableDescriptorTypeCount[i] = maxDescriptorTypeCount[i];
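The constructor above shadows pPoolSizes into the node, sums descriptorCount per descriptor type, scales each per-type total by maxSets, and starts the available counts equal to those maxima. A worked sketch of that arithmetic, assuming two pool sizes (a standalone illustration, not the layer's code):

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    const uint32_t maxSets = 4;
    // Pretend pPoolSizes = { {VK_DESCRIPTOR_TYPE_SAMPLER, 2}, {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 3} };
    // SAMPLER is type index 0 and UNIFORM_BUFFER is type index 6 in the core enum.
    std::vector<uint32_t> maxDescriptorTypeCount(11, 0);   // 11 core descriptor types in Vulkan 1.0
    std::vector<uint32_t> availableDescriptorTypeCount(11, 0);
    maxDescriptorTypeCount[0] += 2;
    maxDescriptorTypeCount[6] += 3;
    for (size_t i = 0; i < maxDescriptorTypeCount.size(); ++i) {
        maxDescriptorTypeCount[i] *= maxSets;               // scale per-type totals by maxSets
        availableDescriptorTypeCount[i] = maxDescriptorTypeCount[i];
    }
    std::printf("uniform buffers available: %u\n", availableDescriptorTypeCount[6]); // prints 12
    return 0;
}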
@@ -565,12 +534,11 @@ typedef enum _CMD_TYPE {
} CMD_TYPE;
// Data structure for holding sequence of cmds in cmd buffer
typedef struct _CMD_NODE {
- CMD_TYPE type;
- uint64_t cmdNumber;
+ CMD_TYPE type;
+ uint64_t cmdNumber;
} CMD_NODE;
-typedef enum _CB_STATE
-{
+typedef enum _CB_STATE {
CB_NEW, // Newly created CB w/o any cmds
CB_RECORDING, // BeginCB has been called on this CB
CB_RECORDED, // EndCB has been called on this CB
@@ -578,34 +546,31 @@ typedef enum _CB_STATE
} CB_STATE;
// CB Status -- used to track status of various bindings on cmd buffer objects
typedef VkFlags CBStatusFlags;
-typedef enum _CBStatusFlagBits
-{
- CBSTATUS_NONE = 0x00000000, // No status is set
- CBSTATUS_VIEWPORT_SET = 0x00000001, // Viewport has been set
- CBSTATUS_LINE_WIDTH_SET = 0x00000002, // Line width has been set
- CBSTATUS_DEPTH_BIAS_SET = 0x00000004, // Depth bias has been set
- CBSTATUS_COLOR_BLEND_WRITE_ENABLE = 0x00000008, // PSO w/ CB Enable set has been set
- CBSTATUS_BLEND_SET = 0x00000010, // Blend state object has been set
- CBSTATUS_DEPTH_WRITE_ENABLE = 0x00000020, // PSO w/ Depth Enable set has been set
- CBSTATUS_STENCIL_TEST_ENABLE = 0x00000040, // PSO w/ Stencil Enable set has been set
- CBSTATUS_DEPTH_BOUNDS_SET = 0x00000080, // Depth bounds state object has been set
- CBSTATUS_STENCIL_READ_MASK_SET = 0x00000100, // Stencil read mask has been set
- CBSTATUS_STENCIL_WRITE_MASK_SET = 0x00000200, // Stencil write mask has been set
- CBSTATUS_STENCIL_REFERENCE_SET = 0x00000400, // Stencil reference has been set
- CBSTATUS_INDEX_BUFFER_BOUND = 0x00000800, // Index buffer has been set
- CBSTATUS_SCISSOR_SET = 0x00001000, // Scissor has been set
- CBSTATUS_ALL = 0x00001FFF, // All dynamic state set
+typedef enum _CBStatusFlagBits {
+ CBSTATUS_NONE = 0x00000000, // No status is set
+ CBSTATUS_VIEWPORT_SET = 0x00000001, // Viewport has been set
+ CBSTATUS_LINE_WIDTH_SET = 0x00000002, // Line width has been set
+ CBSTATUS_DEPTH_BIAS_SET = 0x00000004, // Depth bias has been set
+ CBSTATUS_COLOR_BLEND_WRITE_ENABLE = 0x00000008, // PSO w/ CB Enable set has been set
+ CBSTATUS_BLEND_SET = 0x00000010, // Blend state object has been set
+ CBSTATUS_DEPTH_WRITE_ENABLE = 0x00000020, // PSO w/ Depth Enable set has been set
+ CBSTATUS_STENCIL_TEST_ENABLE = 0x00000040, // PSO w/ Stencil Enable set has been set
+ CBSTATUS_DEPTH_BOUNDS_SET = 0x00000080, // Depth bounds state object has been set
+ CBSTATUS_STENCIL_READ_MASK_SET = 0x00000100, // Stencil read mask has been set
+ CBSTATUS_STENCIL_WRITE_MASK_SET = 0x00000200, // Stencil write mask has been set
+ CBSTATUS_STENCIL_REFERENCE_SET = 0x00000400, // Stencil reference has been set
+ CBSTATUS_INDEX_BUFFER_BOUND = 0x00000800, // Index buffer has been set
+ CBSTATUS_SCISSOR_SET = 0x00001000, // Scissor has been set
+ CBSTATUS_ALL = 0x00001FFF, // All dynamic state set
} CBStatusFlagBits;
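These bits let a single CBStatusFlags word record which pieces of dynamic state have been set on a command buffer; at draw time, missing bits correspond to the *_NOT_BOUND codes in DRAW_STATE_ERROR above. A minimal sketch of that bookkeeping, using local stand-in constants rather than the layer's own:

#include <cstdint>
#include <cstdio>

typedef uint32_t CBStatusFlagsSketch;
enum : uint32_t {
    S_CBSTATUS_VIEWPORT_SET = 0x00000001,
    S_CBSTATUS_SCISSOR_SET  = 0x00001000,
};

int main() {
    CBStatusFlagsSketch status = 0;
    status |= S_CBSTATUS_VIEWPORT_SET;        // a vkCmdSetViewport was recorded
    // At draw time the required bits are checked and anything still missing is reported:
    if (!(status & S_CBSTATUS_SCISSOR_SET))
        std::puts("would report DRAWSTATE_SCISSOR_NOT_BOUND");
    return 0;
}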
typedef struct stencil_data {
- uint32_t compareMask;
- uint32_t writeMask;
- uint32_t reference;
+ uint32_t compareMask;
+ uint32_t writeMask;
+ uint32_t reference;
} CBStencilData;
-typedef struct _DRAW_DATA {
- vector<VkBuffer> buffers;
-} DRAW_DATA;
+typedef struct _DRAW_DATA { vector<VkBuffer> buffers; } DRAW_DATA;
struct ImageSubresourcePair {
VkImage image;
@@ -614,17 +579,17 @@ struct ImageSubresourcePair {
};
bool operator==(const ImageSubresourcePair &img1, const ImageSubresourcePair &img2) {
- if (img1.image != img2.image || img1.hasSubresource != img2.hasSubresource) return false;
- return !img1.hasSubresource || (img1.subresource.aspectMask == img2.subresource.aspectMask &&
- img1.subresource.mipLevel == img2.subresource.mipLevel &&
+ if (img1.image != img2.image || img1.hasSubresource != img2.hasSubresource)
+ return false;
+ return !img1.hasSubresource ||
+ (img1.subresource.aspectMask == img2.subresource.aspectMask && img1.subresource.mipLevel == img2.subresource.mipLevel &&
img1.subresource.arrayLayer == img2.subresource.arrayLayer);
}
namespace std {
template <> struct hash<ImageSubresourcePair> {
size_t operator()(ImageSubresourcePair img) const throw() {
- size_t hashVal =
- hash<uint64_t>()(reinterpret_cast<uint64_t &>(img.image));
+ size_t hashVal = hash<uint64_t>()(reinterpret_cast<uint64_t &>(img.image));
hashVal ^= hash<bool>()(img.hasSubresource);
if (img.hasSubresource) {
hashVal ^= hash<uint32_t>()(reinterpret_cast<uint32_t &>(img.subresource.aspectMask));
@@ -641,13 +606,12 @@ struct QueryObject {
uint32_t index;
};
-bool operator==(const QueryObject& query1, const QueryObject& query2) {
+bool operator==(const QueryObject &query1, const QueryObject &query2) {
return (query1.pool == query2.pool && query1.index == query2.index);
}
namespace std {
-template <>
-struct hash<QueryObject> {
+template <> struct hash<QueryObject> {
size_t operator()(QueryObject query) const throw() {
return hash<uint64_t>()((uint64_t)(query.pool)) ^ hash<uint32_t>()(query.index);
}
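The operator== overloads and std::hash specializations above, for ImageSubresourcePair and QueryObject, exist so those structs can serve directly as keys of the unordered_map and unordered_set members in GLOBAL_CB_NODE below (imageLayoutMap, queryToStateMap, activeQueries). A minimal standalone sketch of the same pattern with a hypothetical Query key:

#include <cstdint>
#include <cstdio>
#include <functional>
#include <unordered_map>

struct Query {
    uint64_t pool;   // stand-in for the VkQueryPool handle value
    uint32_t index;
};

bool operator==(const Query &a, const Query &b) { return a.pool == b.pool && a.index == b.index; }

namespace std {
template <> struct hash<Query> {
    size_t operator()(const Query &q) const noexcept {
        return hash<uint64_t>()(q.pool) ^ hash<uint32_t>()(q.index); // same XOR mix as above
    }
};
} // namespace std

int main() {
    std::unordered_map<Query, bool> queryToStateMap; // e.g. "has this query's result landed?"
    queryToStateMap[{0x1234u, 7u}] = true;
    std::printf("available: %d\n", (int)queryToStateMap[{0x1234u, 7u}]);
    return 0;
}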
@@ -656,91 +620,83 @@ struct hash<QueryObject> {
// Cmd Buffer Wrapper Struct
typedef struct _GLOBAL_CB_NODE {
- VkCommandBuffer commandBuffer;
- VkCommandBufferAllocateInfo createInfo;
- VkCommandBufferBeginInfo beginInfo;
+ VkCommandBuffer commandBuffer;
+ VkCommandBufferAllocateInfo createInfo;
+ VkCommandBufferBeginInfo beginInfo;
VkCommandBufferInheritanceInfo inheritanceInfo;
- VkFence fence; // fence tracking this cmd buffer
- VkDevice device; // device this DB belongs to
- uint64_t numCmds; // number of cmds in this CB
- uint64_t drawCount[NUM_DRAW_TYPES]; // Count of each type of draw in this CB
- CB_STATE state; // Track cmd buffer update state
- uint64_t submitCount; // Number of times CB has been submitted
- CBStatusFlags status; // Track status of various bindings on cmd buffer
- vector<CMD_NODE> cmds; // vector of commands bound to this command buffer
+ VkFence fence; // fence tracking this cmd buffer
+ VkDevice device; // device this DB belongs to
+ uint64_t numCmds; // number of cmds in this CB
+ uint64_t drawCount[NUM_DRAW_TYPES]; // Count of each type of draw in this CB
+ CB_STATE state; // Track cmd buffer update state
+ uint64_t submitCount; // Number of times CB has been submitted
+ CBStatusFlags status; // Track status of various bindings on cmd buffer
+ vector<CMD_NODE> cmds; // vector of commands bound to this command buffer
// Currently storing "lastBound" objects on per-CB basis
// long-term may want to create caches of "lastBound" states and could have
// each individual CMD_NODE referencing its own "lastBound" state
- VkPipeline lastBoundPipeline;
- uint32_t lastVtxBinding;
- vector<VkBuffer> boundVtxBuffers;
- vector<VkViewport> viewports;
- vector<VkRect2D> scissors;
- float lineWidth;
- float depthBiasConstantFactor;
- float depthBiasClamp;
- float depthBiasSlopeFactor;
- float blendConstants[4];
- float minDepthBounds;
- float maxDepthBounds;
- CBStencilData front;
- CBStencilData back;
- VkDescriptorSet lastBoundDescriptorSet;
- VkPipelineLayout lastBoundPipelineLayout;
- VkRenderPassBeginInfo activeRenderPassBeginInfo;
- VkRenderPass activeRenderPass;
- VkSubpassContents activeSubpassContents;
- uint32_t activeSubpass;
- VkFramebuffer framebuffer;
+ VkPipeline lastBoundPipeline;
+ uint32_t lastVtxBinding;
+ vector<VkBuffer> boundVtxBuffers;
+ vector<VkViewport> viewports;
+ vector<VkRect2D> scissors;
+ float lineWidth;
+ float depthBiasConstantFactor;
+ float depthBiasClamp;
+ float depthBiasSlopeFactor;
+ float blendConstants[4];
+ float minDepthBounds;
+ float maxDepthBounds;
+ CBStencilData front;
+ CBStencilData back;
+ VkDescriptorSet lastBoundDescriptorSet;
+ VkPipelineLayout lastBoundPipelineLayout;
+ VkRenderPassBeginInfo activeRenderPassBeginInfo;
+ VkRenderPass activeRenderPass;
+ VkSubpassContents activeSubpassContents;
+ uint32_t activeSubpass;
+ VkFramebuffer framebuffer;
// Capture unique std::set of descriptorSets that are bound to this CB.
- std::set<VkDescriptorSet> uniqueBoundSets;
+ std::set<VkDescriptorSet> uniqueBoundSets;
// Track descriptor sets that are destroyed or updated while bound to CB
// TODO : These data structures relate to tracking resources that invalidate
// a cmd buffer that references them. Need to unify how we handle these
// cases so we don't have different tracking data for each type.
- std::set<VkDescriptorSet> destroyedSets;
- std::set<VkDescriptorSet> updatedSets;
+ std::set<VkDescriptorSet> destroyedSets;
+ std::set<VkDescriptorSet> updatedSets;
unordered_set<VkFramebuffer> destroyedFramebuffers;
// Keep running track of which sets are bound to which set# at any given
// time
- vector<VkDescriptorSet> boundDescriptorSets; // Index is set# that given set is bound to
- vector<VkEvent> waitedEvents;
+ vector<VkDescriptorSet> boundDescriptorSets; // Index is set# that given set is bound to
+ vector<VkEvent> waitedEvents;
vector<VkSemaphore> semaphores;
vector<VkEvent> events;
- unordered_map<QueryObject, vector<VkEvent> > waitedEventsBeforeQueryReset;
+ unordered_map<QueryObject, vector<VkEvent>> waitedEventsBeforeQueryReset;
unordered_map<QueryObject, bool> queryToStateMap; // 0 is unavailable, 1 is available
- unordered_set<QueryObject> activeQueries;
+ unordered_set<QueryObject> activeQueries;
unordered_set<QueryObject> startedQueries;
- unordered_map<ImageSubresourcePair, IMAGE_CMD_BUF_LAYOUT_NODE>
- imageLayoutMap;
+ unordered_map<ImageSubresourcePair, IMAGE_CMD_BUF_LAYOUT_NODE> imageLayoutMap;
unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
unordered_map<VkEvent, VkPipelineStageFlags> eventToStageMap;
- vector<DRAW_DATA> drawData;
- DRAW_DATA currentDrawData;
+ vector<DRAW_DATA> drawData;
+ DRAW_DATA currentDrawData;
VkCommandBuffer primaryCommandBuffer;
// If cmd buffer is primary, track secondary command buffers pending
// execution
std::unordered_set<VkCommandBuffer> secondaryCommandBuffers;
- vector<uint32_t> dynamicOffsets; // one dynamic offset per dynamic descriptor bound to this CB
+ vector<uint32_t> dynamicOffsets; // one dynamic offset per dynamic descriptor bound to this CB
} GLOBAL_CB_NODE;
typedef struct _SWAPCHAIN_NODE {
- VkSwapchainCreateInfoKHR createInfo;
- uint32_t* pQueueFamilyIndices;
- std::vector<VkImage> images;
- _SWAPCHAIN_NODE(const VkSwapchainCreateInfoKHR *pCreateInfo) :
- createInfo(*pCreateInfo),
- pQueueFamilyIndices(NULL)
- {
- if (pCreateInfo->queueFamilyIndexCount &&
- pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) {
+ VkSwapchainCreateInfoKHR createInfo;
+ uint32_t *pQueueFamilyIndices;
+ std::vector<VkImage> images;
+ _SWAPCHAIN_NODE(const VkSwapchainCreateInfoKHR *pCreateInfo) : createInfo(*pCreateInfo), pQueueFamilyIndices(NULL) {
+ if (pCreateInfo->queueFamilyIndexCount && pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) {
pQueueFamilyIndices = new uint32_t[pCreateInfo->queueFamilyIndexCount];
- memcpy(pQueueFamilyIndices, pCreateInfo->pQueueFamilyIndices, pCreateInfo->queueFamilyIndexCount*sizeof(uint32_t));
+ memcpy(pQueueFamilyIndices, pCreateInfo->pQueueFamilyIndices, pCreateInfo->queueFamilyIndexCount * sizeof(uint32_t));
createInfo.pQueueFamilyIndices = pQueueFamilyIndices;
}
}
- ~_SWAPCHAIN_NODE()
- {
- delete pQueueFamilyIndices;
- }
+ ~_SWAPCHAIN_NODE() { delete pQueueFamilyIndices; }
} SWAPCHAIN_NODE;
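The constructor above deep-copies pQueueFamilyIndices and repoints createInfo at the copy, so the stored create info stays valid after the application's array goes out of scope, and the destructor releases the copy. An alternative sketch of the same ownership using std::vector, which avoids the manual new/delete pair; this is an illustration under stand-in types, not the layer's code:

#include <cstdint>
#include <vector>

struct SwapchainCreateInfoLite {                    // stand-in for VkSwapchainCreateInfoKHR
    uint32_t queueFamilyIndexCount;
    const uint32_t *pQueueFamilyIndices;
};

struct SwapchainNodeSketch {
    SwapchainCreateInfoLite createInfo;
    std::vector<uint32_t> queueFamilyIndices;       // owns the copy, freed automatically

    explicit SwapchainNodeSketch(const SwapchainCreateInfoLite &ci) : createInfo(ci) {
        if (ci.queueFamilyIndexCount) {
            queueFamilyIndices.assign(ci.pQueueFamilyIndices,
                                      ci.pQueueFamilyIndices + ci.queueFamilyIndexCount);
            createInfo.pQueueFamilyIndices = queueFamilyIndices.data(); // repoint at our copy
        }
    }
};

int main() {
    const uint32_t families[] = {0, 1};
    SwapchainNodeSketch node({2, families});
    return node.createInfo.pQueueFamilyIndices[1];  // reads from the owned copy
}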
diff --git a/layers/image.cpp b/layers/image.cpp
index c53cc8d40..ca7f223fb 100644
--- a/layers/image.cpp
+++ b/layers/image.cpp
@@ -54,37 +54,31 @@ using namespace std;
using namespace std;
struct layer_data {
- debug_report_data *report_data;
- vector<VkDebugReportCallbackEXT> logging_callback;
- VkLayerDispatchTable* device_dispatch_table;
+ debug_report_data *report_data;
+ vector<VkDebugReportCallbackEXT> logging_callback;
+ VkLayerDispatchTable *device_dispatch_table;
VkLayerInstanceDispatchTable *instance_dispatch_table;
- VkPhysicalDevice physicalDevice;
- VkPhysicalDeviceProperties physicalDeviceProperties;
+ VkPhysicalDevice physicalDevice;
+ VkPhysicalDeviceProperties physicalDeviceProperties;
unordered_map<VkImage, IMAGE_STATE> imageMap;
- layer_data() :
- report_data(nullptr),
- device_dispatch_table(nullptr),
- instance_dispatch_table(nullptr),
- physicalDevice(0),
- physicalDeviceProperties()
- {};
+ layer_data()
+ : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), physicalDevice(0),
+ physicalDeviceProperties(){};
};
-static unordered_map<void*, layer_data*> layer_data_map;
+static unordered_map<void *, layer_data *> layer_data_map;
-static void InitImage(layer_data *data, const VkAllocationCallbacks *pAllocator)
-{
+static void InitImage(layer_data *data, const VkAllocationCallbacks *pAllocator) {
VkDebugReportCallbackEXT callback;
uint32_t report_flags = getLayerOptionFlags("lunarg_image.report_flags", 0);
uint32_t debug_action = 0;
- getLayerOptionEnum("lunarg_image.debug_action", (uint32_t *) &debug_action);
- if(debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
- {
+ getLayerOptionEnum("lunarg_image.debug_action", (uint32_t *)&debug_action);
+ if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG) {
FILE *log_output = NULL;
- const char* option_str = getLayerOption("lunarg_image.log_filename");
+ const char *option_str = getLayerOption("lunarg_image.log_filename");
log_output = getLayerLogOutput(option_str, "lunarg_image");
VkDebugReportCallbackCreateInfoEXT dbgInfo;
memset(&dbgInfo, 0, sizeof(dbgInfo));
@@ -107,12 +101,9 @@ static void InitImage(layer_data *data, const VkAllocationCallbacks *pAllocator)
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
- VkInstance instance,
- const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDebugReportCallbackEXT* pMsgCallback)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
VkResult res = my_data->instance_dispatch_table->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
if (res == VK_SUCCESS) {
@@ -121,37 +112,29 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
return res;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
- VkInstance instance,
- VkDebugReportCallbackEXT msgCallback,
- const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
+ VkDebugReportCallbackEXT msgCallback,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
my_data->instance_dispatch_table->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(
- VkInstance instance,
- VkDebugReportFlagsEXT flags,
- VkDebugReportObjectTypeEXT objType,
- uint64_t object,
- size_t location,
- int32_t msgCode,
- const char* pLayerPrefix,
- const char* pMsg)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
+ size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
- my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
+ my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
+ pMsg);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
- PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance) fpGetInstanceProcAddr(NULL, "vkCreateInstance");
+ PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
if (fpCreateInstance == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -167,19 +150,15 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstance
my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);
- my_data->report_data = debug_report_create_instance(
- my_data->instance_dispatch_table,
- *pInstance,
- pCreateInfo->enabledExtensionCount,
- pCreateInfo->ppEnabledExtensionNames);
+ my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
+ pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
InitImage(my_data, pAllocator);
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
// Grab the key before the instance is destroyed.
dispatch_key key = get_dispatch_key(instance);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
@@ -196,17 +175,17 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance
layer_debug_report_destroy_instance(my_data->report_data);
delete my_data->instance_dispatch_table;
layer_data_map.erase(key);
-
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice physicalDevice,
+ const VkDeviceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
- PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice) fpGetInstanceProcAddr(NULL, "vkCreateDevice");
+ PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
if (fpCreateDevice == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -229,13 +208,13 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice p
my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
my_device_data->physicalDevice = physicalDevice;
- my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(physicalDevice, &(my_device_data->physicalDeviceProperties));
+ my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(physicalDevice,
+ &(my_device_data->physicalDeviceProperties));
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
dispatch_key key = get_dispatch_key(device);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
my_data->device_dispatch_table->DestroyDevice(device, pAllocator);
@@ -243,183 +222,146 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, cons
layer_data_map.erase(key);
}
-static const VkExtensionProperties instance_extensions[] = {
- {
- VK_EXT_DEBUG_REPORT_EXTENSION_NAME,
- VK_EXT_DEBUG_REPORT_SPEC_VERSION
- }
-};
+static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(
- const char *pLayerName,
- uint32_t *pCount,
- VkExtensionProperties* pProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
}
-static const VkLayerProperties pc_global_layers[] = {
- {
- "VK_LAYER_LUNARG_image",
- VK_API_VERSION,
- 1,
- "LunarG Validation Layer",
- }
-};
+static const VkLayerProperties pc_global_layers[] = {{
+ "VK_LAYER_LUNARG_image", VK_API_VERSION, 1, "LunarG Validation Layer",
+}};
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(
- uint32_t *pCount,
- VkLayerProperties* pProperties)
-{
- return util_GetLayerProperties(ARRAY_SIZE(pc_global_layers),
- pc_global_layers,
- pCount, pProperties);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
+ return util_GetLayerProperties(ARRAY_SIZE(pc_global_layers), pc_global_layers, pCount, pProperties);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(
- VkPhysicalDevice physicalDevice,
- const char* pLayerName,
- uint32_t* pCount,
- VkExtensionProperties* pProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
+ const char *pLayerName, uint32_t *pCount,
+ VkExtensionProperties *pProperties) {
// Image does not have any physical device extensions
if (pLayerName == NULL) {
dispatch_key key = get_dispatch_key(physicalDevice);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
- return pTable->EnumerateDeviceExtensionProperties(
- physicalDevice,
- NULL,
- pCount,
- pProperties);
+ return pTable->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
} else {
return util_GetExtensionProperties(0, NULL, pCount, pProperties);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(
- VkPhysicalDevice physicalDevice,
- uint32_t* pCount,
- VkLayerProperties* pProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
// ParamChecker's physical device layers are the same as global
- return util_GetLayerProperties(ARRAY_SIZE(pc_global_layers), pc_global_layers,
- pCount, pProperties);
+ return util_GetLayerProperties(ARRAY_SIZE(pc_global_layers), pc_global_layers, pCount, pProperties);
}
// Start of the Image layer proper
// Returns TRUE if a format is a depth-compatible format
-bool is_depth_format(VkFormat format)
-{
+bool is_depth_format(VkFormat format) {
bool result = VK_FALSE;
switch (format) {
- case VK_FORMAT_D16_UNORM:
- case VK_FORMAT_X8_D24_UNORM_PACK32:
- case VK_FORMAT_D32_SFLOAT:
- case VK_FORMAT_S8_UINT:
- case VK_FORMAT_D16_UNORM_S8_UINT:
- case VK_FORMAT_D24_UNORM_S8_UINT:
- case VK_FORMAT_D32_SFLOAT_S8_UINT:
- result = VK_TRUE;
- break;
- default:
- break;
+ case VK_FORMAT_D16_UNORM:
+ case VK_FORMAT_X8_D24_UNORM_PACK32:
+ case VK_FORMAT_D32_SFLOAT:
+ case VK_FORMAT_S8_UINT:
+ case VK_FORMAT_D16_UNORM_S8_UINT:
+ case VK_FORMAT_D24_UNORM_S8_UINT:
+ case VK_FORMAT_D32_SFLOAT_S8_UINT:
+ result = VK_TRUE;
+ break;
+ default:
+ break;
}
return result;
}
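is_depth_format() above reports whether a format carries a depth and/or stencil component. One kind of check it supports is rejecting a depth or stencil aspect requested on a color-only format; the sketch below illustrates that with a trimmed local copy of the helper so it compiles on its own, and is hypothetical rather than the layer's actual validation:

#include <vulkan/vulkan.h>
#include <cstdio>

// Trimmed stand-in for is_depth_format(), kept local so the sketch is self-contained.
static bool is_depth_format_sketch(VkFormat f) {
    switch (f) {
    case VK_FORMAT_D16_UNORM:
    case VK_FORMAT_D24_UNORM_S8_UINT:
    case VK_FORMAT_D32_SFLOAT_S8_UINT:
        return true;
    default:
        return false;
    }
}

int main() {
    VkFormat fmt = VK_FORMAT_R8G8B8A8_UNORM;
    VkImageAspectFlags aspect = VK_IMAGE_ASPECT_DEPTH_BIT;
    if ((aspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) && !is_depth_format_sketch(fmt))
        std::puts("invalid: depth/stencil aspect requested for a color-only format");
    return 0;
}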
-static inline uint32_t validate_VkImageLayoutKHR(VkImageLayout input_value)
-{
- return ((validate_VkImageLayout(input_value) == 1) ||
- (input_value == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR));
+static inline uint32_t validate_VkImageLayoutKHR(VkImageLayout input_value) {
+ return ((validate_VkImageLayout(input_value) == 1) || (input_value == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR));
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage)
-{
- VkBool32 skipCall = VK_FALSE;
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
+ VkBool32 skipCall = VK_FALSE;
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
VkImageFormatProperties ImageFormatProperties;
- layer_data *device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkPhysicalDevice physicalDevice = device_data->physicalDevice;
- layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
+ layer_data *device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ VkPhysicalDevice physicalDevice = device_data->physicalDevice;
+ layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
- if (pCreateInfo->format != VK_FORMAT_UNDEFINED)
- {
+ if (pCreateInfo->format != VK_FORMAT_UNDEFINED) {
VkFormatProperties properties;
- phy_dev_data->instance_dispatch_table->GetPhysicalDeviceFormatProperties(
- device_data->physicalDevice, pCreateInfo->format, &properties);
+ phy_dev_data->instance_dispatch_table->GetPhysicalDeviceFormatProperties(device_data->physicalDevice, pCreateInfo->format,
+ &properties);
- if ((properties.linearTilingFeatures) == 0 && (properties.optimalTilingFeatures == 0))
- {
+ if ((properties.linearTilingFeatures) == 0 && (properties.optimalTilingFeatures == 0)) {
char const str[] = "vkCreateImage parameter, VkFormat pCreateInfo->format, contains unsupported format";
// TODO: Verify against Valid Use section of spec. Generally if something yield an undefined result, it's invalid
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, IMAGE_FORMAT_UNSUPPORTED, "IMAGE", str);
+ skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ IMAGE_FORMAT_UNSUPPORTED, "IMAGE", str);
}
}
// Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
phy_dev_data->instance_dispatch_table->GetPhysicalDeviceImageFormatProperties(
- physicalDevice, pCreateInfo->format, pCreateInfo->imageType, pCreateInfo->tiling,
- pCreateInfo->usage, pCreateInfo->flags, &ImageFormatProperties);
+ physicalDevice, pCreateInfo->format, pCreateInfo->imageType, pCreateInfo->tiling, pCreateInfo->usage, pCreateInfo->flags,
+ &ImageFormatProperties);
VkDeviceSize imageGranularity = device_data->physicalDeviceProperties.limits.bufferImageGranularity;
imageGranularity = imageGranularity == 1 ? 0 : imageGranularity;
- if ((pCreateInfo->extent.depth > ImageFormatProperties.maxExtent.depth) ||
- (pCreateInfo->extent.width > ImageFormatProperties.maxExtent.width) ||
+ if ((pCreateInfo->extent.depth > ImageFormatProperties.maxExtent.depth) ||
+ (pCreateInfo->extent.width > ImageFormatProperties.maxExtent.width) ||
(pCreateInfo->extent.height > ImageFormatProperties.maxExtent.height)) {
- skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t)pImage, __LINE__,
- IMAGE_INVALID_FORMAT_LIMITS_VIOLATION, "Image",
- "CreateImage extents exceed allowable limits for format: "
- "Width = %d Height = %d Depth = %d: Limits for Width = %d Height = %d Depth = %d for format %s.",
- pCreateInfo->extent.width, pCreateInfo->extent.height, pCreateInfo->extent.depth,
- ImageFormatProperties.maxExtent.width, ImageFormatProperties.maxExtent.height, ImageFormatProperties.maxExtent.depth,
- string_VkFormat(pCreateInfo->format));
-
+ skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ (uint64_t)pImage, __LINE__, IMAGE_INVALID_FORMAT_LIMITS_VIOLATION, "Image",
+ "CreateImage extents exceed allowable limits for format: "
+ "Width = %d Height = %d Depth = %d: Limits for Width = %d Height = %d Depth = %d for format %s.",
+ pCreateInfo->extent.width, pCreateInfo->extent.height, pCreateInfo->extent.depth,
+ ImageFormatProperties.maxExtent.width, ImageFormatProperties.maxExtent.height,
+ ImageFormatProperties.maxExtent.depth, string_VkFormat(pCreateInfo->format));
}
- uint64_t totalSize = ((uint64_t)pCreateInfo->extent.width *
- (uint64_t)pCreateInfo->extent.height *
- (uint64_t)pCreateInfo->extent.depth *
- (uint64_t)pCreateInfo->arrayLayers *
- (uint64_t)pCreateInfo->samples *
- (uint64_t)vk_format_get_size(pCreateInfo->format) +
- (uint64_t)imageGranularity ) & ~(uint64_t)imageGranularity;
+ uint64_t totalSize = ((uint64_t)pCreateInfo->extent.width * (uint64_t)pCreateInfo->extent.height *
+ (uint64_t)pCreateInfo->extent.depth * (uint64_t)pCreateInfo->arrayLayers *
+ (uint64_t)pCreateInfo->samples * (uint64_t)vk_format_get_size(pCreateInfo->format) +
+ (uint64_t)imageGranularity) &
+ ~(uint64_t)imageGranularity;
if (totalSize > ImageFormatProperties.maxResourceSize) {
- skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t)pImage, __LINE__,
- IMAGE_INVALID_FORMAT_LIMITS_VIOLATION, "Image",
- "CreateImage resource size exceeds allowable maximum "
- "Image resource size = %#" PRIxLEAST64 ", maximum resource size = %#" PRIxLEAST64 " ",
- totalSize, ImageFormatProperties.maxResourceSize);
+ skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ (uint64_t)pImage, __LINE__, IMAGE_INVALID_FORMAT_LIMITS_VIOLATION, "Image",
+ "CreateImage resource size exceeds allowable maximum "
+ "Image resource size = %#" PRIxLEAST64 ", maximum resource size = %#" PRIxLEAST64 " ",
+ totalSize, ImageFormatProperties.maxResourceSize);
}
if (pCreateInfo->mipLevels > ImageFormatProperties.maxMipLevels) {
- skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t)pImage, __LINE__,
- IMAGE_INVALID_FORMAT_LIMITS_VIOLATION, "Image",
- "CreateImage mipLevels=%d exceeds allowable maximum supported by format of %d",
- pCreateInfo->mipLevels, ImageFormatProperties.maxMipLevels);
+ skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ (uint64_t)pImage, __LINE__, IMAGE_INVALID_FORMAT_LIMITS_VIOLATION, "Image",
+ "CreateImage mipLevels=%d exceeds allowable maximum supported by format of %d", pCreateInfo->mipLevels,
+ ImageFormatProperties.maxMipLevels);
}
if (pCreateInfo->arrayLayers > ImageFormatProperties.maxArrayLayers) {
- skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t)pImage, __LINE__,
- IMAGE_INVALID_FORMAT_LIMITS_VIOLATION, "Image",
- "CreateImage arrayLayers=%d exceeds allowable maximum supported by format of %d",
- pCreateInfo->arrayLayers, ImageFormatProperties.maxArrayLayers);
+ skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ (uint64_t)pImage, __LINE__, IMAGE_INVALID_FORMAT_LIMITS_VIOLATION, "Image",
+ "CreateImage arrayLayers=%d exceeds allowable maximum supported by format of %d",
+ pCreateInfo->arrayLayers, ImageFormatProperties.maxArrayLayers);
}
if ((pCreateInfo->samples & ImageFormatProperties.sampleCounts) == 0) {
- skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t)pImage, __LINE__,
- IMAGE_INVALID_FORMAT_LIMITS_VIOLATION, "Image",
- "CreateImage samples %s is not supported by format 0x%.8X",
- string_VkSampleCountFlagBits(pCreateInfo->samples), ImageFormatProperties.sampleCounts);
+ skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ (uint64_t)pImage, __LINE__, IMAGE_INVALID_FORMAT_LIMITS_VIOLATION, "Image",
+ "CreateImage samples %s is not supported by format 0x%.8X",
+ string_VkSampleCountFlagBits(pCreateInfo->samples), ImageFormatProperties.sampleCounts);
}
- if (pCreateInfo->initialLayout != VK_IMAGE_LAYOUT_UNDEFINED &&
- pCreateInfo->initialLayout != VK_IMAGE_LAYOUT_PREINITIALIZED)
- {
+ if (pCreateInfo->initialLayout != VK_IMAGE_LAYOUT_UNDEFINED && pCreateInfo->initialLayout != VK_IMAGE_LAYOUT_PREINITIALIZED) {
skipCall |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
(uint64_t)pImage, __LINE__, IMAGE_INVALID_LAYOUT, "Image",
"vkCreateImage parameter, pCreateInfo->initialLayout, must be VK_IMAGE_LAYOUT_UNDEFINED or "
@@ -435,73 +377,69 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, co
return result;
}
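Editor's note on the size check in vkCreateImage above: the layer multiplies extent, layer count, sample count, and per-texel size, rounds the result against bufferImageGranularity, and compares it with VkImageFormatProperties::maxResourceSize. For comparison, a small, purely illustrative sketch of the conventional power-of-two round-up is shown below; the layer's own expression differs slightly in that it zeroes a granularity of 1 and masks with the raw granularity value rather than with (granularity - 1). Names here are not part of the layer.
#include <stdint.h>
// Illustrative only: round 'size' up to the next multiple of 'granularity'
// (assumed to be a power of two).
static inline uint64_t round_up_to_granularity(uint64_t size, uint64_t granularity) {
    uint64_t mask = granularity - 1; // e.g. granularity 1024 -> mask 0x3FF
    return (size + mask) & ~mask;    // 1000 -> 1024, 1024 -> 1024, 1025 -> 2048
}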
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
device_data->imageMap.erase(image);
device_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkRenderPass *pRenderPass) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkBool32 skipCall = VK_FALSE;
- for(uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i)
- {
- if(pCreateInfo->pAttachments[i].format != VK_FORMAT_UNDEFINED)
- {
+ for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
+ if (pCreateInfo->pAttachments[i].format != VK_FORMAT_UNDEFINED) {
VkFormatProperties properties;
- get_my_data_ptr(get_dispatch_key(my_data->physicalDevice), layer_data_map)->instance_dispatch_table->GetPhysicalDeviceFormatProperties(
- my_data->physicalDevice, pCreateInfo->pAttachments[i].format, &properties);
+ get_my_data_ptr(get_dispatch_key(my_data->physicalDevice), layer_data_map)
+ ->instance_dispatch_table->GetPhysicalDeviceFormatProperties(my_data->physicalDevice,
+ pCreateInfo->pAttachments[i].format, &properties);
- if((properties.linearTilingFeatures) == 0 && (properties.optimalTilingFeatures == 0))
- {
+ if ((properties.linearTilingFeatures) == 0 && (properties.optimalTilingFeatures == 0)) {
std::stringstream ss;
- ss << "vkCreateRenderPass parameter, VkFormat in pCreateInfo->pAttachments[" << i << "], contains unsupported format";
+ ss << "vkCreateRenderPass parameter, VkFormat in pCreateInfo->pAttachments[" << i
+ << "], contains unsupported format";
// TODO: Verify against Valid Use section of spec. Generally if something yield an undefined result, it's invalid
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, IMAGE_FORMAT_UNSUPPORTED, "IMAGE", "%s", ss.str().c_str());
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ IMAGE_FORMAT_UNSUPPORTED, "IMAGE", "%s", ss.str().c_str());
}
}
}
- for(uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i)
- {
- if(!validate_VkImageLayoutKHR(pCreateInfo->pAttachments[i].initialLayout) ||
- !validate_VkImageLayoutKHR(pCreateInfo->pAttachments[i].finalLayout))
- {
+ for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
+ if (!validate_VkImageLayoutKHR(pCreateInfo->pAttachments[i].initialLayout) ||
+ !validate_VkImageLayoutKHR(pCreateInfo->pAttachments[i].finalLayout)) {
std::stringstream ss;
ss << "vkCreateRenderPass parameter, VkImageLayout in pCreateInfo->pAttachments[" << i << "], is unrecognized";
// TODO: Verify against Valid Use section of spec. Generally if something yield an undefined result, it's invalid
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, IMAGE_RENDERPASS_INVALID_ATTACHMENT, "IMAGE", "%s", ss.str().c_str());
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ IMAGE_RENDERPASS_INVALID_ATTACHMENT, "IMAGE", "%s", ss.str().c_str());
}
}
- for(uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i)
- {
- if(!validate_VkAttachmentLoadOp(pCreateInfo->pAttachments[i].loadOp))
- {
+ for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
+ if (!validate_VkAttachmentLoadOp(pCreateInfo->pAttachments[i].loadOp)) {
std::stringstream ss;
ss << "vkCreateRenderPass parameter, VkAttachmentLoadOp in pCreateInfo->pAttachments[" << i << "], is unrecognized";
// TODO: Verify against Valid Use section of spec. Generally if something yield an undefined result, it's invalid
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, IMAGE_RENDERPASS_INVALID_ATTACHMENT, "IMAGE", "%s", ss.str().c_str());
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ IMAGE_RENDERPASS_INVALID_ATTACHMENT, "IMAGE", "%s", ss.str().c_str());
}
}
- for(uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i)
- {
- if(!validate_VkAttachmentStoreOp(pCreateInfo->pAttachments[i].storeOp))
- {
+ for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
+ if (!validate_VkAttachmentStoreOp(pCreateInfo->pAttachments[i].storeOp)) {
std::stringstream ss;
ss << "vkCreateRenderPass parameter, VkAttachmentStoreOp in pCreateInfo->pAttachments[" << i << "], is unrecognized";
// TODO: Verify against Valid Use section of spec. Generally if something yield an undefined result, it's invalid
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, IMAGE_RENDERPASS_INVALID_ATTACHMENT, "IMAGE", "%s", ss.str().c_str());
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ IMAGE_RENDERPASS_INVALID_ATTACHMENT, "IMAGE", "%s", ss.str().c_str());
}
}
// Any depth buffers specified as attachments?
bool depthFormatPresent = VK_FALSE;
- for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i)
- {
+ for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
depthFormatPresent |= is_depth_format(pCreateInfo->pAttachments[i].format);
}
@@ -511,8 +449,10 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice devic
if (pCreateInfo->pSubpasses[i].pDepthStencilAttachment &&
pCreateInfo->pSubpasses[i].pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
std::stringstream ss;
- ss << "vkCreateRenderPass has no depth/stencil attachment, yet subpass[" << i << "] has VkSubpassDescription::depthStencilAttachment value that is not VK_ATTACHMENT_UNUSED";
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, IMAGE_RENDERPASS_INVALID_DS_ATTACHMENT, "IMAGE", "%s", ss.str().c_str());
+ ss << "vkCreateRenderPass has no depth/stencil attachment, yet subpass[" << i
+ << "] has VkSubpassDescription::depthStencilAttachment value that is not VK_ATTACHMENT_UNUSED";
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ IMAGE_RENDERPASS_INVALID_DS_ATTACHMENT, "IMAGE", "%s", ss.str().c_str());
}
}
}
@@ -524,59 +464,65 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice devic
return result;
}
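Editor's sketch: the final loop of vkCreateRenderPass above cross-checks subpasses against attachments; when no attachment has a depth-compatible format, no VkSubpassDescription may reference a depth/stencil attachment other than VK_ATTACHMENT_UNUSED. A condensed, hypothetical restatement of that rule follows; the wrapper name is illustrative, while is_depth_format() is the layer function defined earlier in this file.
// Sketch only: true when every subpass depth/stencil reference is consistent with
// the attachment formats declared in the render pass.
static bool subpass_ds_references_valid(const VkRenderPassCreateInfo *ci) {
    bool depthFormatPresent = false;
    for (uint32_t i = 0; i < ci->attachmentCount; ++i)
        depthFormatPresent |= is_depth_format(ci->pAttachments[i].format);
    if (depthFormatPresent)
        return true; // some attachment can back a depth/stencil reference
    for (uint32_t i = 0; i < ci->subpassCount; ++i) {
        const VkAttachmentReference *ds = ci->pSubpasses[i].pDepthStencilAttachment;
        if (ds && ds->attachment != VK_ATTACHMENT_UNUSED)
            return false; // references a depth/stencil attachment no format provides
    }
    return true;
}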
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
VkBool32 skipCall = VK_FALSE;
layer_data *device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
auto imageEntry = device_data->imageMap.find(pCreateInfo->image);
if (imageEntry != device_data->imageMap.end()) {
if (pCreateInfo->subresourceRange.baseMipLevel >= imageEntry->second.mipLevels) {
std::stringstream ss;
- ss << "vkCreateImageView called with baseMipLevel " << pCreateInfo->subresourceRange.baseMipLevel
- << " for image " << pCreateInfo->image << " that only has " << imageEntry->second.mipLevels << " mip levels.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, IMAGE_VIEW_CREATE_ERROR, "IMAGE", "%s", ss.str().c_str());
+ ss << "vkCreateImageView called with baseMipLevel " << pCreateInfo->subresourceRange.baseMipLevel << " for image "
+ << pCreateInfo->image << " that only has " << imageEntry->second.mipLevels << " mip levels.";
+ skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ IMAGE_VIEW_CREATE_ERROR, "IMAGE", "%s", ss.str().c_str());
}
if (pCreateInfo->subresourceRange.baseArrayLayer >= imageEntry->second.arraySize) {
std::stringstream ss;
ss << "vkCreateImageView called with baseArrayLayer " << pCreateInfo->subresourceRange.baseArrayLayer << " for image "
<< pCreateInfo->image << " that only has " << imageEntry->second.arraySize << " mip levels.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, IMAGE_VIEW_CREATE_ERROR, "IMAGE", "%s", ss.str().c_str());
+ skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ IMAGE_VIEW_CREATE_ERROR, "IMAGE", "%s", ss.str().c_str());
}
if (!pCreateInfo->subresourceRange.levelCount) {
std::stringstream ss;
ss << "vkCreateImageView called with 0 in pCreateInfo->subresourceRange.levelCount.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, IMAGE_VIEW_CREATE_ERROR, "IMAGE", "%s", ss.str().c_str());
+ skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ IMAGE_VIEW_CREATE_ERROR, "IMAGE", "%s", ss.str().c_str());
}
if (!pCreateInfo->subresourceRange.layerCount) {
std::stringstream ss;
ss << "vkCreateImageView called with 0 in pCreateInfo->subresourceRange.layerCount.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, IMAGE_VIEW_CREATE_ERROR, "IMAGE", "%s", ss.str().c_str());
+ skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
+ IMAGE_VIEW_CREATE_ERROR, "IMAGE", "%s", ss.str().c_str());
}
- VkImageCreateFlags imageFlags = imageEntry->second.flags;
- VkFormat imageFormat = imageEntry->second.format;
- VkFormat ivciFormat = pCreateInfo->format;
- VkImageAspectFlags aspectMask = pCreateInfo->subresourceRange.aspectMask;
+ VkImageCreateFlags imageFlags = imageEntry->second.flags;
+ VkFormat imageFormat = imageEntry->second.format;
+ VkFormat ivciFormat = pCreateInfo->format;
+ VkImageAspectFlags aspectMask = pCreateInfo->subresourceRange.aspectMask;
// Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state
if (imageFlags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
// Format MUST be compatible (in the same format compatibility class) as the format the image was created with
if (vk_format_get_compatibility_class(imageFormat) != vk_format_get_compatibility_class(ivciFormat)) {
std::stringstream ss;
- ss << "vkCreateImageView(): ImageView format " << string_VkFormat(ivciFormat) << " is not in the same format compatibility class as image (" <<
- (uint64_t)pCreateInfo->image << ") format " << string_VkFormat(imageFormat) << ". Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT BIT " <<
- "can support ImageViews with differing formats but they must be in the same compatibility class.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- IMAGE_VIEW_CREATE_ERROR, "IMAGE", "%s", ss.str().c_str());
+ ss << "vkCreateImageView(): ImageView format " << string_VkFormat(ivciFormat)
+ << " is not in the same format compatibility class as image (" << (uint64_t)pCreateInfo->image << ") format "
+ << string_VkFormat(imageFormat) << ". Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT BIT "
+ << "can support ImageViews with differing formats but they must be in the same compatibility class.";
+ skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, IMAGE_VIEW_CREATE_ERROR, "IMAGE", "%s", ss.str().c_str());
}
} else {
// Format MUST be IDENTICAL to the format the image was created with
if (imageFormat != ivciFormat) {
std::stringstream ss;
- ss << "vkCreateImageView() format " << string_VkFormat(ivciFormat) << " differs from image " << (uint64_t)pCreateInfo->image << " format " <<
- string_VkFormat(imageFormat) << ". Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT BIT was set on image creation.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
- IMAGE_VIEW_CREATE_ERROR, "IMAGE", "%s", ss.str().c_str());
+ ss << "vkCreateImageView() format " << string_VkFormat(ivciFormat) << " differs from image "
+ << (uint64_t)pCreateInfo->image << " format " << string_VkFormat(imageFormat)
+ << ". Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT BIT was set on image creation.";
+ skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, IMAGE_VIEW_CREATE_ERROR, "IMAGE", "%s", ss.str().c_str());
}
}
@@ -585,63 +531,74 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device
if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
std::stringstream ss;
ss << "vkCreateImageView: Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
- (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
}
if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != aspectMask) {
std::stringstream ss;
ss << "vkCreateImageView: Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
- (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
}
if (VK_FALSE == vk_format_is_color(ivciFormat)) {
std::stringstream ss;
ss << "vkCreateImageView: The image view's format can differ from the parent image's format, but both must be "
- << "color formats. ImageFormat is " << string_VkFormat(imageFormat) << " ImageViewFormat is " << string_VkFormat(ivciFormat);
+ << "color formats. ImageFormat is " << string_VkFormat(imageFormat) << " ImageViewFormat is "
+ << string_VkFormat(ivciFormat);
skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
- (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_FORMAT, "IMAGE", "%s", ss.str().c_str());
+ (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_FORMAT, "IMAGE", "%s", ss.str().c_str());
}
// TODO: Uncompressed formats are compatible if they occupy they same number of bits per pixel.
// Compressed formats are compatible if the only difference between them is the numerical type of
// the uncompressed pixels (e.g. signed vs. unsigned, or sRGB vs. UNORM encoding).
- } else if (vk_format_is_depth_and_stencil(imageFormat)) {
+ } else if (vk_format_is_depth_and_stencil(imageFormat)) {
if ((aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) {
std::stringstream ss;
- ss << "vkCreateImageView: Depth/stencil image formats must have at least one of VK_IMAGE_ASPECT_DEPTH_BIT and VK_IMAGE_ASPECT_STENCIL_BIT set";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
- (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
+ ss << "vkCreateImageView: Depth/stencil image formats must have at least one of VK_IMAGE_ASPECT_DEPTH_BIT and "
+ "VK_IMAGE_ASPECT_STENCIL_BIT set";
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
}
if ((aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspectMask) {
std::stringstream ss;
- ss << "vkCreateImageView: Combination depth/stencil image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT and VK_IMAGE_ASPECT_STENCIL_BIT set";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
- (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
+ ss << "vkCreateImageView: Combination depth/stencil image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT and "
+ "VK_IMAGE_ASPECT_STENCIL_BIT set";
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
}
- } else if (vk_format_is_depth_only(imageFormat)) {
+ } else if (vk_format_is_depth_only(imageFormat)) {
if ((aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) {
std::stringstream ss;
ss << "vkCreateImageView: Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
- (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
}
if ((aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspectMask) {
std::stringstream ss;
ss << "vkCreateImageView: Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
- (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
}
- } else if (vk_format_is_stencil_only(imageFormat)) {
+ } else if (vk_format_is_stencil_only(imageFormat)) {
if ((aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) {
std::stringstream ss;
ss << "vkCreateImageView: Stencil-only image formats must have the VK_IMAGE_ASPECT_STENCIL_BIT set";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
- (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
}
if ((aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspectMask) {
std::stringstream ss;
ss << "vkCreateImageView: Stencil-only image formats can have only the VK_IMAGE_ASPECT_STENCIL_BIT set";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
- (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ (uint64_t)pCreateInfo->image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
}
}
}
@@ -654,20 +611,13 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device
return result;
}
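Editor's sketch: the MUTABLE_FORMAT branch above enforces the view/image format rule checked by vkCreateImageView; the formats must be identical unless the image was created with VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, in which case they only need to share a format compatibility class. A condensed, hypothetical form of that predicate is shown below; vk_format_get_compatibility_class() is the existing helper from the layer's utility headers, while the wrapper name is illustrative.
// Sketch only: true when 'viewFormat' is acceptable for a view of an image created
// with 'imageFlags' and 'imageFormat'.
static bool view_format_compatible(VkImageCreateFlags imageFlags, VkFormat imageFormat, VkFormat viewFormat) {
    if (imageFlags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
        // Formats may differ but must share a format compatibility class.
        return vk_format_get_compatibility_class(imageFormat) == vk_format_get_compatibility_class(viewFormat);
    }
    return imageFormat == viewFormat; // otherwise the view format must match exactly
}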
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(
- VkCommandBuffer commandBuffer,
- VkImage image,
- VkImageLayout imageLayout,
- const VkClearColorValue *pColor,
- uint32_t rangeCount,
- const VkImageSubresourceRange *pRanges)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
+ VkImageLayout imageLayout, const VkClearColorValue *pColor,
+ uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
VkBool32 skipCall = VK_FALSE;
layer_data *device_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
- if (imageLayout != VK_IMAGE_LAYOUT_GENERAL &&
- imageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
- {
+ if (imageLayout != VK_IMAGE_LAYOUT_GENERAL && imageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
(uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_LAYOUT, "IMAGE",
"vkCmdClearColorImage parameter, imageLayout, must be VK_IMAGE_LAYOUT_GENERAL or "
@@ -677,55 +627,46 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(
// For each range, image aspect must be color only
for (uint32_t i = 0; i < rangeCount; i++) {
if (pRanges[i].aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) {
- char const str[] = "vkCmdClearColorImage aspectMasks for all subresource ranges must be set to VK_IMAGE_ASPECT_COLOR_BIT";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str);
+ char const str[] =
+ "vkCmdClearColorImage aspectMasks for all subresource ranges must be set to VK_IMAGE_ASPECT_COLOR_BIT";
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str);
}
}
if (VK_FALSE == skipCall) {
- device_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout,
- pColor, rangeCount, pRanges);
+ device_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage(
- VkCommandBuffer commandBuffer,
- VkImage image,
- VkImageLayout imageLayout,
- const VkClearDepthStencilValue *pDepthStencil,
- uint32_t rangeCount,
- const VkImageSubresourceRange *pRanges)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
+ const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
+ const VkImageSubresourceRange *pRanges) {
VkBool32 skipCall = VK_FALSE;
layer_data *device_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
// For each range, Image aspect must be depth or stencil or both
for (uint32_t i = 0; i < rangeCount; i++) {
- if (((pRanges[i].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) &&
- ((pRanges[i].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT))
- {
+ if (((pRanges[i].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) &&
+ ((pRanges[i].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT)) {
char const str[] = "vkCmdClearDepthStencilImage aspectMasks for all subresource ranges must be "
"set to VK_IMAGE_ASPECT_DEPTH_BIT and/or VK_IMAGE_ASPECT_STENCIL_BIT";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str);
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str);
}
}
if (VK_FALSE == skipCall) {
- device_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer,
- image, imageLayout, pDepthStencil, rangeCount, pRanges);
+ device_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
+ pRanges);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount,
- const VkImageCopy *pRegions)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
VkBool32 skipCall = VK_FALSE;
layer_data *device_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
auto srcImageEntry = device_data->imageMap.find(srcImage);
@@ -736,80 +677,80 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(
// For each region, src aspect mask must match dest aspect mask
// For each region, color aspects cannot be mixed with depth/stencil aspects
for (uint32_t i = 0; i < regionCount; i++) {
- if(pRegions[i].srcSubresource.layerCount == 0)
- {
+ if (pRegions[i].srcSubresource.layerCount == 0) {
char const str[] = "vkCmdCopyImage: number of layers in source subresource is zero";
// TODO: Verify against Valid Use section of spec
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
}
- if(pRegions[i].dstSubresource.layerCount == 0)
- {
+ if (pRegions[i].dstSubresource.layerCount == 0) {
char const str[] = "vkCmdCopyImage: number of layers in destination subresource is zero";
// TODO: Verify against Valid Use section of spec
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
}
- if(pRegions[i].srcSubresource.layerCount != pRegions[i].dstSubresource.layerCount)
- {
+ if (pRegions[i].srcSubresource.layerCount != pRegions[i].dstSubresource.layerCount) {
char const str[] = "vkCmdCopyImage: number of layers in source and destination subresources must match";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
}
if (pRegions[i].srcSubresource.aspectMask != pRegions[i].dstSubresource.aspectMask) {
char const str[] = "vkCmdCopyImage: Src and dest aspectMasks for each region must match";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
}
if ((pRegions[i].srcSubresource.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) &&
(pRegions[i].srcSubresource.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) {
char const str[] = "vkCmdCopyImage aspectMask cannot specify both COLOR and DEPTH/STENCIL aspects";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str);
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str);
}
}
- if ((srcImageEntry != device_data->imageMap.end())
- && (dstImageEntry != device_data->imageMap.end())) {
+ if ((srcImageEntry != device_data->imageMap.end()) && (dstImageEntry != device_data->imageMap.end())) {
if (srcImageEntry->second.imageType != dstImageEntry->second.imageType) {
char const str[] = "vkCmdCopyImage called with unmatched source and dest image types.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_TYPE, "IMAGE", str);
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_TYPE, "IMAGE", str);
}
// Check that format is same size or exact stencil/depth
if (is_depth_format(srcImageEntry->second.format)) {
if (srcImageEntry->second.format != dstImageEntry->second.format) {
char const str[] = "vkCmdCopyImage called with unmatched source and dest image depth/stencil formats.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_FORMAT, "IMAGE", str);
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_FORMAT, "IMAGE", str);
}
} else {
size_t srcSize = vk_format_get_size(srcImageEntry->second.format);
size_t destSize = vk_format_get_size(dstImageEntry->second.format);
if (srcSize != destSize) {
char const str[] = "vkCmdCopyImage called with unmatched source and dest image format sizes.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_FORMAT, "IMAGE", str);
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_FORMAT, "IMAGE", str);
}
}
}
if (VK_FALSE == skipCall) {
- device_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage,
- srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
+ device_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
+ regionCount, pRegions);
}
}
-VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(
- VkCommandBuffer commandBuffer,
- uint32_t attachmentCount,
- const VkClearAttachment* pAttachments,
- uint32_t rectCount,
- const VkClearRect* pRects)
-{
+VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
+ const VkClearAttachment *pAttachments, uint32_t rectCount,
+ const VkClearRect *pRects) {
VkBool32 skipCall = VK_FALSE;
VkImageAspectFlags aspectMask;
layer_data *device_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
@@ -818,120 +759,104 @@ VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(
if (aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
if (aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) {
// VK_IMAGE_ASPECT_COLOR_BIT is not the only bit set for this attachment
- char const str[] = "vkCmdClearAttachments aspectMask [%d] must set only VK_IMAGE_ASPECT_COLOR_BIT of a color attachment.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str, i);
+ char const str[] =
+ "vkCmdClearAttachments aspectMask [%d] must set only VK_IMAGE_ASPECT_COLOR_BIT of a color attachment.";
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str, i);
}
} else {
// Image aspect must be depth or stencil or both
- if (((aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) &&
- ((aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT))
- {
- char const str[] = "vkCmdClearAttachments aspectMask [%d] must be set to VK_IMAGE_ASPECT_DEPTH_BIT and/or VK_IMAGE_ASPECT_STENCIL_BIT";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str, i);
+ if (((aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) &&
+ ((aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT)) {
+ char const str[] = "vkCmdClearAttachments aspectMask [%d] must be set to VK_IMAGE_ASPECT_DEPTH_BIT and/or "
+ "VK_IMAGE_ASPECT_STENCIL_BIT";
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str, i);
}
}
}
if (VK_FALSE == skipCall) {
- device_data->device_dispatch_table->CmdClearAttachments(commandBuffer,
- attachmentCount, pAttachments, rectCount, pRects);
+ device_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkBuffer dstBuffer,
- uint32_t regionCount,
- const VkBufferImageCopy *pRegions)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
+ VkImageLayout srcImageLayout, VkBuffer dstBuffer,
+ uint32_t regionCount, const VkBufferImageCopy *pRegions) {
VkBool32 skipCall = VK_FALSE;
layer_data *device_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
// For each region, the number of layers in the image subresource should not be zero
// Image aspect must be ONE OF color, depth, stencil
for (uint32_t i = 0; i < regionCount; i++) {
- if(pRegions[i].imageSubresource.layerCount == 0)
- {
+ if (pRegions[i].imageSubresource.layerCount == 0) {
char const str[] = "vkCmdCopyImageToBuffer: number of layers in image subresource is zero";
// TODO: Verify against Valid Use section of spec, if this case yields undefined results, then it's an error
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
}
VkImageAspectFlags aspectMask = pRegions[i].imageSubresource.aspectMask;
- if ((aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) &&
- (aspectMask != VK_IMAGE_ASPECT_DEPTH_BIT) &&
+ if ((aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) && (aspectMask != VK_IMAGE_ASPECT_DEPTH_BIT) &&
(aspectMask != VK_IMAGE_ASPECT_STENCIL_BIT)) {
char const str[] = "vkCmdCopyImageToBuffer: aspectMasks for each region must specify only COLOR or DEPTH or STENCIL";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str);
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str);
}
}
if (VK_FALSE == skipCall) {
- device_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer,
- srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
+ device_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
+ pRegions);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(
- VkCommandBuffer commandBuffer,
- VkBuffer srcBuffer,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount,
- const VkBufferImageCopy *pRegions)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
+ VkImage dstImage, VkImageLayout dstImageLayout,
+ uint32_t regionCount, const VkBufferImageCopy *pRegions) {
VkBool32 skipCall = VK_FALSE;
layer_data *device_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
// For each region, the number of layers in the image subresource should not be zero
// Image aspect must be ONE OF color, depth, stencil
for (uint32_t i = 0; i < regionCount; i++) {
- if(pRegions[i].imageSubresource.layerCount == 0)
- {
+ if (pRegions[i].imageSubresource.layerCount == 0) {
char const str[] = "vkCmdCopyBufferToImage: number of layers in image subresource is zero";
// TODO: Verify against Valid Use section of spec, if this case yields undefined results, then it's an error
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
}
VkImageAspectFlags aspectMask = pRegions[i].imageSubresource.aspectMask;
- if ((aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) &&
- (aspectMask != VK_IMAGE_ASPECT_DEPTH_BIT) &&
+ if ((aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) && (aspectMask != VK_IMAGE_ASPECT_DEPTH_BIT) &&
(aspectMask != VK_IMAGE_ASPECT_STENCIL_BIT)) {
char const str[] = "vkCmdCopyBufferToImage: aspectMasks for each region must specify only COLOR or DEPTH or STENCIL";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str);
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str);
}
}
if (VK_FALSE == skipCall) {
- device_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer,
- srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
+ device_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
+ pRegions);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount,
- const VkImageBlit *pRegions,
- VkFilter filter)
-{
- VkBool32 skipCall = VK_FALSE;
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
+ VkBool32 skipCall = VK_FALSE;
layer_data *device_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
- auto srcImageEntry = device_data->imageMap.find(srcImage);
+ auto srcImageEntry = device_data->imageMap.find(srcImage);
auto dstImageEntry = device_data->imageMap.find(dstImage);
- if ((srcImageEntry != device_data->imageMap.end()) &&
- (dstImageEntry != device_data->imageMap.end())) {
+ if ((srcImageEntry != device_data->imageMap.end()) && (dstImageEntry != device_data->imageMap.end())) {
VkFormat srcFormat = srcImageEntry->second.format;
VkFormat dstFormat = dstImageEntry->second.format;
@@ -943,44 +868,45 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(
ss << "vkCmdBlitImage: If one of srcImage and dstImage images has signed/unsigned integer format, "
<< "the other one must also have signed/unsigned integer format. "
<< "Source format is " << string_VkFormat(srcFormat) << " Destination format is " << string_VkFormat(dstFormat);
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_FORMAT, "IMAGE", "%s", ss.str().c_str());
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_FORMAT, "IMAGE", "%s", ss.str().c_str());
}
// Validate aspect bits and formats for depth/stencil images
- if (vk_format_is_depth_or_stencil(srcFormat) ||
- vk_format_is_depth_or_stencil(dstFormat)) {
+ if (vk_format_is_depth_or_stencil(srcFormat) || vk_format_is_depth_or_stencil(dstFormat)) {
if (srcFormat != dstFormat) {
std::stringstream ss;
ss << "vkCmdBlitImage: If one of srcImage and dstImage images has a format of depth, stencil or depth "
<< "stencil, the other one must have exactly the same format. "
<< "Source format is " << string_VkFormat(srcFormat) << " Destination format is " << string_VkFormat(dstFormat);
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_FORMAT, "IMAGE", "%s", ss.str().c_str());
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_FORMAT, "IMAGE", "%s", ss.str().c_str());
}
for (uint32_t i = 0; i < regionCount; i++) {
- if(pRegions[i].srcSubresource.layerCount == 0)
- {
+ if (pRegions[i].srcSubresource.layerCount == 0) {
char const str[] = "vkCmdBlitImage: number of layers in source subresource is zero";
// TODO: Verify against Valid Use section of spec, if this case yields undefined results, then it's an error
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
+ skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
+ IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
}
- if(pRegions[i].dstSubresource.layerCount == 0)
- {
+ if (pRegions[i].dstSubresource.layerCount == 0) {
char const str[] = "vkCmdBlitImage: number of layers in destination subresource is zero";
// TODO: Verify against Valid Use section of spec, if this case yields undefined results, then it's an error
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
+ skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
+ IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
}
- if(pRegions[i].srcSubresource.layerCount != pRegions[i].dstSubresource.layerCount)
- {
+ if (pRegions[i].srcSubresource.layerCount != pRegions[i].dstSubresource.layerCount) {
char const str[] = "vkCmdBlitImage: number of layers in source and destination subresources must match";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
+ skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
+ IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
}
VkImageAspectFlags srcAspect = pRegions[i].srcSubresource.aspectMask;
@@ -990,104 +916,91 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(
std::stringstream ss;
ss << "vkCmdBlitImage: Image aspects of depth/stencil images should match";
// TODO: Verify against Valid Use section of spec, if this case yields undefined results, then it's an error
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
+ skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
+ IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
}
if (vk_format_is_depth_and_stencil(srcFormat)) {
if ((srcAspect != VK_IMAGE_ASPECT_DEPTH_BIT) && (srcAspect != VK_IMAGE_ASPECT_STENCIL_BIT)) {
std::stringstream ss;
- ss << "vkCmdBlitImage: Combination depth/stencil image formats must have only one of VK_IMAGE_ASPECT_DEPTH_BIT "
+ ss << "vkCmdBlitImage: Combination depth/stencil image formats must have only one of "
+ "VK_IMAGE_ASPECT_DEPTH_BIT "
<< "and VK_IMAGE_ASPECT_STENCIL_BIT set in srcImage and dstImage";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
+ skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
+ IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
}
} else if (vk_format_is_stencil_only(srcFormat)) {
if (srcAspect != VK_IMAGE_ASPECT_STENCIL_BIT) {
std::stringstream ss;
ss << "vkCmdBlitImage: Stencil-only image formats must have only the VK_IMAGE_ASPECT_STENCIL_BIT "
<< "set in both the srcImage and dstImage";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
+ skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
+ IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
}
} else if (vk_format_is_depth_only(srcFormat)) {
if (srcAspect != VK_IMAGE_ASPECT_DEPTH_BIT) {
std::stringstream ss;
ss << "vkCmdBlitImage: Depth-only image formats must have only the VK_IMAGE_ASPECT_DEPTH "
<< "set in both the srcImage and dstImage";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
+ skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
+ IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
}
}
}
}
// Validate filter
- if (vk_format_is_depth_or_stencil(srcFormat) ||
- vk_format_is_int(srcFormat)) {
+ if (vk_format_is_depth_or_stencil(srcFormat) || vk_format_is_int(srcFormat)) {
if (filter != VK_FILTER_NEAREST) {
std::stringstream ss;
ss << "vkCmdBlitImage: If the format of srcImage is a depth, stencil, depth stencil or integer-based format "
<< "then filter must be VK_FILTER_NEAREST.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_FILTER, "IMAGE", "%s", ss.str().c_str());
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_FILTER, "IMAGE", "%s", ss.str().c_str());
}
}
}
- device_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage,
- srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter);
+ device_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
+ pRegions, filter);
}
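Editor's sketch: the filter validation at the end of vkCmdBlitImage above requires VK_FILTER_NEAREST whenever the source format is depth, stencil, depth/stencil, or integer. Expressed as a standalone predicate it might look like the following; vk_format_is_depth_or_stencil() and vk_format_is_int() are the helpers the layer already calls, and the wrapper name is hypothetical.
// Sketch only: true when 'filter' is permissible for blitting images of 'srcFormat'.
static bool blit_filter_allowed(VkFormat srcFormat, VkFilter filter) {
    if (vk_format_is_depth_or_stencil(srcFormat) || vk_format_is_int(srcFormat))
        return filter == VK_FILTER_NEAREST; // linear filtering of these formats is not allowed
    return true;
}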
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier(
- VkCommandBuffer commandBuffer,
- VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags dstStageMask,
- VkDependencyFlags dependencyFlags,
- uint32_t memoryBarrierCount,
- const VkMemoryBarrier *pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount,
- const VkBufferMemoryBarrier *pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount,
- const VkImageMemoryBarrier *pImageMemoryBarriers)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
+ VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
VkBool32 skipCall = VK_FALSE;
layer_data *device_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
- for (uint32_t i = 0; i < imageMemoryBarrierCount; ++i)
- {
- VkImageMemoryBarrier const*const barrier = (VkImageMemoryBarrier const*const) &pImageMemoryBarriers[i];
- if (barrier->sType == VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER)
- {
- if (barrier->subresourceRange.layerCount == 0)
- {
+ for (uint32_t i = 0; i < imageMemoryBarrierCount; ++i) {
+ VkImageMemoryBarrier const *const barrier = (VkImageMemoryBarrier const *const) & pImageMemoryBarriers[i];
+ if (barrier->sType == VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER) {
+ if (barrier->subresourceRange.layerCount == 0) {
std::stringstream ss;
ss << "vkCmdPipelineBarrier called with 0 in ppMemoryBarriers[" << i << "]->subresourceRange.layerCount.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
- 0, __LINE__, IMAGE_INVALID_IMAGE_RESOURCE, "IMAGE", "%s", ss.str().c_str());
+ skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
+ __LINE__, IMAGE_INVALID_IMAGE_RESOURCE, "IMAGE", "%s", ss.str().c_str());
}
}
}
- if (skipCall)
- {
+ if (skipCall) {
return;
}
device_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
- memoryBarrierCount, pMemoryBarriers,
- bufferMemoryBarrierCount, pBufferMemoryBarriers,
- imageMemoryBarrierCount, pImageMemoryBarriers);
+ memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
+ pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount,
- const VkImageResolve *pRegions)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
VkBool32 skipCall = VK_FALSE;
layer_data *device_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
auto srcImageEntry = device_data->imageMap.find(srcImage);
@@ -1096,70 +1009,71 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage(
// For each region, the number of layers in the image subresource should not be zero
// For each region, src and dest image aspect must be color only
for (uint32_t i = 0; i < regionCount; i++) {
- if(pRegions[i].srcSubresource.layerCount == 0)
- {
+ if (pRegions[i].srcSubresource.layerCount == 0) {
char const str[] = "vkCmdResolveImage: number of layers in source subresource is zero";
// TODO: Verify against Valid Use section of spec. Generally if something yield an undefined result, it's invalid/error
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
}
- if(pRegions[i].dstSubresource.layerCount == 0)
- {
+ if (pRegions[i].dstSubresource.layerCount == 0) {
char const str[] = "vkCmdResolveImage: number of layers in destination subresource is zero";
// TODO: Verify against Valid Use section of spec. Generally if something yield an undefined result, it's invalid/error
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_ASPECT, "IMAGE", str);
}
- if ((pRegions[i].srcSubresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) ||
+ if ((pRegions[i].srcSubresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) ||
(pRegions[i].dstSubresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT)) {
- char const str[] = "vkCmdResolveImage: src and dest aspectMasks for each region must specify only VK_IMAGE_ASPECT_COLOR_BIT";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str);
+ char const str[] =
+ "vkCmdResolveImage: src and dest aspectMasks for each region must specify only VK_IMAGE_ASPECT_COLOR_BIT";
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", str);
}
}
- if ((srcImageEntry != device_data->imageMap.end()) &&
- (dstImageEntry != device_data->imageMap.end())) {
+ if ((srcImageEntry != device_data->imageMap.end()) && (dstImageEntry != device_data->imageMap.end())) {
if (srcImageEntry->second.format != dstImageEntry->second.format) {
- char const str[] = "vkCmdResolveImage called with unmatched source and dest formats.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_FORMAT, "IMAGE", str);
+ char const str[] = "vkCmdResolveImage called with unmatched source and dest formats.";
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_FORMAT, "IMAGE", str);
}
if (srcImageEntry->second.imageType != dstImageEntry->second.imageType) {
- char const str[] = "vkCmdResolveImage called with unmatched source and dest image types.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_TYPE, "IMAGE", str);
+ char const str[] = "vkCmdResolveImage called with unmatched source and dest image types.";
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_MISMATCHED_IMAGE_TYPE, "IMAGE", str);
}
if (srcImageEntry->second.samples == VK_SAMPLE_COUNT_1_BIT) {
- char const str[] = "vkCmdResolveImage called with source sample count less than 2.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_RESOLVE_SAMPLES, "IMAGE", str);
+ char const str[] = "vkCmdResolveImage called with source sample count less than 2.";
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_RESOLVE_SAMPLES, "IMAGE", str);
}
if (dstImageEntry->second.samples != VK_SAMPLE_COUNT_1_BIT) {
- char const str[] = "vkCmdResolveImage called with dest sample count greater than 1.";
- skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
- (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_RESOLVE_SAMPLES, "IMAGE", str);
+ char const str[] = "vkCmdResolveImage called with dest sample count greater than 1.";
+ skipCall |=
+ log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, IMAGE_INVALID_RESOLVE_SAMPLES, "IMAGE", str);
}
}
if (VK_FALSE == skipCall) {
- device_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage,
- srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
+ device_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
+ regionCount, pRegions);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout(
- VkDevice device,
- VkImage image,
- const VkImageSubresource *pSubresource,
- VkSubresourceLayout *pLayout)
-{
- VkBool32 skipCall = VK_FALSE;
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource, VkSubresourceLayout *pLayout) {
+ VkBool32 skipCall = VK_FALSE;
layer_data *device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkFormat format;
+ VkFormat format;
auto imageEntry = device_data->imageMap.find(image);
@@ -1169,67 +1083,67 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout(
if (vk_format_is_color(format)) {
if (pSubresource->aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) {
std::stringstream ss;
- ss << "vkGetImageSubresourceLayout: For color formats, the aspectMask field of VkImageSubresource must be VK_IMAGE_ASPECT_COLOR.";
+ ss << "vkGetImageSubresourceLayout: For color formats, the aspectMask field of VkImageSubresource must be "
+ "VK_IMAGE_ASPECT_COLOR.";
skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
- (uint64_t)image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
+ (uint64_t)image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
}
} else if (vk_format_is_depth_or_stencil(format)) {
if ((pSubresource->aspectMask != VK_IMAGE_ASPECT_DEPTH_BIT) &&
(pSubresource->aspectMask != VK_IMAGE_ASPECT_STENCIL_BIT)) {
std::stringstream ss;
- ss << "vkGetImageSubresourceLayout: For depth/stencil formats, the aspectMask selects either the depth or stencil image aspectMask.";
+ ss << "vkGetImageSubresourceLayout: For depth/stencil formats, the aspectMask selects either the depth or stencil "
+ "image aspectMask.";
skipCall |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
- (uint64_t)image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
+ (uint64_t)image, __LINE__, IMAGE_INVALID_IMAGE_ASPECT, "IMAGE", "%s", ss.str().c_str());
}
}
}
if (VK_FALSE == skipCall) {
- device_data->device_dispatch_table->GetImageSubresourceLayout(device,
- image, pSubresource, pLayout);
+ device_data->device_dispatch_table->GetImageSubresourceLayout(device, image, pSubresource, pLayout);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties *pProperties) {
layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
phy_dev_data->instance_dispatch_table->GetPhysicalDeviceProperties(physicalDevice, pProperties);
}
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char* funcName)
-{
+VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char *funcName) {
if (!strcmp(funcName, "vkGetDeviceProcAddr"))
- return (PFN_vkVoidFunction) vkGetDeviceProcAddr;
+ return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
if (!strcmp(funcName, "vkDestroyDevice"))
- return (PFN_vkVoidFunction) vkDestroyDevice;
+ return (PFN_vkVoidFunction)vkDestroyDevice;
if (!strcmp(funcName, "vkCreateImage"))
- return (PFN_vkVoidFunction) vkCreateImage;
+ return (PFN_vkVoidFunction)vkCreateImage;
if (!strcmp(funcName, "vkDestroyImage"))
- return (PFN_vkVoidFunction) vkDestroyImage;
+ return (PFN_vkVoidFunction)vkDestroyImage;
if (!strcmp(funcName, "vkCreateImageView"))
- return (PFN_vkVoidFunction) vkCreateImageView;
+ return (PFN_vkVoidFunction)vkCreateImageView;
if (!strcmp(funcName, "vkCreateRenderPass"))
- return (PFN_vkVoidFunction) vkCreateRenderPass;
+ return (PFN_vkVoidFunction)vkCreateRenderPass;
if (!strcmp(funcName, "vkCmdClearColorImage"))
- return (PFN_vkVoidFunction) vkCmdClearColorImage;
+ return (PFN_vkVoidFunction)vkCmdClearColorImage;
if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
- return (PFN_vkVoidFunction) vkCmdClearDepthStencilImage;
+ return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
if (!strcmp(funcName, "vkCmdClearAttachments"))
- return (PFN_vkVoidFunction) vkCmdClearAttachments;
+ return (PFN_vkVoidFunction)vkCmdClearAttachments;
if (!strcmp(funcName, "vkCmdCopyImage"))
- return (PFN_vkVoidFunction) vkCmdCopyImage;
+ return (PFN_vkVoidFunction)vkCmdCopyImage;
if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
- return (PFN_vkVoidFunction) vkCmdCopyImageToBuffer;
+ return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
- return (PFN_vkVoidFunction) vkCmdCopyBufferToImage;
+ return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
if (!strcmp(funcName, "vkCmdBlitImage"))
- return (PFN_vkVoidFunction) vkCmdBlitImage;
+ return (PFN_vkVoidFunction)vkCmdBlitImage;
if (!strcmp(funcName, "vkCmdPipelineBarrier"))
- return (PFN_vkVoidFunction) vkCmdPipelineBarrier;
+ return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
if (!strcmp(funcName, "vkCmdResolveImage"))
- return (PFN_vkVoidFunction) vkCmdResolveImage;
+ return (PFN_vkVoidFunction)vkCmdResolveImage;
if (!strcmp(funcName, "vkGetImageSubresourceLayout"))
- return (PFN_vkVoidFunction) vkGetImageSubresourceLayout;
+ return (PFN_vkVoidFunction)vkGetImageSubresourceLayout;
if (device == NULL) {
return NULL;
@@ -1237,7 +1151,7 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkD
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkLayerDispatchTable* pTable = my_data->device_dispatch_table;
+ VkLayerDispatchTable *pTable = my_data->device_dispatch_table;
{
if (pTable->GetDeviceProcAddr == NULL)
return NULL;
@@ -1245,26 +1159,25 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkD
}
}
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char* funcName)
-{
+VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
if (!strcmp(funcName, "vkGetInstanceProcAddr"))
- return (PFN_vkVoidFunction) vkGetInstanceProcAddr;
+ return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
if (!strcmp(funcName, "vkCreateInstance"))
- return (PFN_vkVoidFunction) vkCreateInstance;
+ return (PFN_vkVoidFunction)vkCreateInstance;
if (!strcmp(funcName, "vkDestroyInstance"))
- return (PFN_vkVoidFunction) vkDestroyInstance;
+ return (PFN_vkVoidFunction)vkDestroyInstance;
if (!strcmp(funcName, "vkCreateDevice"))
- return (PFN_vkVoidFunction) vkCreateDevice;
+ return (PFN_vkVoidFunction)vkCreateDevice;
if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
- return (PFN_vkVoidFunction) vkEnumerateInstanceLayerProperties;
+ return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
- return (PFN_vkVoidFunction) vkEnumerateInstanceExtensionProperties;
+ return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
- return (PFN_vkVoidFunction) vkEnumerateDeviceLayerProperties;
+ return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
- return (PFN_vkVoidFunction) vkEnumerateDeviceExtensionProperties;
+ return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;
if (!strcmp(funcName, "vkGetPhysicalDeviceProperties"))
- return (PFN_vkVoidFunction) vkGetPhysicalDeviceProperties;
+ return (PFN_vkVoidFunction)vkGetPhysicalDeviceProperties;
if (instance == NULL) {
return NULL;
@@ -1273,10 +1186,10 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(V
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
PFN_vkVoidFunction fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
- if(fptr)
+ if (fptr)
return fptr;
- VkLayerInstanceDispatchTable* pTable = my_data->instance_dispatch_table;
+ VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
if (pTable->GetInstanceProcAddr == NULL)
return NULL;
return pTable->GetInstanceProcAddr(instance, funcName);
diff --git a/layers/image.h b/layers/image.h
index 3ee3500ab..bec5bb9e2 100644
--- a/layers/image.h
+++ b/layers/image.h
@@ -33,12 +33,12 @@
#include "vk_layer_logging.h"
// Image ERROR codes
-typedef enum _IMAGE_ERROR
-{
+typedef enum _IMAGE_ERROR {
IMAGE_NONE, // Used for INFO & other non-error messages
IMAGE_FORMAT_UNSUPPORTED, // Request to create Image or RenderPass with a format that is not supported
IMAGE_RENDERPASS_INVALID_ATTACHMENT, // Invalid image layouts and/or load/storeOps for an attachment when creating RenderPass
- IMAGE_RENDERPASS_INVALID_DS_ATTACHMENT, // If no depth attachment for a RenderPass, verify that subpass DS attachment is set to UNUSED
+ IMAGE_RENDERPASS_INVALID_DS_ATTACHMENT, // If no depth attachment for a RenderPass, verify that subpass DS attachment is set to
+ // UNUSED
IMAGE_INVALID_IMAGE_ASPECT, // Image aspect mask bits are invalid for this API call
IMAGE_MISMATCHED_IMAGE_ASPECT, // Image aspect masks for source and dest images do not match
IMAGE_VIEW_CREATE_ERROR, // Error occurred trying to create Image View
@@ -52,25 +52,21 @@ typedef enum _IMAGE_ERROR
IMAGE_INVALID_LAYOUT, // Operation specifies an invalid layout.
} IMAGE_ERROR;
-typedef struct _IMAGE_STATE
-{
- uint32_t mipLevels;
- uint32_t arraySize;
- VkFormat format;
+typedef struct _IMAGE_STATE {
+ uint32_t mipLevels;
+ uint32_t arraySize;
+ VkFormat format;
VkSampleCountFlagBits samples;
- VkImageType imageType;
- VkExtent3D extent;
- VkImageCreateFlags flags;
- _IMAGE_STATE():mipLevels(0), arraySize(0), format(VK_FORMAT_UNDEFINED), samples(VK_SAMPLE_COUNT_1_BIT), imageType(VK_IMAGE_TYPE_RANGE_SIZE), extent{}, flags(0) {};
- _IMAGE_STATE(const VkImageCreateInfo* pCreateInfo):
- mipLevels(pCreateInfo->mipLevels),
- arraySize(pCreateInfo->arrayLayers),
- format(pCreateInfo->format),
- samples(pCreateInfo->samples),
- imageType(pCreateInfo->imageType),
- extent(pCreateInfo->extent),
- flags(pCreateInfo->flags)
- {};
+ VkImageType imageType;
+ VkExtent3D extent;
+ VkImageCreateFlags flags;
+ _IMAGE_STATE()
+ : mipLevels(0), arraySize(0), format(VK_FORMAT_UNDEFINED), samples(VK_SAMPLE_COUNT_1_BIT),
+ imageType(VK_IMAGE_TYPE_RANGE_SIZE), extent{}, flags(0){};
+ _IMAGE_STATE(const VkImageCreateInfo *pCreateInfo)
+ : mipLevels(pCreateInfo->mipLevels), arraySize(pCreateInfo->arrayLayers), format(pCreateInfo->format),
+ samples(pCreateInfo->samples), imageType(pCreateInfo->imageType), extent(pCreateInfo->extent),
+ flags(pCreateInfo->flags){};
} IMAGE_STATE;
#endif // IMAGE_H
diff --git a/layers/mem_tracker.cpp b/layers/mem_tracker.cpp
index 3f966eab2..92c896220 100644
--- a/layers/mem_tracker.cpp
+++ b/layers/mem_tracker.cpp
@@ -54,45 +54,41 @@ using namespace std;
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
struct layer_data {
- debug_report_data *report_data;
- std::vector<VkDebugReportCallbackEXT> logging_callback;
- VkLayerDispatchTable *device_dispatch_table;
- VkLayerInstanceDispatchTable *instance_dispatch_table;
- VkBool32 wsi_enabled;
- uint64_t currentFenceId;
- VkPhysicalDeviceProperties properties;
- unordered_map<VkDeviceMemory, vector<MEMORY_RANGE>> bufferRanges, imageRanges;
+ debug_report_data *report_data;
+ std::vector<VkDebugReportCallbackEXT> logging_callback;
+ VkLayerDispatchTable *device_dispatch_table;
+ VkLayerInstanceDispatchTable *instance_dispatch_table;
+ VkBool32 wsi_enabled;
+ uint64_t currentFenceId;
+ VkPhysicalDeviceProperties properties;
+ unordered_map<VkDeviceMemory, vector<MEMORY_RANGE>> bufferRanges, imageRanges;
// Maps for tracking key structs related to mem_tracker state
- unordered_map<VkCommandBuffer, MT_CB_INFO> cbMap;
- unordered_map<VkCommandPool, MT_CMD_POOL_INFO> commandPoolMap;
- unordered_map<VkDeviceMemory, MT_MEM_OBJ_INFO> memObjMap;
- unordered_map<VkFence, MT_FENCE_INFO> fenceMap;
- unordered_map<VkQueue, MT_QUEUE_INFO> queueMap;
- unordered_map<VkSwapchainKHR, MT_SWAP_CHAIN_INFO*> swapchainMap;
- unordered_map<VkSemaphore, MtSemaphoreState> semaphoreMap;
- unordered_map<VkFramebuffer, MT_FB_INFO> fbMap;
- unordered_map<VkRenderPass, MT_PASS_INFO> passMap;
- unordered_map<VkImageView, MT_IMAGE_VIEW_INFO> imageViewMap;
- unordered_map<VkDescriptorSet, MT_DESCRIPTOR_SET_INFO> descriptorSetMap;
+ unordered_map<VkCommandBuffer, MT_CB_INFO> cbMap;
+ unordered_map<VkCommandPool, MT_CMD_POOL_INFO> commandPoolMap;
+ unordered_map<VkDeviceMemory, MT_MEM_OBJ_INFO> memObjMap;
+ unordered_map<VkFence, MT_FENCE_INFO> fenceMap;
+ unordered_map<VkQueue, MT_QUEUE_INFO> queueMap;
+ unordered_map<VkSwapchainKHR, MT_SWAP_CHAIN_INFO *> swapchainMap;
+ unordered_map<VkSemaphore, MtSemaphoreState> semaphoreMap;
+ unordered_map<VkFramebuffer, MT_FB_INFO> fbMap;
+ unordered_map<VkRenderPass, MT_PASS_INFO> passMap;
+ unordered_map<VkImageView, MT_IMAGE_VIEW_INFO> imageViewMap;
+ unordered_map<VkDescriptorSet, MT_DESCRIPTOR_SET_INFO> descriptorSetMap;
// Images and Buffers are 2 objects that can have memory bound to them so they get special treatment
- unordered_map<uint64_t, MT_OBJ_BINDING_INFO> imageMap;
- unordered_map<uint64_t, MT_OBJ_BINDING_INFO> bufferMap;
+ unordered_map<uint64_t, MT_OBJ_BINDING_INFO> imageMap;
+ unordered_map<uint64_t, MT_OBJ_BINDING_INFO> bufferMap;
unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
- layer_data() :
- report_data(nullptr),
- device_dispatch_table(nullptr),
- instance_dispatch_table(nullptr),
- wsi_enabled(VK_FALSE),
- currentFenceId(1)
- {};
+ layer_data()
+ : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), wsi_enabled(VK_FALSE),
+ currentFenceId(1){};
};
static unordered_map<void *, layer_data *> layer_data_map;
static VkPhysicalDeviceMemoryProperties memProps;
-static VkBool32 clear_cmd_buf_and_mem_references(layer_data* my_data, const VkCommandBuffer cb);
+static VkBool32 clear_cmd_buf_and_mem_references(layer_data *my_data, const VkCommandBuffer cb);
// TODO : This can be much smarter, using separate locks for separate global data
static int globalLockInitialized = 0;
@@ -100,87 +96,55 @@ static loader_platform_thread_mutex globalLock;
#define MAX_BINDING 0xFFFFFFFF
-static MT_OBJ_BINDING_INFO*
- get_object_binding_info(
- layer_data *my_data,
- uint64_t handle,
- VkDebugReportObjectTypeEXT type)
-{
- MT_OBJ_BINDING_INFO* retValue = NULL;
- switch (type)
- {
- case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
- {
- auto it = my_data->imageMap.find(handle);
- if (it != my_data->imageMap.end())
- return &(*it).second;
- break;
- }
- case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
- {
- auto it = my_data->bufferMap.find(handle);
- if (it != my_data->bufferMap.end())
- return &(*it).second;
- break;
- }
- default:
- break;
+static MT_OBJ_BINDING_INFO *get_object_binding_info(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
+ MT_OBJ_BINDING_INFO *retValue = NULL;
+ switch (type) {
+ case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
+ auto it = my_data->imageMap.find(handle);
+ if (it != my_data->imageMap.end())
+ return &(*it).second;
+ break;
+ }
+ case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
+ auto it = my_data->bufferMap.find(handle);
+ if (it != my_data->bufferMap.end())
+ return &(*it).second;
+ break;
+ }
+ default:
+ break;
}
return retValue;
}
-template layer_data *get_my_data_ptr<layer_data>(
- void *data_key,
- std::unordered_map<void *, layer_data *> &data_map);
+template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);
// Add new queue for this device to map container
-static void
-add_queue_info(
- layer_data *my_data,
- const VkQueue queue)
-{
- MT_QUEUE_INFO* pInfo = &my_data->queueMap[queue];
- pInfo->lastRetiredId = 0;
+static void add_queue_info(layer_data *my_data, const VkQueue queue) {
+ MT_QUEUE_INFO *pInfo = &my_data->queueMap[queue];
+ pInfo->lastRetiredId = 0;
pInfo->lastSubmittedId = 0;
}
-static void
-delete_queue_info_list(
- layer_data* my_data)
-{
+static void delete_queue_info_list(layer_data *my_data) {
// Process queue list, cleaning up each entry before deleting
my_data->queueMap.clear();
}
-static void
-add_swap_chain_info(
- layer_data *my_data,
- const VkSwapchainKHR swapchain,
- const VkSwapchainCreateInfoKHR *pCI)
-{
- MT_SWAP_CHAIN_INFO* pInfo = new MT_SWAP_CHAIN_INFO;
+static void add_swap_chain_info(layer_data *my_data, const VkSwapchainKHR swapchain, const VkSwapchainCreateInfoKHR *pCI) {
+ MT_SWAP_CHAIN_INFO *pInfo = new MT_SWAP_CHAIN_INFO;
memcpy(&pInfo->createInfo, pCI, sizeof(VkSwapchainCreateInfoKHR));
my_data->swapchainMap[swapchain] = pInfo;
}
// Add new CBInfo for this cb to map container
-static void
-add_cmd_buf_info(
- layer_data *my_data,
- VkCommandPool commandPool,
- const VkCommandBuffer cb)
-{
+static void add_cmd_buf_info(layer_data *my_data, VkCommandPool commandPool, const VkCommandBuffer cb) {
my_data->cbMap[cb].commandBuffer = cb;
my_data->commandPoolMap[commandPool].pCommandBuffers.push_front(cb);
}
// Delete CBInfo from container and clear mem references to CB
-static VkBool32
-delete_cmd_buf_info(
- layer_data *my_data,
- VkCommandPool commandPool,
- const VkCommandBuffer cb)
-{
+static VkBool32 delete_cmd_buf_info(layer_data *my_data, VkCommandPool commandPool, const VkCommandBuffer cb) {
VkBool32 result = VK_TRUE;
result = clear_cmd_buf_and_mem_references(my_data, cb);
// Delete the CBInfo info
@@ -192,11 +156,7 @@ delete_cmd_buf_info(
}
// Return ptr to Info in CB map, or NULL if not found
-static MT_CB_INFO*
-get_cmd_buf_info(
- layer_data *my_data,
- const VkCommandBuffer cb)
-{
+static MT_CB_INFO *get_cmd_buf_info(layer_data *my_data, const VkCommandBuffer cb) {
auto item = my_data->cbMap.find(cb);
if (item != my_data->cbMap.end()) {
return &(*item).second;
@@ -205,96 +165,75 @@ get_cmd_buf_info(
}
}
-static void
-add_object_binding_info(
- layer_data *my_data,
- const uint64_t handle,
- const VkDebugReportObjectTypeEXT type,
- const VkDeviceMemory mem)
-{
- switch (type)
- {
- // Buffers and images are unique as their CreateInfo is in container struct
- case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
- {
- auto pCI = &my_data->bufferMap[handle];
- pCI->mem = mem;
- break;
- }
- case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
- {
- auto pCI = &my_data->imageMap[handle];
- pCI->mem = mem;
- break;
- }
- default:
- break;
- }
-}
-
-static void
-add_object_create_info(
- layer_data *my_data,
- const uint64_t handle,
- const VkDebugReportObjectTypeEXT type,
- const void *pCreateInfo)
-{
+static void add_object_binding_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
+ const VkDeviceMemory mem) {
+ switch (type) {
+ // Buffers and images are unique as their CreateInfo is in container struct
+ case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
+ auto pCI = &my_data->bufferMap[handle];
+ pCI->mem = mem;
+ break;
+ }
+ case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
+ auto pCI = &my_data->imageMap[handle];
+ pCI->mem = mem;
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static void add_object_create_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
+ const void *pCreateInfo) {
// TODO : For any CreateInfo struct that has ptrs, need to deep copy them and appropriately clean up on Destroy
- switch (type)
- {
- // Buffers and images are unique as their CreateInfo is in container struct
- case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
- {
- auto pCI = &my_data->bufferMap[handle];
- memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
- memcpy(&pCI->create_info.buffer, pCreateInfo, sizeof(VkBufferCreateInfo));
- break;
- }
- case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
- {
- auto pCI = &my_data->imageMap[handle];
- memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
- memcpy(&pCI->create_info.image, pCreateInfo, sizeof(VkImageCreateInfo));
- break;
- }
- // Swap Chain is very unique, use my_data->imageMap, but copy in
- // SwapChainCreatInfo's usage flags and set the mem value to a unique key. These is used by
- // vkCreateImageView and internal mem_tracker routines to distinguish swap chain images
- case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
- {
- auto pCI = &my_data->imageMap[handle];
- memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
- pCI->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
- pCI->valid = false;
- pCI->create_info.image.usage =
- const_cast<VkSwapchainCreateInfoKHR*>(static_cast<const VkSwapchainCreateInfoKHR *>(pCreateInfo))->imageUsage;
- break;
- }
- default:
- break;
+ switch (type) {
+ // Buffers and images are unique as their CreateInfo is in container struct
+ case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
+ auto pCI = &my_data->bufferMap[handle];
+ memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
+ memcpy(&pCI->create_info.buffer, pCreateInfo, sizeof(VkBufferCreateInfo));
+ break;
+ }
+ case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
+ auto pCI = &my_data->imageMap[handle];
+ memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
+ memcpy(&pCI->create_info.image, pCreateInfo, sizeof(VkImageCreateInfo));
+ break;
+ }
+ // Swap Chain is very unique, use my_data->imageMap, but copy in
+    // SwapchainCreateInfo's usage flags and set the mem value to a unique key. These are used by
+ // vkCreateImageView and internal mem_tracker routines to distinguish swap chain images
+ case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT: {
+ auto pCI = &my_data->imageMap[handle];
+ memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
+ pCI->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
+ pCI->valid = false;
+ pCI->create_info.image.usage =
+ const_cast<VkSwapchainCreateInfoKHR *>(static_cast<const VkSwapchainCreateInfoKHR *>(pCreateInfo))->imageUsage;
+ break;
+ }
+ default:
+ break;
}
}
// Add a fence, creating one if necessary to our list of fences/fenceIds
-static VkBool32
-add_fence_info(
- layer_data *my_data,
- VkFence fence,
- VkQueue queue,
- uint64_t *fenceId)
-{
+static VkBool32 add_fence_info(layer_data *my_data, VkFence fence, VkQueue queue, uint64_t *fenceId) {
VkBool32 skipCall = VK_FALSE;
*fenceId = my_data->currentFenceId++;
// If no fence, create an internal fence to track the submissions
if (fence != VK_NULL_HANDLE) {
my_data->fenceMap[fence].fenceId = *fenceId;
- my_data->fenceMap[fence].queue = queue;
+ my_data->fenceMap[fence].queue = queue;
// Validate that fence is in UNSIGNALED state
- VkFenceCreateInfo* pFenceCI = &(my_data->fenceMap[fence].createInfo);
+ VkFenceCreateInfo *pFenceCI = &(my_data->fenceMap[fence].createInfo);
if (pFenceCI->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, (uint64_t) fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
- "Fence %#" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted", (uint64_t) fence);
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
+ (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
+ "Fence %#" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted",
+ (uint64_t)fence);
}
} else {
// TODO : Do we need to create an internal fence here for tracking purposes?
@@ -305,20 +244,10 @@ add_fence_info(
}
// Remove a fenceInfo from our list of fences/fenceIds
-static void
-delete_fence_info(
- layer_data *my_data,
- VkFence fence)
-{
- my_data->fenceMap.erase(fence);
-}
+static void delete_fence_info(layer_data *my_data, VkFence fence) { my_data->fenceMap.erase(fence); }
// Record information when a fence is known to be signalled
-static void
-update_fence_tracking(
- layer_data *my_data,
- VkFence fence)
-{
+static void update_fence_tracking(layer_data *my_data, VkFence fence) {
auto fence_item = my_data->fenceMap.find(fence);
if (fence_item != my_data->fenceMap.end()) {
MT_FENCE_INFO *pCurFenceInfo = &(*fence_item).second;
@@ -338,25 +267,17 @@ update_fence_tracking(
}
// Helper routine that updates the fence list for a specific queue to all-retired
-static void
-retire_queue_fences(
- layer_data *my_data,
- VkQueue queue)
-{
+static void retire_queue_fences(layer_data *my_data, VkQueue queue) {
MT_QUEUE_INFO *pQueueInfo = &my_data->queueMap[queue];
// Set queue's lastRetired to lastSubmitted indicating all fences completed
pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
}
// Helper routine that updates all queues to all-retired
-static void
-retire_device_fences(
- layer_data *my_data,
- VkDevice device)
-{
+static void retire_device_fences(layer_data *my_data, VkDevice device) {
// Process each queue for device
// TODO: Add multiple device support
- for (auto ii=my_data->queueMap.begin(); ii!=my_data->queueMap.end(); ++ii) {
+ for (auto ii = my_data->queueMap.begin(); ii != my_data->queueMap.end(); ++ii) {
// Set queue's lastRetired to lastSubmitted indicating all fences completed
MT_QUEUE_INFO *pQueueInfo = &(*ii).second;
pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
@@ -367,28 +288,19 @@ retire_device_fences(
// Verify that (actual & desired) flags != 0 or,
// if strict is true, verify that (actual & desired) flags == desired
// In case of error, report it via dbg callbacks
-static VkBool32
-validate_usage_flags(
- layer_data *my_data,
- void *disp_obj,
- VkFlags actual,
- VkFlags desired,
- VkBool32 strict,
- uint64_t obj_handle,
- VkDebugReportObjectTypeEXT obj_type,
- char const *ty_str,
- char const *func_name,
- char const *usage_str)
-{
+static VkBool32 validate_usage_flags(layer_data *my_data, void *disp_obj, VkFlags actual, VkFlags desired, VkBool32 strict,
+ uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
+ char const *func_name, char const *usage_str) {
VkBool32 correct_usage = VK_FALSE;
- VkBool32 skipCall = VK_FALSE;
+ VkBool32 skipCall = VK_FALSE;
if (strict)
correct_usage = ((actual & desired) == desired);
else
correct_usage = ((actual & desired) != 0);
if (!correct_usage) {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
- "Invalid usage flag for %s %#" PRIxLEAST64 " used by %s. In this case, %s should have %s set during creation.",
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
+ MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s %#" PRIxLEAST64
+ " used by %s. In this case, %s should have %s set during creation.",
ty_str, obj_handle, func_name, ty_str, usage_str);
}
return skipCall;
@@ -397,21 +309,13 @@ validate_usage_flags(
// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
-static VkBool32
-validate_image_usage_flags(
- layer_data *my_data,
- void *disp_obj,
- VkImage image,
- VkFlags desired,
- VkBool32 strict,
- char const *func_name,
- char const *usage_string)
-{
+static VkBool32 validate_image_usage_flags(layer_data *my_data, void *disp_obj, VkImage image, VkFlags desired, VkBool32 strict,
+ char const *func_name, char const *usage_string) {
VkBool32 skipCall = VK_FALSE;
- MT_OBJ_BINDING_INFO* pBindInfo = get_object_binding_info(my_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
+ MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
if (pBindInfo) {
- skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.image.usage, desired, strict,
- (uint64_t) image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
+ skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.image.usage, desired, strict, (uint64_t)image,
+ VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
}
return skipCall;
}
@@ -419,32 +323,20 @@ validate_image_usage_flags(
// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
-static VkBool32
-validate_buffer_usage_flags(
- layer_data *my_data,
- void *disp_obj,
- VkBuffer buffer,
- VkFlags desired,
- VkBool32 strict,
- char const *func_name,
- char const *usage_string)
-{
+static VkBool32 validate_buffer_usage_flags(layer_data *my_data, void *disp_obj, VkBuffer buffer, VkFlags desired, VkBool32 strict,
+ char const *func_name, char const *usage_string) {
VkBool32 skipCall = VK_FALSE;
- MT_OBJ_BINDING_INFO* pBindInfo = get_object_binding_info(my_data, (uint64_t) buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
+ MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
if (pBindInfo) {
- skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.buffer.usage, desired, strict,
- (uint64_t) buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
+ skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.buffer.usage, desired, strict, (uint64_t)buffer,
+ VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
}
return skipCall;
}
// Return ptr to info in map container containing mem, or NULL if not found
// Calls to this function should be wrapped in mutex
-static MT_MEM_OBJ_INFO*
-get_mem_obj_info(
- layer_data *my_data,
- const VkDeviceMemory mem)
-{
+static MT_MEM_OBJ_INFO *get_mem_obj_info(layer_data *my_data, const VkDeviceMemory mem) {
auto item = my_data->memObjMap.find(mem);
if (item != my_data->memObjMap.end()) {
return &(*item).second;
@@ -453,43 +345,40 @@ get_mem_obj_info(
}
}
-static void
-add_mem_obj_info(
- layer_data *my_data,
- void *object,
- const VkDeviceMemory mem,
- const VkMemoryAllocateInfo *pAllocateInfo)
-{
+static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
+ const VkMemoryAllocateInfo *pAllocateInfo) {
assert(object != NULL);
memcpy(&my_data->memObjMap[mem].allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
// TODO: Update for real hardware, actually process allocation info structures
my_data->memObjMap[mem].allocInfo.pNext = NULL;
- my_data->memObjMap[mem].object = object;
- my_data->memObjMap[mem].refCount = 0;
- my_data->memObjMap[mem].mem = mem;
+ my_data->memObjMap[mem].object = object;
+ my_data->memObjMap[mem].refCount = 0;
+ my_data->memObjMap[mem].mem = mem;
my_data->memObjMap[mem].memRange.offset = 0;
- my_data->memObjMap[mem].memRange.size = 0;
- my_data->memObjMap[mem].pData = 0;
- my_data->memObjMap[mem].pDriverData = 0;
- my_data->memObjMap[mem].valid = false;
+ my_data->memObjMap[mem].memRange.size = 0;
+ my_data->memObjMap[mem].pData = 0;
+ my_data->memObjMap[mem].pDriverData = 0;
+ my_data->memObjMap[mem].valid = false;
}
-static VkBool32 validate_memory_is_valid(layer_data *my_data, VkDeviceMemory mem, const char* functionName, VkImage image = VK_NULL_HANDLE) {
+static VkBool32 validate_memory_is_valid(layer_data *my_data, VkDeviceMemory mem, const char *functionName,
+ VkImage image = VK_NULL_HANDLE) {
if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
- MT_OBJ_BINDING_INFO* pBindInfo = get_object_binding_info(my_data, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
+ MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
if (pBindInfo && !pBindInfo->valid) {
return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
- (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
- "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using.", functionName, (uint64_t)(image));
+ (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
+ "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using.",
+ functionName, (uint64_t)(image));
}
- }
- else {
+ } else {
MT_MEM_OBJ_INFO *pMemObj = get_mem_obj_info(my_data, mem);
if (pMemObj && !pMemObj->valid) {
return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
- (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
- "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using.", functionName, (uint64_t)(mem));
+ (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
+ "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using.", functionName,
+ (uint64_t)(mem));
}
}
return false;
@@ -497,7 +386,7 @@ static VkBool32 validate_memory_is_valid(layer_data *my_data, VkDeviceMemory mem
static void set_memory_valid(layer_data *my_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
- MT_OBJ_BINDING_INFO* pBindInfo = get_object_binding_info(my_data, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
+ MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
if (pBindInfo) {
pBindInfo->valid = valid;
}
@@ -511,25 +400,21 @@ static void set_memory_valid(layer_data *my_data, VkDeviceMemory mem, bool valid
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
-static VkBool32
-update_cmd_buf_and_mem_references(
- layer_data *my_data,
- const VkCommandBuffer cb,
- const VkDeviceMemory mem,
- const char *apiName)
-{
+static VkBool32 update_cmd_buf_and_mem_references(layer_data *my_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
+ const char *apiName) {
VkBool32 skipCall = VK_FALSE;
// Skip validation if this image was created through WSI
if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
// First update CB binding in MemObj mini CB list
- MT_MEM_OBJ_INFO* pMemInfo = get_mem_obj_info(my_data, mem);
+ MT_MEM_OBJ_INFO *pMemInfo = get_mem_obj_info(my_data, mem);
if (pMemInfo) {
// Search for cmd buffer object in memory object's binding list
- VkBool32 found = VK_FALSE;
+ VkBool32 found = VK_FALSE;
if (pMemInfo->pCommandBufferBindings.size() > 0) {
- for (list<VkCommandBuffer>::iterator it = pMemInfo->pCommandBufferBindings.begin(); it != pMemInfo->pCommandBufferBindings.end(); ++it) {
+ for (list<VkCommandBuffer>::iterator it = pMemInfo->pCommandBufferBindings.begin();
+ it != pMemInfo->pCommandBufferBindings.end(); ++it) {
if ((*it) == cb) {
found = VK_TRUE;
break;
@@ -542,11 +427,11 @@ update_cmd_buf_and_mem_references(
pMemInfo->refCount++;
}
// Now update CBInfo's Mem reference list
- MT_CB_INFO* pCBInfo = get_cmd_buf_info(my_data, cb);
+ MT_CB_INFO *pCBInfo = get_cmd_buf_info(my_data, cb);
// TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
if (pCBInfo) {
// Search for memory object in cmd buffer's reference list
- VkBool32 found = VK_FALSE;
+ VkBool32 found = VK_FALSE;
if (pCBInfo->pMemObjList.size() > 0) {
for (auto it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
if ((*it) == mem) {
@@ -566,19 +451,15 @@ update_cmd_buf_and_mem_references(
}
// Free bindings related to CB
-static VkBool32
-clear_cmd_buf_and_mem_references(
- layer_data *my_data,
- const VkCommandBuffer cb)
-{
+static VkBool32 clear_cmd_buf_and_mem_references(layer_data *my_data, const VkCommandBuffer cb) {
VkBool32 skipCall = VK_FALSE;
- MT_CB_INFO* pCBInfo = get_cmd_buf_info(my_data, cb);
+ MT_CB_INFO *pCBInfo = get_cmd_buf_info(my_data, cb);
if (pCBInfo) {
if (pCBInfo->pMemObjList.size() > 0) {
list<VkDeviceMemory> mem_obj_list = pCBInfo->pMemObjList;
- for (list<VkDeviceMemory>::iterator it=mem_obj_list.begin(); it!=mem_obj_list.end(); ++it) {
- MT_MEM_OBJ_INFO* pInfo = get_mem_obj_info(my_data, *it);
+ for (list<VkDeviceMemory>::iterator it = mem_obj_list.begin(); it != mem_obj_list.end(); ++it) {
+ MT_MEM_OBJ_INFO *pInfo = get_mem_obj_info(my_data, *it);
if (pInfo) {
pInfo->pCommandBufferBindings.remove(cb);
pInfo->refCount--;
@@ -593,12 +474,9 @@ clear_cmd_buf_and_mem_references(
}
// Delete the entire CB list
-static VkBool32
-delete_cmd_buf_info_list(
- layer_data* my_data)
-{
+static VkBool32 delete_cmd_buf_info_list(layer_data *my_data) {
VkBool32 skipCall = VK_FALSE;
- for (unordered_map<VkCommandBuffer, MT_CB_INFO>::iterator ii=my_data->cbMap.begin(); ii!=my_data->cbMap.end(); ++ii) {
+ for (unordered_map<VkCommandBuffer, MT_CB_INFO>::iterator ii = my_data->cbMap.begin(); ii != my_data->cbMap.end(); ++ii) {
skipCall |= clear_cmd_buf_and_mem_references(my_data, (*ii).first);
}
my_data->cbMap.clear();
@@ -606,26 +484,26 @@ delete_cmd_buf_info_list(
}
// For given MemObjInfo, report Obj & CB bindings
-static VkBool32
-reportMemReferencesAndCleanUp(
- layer_data *my_data,
- MT_MEM_OBJ_INFO *pMemObjInfo)
-{
+static VkBool32 reportMemReferencesAndCleanUp(layer_data *my_data, MT_MEM_OBJ_INFO *pMemObjInfo) {
VkBool32 skipCall = VK_FALSE;
size_t cmdBufRefCount = pMemObjInfo->pCommandBufferBindings.size();
- size_t objRefCount = pMemObjInfo->pObjBindings.size();
+ size_t objRefCount = pMemObjInfo->pObjBindings.size();
if ((pMemObjInfo->pCommandBufferBindings.size()) != 0) {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t) pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
- "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER " references",
- (uint64_t) pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
+ (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
+ "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
+ " references",
+ (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
}
if (cmdBufRefCount > 0 && pMemObjInfo->pCommandBufferBindings.size() > 0) {
- for (list<VkCommandBuffer>::const_iterator it = pMemObjInfo->pCommandBufferBindings.begin(); it != pMemObjInfo->pCommandBufferBindings.end(); ++it) {
+ for (list<VkCommandBuffer>::const_iterator it = pMemObjInfo->pCommandBufferBindings.begin();
+ it != pMemObjInfo->pCommandBufferBindings.end(); ++it) {
// TODO : CommandBuffer should be source Obj here
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)(*it), __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
- "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, (*it), (uint64_t) pMemObjInfo->mem);
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)(*it), __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
+ "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, (*it), (uint64_t)pMemObjInfo->mem);
}
// Clear the list of hanging references
pMemObjInfo->pCommandBufferBindings.clear();
@@ -633,8 +511,9 @@ reportMemReferencesAndCleanUp(
if (objRefCount > 0 && pMemObjInfo->pObjBindings.size() > 0) {
for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, it->type, it->handle, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
- "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64, it->handle, (uint64_t) pMemObjInfo->mem);
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, it->type, it->handle, __LINE__,
+ MEMTRACK_FREED_MEM_REF, "MEM", "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64,
+ it->handle, (uint64_t)pMemObjInfo->mem);
}
// Clear the list of hanging references
pMemObjInfo->pObjBindings.clear();
@@ -642,42 +521,34 @@ reportMemReferencesAndCleanUp(
return skipCall;
}
-static VkBool32
-deleteMemObjInfo(
- layer_data *my_data,
- void *object,
- VkDeviceMemory mem)
-{
+static VkBool32 deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
VkBool32 skipCall = VK_FALSE;
auto item = my_data->memObjMap.find(mem);
if (item != my_data->memObjMap.end()) {
my_data->memObjMap.erase(item);
} else {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t) mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
- "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t) mem);
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
+ (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
+ "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
}
return skipCall;
}
// Check if fence for given CB is completed
-static VkBool32
-checkCBCompleted(
- layer_data *my_data,
- const VkCommandBuffer cb,
- VkBool32 *complete)
-{
- MT_CB_INFO *pCBInfo = get_cmd_buf_info(my_data, cb);
- VkBool32 skipCall = VK_FALSE;
- *complete = VK_TRUE;
+static VkBool32 checkCBCompleted(layer_data *my_data, const VkCommandBuffer cb, VkBool32 *complete) {
+ MT_CB_INFO *pCBInfo = get_cmd_buf_info(my_data, cb);
+ VkBool32 skipCall = VK_FALSE;
+ *complete = VK_TRUE;
if (pCBInfo) {
if (pCBInfo->lastSubmittedQueue != NULL) {
VkQueue queue = pCBInfo->lastSubmittedQueue;
MT_QUEUE_INFO *pQueueInfo = &my_data->queueMap[queue];
if (pCBInfo->fenceId > pQueueInfo->lastRetiredId) {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)cb, __LINE__,
- MEMTRACK_NONE, "MEM", "fence %#" PRIxLEAST64 " for CB %p has not been checked for completion",
- (uint64_t) pCBInfo->lastSubmittedFence, cb);
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)cb, __LINE__, MEMTRACK_NONE, "MEM",
+ "fence %#" PRIxLEAST64 " for CB %p has not been checked for completion",
+ (uint64_t)pCBInfo->lastSubmittedFence, cb);
*complete = VK_FALSE;
}
}
@@ -685,22 +556,18 @@ checkCBCompleted(
return skipCall;
}
-static VkBool32
-freeMemObjInfo(
- layer_data *my_data,
- void* object,
- VkDeviceMemory mem,
- VkBool32 internal)
-{
+static VkBool32 freeMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem, VkBool32 internal) {
VkBool32 skipCall = VK_FALSE;
// Parse global list to find info w/ mem
- MT_MEM_OBJ_INFO* pInfo = get_mem_obj_info(my_data, mem);
+ MT_MEM_OBJ_INFO *pInfo = get_mem_obj_info(my_data, mem);
if (pInfo) {
if (pInfo->allocInfo.allocationSize == 0 && !internal) {
// TODO: Verify against Valid Use section
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t) mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
- "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
- "this should not be explicitly freed\n", (uint64_t) mem);
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
+ (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
+ "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
+ "this should not be explicitly freed\n",
+ (uint64_t)mem);
} else {
// Clear any CB bindings for completed CBs
// TODO : Is there a better place to do this?
@@ -732,23 +599,19 @@ freeMemObjInfo(
return skipCall;
}
-static const char*
-object_type_to_string(
- VkDebugReportObjectTypeEXT type)
-{
- switch (type)
- {
- case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
- return "image";
- break;
- case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
- return "buffer";
- break;
- case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
- return "swapchain";
- break;
- default:
- return "unknown";
+static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
+ switch (type) {
+ case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
+ return "image";
+ break;
+ case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
+ return "buffer";
+ break;
+ case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
+ return "swapchain";
+ break;
+ default:
+ return "unknown";
}
}
@@ -757,22 +620,17 @@ object_type_to_string(
// 2. Decrement refCount for MemObjInfo
// 3. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applied to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
-static VkBool32
-clear_object_binding(
- layer_data *my_data,
- void *dispObj,
- uint64_t handle,
- VkDebugReportObjectTypeEXT type)
-{
+static VkBool32 clear_object_binding(layer_data *my_data, void *dispObj, uint64_t handle, VkDebugReportObjectTypeEXT type) {
// TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
VkBool32 skipCall = VK_FALSE;
- MT_OBJ_BINDING_INFO* pObjBindInfo = get_object_binding_info(my_data, handle, type);
+ MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(my_data, handle, type);
if (pObjBindInfo) {
- MT_MEM_OBJ_INFO* pMemObjInfo = get_mem_obj_info(my_data, pObjBindInfo->mem);
+ MT_MEM_OBJ_INFO *pMemObjInfo = get_mem_obj_info(my_data, pObjBindInfo->mem);
// TODO : Make sure this is a reasonable way to reset mem binding
pObjBindInfo->mem = VK_NULL_HANDLE;
if (pMemObjInfo) {
- // This obj is bound to a memory object. Remove the reference to this object in that memory object's list, decrement the memObj's refcount
+ // This obj is bound to a memory object. Remove the reference to this object in that memory object's list, decrement the
+ // memObj's refcount
// and set the objects memory binding pointer to NULL.
VkBool32 clearSucceeded = VK_FALSE;
for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
@@ -783,10 +641,12 @@ clear_object_binding(
break;
}
}
- if (VK_FALSE == clearSucceeded ) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT, "MEM",
- "While trying to clear mem binding for %s obj %#" PRIxLEAST64 ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
- object_type_to_string(type), handle, (uint64_t) pMemObjInfo->mem);
+ if (VK_FALSE == clearSucceeded) {
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
+ "MEM", "While trying to clear mem binding for %s obj %#" PRIxLEAST64
+ ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
+ object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
}
}
}
@@ -800,39 +660,35 @@ clear_object_binding(
// Add reference off of objInfo
// device is required for error logging, need a dispatchable
// object for that.
-static VkBool32
-set_mem_binding(
- layer_data *my_data,
- void *dispatch_object,
- VkDeviceMemory mem,
- uint64_t handle,
- VkDebugReportObjectTypeEXT type,
- const char *apiName)
-{
+static VkBool32 set_mem_binding(layer_data *my_data, void *dispatch_object, VkDeviceMemory mem, uint64_t handle,
+ VkDebugReportObjectTypeEXT type, const char *apiName) {
VkBool32 skipCall = VK_FALSE;
// Handle NULL case separately, just clear previous binding & decrement reference
if (mem == VK_NULL_HANDLE) {
// TODO: Verify against Valid Use section of spec.
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
- "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
+ "MEM", "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
} else {
- MT_OBJ_BINDING_INFO* pObjBindInfo = get_object_binding_info(my_data, handle, type);
+ MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(my_data, handle, type);
if (!pObjBindInfo) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
- "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list()",
- object_type_to_string(type), apiName, handle);
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
+ "MEM", "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list()",
+ object_type_to_string(type), apiName, handle);
} else {
// non-null case so should have real mem obj
- MT_MEM_OBJ_INFO* pMemInfo = get_mem_obj_info(my_data, mem);
+ MT_MEM_OBJ_INFO *pMemInfo = get_mem_obj_info(my_data, mem);
if (pMemInfo) {
// TODO : Need to track mem binding for obj and report conflict here
- MT_MEM_OBJ_INFO* pPrevBinding = get_mem_obj_info(my_data, pObjBindInfo->mem);
+ MT_MEM_OBJ_INFO *pPrevBinding = get_mem_obj_info(my_data, pObjBindInfo->mem);
if (pPrevBinding != NULL) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t) mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
- "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64 ") which has already been bound to mem object %#" PRIxLEAST64,
- apiName, (uint64_t) mem, handle, (uint64_t) pPrevBinding->mem);
- }
- else {
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
+ (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
+ "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64
+ ") which has already been bound to mem object %#" PRIxLEAST64,
+ apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
+ } else {
MT_OBJ_HANDLE_TYPE oht;
oht.handle = handle;
oht.type = type;
@@ -842,8 +698,7 @@ set_mem_binding(
// TODO : What's the best/correct way to handle this?
if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
VkImageCreateInfo ici = pObjBindInfo->create_info.image;
- if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
- VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
+ if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
// TODO:: More memory state transition stuff.
}
}
@@ -861,30 +716,24 @@ set_mem_binding(
// Add reference from objectInfo to memoryInfo
// Add reference off of object's binding info
// Return VK_TRUE if addition is successful, VK_FALSE otherwise
-static VkBool32
-set_sparse_mem_binding(
- layer_data *my_data,
- void *dispObject,
- VkDeviceMemory mem,
- uint64_t handle,
- VkDebugReportObjectTypeEXT type,
- const char *apiName)
-{
+static VkBool32 set_sparse_mem_binding(layer_data *my_data, void *dispObject, VkDeviceMemory mem, uint64_t handle,
+ VkDebugReportObjectTypeEXT type, const char *apiName) {
VkBool32 skipCall = VK_FALSE;
// Handle NULL case separately, just clear previous binding & decrement reference
if (mem == VK_NULL_HANDLE) {
skipCall = clear_object_binding(my_data, dispObject, handle, type);
} else {
- MT_OBJ_BINDING_INFO* pObjBindInfo = get_object_binding_info(my_data, handle, type);
+ MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(my_data, handle, type);
if (!pObjBindInfo) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
- "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list()", apiName, handle);
+ skipCall |= log_msg(
+ my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
+ "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list()", apiName, handle);
}
// non-null case so should have real mem obj
- MT_MEM_OBJ_INFO* pInfo = get_mem_obj_info(my_data, mem);
+ MT_MEM_OBJ_INFO *pInfo = get_mem_obj_info(my_data, mem);
if (pInfo) {
// Search for object in memory object's binding list
- VkBool32 found = VK_FALSE;
+ VkBool32 found = VK_FALSE;
if (pInfo->pObjBindings.size() > 0) {
for (auto it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
if (((*it).handle == handle) && ((*it).type == type)) {
@@ -897,7 +746,7 @@ set_sparse_mem_binding(
if (found == VK_FALSE) {
MT_OBJ_HANDLE_TYPE oht;
oht.handle = handle;
- oht.type = type;
+ oht.type = type;
pInfo->pObjBindings.push_front(oht);
pInfo->refCount++;
}
@@ -908,54 +757,40 @@ set_sparse_mem_binding(
return skipCall;
}
-template <typename T> void
-print_object_map_members(
- layer_data *my_data,
- void *dispObj,
- T const& objectName,
- VkDebugReportObjectTypeEXT objectType,
- const char *objectStr)
-{
- for (auto const& element : objectName) {
+template <typename T>
+void print_object_map_members(layer_data *my_data, void *dispObj, T const &objectName, VkDebugReportObjectTypeEXT objectType,
+ const char *objectStr) {
+ for (auto const &element : objectName) {
log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objectType, 0, __LINE__, MEMTRACK_NONE, "MEM",
- " %s Object list contains %s Object %#" PRIxLEAST64 " ", objectStr, objectStr, element.first);
+ " %s Object list contains %s Object %#" PRIxLEAST64 " ", objectStr, objectStr, element.first);
}
}
// For given Object, get 'mem' obj that it's bound to or NULL if no binding
-static VkBool32
-get_mem_binding_from_object(
- layer_data *my_data,
- void *dispObj,
- const uint64_t handle,
- const VkDebugReportObjectTypeEXT type,
- VkDeviceMemory *mem)
-{
+static VkBool32 get_mem_binding_from_object(layer_data *my_data, void *dispObj, const uint64_t handle,
+ const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
VkBool32 skipCall = VK_FALSE;
*mem = VK_NULL_HANDLE;
- MT_OBJ_BINDING_INFO* pObjBindInfo = get_object_binding_info(my_data, handle, type);
+ MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(my_data, handle, type);
if (pObjBindInfo) {
if (pObjBindInfo->mem) {
*mem = pObjBindInfo->mem;
} else {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
- "Trying to get mem binding for object %#" PRIxLEAST64 " but object has no mem binding", handle);
+ skipCall =
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
+ "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but object has no mem binding", handle);
}
} else {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT, "MEM",
- "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list",
- handle, object_type_to_string(type));
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
+ "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list", handle,
+ object_type_to_string(type));
}
return skipCall;
}
// Print details of MemObjInfo list
-static void
-print_mem_list(
- layer_data *my_data,
- void *dispObj)
-{
- MT_MEM_OBJ_INFO* pInfo = NULL;
+static void print_mem_list(layer_data *my_data, void *dispObj) {
+ MT_MEM_OBJ_INFO *pInfo = NULL;
// Early out if info is not requested
if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
@@ -963,95 +798,90 @@ print_mem_list(
}
// Just printing each msg individually for now, may want to package these into single large print
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_NONE, "MEM",
- "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)", my_data->memObjMap.size());
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_NONE, "MEM",
- "=============================");
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
+ MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
+ my_data->memObjMap.size());
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
+ MEMTRACK_NONE, "MEM", "=============================");
if (my_data->memObjMap.size() <= 0)
return;
- for (auto ii=my_data->memObjMap.begin(); ii!=my_data->memObjMap.end(); ++ii) {
+ for (auto ii = my_data->memObjMap.begin(); ii != my_data->memObjMap.end(); ++ii) {
pInfo = &(*ii).second;
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_NONE, "MEM",
- " ===MemObjInfo at %p===", (void*)pInfo);
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_NONE, "MEM",
- " Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_NONE, "MEM",
- " Ref Count: %u", pInfo->refCount);
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
+ __LINE__, MEMTRACK_NONE, "MEM", " ===MemObjInfo at %p===", (void *)pInfo);
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
+ __LINE__, MEMTRACK_NONE, "MEM", " Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
+ __LINE__, MEMTRACK_NONE, "MEM", " Ref Count: %u", pInfo->refCount);
if (0 != pInfo->allocInfo.allocationSize) {
string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO): ");
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_NONE, "MEM",
- " Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
+ __LINE__, MEMTRACK_NONE, "MEM", " Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
} else {
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_NONE, "MEM",
- " Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
+ __LINE__, MEMTRACK_NONE, "MEM", " Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
}
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_NONE, "MEM",
- " VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:", pInfo->pObjBindings.size());
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
+ __LINE__, MEMTRACK_NONE, "MEM", " VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
+ pInfo->pObjBindings.size());
if (pInfo->pObjBindings.size() > 0) {
for (list<MT_OBJ_HANDLE_TYPE>::iterator it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_NONE, "MEM",
- " VK OBJECT %" PRIu64, it->handle);
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
+ __LINE__, MEMTRACK_NONE, "MEM", " VK OBJECT %" PRIu64, it->handle);
}
}
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_NONE, "MEM",
- " VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements", pInfo->pCommandBufferBindings.size());
- if (pInfo->pCommandBufferBindings.size() > 0)
- {
- for (list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin(); it != pInfo->pCommandBufferBindings.end(); ++it) {
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_NONE, "MEM",
- " VK CB %p", (*it));
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
+ __LINE__, MEMTRACK_NONE, "MEM",
+ " VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
+ pInfo->pCommandBufferBindings.size());
+ if (pInfo->pCommandBufferBindings.size() > 0) {
+ for (list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin();
+ it != pInfo->pCommandBufferBindings.end(); ++it) {
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
+ __LINE__, MEMTRACK_NONE, "MEM", " VK CB %p", (*it));
}
}
}
}
-static void
-printCBList(
- layer_data *my_data,
- void *dispObj)
-{
- MT_CB_INFO* pCBInfo = NULL;
+static void printCBList(layer_data *my_data, void *dispObj) {
+ MT_CB_INFO *pCBInfo = NULL;
// Early out if info is not requested
if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
return;
}
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_NONE, "MEM",
- "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)", my_data->cbMap.size());
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_NONE, "MEM",
- "==================");
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
+ MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)", my_data->cbMap.size());
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
+ MEMTRACK_NONE, "MEM", "==================");
if (my_data->cbMap.size() <= 0)
return;
- for (auto ii=my_data->cbMap.begin(); ii!=my_data->cbMap.end(); ++ii) {
+ for (auto ii = my_data->cbMap.begin(); ii != my_data->cbMap.end(); ++ii) {
pCBInfo = &(*ii).second;
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_NONE, "MEM",
- " CB Info (%p) has CB %p, fenceId %" PRIx64", and fence %#" PRIxLEAST64,
- (void*)pCBInfo, (void*)pCBInfo->commandBuffer, pCBInfo->fenceId,
- (uint64_t) pCBInfo->lastSubmittedFence);
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
+ __LINE__, MEMTRACK_NONE, "MEM", " CB Info (%p) has CB %p, fenceId %" PRIx64 ", and fence %#" PRIxLEAST64,
+ (void *)pCBInfo, (void *)pCBInfo->commandBuffer, pCBInfo->fenceId, (uint64_t)pCBInfo->lastSubmittedFence);
if (pCBInfo->pMemObjList.size() <= 0)
continue;
for (list<VkDeviceMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__, MEMTRACK_NONE, "MEM",
- " Mem obj %" PRIu64, (uint64_t)(*it));
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
+ __LINE__, MEMTRACK_NONE, "MEM", " Mem obj %" PRIu64, (uint64_t)(*it));
}
}
}
-static void
-init_mem_tracker(
- layer_data *my_data,
- const VkAllocationCallbacks *pAllocator)
-{
+static void init_mem_tracker(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
uint32_t report_flags = 0;
uint32_t debug_action = 0;
FILE *log_output = NULL;
@@ -1059,10 +889,9 @@ init_mem_tracker(
VkDebugReportCallbackEXT callback;
// initialize mem_tracker options
report_flags = getLayerOptionFlags("lunarg_mem_tracker.report_flags", 0);
- getLayerOptionEnum("lunarg_mem_tracker.debug_action", (uint32_t *) &debug_action);
+ getLayerOptionEnum("lunarg_mem_tracker.debug_action", (uint32_t *)&debug_action);
- if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
- {
+ if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG) {
option_str = getLayerOption("lunarg_mem_tracker.log_filename");
log_output = getLayerLogOutput(option_str, "lunarg_mem_tracker");
VkDebugReportCallbackCreateInfoEXT dbgInfo;
@@ -1086,8 +915,7 @@ init_mem_tracker(
my_data->logging_callback.push_back(callback);
}
- if (!globalLockInitialized)
- {
+ if (!globalLockInitialized) {
loader_platform_thread_create_mutex(&globalLock);
globalLockInitialized = 1;
}
@@ -1097,10 +925,7 @@ init_mem_tracker(
}
// hook DestroyInstance to remove tableInstanceMap entry
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(
- VkInstance instance,
- const VkAllocationCallbacks *pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
// Grab the key before the instance is destroyed.
dispatch_key key = get_dispatch_key(instance);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
@@ -1126,16 +951,13 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(
}
}
-VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(
- const VkInstanceCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkInstance* pInstance)
-{
+VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
- PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance) fpGetInstanceProcAddr(NULL, "vkCreateInstance");
+ PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
if (fpCreateInstance == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -1152,30 +974,23 @@ VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(
my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);
- my_data->report_data = debug_report_create_instance(
- my_data->instance_dispatch_table,
- *pInstance,
- pCreateInfo->enabledExtensionCount,
- pCreateInfo->ppEnabledExtensionNames);
+ my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
+ pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
init_mem_tracker(my_data, pAllocator);
return result;
}
-static void
-createDeviceRegisterExtensions(
- const VkDeviceCreateInfo *pCreateInfo,
- VkDevice device)
-{
+static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkLayerDispatchTable *pDisp = my_device_data->device_dispatch_table;
PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
- pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR) gpa(device, "vkCreateSwapchainKHR");
- pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR) gpa(device, "vkDestroySwapchainKHR");
- pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR) gpa(device, "vkGetSwapchainImagesKHR");
- pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR) gpa(device, "vkAcquireNextImageKHR");
- pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR) gpa(device, "vkQueuePresentKHR");
+ pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
+ pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
+ pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
+ pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
+ pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
my_device_data->wsi_enabled = VK_FALSE;
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
@@ -1183,18 +998,14 @@ createDeviceRegisterExtensions(
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(
- VkPhysicalDevice gpu,
- const VkDeviceCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkDevice *pDevice)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
- PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice) fpGetInstanceProcAddr(NULL, "vkCreateDevice");
+ PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
if (fpCreateDevice == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -1221,31 +1032,31 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(
- VkDevice device,
- const VkAllocationCallbacks *pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
dispatch_key key = get_dispatch_key(device);
layer_data *my_device_data = get_my_data_ptr(key, layer_data_map);
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&globalLock);
- log_msg(my_device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM",
- "Printing List details prior to vkDestroyDevice()");
- log_msg(my_device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM",
- "================================================");
+ log_msg(my_device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
+ (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
+ log_msg(my_device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
+ (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
print_mem_list(my_device_data, device);
printCBList(my_device_data, device);
skipCall = delete_cmd_buf_info_list(my_device_data);
// Report any memory leaks
- MT_MEM_OBJ_INFO* pInfo = NULL;
+ MT_MEM_OBJ_INFO *pInfo = NULL;
if (my_device_data->memObjMap.size() > 0) {
- for (auto ii=my_device_data->memObjMap.begin(); ii!=my_device_data->memObjMap.end(); ++ii) {
+ for (auto ii = my_device_data->memObjMap.begin(); ii != my_device_data->memObjMap.end(); ++ii) {
pInfo = &(*ii).second;
if (pInfo->allocInfo.allocationSize != 0) {
// Valid Usage: All child objects created on device must have been destroyed prior to destroying device
- skipCall |= log_msg(my_device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t) pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK, "MEM",
- "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling "
- "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().", (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
+ skipCall |=
+ log_msg(my_device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
+ "MEM", "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling "
+ "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().",
+ (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
}
}
}
@@ -1257,7 +1068,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(
#if DISPATCH_MAP_DEBUG
fprintf(stderr, "Device: %p, key: %p\n", device, key);
#endif
- VkLayerDispatchTable *pDisp = my_device_data->device_dispatch_table;
+ VkLayerDispatchTable *pDisp = my_device_data->device_dispatch_table;
if (VK_FALSE == skipCall) {
pDisp->DestroyDevice(device, pAllocator);
}
@@ -1265,82 +1076,51 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(
layer_data_map.erase(key);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties(
- VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceMemoryProperties *pMemoryProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
VkLayerInstanceDispatchTable *pInstanceTable = my_data->instance_dispatch_table;
pInstanceTable->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
memcpy(&memProps, pMemoryProperties, sizeof(VkPhysicalDeviceMemoryProperties));
}
-static const VkExtensionProperties instance_extensions[] = {
- {
- VK_EXT_DEBUG_REPORT_EXTENSION_NAME,
- VK_EXT_DEBUG_REPORT_SPEC_VERSION
- }
-};
+static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(
- const char *pLayerName,
- uint32_t *pCount,
- VkExtensionProperties *pProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
}
-static const VkLayerProperties mtGlobalLayers[] = {
- {
- "VK_LAYER_LUNARG_mem_tracker",
- VK_API_VERSION,
- 1,
- "LunarG Validation Layer",
- }
-};
+static const VkLayerProperties mtGlobalLayers[] = {{
+ "VK_LAYER_LUNARG_mem_tracker", VK_API_VERSION, 1, "LunarG Validation Layer",
+}};
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(
- uint32_t *pCount,
- VkLayerProperties *pProperties)
-{
- return util_GetLayerProperties(ARRAY_SIZE(mtGlobalLayers),
- mtGlobalLayers,
- pCount, pProperties);
-}
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(
- VkPhysicalDevice physicalDevice,
- const char *pLayerName,
- uint32_t *pCount,
- VkExtensionProperties *pProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
+ return util_GetLayerProperties(ARRAY_SIZE(mtGlobalLayers), mtGlobalLayers, pCount, pProperties);
+}
+
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
+ const char *pLayerName, uint32_t *pCount,
+ VkExtensionProperties *pProperties) {
/* Mem tracker does not have any physical device extensions */
if (pLayerName == NULL) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
VkLayerInstanceDispatchTable *pInstanceTable = my_data->instance_dispatch_table;
- return pInstanceTable->EnumerateDeviceExtensionProperties(
- physicalDevice, NULL, pCount, pProperties);
+ return pInstanceTable->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
} else {
return util_GetExtensionProperties(0, NULL, pCount, pProperties);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(
- VkPhysicalDevice physicalDevice,
- uint32_t *pCount,
- VkLayerProperties *pProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
/* Mem tracker's physical device layers are the same as global */
- return util_GetLayerProperties(ARRAY_SIZE(mtGlobalLayers), mtGlobalLayers,
- pCount, pProperties);
+ return util_GetLayerProperties(ARRAY_SIZE(mtGlobalLayers), mtGlobalLayers, pCount, pProperties);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(
- VkDevice device,
- uint32_t queueNodeIndex,
- uint32_t queueIndex,
- VkQueue *pQueue)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetDeviceQueue(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue *pQueue) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
my_data->device_dispatch_table->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);
loader_platform_thread_lock_mutex(&globalLock);
@@ -1348,19 +1128,15 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(
loader_platform_thread_unlock_mutex(&globalLock);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(
- VkQueue queue,
- uint32_t submitCount,
- const VkSubmitInfo *pSubmits,
- VkFence fence)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
loader_platform_thread_lock_mutex(&globalLock);
// TODO : Need to track fence and clear mem references when fence clears
- MT_CB_INFO* pCBInfo = NULL;
- uint64_t fenceId = 0;
+ MT_CB_INFO *pCBInfo = NULL;
+ uint64_t fenceId = 0;
VkBool32 skipCall = add_fence_info(my_data, fence, queue, &fenceId);
print_mem_list(my_data, queue);
@@ -1373,7 +1149,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(
pCBInfo->fenceId = fenceId;
pCBInfo->lastSubmittedFence = fence;
pCBInfo->lastSubmittedQueue = queue;
- for (auto& function : pCBInfo->validate_functions) {
+ for (auto &function : pCBInfo->validate_functions) {
skipCall |= function();
}
}
@@ -1384,9 +1160,10 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(
if (my_data->semaphoreMap.find(sem) != my_data->semaphoreMap.end()) {
if (my_data->semaphoreMap[sem] != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, (uint64_t) sem,
- __LINE__, MEMTRACK_NONE, "SEMAPHORE",
- "vkQueueSubmit: Semaphore must be in signaled state before passing to pWaitSemaphores");
+ skipCall =
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
+ (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
+ "vkQueueSubmit: Semaphore must be in signaled state before passing to pWaitSemaphores");
}
my_data->semaphoreMap[sem] = MEMTRACK_SEMAPHORE_STATE_WAIT;
}
@@ -1396,9 +1173,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(
if (my_data->semaphoreMap.find(sem) != my_data->semaphoreMap.end()) {
if (my_data->semaphoreMap[sem] != MEMTRACK_SEMAPHORE_STATE_UNSET) {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, (uint64_t) sem,
- __LINE__, MEMTRACK_NONE, "SEMAPHORE",
- "vkQueueSubmit: Semaphore must not be currently signaled or in a wait state");
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, (uint64_t)sem, __LINE__, MEMTRACK_NONE,
+ "SEMAPHORE", "vkQueueSubmit: Semaphore must not be currently signaled or in a wait state");
}
my_data->semaphoreMap[sem] = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
}
@@ -1407,8 +1184,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
- result = my_data->device_dispatch_table->QueueSubmit(
- queue, submitCount, pSubmits, fence);
+ result = my_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
}
loader_platform_thread_lock_mutex(&globalLock);
@@ -1427,12 +1203,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(
- VkDevice device,
- const VkMemoryAllocateInfo *pAllocateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkDeviceMemory *pMemory)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
// TODO : Track allocations and overall size here
@@ -1443,11 +1215,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkFreeMemory(
- VkDevice device,
- VkDeviceMemory mem,
- const VkAllocationCallbacks *pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
my_data->bufferRanges.erase(mem);
my_data->imageRanges.erase(mem);
@@ -1467,54 +1236,46 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkFreeMemory(
my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
}
-VkBool32
-validateMemRange(
- layer_data *my_data,
- VkDeviceMemory mem,
- VkDeviceSize offset,
- VkDeviceSize size)
-{
+VkBool32 validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
VkBool32 skipCall = VK_FALSE;
if (size == 0) {
// TODO: a size of 0 is not listed as an invalid use in the spec, should it be?
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__,
- MEMTRACK_INVALID_MAP, "MEM", "VkMapMemory: Attempting to map memory range of size zero");
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
+ (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
+ "VkMapMemory: Attempting to map memory range of size zero");
}
auto mem_element = my_data->memObjMap.find(mem);
if (mem_element != my_data->memObjMap.end()) {
// It is an application error to call VkMapMemory on an object that is already mapped
if (mem_element->second.memRange.size != 0) {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__,
- MEMTRACK_INVALID_MAP, "MEM", "VkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem);
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
+ (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
+ "VkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem);
}
// Validate that offset + size is within object's allocationSize
if (size == VK_WHOLE_SIZE) {
if (offset >= mem_element->second.allocInfo.allocationSize) {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__,
- MEMTRACK_INVALID_MAP, "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total array size %" PRIu64,
- offset, mem_element->second.allocInfo.allocationSize, mem_element->second.allocInfo.allocationSize);
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
+ "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total array size %" PRIu64, offset,
+ mem_element->second.allocInfo.allocationSize, mem_element->second.allocInfo.allocationSize);
}
} else {
if ((offset + size) > mem_element->second.allocInfo.allocationSize) {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__,
- MEMTRACK_INVALID_MAP, "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total array size %" PRIu64,
- offset, size + offset, mem_element->second.allocInfo.allocationSize);
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
+ "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total array size %" PRIu64, offset,
+ size + offset, mem_element->second.allocInfo.allocationSize);
}
}
}
return skipCall;
}
-void
-storeMemRanges(
- layer_data *my_data,
- VkDeviceMemory mem,
- VkDeviceSize offset,
- VkDeviceSize size)
- {
+void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
auto mem_element = my_data->memObjMap.find(mem);
if (mem_element != my_data->memObjMap.end()) {
MemRange new_range;
@@ -1524,16 +1285,14 @@ storeMemRanges(
}
}
-VkBool32 deleteMemRanges(
- layer_data *my_data,
- VkDeviceMemory mem)
-{
+VkBool32 deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
VkBool32 skipCall = VK_FALSE;
auto mem_element = my_data->memObjMap.find(mem);
if (mem_element != my_data->memObjMap.end()) {
if (!mem_element->second.memRange.size) {
// Valid Usage: memory must currently be mapped
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
+ (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
"Unmapping Memory without memory being mapped: mem obj %#" PRIxLEAST64, (uint64_t)mem);
}
mem_element->second.memRange.size = 0;
@@ -1547,13 +1306,7 @@ VkBool32 deleteMemRanges(
static char NoncoherentMemoryFillValue = 0xb;
-void
-initializeAndTrackMemory(
- layer_data *my_data,
- VkDeviceMemory mem,
- VkDeviceSize size,
- void **ppData)
-{
+void initializeAndTrackMemory(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
auto mem_element = my_data->memObjMap.find(mem);
if (mem_element != my_data->memObjMap.end()) {
mem_element->second.pDriverData = *ppData;
@@ -1567,31 +1320,25 @@ initializeAndTrackMemory(
size_t convSize = (size_t)(size);
mem_element->second.pData = malloc(2 * convSize);
memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize);
- *ppData = static_cast<char*>(mem_element->second.pData) + (convSize / 2);
+ *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2);
}
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory(
- VkDevice device,
- VkDeviceMemory mem,
- VkDeviceSize offset,
- VkDeviceSize size,
- VkFlags flags,
- void **ppData)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkBool32 skipCall = VK_FALSE;
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
loader_platform_thread_lock_mutex(&globalLock);
MT_MEM_OBJ_INFO *pMemObj = get_mem_obj_info(my_data, mem);
if (pMemObj) {
pMemObj->valid = true;
- if ((memProps.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags &
- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
- (uint64_t) mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
- "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t) mem);
+ if ((memProps.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
+ skipCall =
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
+ (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
+ "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t)mem);
}
}
skipCall |= validateMemRange(my_data, mem, offset, size);
@@ -1604,12 +1351,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(
- VkDevice device,
- VkDeviceMemory mem)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkBool32 skipCall = VK_FALSE;
+ VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&globalLock);
skipCall |= deleteMemRanges(my_data, mem);
@@ -1619,111 +1363,90 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(
}
}
-VkBool32
-validateMemoryIsMapped(
- layer_data *my_data,
- const char *funcName,
- uint32_t memRangeCount,
- const VkMappedMemoryRange *pMemRanges)
-{
+VkBool32 validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
+ const VkMappedMemoryRange *pMemRanges) {
VkBool32 skipCall = VK_FALSE;
for (uint32_t i = 0; i < memRangeCount; ++i) {
auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
if (mem_element != my_data->memObjMap.end()) {
if (mem_element->second.memRange.offset > pMemRanges[i].offset) {
skipCall |= log_msg(
- my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
- (uint64_t)pMemRanges[i].memory, __LINE__,
- MEMTRACK_INVALID_MAP, "MEM",
- "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
- ") is less than Memory Object's offset "
+ my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
+ (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
+ "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
"(" PRINTF_SIZE_T_SPECIFIER ").",
- funcName, static_cast<size_t>(pMemRanges[i].offset),
- static_cast<size_t>(mem_element->second.memRange.offset));
+ funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset));
}
if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) &&
- ((mem_element->second.memRange.offset +
- mem_element->second.memRange.size) <
+ ((mem_element->second.memRange.offset + mem_element->second.memRange.size) <
(pMemRanges[i].offset + pMemRanges[i].size))) {
- skipCall |= log_msg(
- my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
- (uint64_t)pMemRanges[i].memory, __LINE__,
- MEMTRACK_INVALID_MAP, "MEM",
- "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
- ") exceeds the Memory Object's upper-bound "
- "(" PRINTF_SIZE_T_SPECIFIER ").",
- funcName, static_cast<size_t>(pMemRanges[i].offset +
- pMemRanges[i].size),
- static_cast<size_t>(mem_element->second.memRange.offset +
- mem_element->second.memRange.size));
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
+ MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
+ ") exceeds the Memory Object's upper-bound "
+ "(" PRINTF_SIZE_T_SPECIFIER ").",
+ funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
+ static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size));
}
}
}
return skipCall;
}
-VkBool32
-validateAndCopyNoncoherentMemoryToDriver(
- layer_data *my_data,
- uint32_t memRangeCount,
- const VkMappedMemoryRange *pMemRanges)
-{
+VkBool32 validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
+ const VkMappedMemoryRange *pMemRanges) {
VkBool32 skipCall = VK_FALSE;
for (uint32_t i = 0; i < memRangeCount; ++i) {
auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
if (mem_element != my_data->memObjMap.end()) {
if (mem_element->second.pData) {
- VkDeviceSize size = mem_element->second.memRange.size;
+ VkDeviceSize size = mem_element->second.memRange.size;
VkDeviceSize half_size = (size / 2);
- char* data = static_cast<char*>(mem_element->second.pData);
+ char *data = static_cast<char *>(mem_element->second.pData);
for (auto j = 0; j < half_size; ++j) {
if (data[j] != NoncoherentMemoryFillValue) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory,
- __LINE__, MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
+ MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
+ (uint64_t)pMemRanges[i].memory);
}
}
for (auto j = size + half_size; j < 2 * size; ++j) {
if (data[j] != NoncoherentMemoryFillValue) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory,
- __LINE__, MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
+ MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
+ (uint64_t)pMemRanges[i].memory);
}
}
- memcpy(mem_element->second.pDriverData, static_cast<void*>(data + (size_t)(half_size)), (size_t)(size));
+ memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
}
}
}
return skipCall;
}
-VK_LAYER_EXPORT VkResult VKAPI_CALL vkFlushMappedMemoryRanges(
- VkDevice device,
- uint32_t memRangeCount,
- const VkMappedMemoryRange *pMemRanges)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VkResult VKAPI_CALL
+vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
- skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
+ skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
+ skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
loader_platform_thread_unlock_mutex(&globalLock);
- if (VK_FALSE == skipCall ) {
+ if (VK_FALSE == skipCall) {
result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
}
return result;
}
-VK_LAYER_EXPORT VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges(
- VkDevice device,
- uint32_t memRangeCount,
- const VkMappedMemoryRange *pMemRanges)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VkResult VKAPI_CALL
+vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
@@ -1734,11 +1457,7 @@ VK_LAYER_EXPORT VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(
- VkDevice device,
- VkFence fence,
- const VkAllocationCallbacks *pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
delete_fence_info(my_data, fence);
@@ -1750,11 +1469,8 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(
my_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer(
- VkDevice device,
- VkBuffer buffer,
- const VkAllocationCallbacks *pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&globalLock);
@@ -1769,11 +1485,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer(
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(
- VkDevice device,
- VkImage image,
- const VkAllocationCallbacks *pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&globalLock);
@@ -1788,40 +1500,42 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(
}
}
-VkBool32 print_memory_range_error(layer_data *my_data, const uint64_t object_handle, const uint64_t other_handle, VkDebugReportObjectTypeEXT object_type) {
+VkBool32 print_memory_range_error(layer_data *my_data, const uint64_t object_handle, const uint64_t other_handle,
+ VkDebugReportObjectTypeEXT object_type) {
if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
- return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0, MEMTRACK_INVALID_ALIASING, "MEM",
-                   "Buffer %" PRIx64 " is aliased with image %" PRIx64, object_handle, other_handle);
+ return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
+                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer %" PRIx64 " is aliased with image %" PRIx64, object_handle,
+ other_handle);
} else {
- return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0, MEMTRACK_INVALID_ALIASING, "MEM",
-                   "Image %" PRIx64 " is aliased with buffer %" PRIx64, object_handle, other_handle);
+ return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
+                       MEMTRACK_INVALID_ALIASING, "MEM", "Image %" PRIx64 " is aliased with buffer %" PRIx64, object_handle,
+ other_handle);
}
}
-VkBool32 validate_memory_range(layer_data *my_data, const unordered_map<VkDeviceMemory, vector<MEMORY_RANGE>>& memory, const MEMORY_RANGE& new_range, VkDebugReportObjectTypeEXT object_type) {
+VkBool32 validate_memory_range(layer_data *my_data, const unordered_map<VkDeviceMemory, vector<MEMORY_RANGE>> &memory,
+ const MEMORY_RANGE &new_range, VkDebugReportObjectTypeEXT object_type) {
VkBool32 skip_call = false;
- if (!memory.count(new_range.memory)) return false;
- const vector<MEMORY_RANGE>& ranges = memory.at(new_range.memory);
+ if (!memory.count(new_range.memory))
+ return false;
+ const vector<MEMORY_RANGE> &ranges = memory.at(new_range.memory);
for (auto range : ranges) {
if ((range.end & ~(my_data->properties.limits.bufferImageGranularity - 1)) <
- (new_range.start & ~(my_data->properties.limits.bufferImageGranularity - 1))) continue;
+ (new_range.start & ~(my_data->properties.limits.bufferImageGranularity - 1)))
+ continue;
if ((range.start & ~(my_data->properties.limits.bufferImageGranularity - 1)) >
- (new_range.end & ~(my_data->properties.limits.bufferImageGranularity - 1))) continue;
+ (new_range.end & ~(my_data->properties.limits.bufferImageGranularity - 1)))
+ continue;
skip_call |= print_memory_range_error(my_data, new_range.handle, range.handle, object_type);
}
return skip_call;
}
-VkBool32 validate_buffer_image_aliasing(
- layer_data *my_data,
- uint64_t handle,
- VkDeviceMemory mem,
- VkDeviceSize memoryOffset,
- VkMemoryRequirements memRequirements,
- unordered_map<VkDeviceMemory, vector<MEMORY_RANGE>>& ranges,
- const unordered_map<VkDeviceMemory, vector<MEMORY_RANGE>>& other_ranges,
- VkDebugReportObjectTypeEXT object_type)
-{
+VkBool32 validate_buffer_image_aliasing(layer_data *my_data, uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
+ VkMemoryRequirements memRequirements,
+ unordered_map<VkDeviceMemory, vector<MEMORY_RANGE>> &ranges,
+ const unordered_map<VkDeviceMemory, vector<MEMORY_RANGE>> &other_ranges,
+ VkDebugReportObjectTypeEXT object_type) {
MEMORY_RANGE range;
range.handle = handle;
range.memory = mem;
@@ -1831,23 +1545,22 @@ VkBool32 validate_buffer_image_aliasing(
return validate_memory_range(my_data, other_ranges, range, object_type);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory(
- VkDevice device,
- VkBuffer buffer,
- VkDeviceMemory mem,
- VkDeviceSize memoryOffset)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
loader_platform_thread_lock_mutex(&globalLock);
// Track objects tied to memory
uint64_t buffer_handle = (uint64_t)(buffer);
- VkBool32 skipCall = set_mem_binding(my_data, device, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
+ VkBool32 skipCall =
+ set_mem_binding(my_data, device, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
add_object_binding_info(my_data, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, mem);
{
VkMemoryRequirements memRequirements;
vkGetBufferMemoryRequirements(device, buffer, &memRequirements);
- skipCall |= validate_buffer_image_aliasing(my_data, buffer_handle, mem, memoryOffset, memRequirements, my_data->bufferRanges, my_data->imageRanges, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
+ skipCall |=
+ validate_buffer_image_aliasing(my_data, buffer_handle, mem, memoryOffset, memRequirements, my_data->bufferRanges,
+ my_data->imageRanges, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
}
print_mem_list(my_data, device);
loader_platform_thread_unlock_mutex(&globalLock);
@@ -1857,23 +1570,21 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(
- VkDevice device,
- VkImage image,
- VkDeviceMemory mem,
- VkDeviceSize memoryOffset)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
loader_platform_thread_lock_mutex(&globalLock);
// Track objects tied to memory
uint64_t image_handle = (uint64_t)(image);
- VkBool32 skipCall = set_mem_binding(my_data, device, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
+ VkBool32 skipCall =
+ set_mem_binding(my_data, device, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
add_object_binding_info(my_data, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, mem);
{
VkMemoryRequirements memRequirements;
vkGetImageMemoryRequirements(device, image, &memRequirements);
- skipCall |= validate_buffer_image_aliasing(my_data, image_handle, mem, memoryOffset, memRequirements, my_data->imageRanges, my_data->bufferRanges, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
+ skipCall |= validate_buffer_image_aliasing(my_data, image_handle, mem, memoryOffset, memRequirements, my_data->imageRanges,
+ my_data->bufferRanges, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
}
print_mem_list(my_data, device);
loader_platform_thread_unlock_mutex(&globalLock);
@@ -1883,34 +1594,24 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements(
- VkDevice device,
- VkBuffer buffer,
- VkMemoryRequirements *pMemoryRequirements)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
// TODO : What to track here?
// Could potentially save returned mem requirements and validate values passed into BindBufferMemory
my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements(
- VkDevice device,
- VkImage image,
- VkMemoryRequirements *pMemoryRequirements)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
// TODO : What to track here?
// Could potentially save returned mem requirements and validate values passed into BindImageMemory
my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(
- VkQueue queue,
- uint32_t bindInfoCount,
- const VkBindSparseInfo *pBindInfo,
- VkFence fence)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
VkBool32 skipCall = VK_FALSE;
@@ -1922,28 +1623,25 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(
// Track objects tied to memory
for (uint32_t j = 0; j < bindInfo->bufferBindCount; j++) {
for (uint32_t k = 0; k < bindInfo->pBufferBinds[j].bindCount; k++) {
- if (set_sparse_mem_binding(my_data, queue,
- bindInfo->pBufferBinds[j].pBinds[k].memory,
- (uint64_t) bindInfo->pBufferBinds[j].buffer,
- VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkQueueBindSparse"))
+ if (set_sparse_mem_binding(my_data, queue, bindInfo->pBufferBinds[j].pBinds[k].memory,
+ (uint64_t)bindInfo->pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
+ "vkQueueBindSparse"))
skipCall = VK_TRUE;
}
}
for (uint32_t j = 0; j < bindInfo->imageOpaqueBindCount; j++) {
for (uint32_t k = 0; k < bindInfo->pImageOpaqueBinds[j].bindCount; k++) {
- if (set_sparse_mem_binding(my_data, queue,
- bindInfo->pImageOpaqueBinds[j].pBinds[k].memory,
- (uint64_t) bindInfo->pImageOpaqueBinds[j].image,
- VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkQueueBindSparse"))
+ if (set_sparse_mem_binding(my_data, queue, bindInfo->pImageOpaqueBinds[j].pBinds[k].memory,
+ (uint64_t)bindInfo->pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ "vkQueueBindSparse"))
skipCall = VK_TRUE;
}
}
for (uint32_t j = 0; j < bindInfo->imageBindCount; j++) {
for (uint32_t k = 0; k < bindInfo->pImageBinds[j].bindCount; k++) {
- if (set_sparse_mem_binding(my_data, queue,
- bindInfo->pImageBinds[j].pBinds[k].memory,
- (uint64_t) bindInfo->pImageBinds[j].image,
- VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkQueueBindSparse"))
+ if (set_sparse_mem_binding(my_data, queue, bindInfo->pImageBinds[j].pBinds[k].memory,
+ (uint64_t)bindInfo->pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ "vkQueueBindSparse"))
skipCall = VK_TRUE;
}
}
@@ -1953,9 +1651,10 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(
if (my_data->semaphoreMap.find(sem) != my_data->semaphoreMap.end()) {
if (my_data->semaphoreMap[sem] != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, (uint64_t) sem,
- __LINE__, MEMTRACK_NONE, "SEMAPHORE",
- "vkQueueBindSparse: Semaphore must be in signaled state before passing to pWaitSemaphores");
+ skipCall =
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
+ (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
+ "vkQueueBindSparse: Semaphore must be in signaled state before passing to pWaitSemaphores");
}
my_data->semaphoreMap[sem] = MEMTRACK_SEMAPHORE_STATE_WAIT;
}
@@ -1965,9 +1664,10 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(
if (my_data->semaphoreMap.find(sem) != my_data->semaphoreMap.end()) {
if (my_data->semaphoreMap[sem] != MEMTRACK_SEMAPHORE_STATE_UNSET) {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, (uint64_t) sem,
- __LINE__, MEMTRACK_NONE, "SEMAPHORE",
- "vkQueueBindSparse: Semaphore must not be currently signaled or in a wait state");
+ skipCall =
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
+ (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
+ "vkQueueBindSparse: Semaphore must not be currently signaled or in a wait state");
}
my_data->semaphoreMap[sem] = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
}
@@ -1997,17 +1697,13 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(
return result;
}
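
The semaphore handling in this hunk is a small state machine: a wait is legal only when the semaphore is currently signaled, and a signal is legal only when it is neither signaled nor being waited on. A minimal sketch of that bookkeeping follows; the enum and method names are placeholders rather than the layer's MEMTRACK_SEMAPHORE_STATE_* values, and plain integers stand in for real semaphore handles.

#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>

// Three states tracked per semaphore, mirroring the hunk above in simplified form.
enum class SemState { Unset, Signalled, Wait };

struct SemTracker {
    std::unordered_map<uint64_t, SemState> states;

    // Wait: must currently be signaled; afterwards the semaphore is in the wait state.
    std::string wait(uint64_t sem) {
        SemState &s = states[sem];
        std::string err = (s == SemState::Signalled) ? "" : "semaphore must be signaled before pWaitSemaphores";
        s = SemState::Wait;
        return err;
    }
    // Signal: must not already be signaled or waited on; afterwards it is signaled.
    std::string signal(uint64_t sem) {
        SemState &s = states[sem];
        std::string err = (s == SemState::Unset) ? "" : "semaphore must not be signaled or in a wait state";
        s = SemState::Signalled;
        return err;
    }
};

int main() {
    SemTracker t;
    std::cout << "[" << t.wait(1) << "]\n";   // flagged: never signaled
    std::cout << "[" << t.signal(2) << "]\n"; // ok
    std::cout << "[" << t.wait(2) << "]\n";   // ok: was signaled first
    return 0;
}
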
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence(
- VkDevice device,
- const VkFenceCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkFence *pFence)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = my_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
- MT_FENCE_INFO* pFI = &my_data->fenceMap[*pFence];
+ MT_FENCE_INFO *pFI = &my_data->fenceMap[*pFence];
memset(pFI, 0, sizeof(MT_FENCE_INFO));
memcpy(&(pFI->createInfo), pCreateInfo, sizeof(VkFenceCreateInfo));
if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
@@ -2018,13 +1714,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(
- VkDevice device,
- uint32_t fenceCount,
- const VkFence *pFences)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&globalLock);
@@ -2035,10 +1727,10 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(
// Validate fences in SIGNALED state
if (!(fence_item->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT)) {
// TODO: I don't see a Valid Usage section for ResetFences. This behavior should be documented there.
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, (uint64_t) pFences[i], __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
- "Fence %#" PRIxLEAST64 " submitted to VkResetFences in UNSIGNALED STATE", (uint64_t) pFences[i]);
- }
- else {
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
+ (uint64_t)pFences[i], __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
+ "Fence %#" PRIxLEAST64 " submitted to VkResetFences in UNSIGNALED STATE", (uint64_t)pFences[i]);
+ } else {
fence_item->second.createInfo.flags =
static_cast<VkFenceCreateFlags>(fence_item->second.createInfo.flags & ~VK_FENCE_CREATE_SIGNALED_BIT);
}
@@ -2051,33 +1743,25 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(
return result;
}
-static inline VkBool32
-verifyFenceStatus(
- VkDevice device,
- VkFence fence,
- const char *apiCall)
-{
+static inline VkBool32 verifyFenceStatus(VkDevice device, VkFence fence, const char *apiCall) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkBool32 skipCall = VK_FALSE;
auto pFenceInfo = my_data->fenceMap.find(fence);
if (pFenceInfo != my_data->fenceMap.end()) {
if (pFenceInfo->second.firstTimeFlag != VK_TRUE) {
- if ((pFenceInfo->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT) && pFenceInfo->second.firstTimeFlag != VK_TRUE) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, (uint64_t) fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
- "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t) fence);
+ if ((pFenceInfo->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT) &&
+ pFenceInfo->second.firstTimeFlag != VK_TRUE) {
+ skipCall |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
+ (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
+ "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
}
- if (!pFenceInfo->second.queue &&
- !pFenceInfo->second
- .swapchain) { // Checking status of unsubmitted fence
- skipCall |= log_msg(
- my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
- reinterpret_cast<uint64_t &>(fence),
- __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
- "%s called for fence %#" PRIxLEAST64
- " which has not been submitted on a Queue or during "
- "acquire next image.",
- apiCall, reinterpret_cast<uint64_t &>(fence));
+ if (!pFenceInfo->second.queue && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
+ reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
+ "%s called for fence %#" PRIxLEAST64 " which has not been submitted on a Queue or during "
+ "acquire next image.",
+ apiCall, reinterpret_cast<uint64_t &>(fence));
}
} else {
pFenceInfo->second.firstTimeFlag = VK_FALSE;
@@ -2086,10 +1770,7 @@ verifyFenceStatus(
return skipCall;
}
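
verifyFenceStatus, shared by vkGetFenceStatus and vkWaitForFences, boils down to two bookkeeping checks per fence: note when a fence created pre-signaled is queried anyway, and warn when a fence is queried before it was ever submitted on a queue or handed to an acquire call. The sketch below captures just that logic; FenceInfo and its fields are simplified stand-ins for the layer's fenceMap entries.

#include <cstdio>

// Simplified per-fence record, standing in for the layer's MT_FENCE_INFO / fenceMap entry.
struct FenceInfo {
    bool created_signaled = false; // VK_FENCE_CREATE_SIGNALED_BIT was set at creation
    bool submitted = false;        // used in a queue submission or an acquire-next-image call
    bool first_check = true;       // the first status query is deliberately not checked
};

// Returns true if a validation message would be emitted for this status query.
static bool report_on_status_query(FenceInfo &f, const char *api) {
    if (f.first_check) { // matches the firstTimeFlag handling: skip once, then start checking
        f.first_check = false;
        return false;
    }
    bool reported = false;
    if (f.created_signaled) {
        std::printf("%s: fence already in SIGNALED state\n", api);
        reported = true;
    }
    if (!f.submitted) {
        std::printf("%s: fence has not been submitted on a queue or during acquire\n", api);
        reported = true;
    }
    return reported;
}

int main() {
    FenceInfo fence{true, false, true};
    report_on_status_query(fence, "vkGetFenceStatus"); // first query: silent
    report_on_status_query(fence, "vkWaitForFences");  // second query: both messages fire
    return 0;
}
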
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(
- VkDevice device,
- VkFence fence)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
VkBool32 skipCall = verifyFenceStatus(device, fence, "vkGetFenceStatus");
@@ -2105,18 +1786,13 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences(
- VkDevice device,
- uint32_t fenceCount,
- const VkFence *pFences,
- VkBool32 waitAll,
- uint64_t timeout)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkBool32 skipCall = VK_FALSE;
// Verify fence status of submitted fences
loader_platform_thread_lock_mutex(&globalLock);
- for(uint32_t i = 0; i < fenceCount; i++) {
+ for (uint32_t i = 0; i < fenceCount; i++) {
skipCall |= verifyFenceStatus(device, pFences[i], "vkWaitForFences");
}
loader_platform_thread_unlock_mutex(&globalLock);
@@ -2127,7 +1803,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences(
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
if (waitAll || fenceCount == 1) { // Clear all the fences
- for(uint32_t i = 0; i < fenceCount; i++) {
+ for (uint32_t i = 0; i < fenceCount; i++) {
update_fence_tracking(my_data, pFences[i]);
}
}
@@ -2136,9 +1812,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(
- VkQueue queue)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
VkResult result = my_data->device_dispatch_table->QueueWaitIdle(queue);
if (VK_SUCCESS == result) {
@@ -2149,9 +1823,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(
- VkDevice device)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = my_data->device_dispatch_table->DeviceWaitIdle(device);
if (VK_SUCCESS == result) {
@@ -2162,12 +1834,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(
- VkDevice device,
- const VkBufferCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkBuffer *pBuffer)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = my_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
if (VK_SUCCESS == result) {
@@ -2178,12 +1846,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(
- VkDevice device,
- const VkImageCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkImage *pImage)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = my_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
if (VK_SUCCESS == result) {
@@ -2194,12 +1858,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(
- VkDevice device,
- const VkImageViewCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkImageView *pView)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = my_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
if (result == VK_SUCCESS) {
@@ -2207,19 +1867,16 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(
my_data->imageViewMap[*pView].image = pCreateInfo->image;
// Validate that img has correct usage flags set
validate_image_usage_flags(my_data, device, pCreateInfo->image,
- VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
- VK_FALSE, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT]_BIT");
+ VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
+ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
+ VK_FALSE, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT]_BIT");
loader_platform_thread_unlock_mutex(&globalLock);
}
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(
- VkDevice device,
- const VkBufferViewCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkBufferView *pView)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = my_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
if (result == VK_SUCCESS) {
@@ -2227,8 +1884,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(
// In order to create a valid buffer view, the buffer must have been created with at least one of the
// following flags: UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
validate_buffer_usage_flags(my_data, device, pCreateInfo->buffer,
- VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT,
- VK_FALSE, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
+ VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, VK_FALSE,
+ "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
my_data->bufferViewMap[*pView] = *pCreateInfo;
loader_platform_thread_unlock_mutex(&globalLock);
}
@@ -2236,12 +1893,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyBufferView(VkDevice device, VkBufferView bufferView,
- const VkAllocationCallbacks *pAllocator) {
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- my_data->device_dispatch_table->DestroyBufferView(device, bufferView,
- pAllocator);
+vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ my_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
loader_platform_thread_lock_mutex(&globalLock);
auto item = my_data->bufferViewMap.find(bufferView);
if (item != my_data->bufferViewMap.end()) {
@@ -2250,13 +1904,10 @@ vkDestroyBufferView(VkDevice device, VkBufferView bufferView,
loader_platform_thread_unlock_mutex(&globalLock);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(
- VkDevice device,
- const VkCommandBufferAllocateInfo *pCreateInfo,
- VkCommandBuffer *pCommandBuffer)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkResult result = my_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
+ VkResult result = my_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
loader_platform_thread_lock_mutex(&globalLock);
if (VK_SUCCESS == result) {
@@ -2269,12 +1920,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(
- VkDevice device,
- VkCommandPool commandPool,
- uint32_t commandBufferCount,
- const VkCommandBuffer *pCommandBuffers)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer *pCommandBuffers) {
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
@@ -2290,12 +1938,9 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(
- VkDevice device,
- const VkCommandPoolCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkCommandPool *pCommandPool)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkCommandPool *pCommandPool) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = my_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
@@ -2308,25 +1953,23 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool(
- VkDevice device,
- VkCommandPool commandPool,
- const VkAllocationCallbacks *pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
VkBool32 commandBufferComplete = VK_FALSE;
- VkBool32 skipCall = VK_FALSE;
+ VkBool32 skipCall = VK_FALSE;
// Verify that command buffers in pool are complete (not in-flight)
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
for (auto it = my_data->commandPoolMap[commandPool].pCommandBuffers.begin();
- it != my_data->commandPoolMap[commandPool].pCommandBuffers.end(); it++) {
+ it != my_data->commandPoolMap[commandPool].pCommandBuffers.end(); it++) {
commandBufferComplete = VK_FALSE;
skipCall = checkCBCompleted(my_data, *it, &commandBufferComplete);
if (VK_FALSE == commandBufferComplete) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)(*it), __LINE__,
- MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM", "Destroying Command Pool 0x%" PRIxLEAST64 " before "
- "its command buffer (0x%" PRIxLEAST64 ") has completed.", (uint64_t)(commandPool),
- reinterpret_cast<uint64_t>(*it));
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
+ "Destroying Command Pool 0x%" PRIxLEAST64 " before "
+ "its command buffer (0x%" PRIxLEAST64 ") has completed.",
+ (uint64_t)(commandPool), reinterpret_cast<uint64_t>(*it));
}
}
loader_platform_thread_unlock_mutex(&globalLock);
@@ -2346,15 +1989,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool(
loader_platform_thread_unlock_mutex(&globalLock);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(
- VkDevice device,
- VkCommandPool commandPool,
- VkCommandPoolResetFlags flags)
-{
- layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkBool32 commandBufferComplete = VK_FALSE;
- VkBool32 skipCall = VK_FALSE;
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ VkBool32 commandBufferComplete = VK_FALSE;
+ VkBool32 skipCall = VK_FALSE;
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
loader_platform_thread_lock_mutex(&globalLock);
auto it = my_data->commandPoolMap[commandPool].pCommandBuffers.begin();
@@ -2362,9 +2002,11 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(
while (it != my_data->commandPoolMap[commandPool].pCommandBuffers.end()) {
skipCall = checkCBCompleted(my_data, (*it), &commandBufferComplete);
if (VK_FALSE == commandBufferComplete) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)(*it), __LINE__,
- MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM", "Resetting CB %p before it has completed. You must check CB "
- "flag before calling vkResetCommandBuffer().", (*it));
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
+ "Resetting CB %p before it has completed. You must check CB "
+ "flag before calling vkResetCommandBuffer().",
+ (*it));
} else {
// Clear memory references at this point.
skipCall |= clear_cmd_buf_and_mem_references(my_data, (*it));
@@ -2380,13 +2022,11 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(
- VkCommandBuffer commandBuffer,
- const VkCommandBufferBeginInfo *pBeginInfo)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
VkBool32 commandBufferComplete = VK_FALSE;
loader_platform_thread_lock_mutex(&globalLock);
@@ -2394,9 +2034,11 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(
skipCall = checkCBCompleted(my_data, commandBuffer, &commandBufferComplete);
if (VK_FALSE == commandBufferComplete) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
- MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM", "Calling vkBeginCommandBuffer() on active CB %p before it has completed. "
- "You must check CB flag before this call.", commandBuffer);
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
+ "Calling vkBeginCommandBuffer() on active CB %p before it has completed. "
+ "You must check CB flag before this call.",
+ commandBuffer);
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
@@ -2408,31 +2050,29 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(
- VkCommandBuffer commandBuffer)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
// TODO : Anything to do here?
VkResult result = my_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer(
- VkCommandBuffer commandBuffer,
- VkCommandBufferResetFlags flags)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
VkBool32 commandBufferComplete = VK_FALSE;
loader_platform_thread_lock_mutex(&globalLock);
// Verify that CB is complete (not in-flight)
skipCall = checkCBCompleted(my_data, commandBuffer, &commandBufferComplete);
if (VK_FALSE == commandBufferComplete) {
- skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
- MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM", "Resetting CB %p before it has completed. You must check CB "
- "flag before calling vkResetCommandBuffer().", commandBuffer);
+ skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
+ "Resetting CB %p before it has completed. You must check CB "
+ "flag before calling vkResetCommandBuffer().",
+ commandBuffer);
}
    // Clear memory references at this point.
skipCall |= clear_cmd_buf_and_mem_references(my_data, commandBuffer);
@@ -2445,11 +2085,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer(
// TODO : For any vkCmdBind* calls that include an object which has mem bound to it,
// need to account for that mem now having binding to given commandBuffer
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline(
- VkCommandBuffer commandBuffer,
- VkPipelineBindPoint pipelineBindPoint,
- VkPipeline pipeline)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
#if 0 // FIXME: NEED TO FIX THE FOLLOWING CODE AND REMOVE THIS #if 0
// TODO : If memory bound to pipeline, then need to tie that mem to commandBuffer
@@ -2467,20 +2104,14 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline(
my_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(
- VkCommandBuffer commandBuffer,
- VkPipelineBindPoint pipelineBindPoint,
- VkPipelineLayout layout,
- uint32_t firstSet,
- uint32_t setCount,
- const VkDescriptorSet *pDescriptorSets,
- uint32_t dynamicOffsetCount,
- const uint32_t *pDynamicOffsets)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
+ uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
+ const uint32_t *pDynamicOffsets) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_data = my_data->cbMap.find(commandBuffer);
if (cb_data != my_data->cbMap.end()) {
- std::vector<VkDescriptorSet>& activeDescriptorSets = cb_data->second.activeDescriptorSets;
+ std::vector<VkDescriptorSet> &activeDescriptorSets = cb_data->second.activeDescriptorSets;
if (activeDescriptorSets.size() < (setCount + firstSet)) {
activeDescriptorSets.resize(setCount + firstSet);
}
@@ -2489,27 +2120,24 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(
}
}
// TODO : Somewhere need to verify that all textures referenced by shaders in DS are in some type of *SHADER_READ* state
- my_data->device_dispatch_table->CmdBindDescriptorSets(
- commandBuffer, pipelineBindPoint, layout, firstSet, setCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
+ my_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
+ pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(
- VkCommandBuffer commandBuffer,
- uint32_t firstBinding,
- uint32_t bindingCount,
- const VkBuffer *pBuffers,
- const VkDeviceSize *pOffsets)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
+ uint32_t bindingCount, const VkBuffer *pBuffers,
+ const VkDeviceSize *pOffsets) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
VkBool32 skip_call = false;
loader_platform_thread_lock_mutex(&globalLock);
for (uint32_t i = 0; i < bindingCount; ++i) {
VkDeviceMemory mem;
skip_call |= get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)(pBuffers[i]),
- VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
+ VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
auto cb_data = my_data->cbMap.find(commandBuffer);
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(my_data, mem, "vkCmdBindVertexBuffers()"); };
+ std::function<VkBool32()> function =
+ [=]() { return validate_memory_is_valid(my_data, mem, "vkCmdBindVertexBuffers()"); };
cb_data->second.validate_functions.push_back(function);
}
}
@@ -2519,16 +2147,13 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(
my_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(
- VkCommandBuffer commandBuffer,
- VkBuffer buffer,
- VkDeviceSize offset,
- VkIndexType indexType)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
VkDeviceMemory mem;
loader_platform_thread_lock_mutex(&globalLock);
- VkBool32 skip_call = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
+ VkBool32 skip_call =
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
auto cb_data = my_data->cbMap.find(commandBuffer);
if (cb_data != my_data->cbMap.end()) {
std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(my_data, mem, "vkCmdBindIndexBuffer()"); };
@@ -2540,72 +2165,72 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(
my_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets(
- VkDevice device,
- uint32_t descriptorWriteCount,
- const VkWriteDescriptorSet* pDescriptorWrites,
- uint32_t descriptorCopyCount,
- const VkCopyDescriptorSet* pDescriptorCopies)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
+ uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
uint32_t j = 0;
for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
- my_data->descriptorSetMap[pDescriptorWrites[i].dstSet]
- .images.push_back(
- pDescriptorWrites[i].pImageInfo[j].imageView);
+ my_data->descriptorSetMap[pDescriptorWrites[i].dstSet].images.push_back(
+ pDescriptorWrites[i].pImageInfo[j].imageView);
}
- } else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ) {
+ } else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
- my_data->descriptorSetMap[pDescriptorWrites[i].dstSet]
- .buffers.push_back(
- my_data
- ->bufferViewMap[pDescriptorWrites[i]
- .pTexelBufferView[j]]
- .buffer);
+ my_data->descriptorSetMap[pDescriptorWrites[i].dstSet].buffers.push_back(
+ my_data->bufferViewMap[pDescriptorWrites[i].pTexelBufferView[j]].buffer);
}
} else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
- my_data->descriptorSetMap[pDescriptorWrites[i].dstSet]
- .buffers.push_back(
- pDescriptorWrites[i].pBufferInfo[j].buffer);
+ my_data->descriptorSetMap[pDescriptorWrites[i].dstSet].buffers.push_back(
+ pDescriptorWrites[i].pBufferInfo[j].buffer);
}
}
}
// TODO : Need to handle descriptor copies. Will wait on this until merge w/
// draw_state
- my_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
+ my_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
+ pDescriptorCopies);
}
-bool markStoreImagesAndBuffersAsWritten(
- VkCommandBuffer commandBuffer)
-{
+bool markStoreImagesAndBuffersAsWritten(VkCommandBuffer commandBuffer) {
bool skip_call = false;
loader_platform_thread_lock_mutex(&globalLock);
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_data = my_data->cbMap.find(commandBuffer);
- if (cb_data == my_data->cbMap.end()) return skip_call;
- std::vector<VkDescriptorSet>& activeDescriptorSets = cb_data->second.activeDescriptorSets;
+ if (cb_data == my_data->cbMap.end())
+ return skip_call;
+ std::vector<VkDescriptorSet> &activeDescriptorSets = cb_data->second.activeDescriptorSets;
for (auto descriptorSet : activeDescriptorSets) {
auto ds_data = my_data->descriptorSetMap.find(descriptorSet);
- if (ds_data == my_data->descriptorSetMap.end()) continue;
+ if (ds_data == my_data->descriptorSetMap.end())
+ continue;
std::vector<VkImageView> images = ds_data->second.images;
std::vector<VkBuffer> buffers = ds_data->second.buffers;
for (auto imageView : images) {
auto iv_data = my_data->imageViewMap.find(imageView);
- if (iv_data == my_data->imageViewMap.end()) continue;
+ if (iv_data == my_data->imageViewMap.end())
+ continue;
VkImage image = iv_data->second.image;
VkDeviceMemory mem;
- skip_call |= get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, mem, true, image); return VK_FALSE; };
+ skip_call |=
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, mem, true, image);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
for (auto buffer : buffers) {
VkDeviceMemory mem;
- skip_call |= get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, mem, true); return VK_FALSE; };
+ skip_call |=
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, mem, true);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
}
@@ -2613,45 +2238,31 @@ bool markStoreImagesAndBuffersAsWritten(
return skip_call;
}
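
markStoreImagesAndBuffersAsWritten is the clearest example of the deferred-validation pattern used throughout these hunks: at record time each hook pushes a std::function<VkBool32()> onto the command buffer's validate_functions list, either a read check (validate_memory_is_valid) or a write marker (set_memory_valid), and the list is executed later when the command buffer is submitted. A self-contained sketch of that queue-then-run idea, with a plain bool standing in for the layer's per-memory-object tracking:

#include <functional>
#include <iostream>
#include <vector>

int main() {
    // Stand-in for one command buffer's validate_functions vector.
    std::vector<std::function<bool()>> deferred;
    bool memory_valid = false; // stand-in for the tracked "contents are valid" bit on a memory object

    // Recorded by a read-style command (copy source, index buffer, ...): check validity at execution.
    deferred.push_back([&]() {
        if (!memory_valid)
            std::cout << "reading memory whose contents were never written\n";
        return !memory_valid; // true means a message was emitted, like the layer's VkBool32 skip flags
    });

    // Recorded by a write-style command (copy destination, storage image, ...): mark memory valid.
    deferred.push_back([&]() {
        memory_valid = true;
        return false;
    });

    // "Queue submit": run every deferred check in record order and accumulate the skip flag.
    bool skip = false;
    for (auto &fn : deferred)
        skip = fn() || skip;
    std::cout << (skip ? "validation messages emitted\n" : "clean submit\n");
    return 0;
}
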
-VKAPI_ATTR void VKAPI_CALL vkCmdDraw(
- VkCommandBuffer commandBuffer,
- uint32_t vertexCount,
- uint32_t instanceCount,
- uint32_t firstVertex,
- uint32_t firstInstance)
-{
+VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
+ uint32_t firstVertex, uint32_t firstInstance) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip_call = markStoreImagesAndBuffersAsWritten(commandBuffer);
if (!skip_call)
my_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
}
-VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(
- VkCommandBuffer commandBuffer,
- uint32_t indexCount,
- uint32_t instanceCount,
- uint32_t firstIndex,
- int32_t vertexOffset,
- uint32_t firstInstance)
-{
+VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
+ uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip_call = markStoreImagesAndBuffersAsWritten(commandBuffer);
if (!skip_call)
- my_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
+ my_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
+ firstInstance);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(
- VkCommandBuffer commandBuffer,
- VkBuffer buffer,
- VkDeviceSize offset,
- uint32_t count,
- uint32_t stride)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
VkDeviceMemory mem;
loader_platform_thread_lock_mutex(&globalLock);
- VkBool32 skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
- skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdDrawIndirect");
+ VkBool32 skipCall =
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
+ skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdDrawIndirect");
skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
@@ -2659,18 +2270,14 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(
- VkCommandBuffer commandBuffer,
- VkBuffer buffer,
- VkDeviceSize offset,
- uint32_t count,
- uint32_t stride)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
VkDeviceMemory mem;
loader_platform_thread_lock_mutex(&globalLock);
- VkBool32 skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
- skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
+ VkBool32 skipCall =
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
+ skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
@@ -2678,29 +2285,21 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(
}
}
-
-VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(
- VkCommandBuffer commandBuffer,
- uint32_t x,
- uint32_t y,
- uint32_t z)
-{
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip_call = markStoreImagesAndBuffersAsWritten(commandBuffer);
if (!skip_call)
my_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect(
- VkCommandBuffer commandBuffer,
- VkBuffer buffer,
- VkDeviceSize offset)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
VkDeviceMemory mem;
loader_platform_thread_lock_mutex(&globalLock);
- VkBool32 skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
- skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdDispatchIndirect");
+ VkBool32 skipCall =
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
+ skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdDispatchIndirect");
skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
@@ -2708,280 +2307,281 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect(
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(
- VkCommandBuffer commandBuffer,
- VkBuffer srcBuffer,
- VkBuffer dstBuffer,
- uint32_t regionCount,
- const VkBufferCopy *pRegions)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
+ uint32_t regionCount, const VkBufferCopy *pRegions) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
VkDeviceMemory mem;
- VkBool32 skipCall = VK_FALSE;
+ VkBool32 skipCall = VK_FALSE;
auto cb_data = my_data->cbMap.find(commandBuffer);
loader_platform_thread_lock_mutex(&globalLock);
- skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
+ skipCall =
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(my_data, mem, "vkCmdCopyBuffer()"); };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdCopyBuffer");
- skipCall |= get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
+ skipCall |=
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, mem, true); return VK_FALSE; };
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, mem, true);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdCopyBuffer");
// Validate that SRC & DST buffers have correct usage flags set
- skipCall |= validate_buffer_usage_flags(my_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
- skipCall |= validate_buffer_usage_flags(my_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
+ skipCall |= validate_buffer_usage_flags(my_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
+ "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
+ skipCall |= validate_buffer_usage_flags(my_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
+ "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
my_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}
}
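
Each vkCmdCopy*/vkCmdBlit* hook in these hunks runs the same two preliminary checks before dispatching: confirm the source and destination were created with the matching TRANSFER_SRC/TRANSFER_DST usage bit, and tie their backing memory to the command buffer. The usage check itself is a bit test; the sketch below uses invented flag constants and a simplified signature, not the layer's validate_buffer_usage_flags.

#include <cstdint>
#include <cstdio>

using Flags = uint32_t;
constexpr Flags TRANSFER_SRC = 0x1; // stands in for VK_BUFFER_USAGE_TRANSFER_SRC_BIT
constexpr Flags TRANSFER_DST = 0x2; // stands in for VK_BUFFER_USAGE_TRANSFER_DST_BIT

// 'strict' mirrors the VkBool32 parameter in the hunks above: when true, every desired bit
// must be present; when false, any one of them is enough.
static bool usage_ok(Flags actual, Flags desired, bool strict) {
    return strict ? (actual & desired) == desired : (actual & desired) != 0;
}

int main() {
    Flags src_buffer_usage = TRANSFER_DST; // buffer created without the TRANSFER_SRC usage bit
    if (!usage_ok(src_buffer_usage, TRANSFER_SRC, true))
        std::puts("vkCmdCopyBuffer(): srcBuffer missing VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
    return 0;
}
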
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(
- VkCommandBuffer commandBuffer,
- VkQueryPool queryPool,
- uint32_t firstQuery,
- uint32_t queryCount,
- VkBuffer dstBuffer,
- VkDeviceSize dstOffset,
- VkDeviceSize destStride,
- VkQueryResultFlags flags)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
+ VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize destStride, VkQueryResultFlags flags) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
VkDeviceMemory mem;
- VkBool32 skipCall = VK_FALSE;
+ VkBool32 skipCall = VK_FALSE;
auto cb_data = my_data->cbMap.find(commandBuffer);
loader_platform_thread_lock_mutex(&globalLock);
- skipCall |= get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
+ skipCall |=
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, mem, true); return VK_FALSE; };
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, mem, true);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
// Validate that DST buffer has correct usage flags set
- skipCall |= validate_buffer_usage_flags(my_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
+ skipCall |= validate_buffer_usage_flags(my_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
+ "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
- my_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, destStride, flags);
+ my_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
+ dstOffset, destStride, flags);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount,
- const VkImageCopy *pRegions)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
VkDeviceMemory mem;
- VkBool32 skipCall = VK_FALSE;
+ VkBool32 skipCall = VK_FALSE;
auto cb_data = my_data->cbMap.find(commandBuffer);
loader_platform_thread_lock_mutex(&globalLock);
// Validate that src & dst images have correct usage flags set
- skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
+ skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(my_data, mem, "vkCmdCopyImage()", srcImage); };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdCopyImage");
- skipCall |= get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
+ skipCall |=
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, mem, true, dstImage); return VK_FALSE; };
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, mem, true, dstImage);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdCopyImage");
- skipCall |= validate_image_usage_flags(my_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
- skipCall |= validate_image_usage_flags(my_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
+ skipCall |= validate_image_usage_flags(my_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
+ "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
+ skipCall |= validate_image_usage_flags(my_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
+ "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
- my_data->device_dispatch_table->CmdCopyImage(
- commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
+ my_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
+ pRegions);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount,
- const VkImageBlit *pRegions,
- VkFilter filter)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
VkDeviceMemory mem;
- VkBool32 skipCall = VK_FALSE;
+ VkBool32 skipCall = VK_FALSE;
auto cb_data = my_data->cbMap.find(commandBuffer);
loader_platform_thread_lock_mutex(&globalLock);
// Validate that src & dst images have correct usage flags set
- skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
+ skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(my_data, mem, "vkCmdBlitImage()", srcImage); };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdBlitImage");
- skipCall |= get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);\
+ skipCall |=
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, mem, true, dstImage); return VK_FALSE; };
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, mem, true, dstImage);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdBlitImage");
- skipCall |= validate_image_usage_flags(my_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
- skipCall |= validate_image_usage_flags(my_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
+ skipCall |= validate_image_usage_flags(my_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
+ "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
+ skipCall |= validate_image_usage_flags(my_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
+ "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
- my_data->device_dispatch_table->CmdBlitImage(
- commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter);
+ my_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
+ pRegions, filter);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(
- VkCommandBuffer commandBuffer,
- VkBuffer srcBuffer,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount,
- const VkBufferImageCopy *pRegions)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
+ VkImage dstImage, VkImageLayout dstImageLayout,
+ uint32_t regionCount, const VkBufferImageCopy *pRegions) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
VkDeviceMemory mem;
- VkBool32 skipCall = VK_FALSE;
+ VkBool32 skipCall = VK_FALSE;
auto cb_data = my_data->cbMap.find(commandBuffer);
loader_platform_thread_lock_mutex(&globalLock);
- skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
+ skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, mem, true, dstImage); return VK_FALSE; };
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, mem, true, dstImage);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
- skipCall |= get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
+ skipCall |=
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(my_data, mem, "vkCmdCopyBufferToImage()"); };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
// Validate that src buff & dst image have correct usage flags set
- skipCall |= validate_buffer_usage_flags(my_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
- skipCall |= validate_image_usage_flags(my_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
+ skipCall |= validate_buffer_usage_flags(my_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
+ "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
+ skipCall |= validate_image_usage_flags(my_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
+ "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
- my_data->device_dispatch_table->CmdCopyBufferToImage(
- commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
+ my_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
+ pRegions);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkBuffer dstBuffer,
- uint32_t regionCount,
- const VkBufferImageCopy *pRegions)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
+ VkImageLayout srcImageLayout, VkBuffer dstBuffer,
+ uint32_t regionCount, const VkBufferImageCopy *pRegions) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
VkDeviceMemory mem;
- VkBool32 skipCall = VK_FALSE;
+ VkBool32 skipCall = VK_FALSE;
auto cb_data = my_data->cbMap.find(commandBuffer);
loader_platform_thread_lock_mutex(&globalLock);
- skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
+ skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(my_data, mem, "vkCmdCopyImageToBuffer()", srcImage); };
+ std::function<VkBool32()> function =
+ [=]() { return validate_memory_is_valid(my_data, mem, "vkCmdCopyImageToBuffer()", srcImage); };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
- skipCall |= get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
+ skipCall |=
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, mem, true); return VK_FALSE; };
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, mem, true);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
// Validate that dst buff & src image have correct usage flags set
- skipCall |= validate_image_usage_flags(my_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
- skipCall |= validate_buffer_usage_flags(my_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
+ skipCall |= validate_image_usage_flags(my_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
+ "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
+ skipCall |= validate_buffer_usage_flags(my_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
+ "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
- my_data->device_dispatch_table->CmdCopyImageToBuffer(
- commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
+ my_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
+ pRegions);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(
- VkCommandBuffer commandBuffer,
- VkBuffer dstBuffer,
- VkDeviceSize dstOffset,
- VkDeviceSize dataSize,
- const uint32_t *pData)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
+ VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
VkDeviceMemory mem;
- VkBool32 skipCall = VK_FALSE;
+ VkBool32 skipCall = VK_FALSE;
auto cb_data = my_data->cbMap.find(commandBuffer);
loader_platform_thread_lock_mutex(&globalLock);
- skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
+ skipCall =
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, mem, true); return VK_FALSE; };
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, mem, true);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdUpdateBuffer");
// Validate that dst buff has correct usage flags set
- skipCall |= validate_buffer_usage_flags(my_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
+ skipCall |= validate_buffer_usage_flags(my_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
+ "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
my_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(
- VkCommandBuffer commandBuffer,
- VkBuffer dstBuffer,
- VkDeviceSize dstOffset,
- VkDeviceSize size,
- uint32_t data)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
VkDeviceMemory mem;
- VkBool32 skipCall = VK_FALSE;
+ VkBool32 skipCall = VK_FALSE;
auto cb_data = my_data->cbMap.find(commandBuffer);
loader_platform_thread_lock_mutex(&globalLock);
- skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
+ skipCall =
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, mem, true); return VK_FALSE; };
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, mem, true);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdFillBuffer");
// Validate that dst buff has correct usage flags set
- skipCall |= validate_buffer_usage_flags(my_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
+ skipCall |= validate_buffer_usage_flags(my_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
+ "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
my_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(
- VkCommandBuffer commandBuffer,
- VkImage image,
- VkImageLayout imageLayout,
- const VkClearColorValue *pColor,
- uint32_t rangeCount,
- const VkImageSubresourceRange *pRanges)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
+ VkImageLayout imageLayout, const VkClearColorValue *pColor,
+ uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
VkDeviceMemory mem;
- VkBool32 skipCall = VK_FALSE;
+ VkBool32 skipCall = VK_FALSE;
auto cb_data = my_data->cbMap.find(commandBuffer);
loader_platform_thread_lock_mutex(&globalLock);
- skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
+ skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, mem, true, image); return VK_FALSE; };
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, mem, true, image);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdClearColorImage");
@@ -2991,101 +2591,84 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage(
- VkCommandBuffer commandBuffer,
- VkImage image,
- VkImageLayout imageLayout,
- const VkClearDepthStencilValue *pDepthStencil,
- uint32_t rangeCount,
- const VkImageSubresourceRange *pRanges)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
+ const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
+ const VkImageSubresourceRange *pRanges) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
VkDeviceMemory mem;
- VkBool32 skipCall = VK_FALSE;
+ VkBool32 skipCall = VK_FALSE;
auto cb_data = my_data->cbMap.find(commandBuffer);
loader_platform_thread_lock_mutex(&globalLock);
- skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
+ skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, mem, true, image); return VK_FALSE; };
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, mem, true, image);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
- my_data->device_dispatch_table->CmdClearDepthStencilImage(
- commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
+ my_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
+ pRanges);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount,
- const VkImageResolve *pRegions)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
VkBool32 skipCall = VK_FALSE;
auto cb_data = my_data->cbMap.find(commandBuffer);
loader_platform_thread_lock_mutex(&globalLock);
VkDeviceMemory mem;
- skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
+ skipCall = get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(my_data, mem, "vkCmdResolveImage()", srcImage); };
+ std::function<VkBool32()> function =
+ [=]() { return validate_memory_is_valid(my_data, mem, "vkCmdResolveImage()", srcImage); };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdResolveImage");
- skipCall |= get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
+ skipCall |=
+ get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, mem, true, dstImage); return VK_FALSE; };
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, mem, true, dstImage);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
skipCall |= update_cmd_buf_and_mem_references(my_data, commandBuffer, mem, "vkCmdResolveImage");
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
- my_data->device_dispatch_table->CmdResolveImage(
- commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
+ my_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
+ regionCount, pRegions);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery(
- VkCommandBuffer commandBuffer,
- VkQueryPool queryPool,
- uint32_t slot,
- VkFlags flags)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
my_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(
- VkCommandBuffer commandBuffer,
- VkQueryPool queryPool,
- uint32_t slot)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
my_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(
- VkCommandBuffer commandBuffer,
- VkQueryPool queryPool,
- uint32_t firstQuery,
- uint32_t queryCount)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
my_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
- VkInstance instance,
- const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDebugReportCallbackEXT* pMsgCallback)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
@@ -3097,11 +2680,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
return res;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
- VkInstance instance,
- VkDebugReportCallbackEXT msgCallback,
- const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
+ VkDebugReportCallbackEXT msgCallback,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
@@ -3110,26 +2691,17 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
loader_platform_thread_unlock_mutex(&globalLock);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(
- VkInstance instance,
- VkDebugReportFlagsEXT flags,
- VkDebugReportObjectTypeEXT objType,
- uint64_t object,
- size_t location,
- int32_t msgCode,
- const char* pLayerPrefix,
- const char* pMsg)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
+ size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
- my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
+ my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
+ pMsg);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(
- VkDevice device,
- const VkSwapchainCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkSwapchainKHR *pSwapchain)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSwapchainKHR *pSwapchain) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = my_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
@@ -3142,16 +2714,13 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(
- VkDevice device,
- VkSwapchainKHR swapchain,
- const VkAllocationCallbacks *pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&globalLock);
if (my_data->swapchainMap.find(swapchain) != my_data->swapchainMap.end()) {
- MT_SWAP_CHAIN_INFO* pInfo = my_data->swapchainMap[swapchain];
+ MT_SWAP_CHAIN_INFO *pInfo = my_data->swapchainMap[swapchain];
if (pInfo->images.size() > 0) {
for (auto it = pInfo->images.begin(); it != pInfo->images.end(); it++) {
@@ -3170,12 +2739,8 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(
- VkDevice device,
- VkSwapchainKHR swapchain,
- uint32_t *pCount,
- VkImage *pSwapchainImages)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = my_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
@@ -3189,22 +2754,25 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(
memcpy(&pInfo->images[0], pSwapchainImages, sizeof(pInfo->images[0]) * count);
if (pInfo->images.size() > 0) {
- for (std::vector<VkImage>::const_iterator it = pInfo->images.begin();
- it != pInfo->images.end(); it++) {
+ for (std::vector<VkImage>::const_iterator it = pInfo->images.begin(); it != pInfo->images.end(); it++) {
// Add image object binding, then insert the new Mem Object and then bind it to created image
- add_object_create_info(my_data, (uint64_t)*it, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, &pInfo->createInfo);
+ add_object_create_info(my_data, (uint64_t)*it, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
+ &pInfo->createInfo);
}
}
} else {
const size_t count = *pCount;
MT_SWAP_CHAIN_INFO *pInfo = my_data->swapchainMap[swapchain];
- const VkBool32 mismatch = (pInfo->images.size() != count ||
- memcmp(&pInfo->images[0], pSwapchainImages, sizeof(pInfo->images[0]) * count));
+ const VkBool32 mismatch =
+ (pInfo->images.size() != count || memcmp(&pInfo->images[0], pSwapchainImages, sizeof(pInfo->images[0]) * count));
if (mismatch) {
// TODO: Verify against Valid Usage section of extension
- log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, (uint64_t) swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
- "vkGetSwapchainInfoKHR(%" PRIu64 ", VK_SWAP_CHAIN_INFO_TYPE_PERSISTENT_IMAGES_KHR) returned mismatching data", (uint64_t)(swapchain));
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
+ (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
+ "vkGetSwapchainInfoKHR(%" PRIu64
+ ", VK_SWAP_CHAIN_INFO_TYPE_PERSISTENT_IMAGES_KHR) returned mismatching data",
+ (uint64_t)(swapchain));
}
}
}
@@ -3212,23 +2780,17 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(
- VkDevice device,
- VkSwapchainKHR swapchain,
- uint64_t timeout,
- VkSemaphore semaphore,
- VkFence fence,
- uint32_t *pImageIndex)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
+ VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&globalLock);
if (my_data->semaphoreMap.find(semaphore) != my_data->semaphoreMap.end()) {
if (my_data->semaphoreMap[semaphore] != MEMTRACK_SEMAPHORE_STATE_UNSET) {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, (uint64_t)semaphore,
- __LINE__, MEMTRACK_NONE, "SEMAPHORE",
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
+ (uint64_t)semaphore, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
"vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
}
my_data->semaphoreMap[semaphore] = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
@@ -3239,16 +2801,12 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
- result = my_data->device_dispatch_table->AcquireNextImageKHR(device,
- swapchain, timeout, semaphore, fence, pImageIndex);
+ result = my_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
}
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(
- VkQueue queue,
- const VkPresentInfoKHR* pPresentInfo)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
VkBool32 skip_call = false;
@@ -3277,12 +2835,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(
- VkDevice device,
- const VkSemaphoreCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkSemaphore *pSemaphore)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = my_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
loader_platform_thread_lock_mutex(&globalLock);
@@ -3293,11 +2847,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore(
- VkDevice device,
- VkSemaphore semaphore,
- const VkAllocationCallbacks *pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
auto item = my_data->semaphoreMap.find(semaphore);
@@ -3308,12 +2859,9 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore(
my_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(
- VkDevice device,
- const VkFramebufferCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkFramebuffer* pFramebuffer)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkFramebuffer *pFramebuffer) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = my_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
loader_platform_thread_lock_mutex(&globalLock);
@@ -3324,7 +2872,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(
continue;
}
MT_FB_ATTACHMENT_INFO fb_info;
- get_mem_binding_from_object(my_data, device, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &fb_info.mem);
+ get_mem_binding_from_object(my_data, device, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ &fb_info.mem);
fb_info.image = view_data->second.image;
my_data->fbMap[*pFramebuffer].attachments.push_back(fb_info);
}
@@ -3332,11 +2881,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(
return result;
}
-VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer(
- VkDevice device,
- VkFramebuffer framebuffer,
- const VkAllocationCallbacks* pAllocator)
-{
+VKAPI_ATTR void VKAPI_CALL
+vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
@@ -3349,12 +2895,9 @@ VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer(
my_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(
- VkDevice device,
- const VkRenderPassCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkRenderPass* pRenderPass)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkRenderPass *pRenderPass) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkResult result = my_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
loader_platform_thread_lock_mutex(&globalLock);
@@ -3366,26 +2909,29 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(
pass_info.attachment = i;
my_data->passMap[*pRenderPass].attachments.push_back(pass_info);
}
- //TODO: Maybe fill list and then copy instead of locking
- std::unordered_map<uint32_t, bool>& attachment_first_read = my_data->passMap[*pRenderPass].attachment_first_read;
- std::unordered_map<uint32_t, VkImageLayout>& attachment_first_layout = my_data->passMap[*pRenderPass].attachment_first_layout;
+ // TODO: Maybe fill list and then copy instead of locking
+ std::unordered_map<uint32_t, bool> &attachment_first_read = my_data->passMap[*pRenderPass].attachment_first_read;
+ std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = my_data->passMap[*pRenderPass].attachment_first_layout;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
- const VkSubpassDescription& subpass = pCreateInfo->pSubpasses[i];
+ const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
uint32_t attachment = subpass.pInputAttachments[j].attachment;
- if (attachment_first_read.count(attachment)) continue;
+ if (attachment_first_read.count(attachment))
+ continue;
attachment_first_read.insert(std::make_pair(attachment, true));
attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
uint32_t attachment = subpass.pColorAttachments[j].attachment;
- if (attachment_first_read.count(attachment)) continue;
+ if (attachment_first_read.count(attachment))
+ continue;
attachment_first_read.insert(std::make_pair(attachment, false));
attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
- if (attachment_first_read.count(attachment)) continue;
+ if (attachment_first_read.count(attachment))
+ continue;
attachment_first_read.insert(std::make_pair(attachment, false));
attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
}
@@ -3395,11 +2941,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass(
- VkDevice device,
- VkRenderPass renderPass,
- const VkAllocationCallbacks *pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
my_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
@@ -3408,48 +2951,57 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass(
loader_platform_thread_unlock_mutex(&globalLock);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(
- VkCommandBuffer cmdBuffer,
- const VkRenderPassBeginInfo *pRenderPassBegin,
- VkSubpassContents contents)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBeginRenderPass(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
VkBool32 skip_call = false;
if (pRenderPassBegin) {
loader_platform_thread_lock_mutex(&globalLock);
auto pass_data = my_data->passMap.find(pRenderPassBegin->renderPass);
if (pass_data != my_data->passMap.end()) {
- MT_PASS_INFO& pass_info = pass_data->second;
+ MT_PASS_INFO &pass_info = pass_data->second;
pass_info.fb = pRenderPassBegin->framebuffer;
auto cb_data = my_data->cbMap.find(cmdBuffer);
for (size_t i = 0; i < pass_info.attachments.size(); ++i) {
- MT_FB_ATTACHMENT_INFO& fb_info = my_data->fbMap[pass_info.fb].attachments[i];
+ MT_FB_ATTACHMENT_INFO &fb_info = my_data->fbMap[pass_info.fb].attachments[i];
if (pass_info.attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, fb_info.mem, true, fb_info.image); return VK_FALSE; };
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, fb_info.mem, true, fb_info.image);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
- VkImageLayout& attachment_layout = pass_info.attachment_first_layout[pass_info.attachments[i].attachment];
+ VkImageLayout &attachment_layout = pass_info.attachment_first_layout[pass_info.attachments[i].attachment];
if (attachment_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
attachment_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
- skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
- (uint64_t)(pRenderPassBegin->renderPass), __LINE__, MEMTRACK_INVALID_LAYOUT, "MEM",
- "Cannot clear attachment %d with invalid first layout %d.", pass_info.attachments[i].attachment, attachment_layout);
+ skip_call |=
+ log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, (uint64_t)(pRenderPassBegin->renderPass), __LINE__,
+ MEMTRACK_INVALID_LAYOUT, "MEM", "Cannot clear attachment %d with invalid first layout %d.",
+ pass_info.attachments[i].attachment, attachment_layout);
}
} else if (pass_info.attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, fb_info.mem, false, fb_info.image); return VK_FALSE; };
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, fb_info.mem, false, fb_info.image);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
} else if (pass_info.attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(my_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image); };
+ std::function<VkBool32()> function = [=]() {
+ return validate_memory_is_valid(my_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
+ };
cb_data->second.validate_functions.push_back(function);
}
}
if (pass_info.attachment_first_read[pass_info.attachments[i].attachment]) {
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(my_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image); };
+ std::function<VkBool32()> function = [=]() {
+ return validate_memory_is_valid(my_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
+ };
cb_data->second.validate_functions.push_back(function);
}
}
@@ -3464,26 +3016,30 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(
return my_data->device_dispatch_table->CmdBeginRenderPass(cmdBuffer, pRenderPassBegin, contents);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(
- VkCommandBuffer cmdBuffer)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer cmdBuffer) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
auto cb_data = my_data->cbMap.find(cmdBuffer);
if (cb_data != my_data->cbMap.end()) {
auto pass_data = my_data->passMap.find(cb_data->second.pass);
if (pass_data != my_data->passMap.end()) {
- MT_PASS_INFO& pass_info = pass_data->second;
+ MT_PASS_INFO &pass_info = pass_data->second;
for (size_t i = 0; i < pass_info.attachments.size(); ++i) {
- MT_FB_ATTACHMENT_INFO& fb_info = my_data->fbMap[pass_info.fb].attachments[i];
+ MT_FB_ATTACHMENT_INFO &fb_info = my_data->fbMap[pass_info.fb].attachments[i];
if (pass_info.attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, fb_info.mem, true, fb_info.image); return VK_FALSE; };
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, fb_info.mem, true, fb_info.image);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
} else if (pass_info.attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
if (cb_data != my_data->cbMap.end()) {
- std::function<VkBool32()> function = [=]() { set_memory_valid(my_data, fb_info.mem, false, fb_info.image); return VK_FALSE; };
+ std::function<VkBool32()> function = [=]() {
+ set_memory_valid(my_data, fb_info.mem, false, fb_info.image);
+ return VK_FALSE;
+ };
cb_data->second.validate_functions.push_back(function);
}
}
@@ -3494,104 +3050,101 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(
my_data->device_dispatch_table->CmdEndRenderPass(cmdBuffer);
}
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(
- VkDevice dev,
- const char *funcName)
-{
+VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
if (!strcmp(funcName, "vkGetDeviceProcAddr"))
- return (PFN_vkVoidFunction) vkGetDeviceProcAddr;
+ return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
if (!strcmp(funcName, "vkDestroyDevice"))
- return (PFN_vkVoidFunction) vkDestroyDevice;
+ return (PFN_vkVoidFunction)vkDestroyDevice;
if (!strcmp(funcName, "vkQueueSubmit"))
- return (PFN_vkVoidFunction) vkQueueSubmit;
+ return (PFN_vkVoidFunction)vkQueueSubmit;
if (!strcmp(funcName, "vkAllocateMemory"))
- return (PFN_vkVoidFunction) vkAllocateMemory;
+ return (PFN_vkVoidFunction)vkAllocateMemory;
if (!strcmp(funcName, "vkFreeMemory"))
- return (PFN_vkVoidFunction) vkFreeMemory;
+ return (PFN_vkVoidFunction)vkFreeMemory;
if (!strcmp(funcName, "vkMapMemory"))
- return (PFN_vkVoidFunction) vkMapMemory;
+ return (PFN_vkVoidFunction)vkMapMemory;
if (!strcmp(funcName, "vkUnmapMemory"))
- return (PFN_vkVoidFunction) vkUnmapMemory;
+ return (PFN_vkVoidFunction)vkUnmapMemory;
if (!strcmp(funcName, "vkFlushMappedMemoryRanges"))
- return (PFN_vkVoidFunction) vkFlushMappedMemoryRanges;
+ return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges;
if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges"))
- return (PFN_vkVoidFunction) vkInvalidateMappedMemoryRanges;
+ return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges;
if (!strcmp(funcName, "vkDestroyFence"))
- return (PFN_vkVoidFunction) vkDestroyFence;
+ return (PFN_vkVoidFunction)vkDestroyFence;
if (!strcmp(funcName, "vkDestroyBuffer"))
- return (PFN_vkVoidFunction) vkDestroyBuffer;
+ return (PFN_vkVoidFunction)vkDestroyBuffer;
if (!strcmp(funcName, "vkDestroyBufferView"))
return (PFN_vkVoidFunction)vkDestroyBufferView;
if (!strcmp(funcName, "vkDestroyImage"))
- return (PFN_vkVoidFunction) vkDestroyImage;
+ return (PFN_vkVoidFunction)vkDestroyImage;
if (!strcmp(funcName, "vkBindBufferMemory"))
- return (PFN_vkVoidFunction) vkBindBufferMemory;
+ return (PFN_vkVoidFunction)vkBindBufferMemory;
if (!strcmp(funcName, "vkBindImageMemory"))
- return (PFN_vkVoidFunction) vkBindImageMemory;
+ return (PFN_vkVoidFunction)vkBindImageMemory;
if (!strcmp(funcName, "vkGetBufferMemoryRequirements"))
- return (PFN_vkVoidFunction) vkGetBufferMemoryRequirements;
+ return (PFN_vkVoidFunction)vkGetBufferMemoryRequirements;
if (!strcmp(funcName, "vkGetImageMemoryRequirements"))
- return (PFN_vkVoidFunction) vkGetImageMemoryRequirements;
+ return (PFN_vkVoidFunction)vkGetImageMemoryRequirements;
if (!strcmp(funcName, "vkQueueBindSparse"))
- return (PFN_vkVoidFunction) vkQueueBindSparse;
+ return (PFN_vkVoidFunction)vkQueueBindSparse;
if (!strcmp(funcName, "vkCreateFence"))
- return (PFN_vkVoidFunction) vkCreateFence;
+ return (PFN_vkVoidFunction)vkCreateFence;
if (!strcmp(funcName, "vkGetFenceStatus"))
- return (PFN_vkVoidFunction) vkGetFenceStatus;
+ return (PFN_vkVoidFunction)vkGetFenceStatus;
if (!strcmp(funcName, "vkResetFences"))
- return (PFN_vkVoidFunction) vkResetFences;
+ return (PFN_vkVoidFunction)vkResetFences;
if (!strcmp(funcName, "vkWaitForFences"))
- return (PFN_vkVoidFunction) vkWaitForFences;
+ return (PFN_vkVoidFunction)vkWaitForFences;
if (!strcmp(funcName, "vkCreateSemaphore"))
- return (PFN_vkVoidFunction) vkCreateSemaphore;
+ return (PFN_vkVoidFunction)vkCreateSemaphore;
if (!strcmp(funcName, "vkDestroySemaphore"))
- return (PFN_vkVoidFunction) vkDestroySemaphore;
+ return (PFN_vkVoidFunction)vkDestroySemaphore;
if (!strcmp(funcName, "vkQueueWaitIdle"))
- return (PFN_vkVoidFunction) vkQueueWaitIdle;
+ return (PFN_vkVoidFunction)vkQueueWaitIdle;
if (!strcmp(funcName, "vkDeviceWaitIdle"))
- return (PFN_vkVoidFunction) vkDeviceWaitIdle;
+ return (PFN_vkVoidFunction)vkDeviceWaitIdle;
if (!strcmp(funcName, "vkCreateBuffer"))
- return (PFN_vkVoidFunction) vkCreateBuffer;
+ return (PFN_vkVoidFunction)vkCreateBuffer;
if (!strcmp(funcName, "vkCreateImage"))
- return (PFN_vkVoidFunction) vkCreateImage;
+ return (PFN_vkVoidFunction)vkCreateImage;
if (!strcmp(funcName, "vkCreateImageView"))
- return (PFN_vkVoidFunction) vkCreateImageView;
+ return (PFN_vkVoidFunction)vkCreateImageView;
if (!strcmp(funcName, "vkCreateBufferView"))
- return (PFN_vkVoidFunction) vkCreateBufferView;
+ return (PFN_vkVoidFunction)vkCreateBufferView;
if (!strcmp(funcName, "vkUpdateDescriptorSets"))
- return (PFN_vkVoidFunction) vkUpdateDescriptorSets;
+ return (PFN_vkVoidFunction)vkUpdateDescriptorSets;
if (!strcmp(funcName, "vkAllocateCommandBuffers"))
- return (PFN_vkVoidFunction) vkAllocateCommandBuffers;
+ return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
if (!strcmp(funcName, "vkFreeCommandBuffers"))
- return (PFN_vkVoidFunction) vkFreeCommandBuffers;
+ return (PFN_vkVoidFunction)vkFreeCommandBuffers;
if (!strcmp(funcName, "vkCreateCommandPool"))
- return (PFN_vkVoidFunction) vkCreateCommandPool;
+ return (PFN_vkVoidFunction)vkCreateCommandPool;
if (!strcmp(funcName, "vkDestroyCommandPool"))
- return (PFN_vkVoidFunction) vkDestroyCommandPool;
+ return (PFN_vkVoidFunction)vkDestroyCommandPool;
if (!strcmp(funcName, "vkResetCommandPool"))
- return (PFN_vkVoidFunction) vkResetCommandPool;
+ return (PFN_vkVoidFunction)vkResetCommandPool;
if (!strcmp(funcName, "vkBeginCommandBuffer"))
- return (PFN_vkVoidFunction) vkBeginCommandBuffer;
+ return (PFN_vkVoidFunction)vkBeginCommandBuffer;
if (!strcmp(funcName, "vkEndCommandBuffer"))
- return (PFN_vkVoidFunction) vkEndCommandBuffer;
+ return (PFN_vkVoidFunction)vkEndCommandBuffer;
if (!strcmp(funcName, "vkResetCommandBuffer"))
- return (PFN_vkVoidFunction) vkResetCommandBuffer;
+ return (PFN_vkVoidFunction)vkResetCommandBuffer;
if (!strcmp(funcName, "vkCmdBindPipeline"))
- return (PFN_vkVoidFunction) vkCmdBindPipeline;
+ return (PFN_vkVoidFunction)vkCmdBindPipeline;
if (!strcmp(funcName, "vkCmdBindDescriptorSets"))
- return (PFN_vkVoidFunction) vkCmdBindDescriptorSets;
+ return (PFN_vkVoidFunction)vkCmdBindDescriptorSets;
if (!strcmp(funcName, "vkCmdBindVertexBuffers"))
- return (PFN_vkVoidFunction) vkCmdBindVertexBuffers;
+ return (PFN_vkVoidFunction)vkCmdBindVertexBuffers;
if (!strcmp(funcName, "vkCmdBindIndexBuffer"))
- return (PFN_vkVoidFunction) vkCmdBindIndexBuffer;
+ return (PFN_vkVoidFunction)vkCmdBindIndexBuffer;
if (!strcmp(funcName, "vkCmdDraw"))
- return (PFN_vkVoidFunction) vkCmdDraw;
+ return (PFN_vkVoidFunction)vkCmdDraw;
if (!strcmp(funcName, "vkCmdDrawIndexed"))
- return (PFN_vkVoidFunction) vkCmdDrawIndexed;
+ return (PFN_vkVoidFunction)vkCmdDrawIndexed;
if (!strcmp(funcName, "vkCmdDrawIndirect"))
- return (PFN_vkVoidFunction) vkCmdDrawIndirect;
+ return (PFN_vkVoidFunction)vkCmdDrawIndirect;
if (!strcmp(funcName, "vkCmdDrawIndexedIndirect"))
- return (PFN_vkVoidFunction) vkCmdDrawIndexedIndirect;
+ return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect;
if (!strcmp(funcName, "vkCmdDispatch"))
return (PFN_vkVoidFunction)vkCmdDispatch;
if (!strcmp(funcName, "vkCmdDispatchIndirect"))
@@ -3601,56 +3154,54 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(
if (!strcmp(funcName, "vkCmdCopyQueryPoolResults"))
return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults;
if (!strcmp(funcName, "vkCmdCopyImage"))
- return (PFN_vkVoidFunction) vkCmdCopyImage;
+ return (PFN_vkVoidFunction)vkCmdCopyImage;
if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
- return (PFN_vkVoidFunction) vkCmdCopyBufferToImage;
+ return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
- return (PFN_vkVoidFunction) vkCmdCopyImageToBuffer;
+ return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
if (!strcmp(funcName, "vkCmdUpdateBuffer"))
- return (PFN_vkVoidFunction) vkCmdUpdateBuffer;
+ return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
if (!strcmp(funcName, "vkCmdFillBuffer"))
- return (PFN_vkVoidFunction) vkCmdFillBuffer;
+ return (PFN_vkVoidFunction)vkCmdFillBuffer;
if (!strcmp(funcName, "vkCmdClearColorImage"))
- return (PFN_vkVoidFunction) vkCmdClearColorImage;
+ return (PFN_vkVoidFunction)vkCmdClearColorImage;
if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
- return (PFN_vkVoidFunction) vkCmdClearDepthStencilImage;
+ return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
if (!strcmp(funcName, "vkCmdResolveImage"))
- return (PFN_vkVoidFunction) vkCmdResolveImage;
+ return (PFN_vkVoidFunction)vkCmdResolveImage;
if (!strcmp(funcName, "vkCmdBeginQuery"))
- return (PFN_vkVoidFunction) vkCmdBeginQuery;
+ return (PFN_vkVoidFunction)vkCmdBeginQuery;
if (!strcmp(funcName, "vkCmdEndQuery"))
- return (PFN_vkVoidFunction) vkCmdEndQuery;
+ return (PFN_vkVoidFunction)vkCmdEndQuery;
if (!strcmp(funcName, "vkCmdResetQueryPool"))
- return (PFN_vkVoidFunction) vkCmdResetQueryPool;
+ return (PFN_vkVoidFunction)vkCmdResetQueryPool;
if (!strcmp(funcName, "vkCreateRenderPass"))
- return (PFN_vkVoidFunction) vkCreateRenderPass;
+ return (PFN_vkVoidFunction)vkCreateRenderPass;
if (!strcmp(funcName, "vkDestroyRenderPass"))
- return (PFN_vkVoidFunction) vkDestroyRenderPass;
+ return (PFN_vkVoidFunction)vkDestroyRenderPass;
if (!strcmp(funcName, "vkCmdBeginRenderPass"))
- return (PFN_vkVoidFunction) vkCmdBeginRenderPass;
+ return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
if (!strcmp(funcName, "vkCmdEndRenderPass"))
- return (PFN_vkVoidFunction) vkCmdEndRenderPass;
+ return (PFN_vkVoidFunction)vkCmdEndRenderPass;
if (!strcmp(funcName, "vkGetDeviceQueue"))
- return (PFN_vkVoidFunction) vkGetDeviceQueue;
+ return (PFN_vkVoidFunction)vkGetDeviceQueue;
if (!strcmp(funcName, "vkCreateFramebuffer"))
- return (PFN_vkVoidFunction) vkCreateFramebuffer;
+ return (PFN_vkVoidFunction)vkCreateFramebuffer;
if (!strcmp(funcName, "vkDestroyFramebuffer"))
- return (PFN_vkVoidFunction) vkDestroyFramebuffer;
-
+ return (PFN_vkVoidFunction)vkDestroyFramebuffer;
if (dev == NULL)
return NULL;
layer_data *my_data;
my_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
- if (my_data->wsi_enabled)
- {
+ if (my_data->wsi_enabled) {
if (!strcmp(funcName, "vkCreateSwapchainKHR"))
- return (PFN_vkVoidFunction) vkCreateSwapchainKHR;
+ return (PFN_vkVoidFunction)vkCreateSwapchainKHR;
if (!strcmp(funcName, "vkDestroySwapchainKHR"))
- return (PFN_vkVoidFunction) vkDestroySwapchainKHR;
+ return (PFN_vkVoidFunction)vkDestroySwapchainKHR;
if (!strcmp(funcName, "vkGetSwapchainImagesKHR"))
- return (PFN_vkVoidFunction) vkGetSwapchainImagesKHR;
+ return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR;
if (!strcmp(funcName, "vkAcquireNextImageKHR"))
return (PFN_vkVoidFunction)vkAcquireNextImageKHR;
if (!strcmp(funcName, "vkQueuePresentKHR"))
@@ -3663,42 +3214,41 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(
return pDisp->GetDeviceProcAddr(dev, funcName);
}
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(
- VkInstance instance,
- const char *funcName)
-{
+VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
PFN_vkVoidFunction fptr;
if (!strcmp(funcName, "vkGetInstanceProcAddr"))
- return (PFN_vkVoidFunction) vkGetInstanceProcAddr;
+ return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
if (!strcmp(funcName, "vkGetDeviceProcAddr"))
- return (PFN_vkVoidFunction) vkGetDeviceProcAddr;
+ return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
if (!strcmp(funcName, "vkDestroyInstance"))
- return (PFN_vkVoidFunction) vkDestroyInstance;
+ return (PFN_vkVoidFunction)vkDestroyInstance;
if (!strcmp(funcName, "vkCreateInstance"))
- return (PFN_vkVoidFunction) vkCreateInstance;
+ return (PFN_vkVoidFunction)vkCreateInstance;
if (!strcmp(funcName, "vkGetPhysicalDeviceMemoryProperties"))
- return (PFN_vkVoidFunction) vkGetPhysicalDeviceMemoryProperties;
+ return (PFN_vkVoidFunction)vkGetPhysicalDeviceMemoryProperties;
if (!strcmp(funcName, "vkCreateDevice"))
- return (PFN_vkVoidFunction) vkCreateDevice;
+ return (PFN_vkVoidFunction)vkCreateDevice;
if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
- return (PFN_vkVoidFunction) vkEnumerateInstanceLayerProperties;
+ return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
- return (PFN_vkVoidFunction) vkEnumerateInstanceExtensionProperties;
+ return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
- return (PFN_vkVoidFunction) vkEnumerateDeviceLayerProperties;
+ return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
- return (PFN_vkVoidFunction) vkEnumerateDeviceExtensionProperties;
+ return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;
- if (instance == NULL) return NULL;
+ if (instance == NULL)
+ return NULL;
layer_data *my_data;
my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
- if (fptr) return fptr;
+ if (fptr)
+ return fptr;
- VkLayerInstanceDispatchTable* pTable = my_data->instance_dispatch_table;
+ VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
if (pTable->GetInstanceProcAddr == NULL)
return NULL;
return pTable->GetInstanceProcAddr(instance, funcName);
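
The hunks above repeatedly reformat the same deferred-validation pattern: each vkCmd* entry point looks up its command buffer in cbMap and pushes a std::function<VkBool32()> onto that buffer's validate_functions vector, so the memory-validity checks run at submit time rather than at record time. The following is a minimal standalone sketch of that idea; CbState, record_deferred_check, and run_deferred_checks are illustrative names invented here and are not part of the layer's actual API.

    #include <cstdint>
    #include <functional>
    #include <unordered_map>
    #include <vector>

    // Stand-ins for the real Vulkan typedefs so the sketch compiles on its own.
    typedef uint32_t VkBool32;
    typedef void *VkCommandBuffer;
    enum { VK_FALSE = 0, VK_TRUE = 1 };

    // Hypothetical per-command-buffer state mirroring MT_CB_INFO::validate_functions.
    struct CbState {
        std::vector<std::function<VkBool32()>> validate_functions;
    };

    static std::unordered_map<VkCommandBuffer, CbState> cbMap;

    // Record time: remember a check instead of running it, because the bound
    // memory only needs to hold valid data once the command buffer is submitted.
    void record_deferred_check(VkCommandBuffer cb, std::function<VkBool32()> check) {
        cbMap[cb].validate_functions.push_back(check);
    }

    // Submit time: run every deferred check and OR the results together,
    // the same skipCall accumulation style used throughout the layer.
    VkBool32 run_deferred_checks(VkCommandBuffer cb) {
        VkBool32 skipCall = VK_FALSE;
        for (auto &fn : cbMap[cb].validate_functions)
            skipCall |= fn();
        return skipCall;
    }
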
diff --git a/layers/mem_tracker.h b/layers/mem_tracker.h
index dd835e34e..9c0246660 100644
--- a/layers/mem_tracker.h
+++ b/layers/mem_tracker.h
@@ -36,34 +36,32 @@ extern "C" {
#endif
// Mem Tracker ERROR codes
-typedef enum _MEM_TRACK_ERROR
-{
- MEMTRACK_NONE, // Used for INFO & other non-error messages
- MEMTRACK_INVALID_CB, // Cmd Buffer invalid
- MEMTRACK_INVALID_MEM_OBJ, // Invalid Memory Object
- MEMTRACK_INVALID_ALIASING, // Invalid Memory Aliasing
- MEMTRACK_INVALID_LAYOUT, // Invalid Layout
- MEMTRACK_INTERNAL_ERROR, // Bug in Mem Track Layer internal data structures
- MEMTRACK_FREED_MEM_REF, // MEM Obj freed while it still has obj and/or CB refs
- MEMTRACK_MEM_OBJ_CLEAR_EMPTY_BINDINGS, // Clearing bindings on mem obj that doesn't have any bindings
- MEMTRACK_MISSING_MEM_BINDINGS, // Trying to retrieve mem bindings, but none found (may be internal error)
- MEMTRACK_INVALID_OBJECT, // Attempting to reference generic VK Object that is invalid
- MEMTRACK_MEMORY_BINDING_ERROR, // Error during one of many calls that bind memory to object or CB
- MEMTRACK_MEMORY_LEAK, // Failure to call vkFreeMemory on Mem Obj prior to DestroyDevice
- MEMTRACK_INVALID_STATE, // Memory not in the correct state
- MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, // vkResetCommandBuffer() called on a CB that hasn't completed
- MEMTRACK_INVALID_FENCE_STATE, // Invalid Fence State signaled or used
- MEMTRACK_REBIND_OBJECT, // Non-sparse object bindings are immutable
- MEMTRACK_INVALID_USAGE_FLAG, // Usage flags specified at image/buffer create conflict w/ use of object
- MEMTRACK_INVALID_MAP, // Size flag specified at alloc is too small for mapping range
+typedef enum _MEM_TRACK_ERROR {
+ MEMTRACK_NONE, // Used for INFO & other non-error messages
+ MEMTRACK_INVALID_CB, // Cmd Buffer invalid
+ MEMTRACK_INVALID_MEM_OBJ, // Invalid Memory Object
+ MEMTRACK_INVALID_ALIASING, // Invalid Memory Aliasing
+ MEMTRACK_INVALID_LAYOUT, // Invalid Layout
+ MEMTRACK_INTERNAL_ERROR, // Bug in Mem Track Layer internal data structures
+ MEMTRACK_FREED_MEM_REF, // MEM Obj freed while it still has obj and/or CB refs
+ MEMTRACK_MEM_OBJ_CLEAR_EMPTY_BINDINGS, // Clearing bindings on mem obj that doesn't have any bindings
+ MEMTRACK_MISSING_MEM_BINDINGS, // Trying to retrieve mem bindings, but none found (may be internal error)
+ MEMTRACK_INVALID_OBJECT, // Attempting to reference generic VK Object that is invalid
+ MEMTRACK_MEMORY_BINDING_ERROR, // Error during one of many calls that bind memory to object or CB
+ MEMTRACK_MEMORY_LEAK, // Failure to call vkFreeMemory on Mem Obj prior to DestroyDevice
+ MEMTRACK_INVALID_STATE, // Memory not in the correct state
+ MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, // vkResetCommandBuffer() called on a CB that hasn't completed
+ MEMTRACK_INVALID_FENCE_STATE, // Invalid Fence State signaled or used
+ MEMTRACK_REBIND_OBJECT, // Non-sparse object bindings are immutable
+ MEMTRACK_INVALID_USAGE_FLAG, // Usage flags specified at image/buffer create conflict w/ use of object
+ MEMTRACK_INVALID_MAP, // Size flag specified at alloc is too small for mapping range
} MEM_TRACK_ERROR;
// MemTracker Semaphore states
-typedef enum _MtSemaphoreState
-{
- MEMTRACK_SEMAPHORE_STATE_UNSET, // Semaphore is in an undefined state
-    MEMTRACK_SEMAPHORE_STATE_SIGNALLED, // Semaphore is in signalled state
- MEMTRACK_SEMAPHORE_STATE_WAIT, // Semaphore is in wait state
+typedef enum _MtSemaphoreState {
+ MEMTRACK_SEMAPHORE_STATE_UNSET, // Semaphore is in an undefined state
+    MEMTRACK_SEMAPHORE_STATE_SIGNALLED, // Semaphore is in signalled state
+ MEMTRACK_SEMAPHORE_STATE_WAIT, // Semaphore is in wait state
} MtSemaphoreState;
struct MemRange {
@@ -103,29 +101,29 @@ struct MemRange {
// Simple struct to hold handle and type of object so they can be uniquely identified and looked up in appropriate map
struct MT_OBJ_HANDLE_TYPE {
- uint64_t handle;
+ uint64_t handle;
VkDebugReportObjectTypeEXT type;
};
// Data struct for tracking memory object
struct MT_MEM_OBJ_INFO {
-    void* object; // Dispatchable object used to create this memory (device or swapchain)
- uint32_t refCount; // Count of references (obj bindings or CB use)
- bool valid; // Stores if the memory has valid data or not
- VkDeviceMemory mem;
- VkMemoryAllocateInfo allocInfo;
- list<MT_OBJ_HANDLE_TYPE> pObjBindings; // list container of objects bound to this memory
- list<VkCommandBuffer> pCommandBufferBindings; // list container of cmd buffers that reference this mem object
- MemRange memRange;
- void *pData, *pDriverData;
+    void *object; // Dispatchable object used to create this memory (device or swapchain)
+ uint32_t refCount; // Count of references (obj bindings or CB use)
+ bool valid; // Stores if the memory has valid data or not
+ VkDeviceMemory mem;
+ VkMemoryAllocateInfo allocInfo;
+ list<MT_OBJ_HANDLE_TYPE> pObjBindings; // list container of objects bound to this memory
+ list<VkCommandBuffer> pCommandBufferBindings; // list container of cmd buffers that reference this mem object
+ MemRange memRange;
+ void *pData, *pDriverData;
};
// This only applies to Buffers and Images, which can have memory bound to them
struct MT_OBJ_BINDING_INFO {
VkDeviceMemory mem;
-    bool valid; //If this is a swapchain image, backing memory is not a MT_MEM_OBJ_INFO, so store it here.
+    bool valid; // If this is a swapchain image, backing memory is not a MT_MEM_OBJ_INFO, so store it here.
union create_info {
- VkImageCreateInfo image;
+ VkImageCreateInfo image;
VkBufferCreateInfo buffer;
} create_info;
};
@@ -133,25 +131,25 @@ struct MT_OBJ_BINDING_INFO {
// Track all command buffers
typedef struct _MT_CB_INFO {
VkCommandBufferAllocateInfo createInfo;
- VkPipeline pipelines[VK_PIPELINE_BIND_POINT_RANGE_SIZE];
- uint32_t attachmentCount;
- VkCommandBuffer commandBuffer;
- uint64_t fenceId;
- VkFence lastSubmittedFence;
- VkQueue lastSubmittedQueue;
- VkRenderPass pass;
- vector<VkDescriptorSet> activeDescriptorSets;
- vector<std::function<VkBool32()> > validate_functions;
+ VkPipeline pipelines[VK_PIPELINE_BIND_POINT_RANGE_SIZE];
+ uint32_t attachmentCount;
+ VkCommandBuffer commandBuffer;
+ uint64_t fenceId;
+ VkFence lastSubmittedFence;
+ VkQueue lastSubmittedQueue;
+ VkRenderPass pass;
+ vector<VkDescriptorSet> activeDescriptorSets;
+ vector<std::function<VkBool32()>> validate_functions;
// Order dependent, stl containers must be at end of struct
- list<VkDeviceMemory> pMemObjList; // List container of Mem objs referenced by this CB
+ list<VkDeviceMemory> pMemObjList; // List container of Mem objs referenced by this CB
// Constructor
- _MT_CB_INFO():createInfo{},pipelines{},attachmentCount(0),fenceId(0),lastSubmittedFence{},lastSubmittedQueue{} {};
+ _MT_CB_INFO() : createInfo{}, pipelines{}, attachmentCount(0), fenceId(0), lastSubmittedFence{}, lastSubmittedQueue{} {};
} MT_CB_INFO;
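
The validate_functions vector is what makes deferred checks possible: validators are captured while a command buffer is recorded and only evaluated when it is submitted. A sketch of that evaluation step (hypothetical, not part of this commit):

static VkBool32 sketch_run_deferred_checks(MT_CB_INFO *cb_info) {
    VkBool32 skip = VK_FALSE;
    // Each captured std::function<VkBool32()> returns VK_TRUE if the offending call should be skipped
    for (auto &validate : cb_info->validate_functions) {
        skip |= validate();
    }
    return skip;
}
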
// Track command pools and their command buffers
typedef struct _MT_CMD_POOL_INFO {
- VkCommandPoolCreateFlags createFlags;
- list<VkCommandBuffer> pCommandBuffers; // list container of cmd buffers allocated from this pool
+ VkCommandPoolCreateFlags createFlags;
+ list<VkCommandBuffer> pCommandBuffers; // list container of cmd buffers allocated from this pool
} MT_CMD_POOL_INFO;
struct MT_IMAGE_VIEW_INFO {
@@ -168,9 +166,9 @@ struct MT_FB_INFO {
};
struct MT_PASS_ATTACHMENT_INFO {
- uint32_t attachment;
- VkAttachmentLoadOp load_op;
- VkAttachmentStoreOp store_op;
+ uint32_t attachment;
+ VkAttachmentLoadOp load_op;
+ VkAttachmentStoreOp store_op;
};
struct MT_PASS_INFO {
@@ -182,20 +180,19 @@ struct MT_PASS_INFO {
// Associate fenceId with a fence object
struct MT_FENCE_INFO {
- uint64_t fenceId; // Sequence number for fence at last submit
- VkQueue queue; // Queue that this fence is submitted against or NULL
- VkSwapchainKHR
- swapchain; // Swapchain that this fence is submitted against or NULL
- VkBool32 firstTimeFlag; // Fence was created in signaled state, avoid warnings for first use
+ uint64_t fenceId; // Sequence number for fence at last submit
+ VkQueue queue; // Queue that this fence is submitted against or NULL
+ VkSwapchainKHR swapchain; // Swapchain that this fence is submitted against or NULL
+ VkBool32 firstTimeFlag; // Fence was created in signaled state, avoid warnings for first use
VkFenceCreateInfo createInfo;
};
// Track Queue information
struct MT_QUEUE_INFO {
- uint64_t lastRetiredId;
- uint64_t lastSubmittedId;
- list<VkCommandBuffer> pQueueCommandBuffers;
- list<VkDeviceMemory> pMemRefList;
+ uint64_t lastRetiredId;
+ uint64_t lastSubmittedId;
+ list<VkCommandBuffer> pQueueCommandBuffers;
+ list<VkDeviceMemory> pMemRefList;
};
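
MT_FENCE_INFO and MT_QUEUE_INFO together implement a simple sequence-number scheme: each submission is tagged with an increasing fenceId, and work counts as finished once the queue's lastRetiredId reaches it. A hypothetical sketch of that relationship (not part of this commit):

static uint64_t sketch_record_submit(MT_QUEUE_INFO *queue_info, MT_FENCE_INFO *fence_info) {
    fence_info->fenceId = ++queue_info->lastSubmittedId; // tag this submission
    return fence_info->fenceId;
}

static bool sketch_fence_retired(const MT_QUEUE_INFO *queue_info, const MT_FENCE_INFO *fence_info) {
    return fence_info->fenceId <= queue_info->lastRetiredId; // retired once the queue catches up
}
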
struct MT_DESCRIPTOR_SET_INFO {
@@ -205,8 +202,8 @@ struct MT_DESCRIPTOR_SET_INFO {
// Track Swapchain Information
struct MT_SWAP_CHAIN_INFO {
- VkSwapchainCreateInfoKHR createInfo;
- std::vector<VkImage> images;
+ VkSwapchainCreateInfoKHR createInfo;
+ std::vector<VkImage> images;
};
struct MEMORY_RANGE {
diff --git a/layers/object_tracker.h b/layers/object_tracker.h
index 4d570686c..2c4b9e7f0 100644
--- a/layers/object_tracker.h
+++ b/layers/object_tracker.h
@@ -33,39 +33,37 @@
#include "vk_layer_table.h"
// Object Tracker ERROR codes
-typedef enum _OBJECT_TRACK_ERROR
-{
- OBJTRACK_NONE, // Used for INFO & other non-error messages
- OBJTRACK_UNKNOWN_OBJECT, // Updating uses of object that's not in global object list
- OBJTRACK_INTERNAL_ERROR, // Bug with data tracking within the layer
- OBJTRACK_DESTROY_OBJECT_FAILED, // Couldn't find object to be destroyed
- OBJTRACK_OBJECT_LEAK, // OBJECT was not correctly freed/destroyed
- OBJTRACK_OBJCOUNT_MAX_EXCEEDED, // Request for Object data in excess of max obj count
- OBJTRACK_INVALID_OBJECT, // Object used that has never been created
- OBJTRACK_DESCRIPTOR_POOL_MISMATCH, // Descriptor Pools specified incorrectly
- OBJTRACK_COMMAND_POOL_MISMATCH, // Command Pools specified incorrectly
+typedef enum _OBJECT_TRACK_ERROR {
+ OBJTRACK_NONE, // Used for INFO & other non-error messages
+ OBJTRACK_UNKNOWN_OBJECT, // Updating uses of object that's not in global object list
+ OBJTRACK_INTERNAL_ERROR, // Bug with data tracking within the layer
+ OBJTRACK_DESTROY_OBJECT_FAILED, // Couldn't find object to be destroyed
+ OBJTRACK_OBJECT_LEAK, // OBJECT was not correctly freed/destroyed
+ OBJTRACK_OBJCOUNT_MAX_EXCEEDED, // Request for Object data in excess of max obj count
+ OBJTRACK_INVALID_OBJECT, // Object used that has never been created
+ OBJTRACK_DESCRIPTOR_POOL_MISMATCH, // Descriptor Pools specified incorrectly
+ OBJTRACK_COMMAND_POOL_MISMATCH, // Command Pools specified incorrectly
} OBJECT_TRACK_ERROR;
// Object Status -- used to track state of individual objects
typedef VkFlags ObjectStatusFlags;
-typedef enum _ObjectStatusFlagBits
-{
- OBJSTATUS_NONE = 0x00000000, // No status is set
- OBJSTATUS_FENCE_IS_SUBMITTED = 0x00000001, // Fence has been submitted
- OBJSTATUS_VIEWPORT_BOUND = 0x00000002, // Viewport state object has been bound
- OBJSTATUS_RASTER_BOUND = 0x00000004, // Raster state object has been bound
- OBJSTATUS_COLOR_BLEND_BOUND = 0x00000008, // Color blend state object has been bound
- OBJSTATUS_DEPTH_STENCIL_BOUND = 0x00000010, // Depth/stencil state object has been bound
- OBJSTATUS_GPU_MEM_MAPPED = 0x00000020, // Memory object is currently mapped
- OBJSTATUS_COMMAND_BUFFER_SECONDARY = 0x00000040, // Command Buffer is of type SECONDARY
+typedef enum _ObjectStatusFlagBits {
+ OBJSTATUS_NONE = 0x00000000, // No status is set
+ OBJSTATUS_FENCE_IS_SUBMITTED = 0x00000001, // Fence has been submitted
+ OBJSTATUS_VIEWPORT_BOUND = 0x00000002, // Viewport state object has been bound
+ OBJSTATUS_RASTER_BOUND = 0x00000004, // Raster state object has been bound
+ OBJSTATUS_COLOR_BLEND_BOUND = 0x00000008, // Color blend state object has been bound
+ OBJSTATUS_DEPTH_STENCIL_BOUND = 0x00000010, // Depth/stencil state object has been bound
+ OBJSTATUS_GPU_MEM_MAPPED = 0x00000020, // Memory object is currently mapped
+ OBJSTATUS_COMMAND_BUFFER_SECONDARY = 0x00000040, // Command Buffer is of type SECONDARY
} ObjectStatusFlagBits;
typedef struct _OBJTRACK_NODE {
- uint64_t vkObj; // Object handle
- VkDebugReportObjectTypeEXT objType; // Object type identifier
- ObjectStatusFlags status; // Object state
- uint64_t parentObj; // Parent object
- uint64_t belongsTo; // Object Scope -- owning device/instance
+ uint64_t vkObj; // Object handle
+ VkDebugReportObjectTypeEXT objType; // Object type identifier
+ ObjectStatusFlags status; // Object state
+ uint64_t parentObj; // Parent object
+ uint64_t belongsTo; // Object Scope -- owning device/instance
} OBJTRACK_NODE;
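
ObjectStatusFlags is an ordinary bitmask on OBJTRACK_NODE; the per-object-type helpers declared further down (set_device_memory_status(), reset_device_memory_status(), validate_status()) reduce to operations like this hypothetical sketch:

static void sketch_set_status(OBJTRACK_NODE *node, ObjectStatusFlags flag) { node->status |= flag; }

static void sketch_reset_status(OBJTRACK_NODE *node, ObjectStatusFlags flag) { node->status &= ~flag; }

static bool sketch_has_status(const OBJTRACK_NODE *node, ObjectStatusFlags mask) {
    // e.g. OBJSTATUS_GPU_MEM_MAPPED must be set before vkUnmapMemory is legal on that memory
    return (node->status & mask) == mask;
}
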
// prototype for extension functions
@@ -78,17 +76,12 @@ typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkDebugReportO
struct layer_data {
debug_report_data *report_data;
- //TODO: put instance data here
- VkDebugReportCallbackEXT logging_callback;
+ // TODO: put instance data here
+ VkDebugReportCallbackEXT logging_callback;
bool wsi_enabled;
bool objtrack_extensions_enabled;
- layer_data() :
- report_data(nullptr),
- logging_callback(VK_NULL_HANDLE),
- wsi_enabled(false),
- objtrack_extensions_enabled(false)
- {};
+ layer_data() : report_data(nullptr), logging_callback(VK_NULL_HANDLE), wsi_enabled(false), objtrack_extensions_enabled(false){};
};
struct instExts {
@@ -96,13 +89,13 @@ struct instExts {
};
static std::unordered_map<void *, struct instExts> instanceExtMap;
-static std::unordered_map<void*, layer_data *> layer_data_map;
-static device_table_map object_tracker_device_table_map;
-static instance_table_map object_tracker_instance_table_map;
+static std::unordered_map<void *, layer_data *> layer_data_map;
+static device_table_map object_tracker_device_table_map;
+static instance_table_map object_tracker_instance_table_map;
// We additionally need to validate image usage using a separate map
// of swapchain-created images
-static unordered_map<uint64_t, OBJTRACK_NODE*> swapchainImageMap;
+static unordered_map<uint64_t, OBJTRACK_NODE *> swapchainImageMap;
static long long unsigned int object_track_index = 0;
static int objLockInitialized = 0;
@@ -111,31 +104,28 @@ static loader_platform_thread_mutex objLock;
// Objects stored in a global map w/ struct containing basic info
// unordered_map<const void*, OBJTRACK_NODE*> objMap;
-#define NUM_OBJECT_TYPES (VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT+1)
-
-static uint64_t numObjs[NUM_OBJECT_TYPES] = {0};
-static uint64_t numTotalObjs = 0;
-static VkQueueFamilyProperties *queueInfo = NULL;
-static uint32_t queueCount = 0;
+#define NUM_OBJECT_TYPES (VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT + 1)
-template layer_data *get_my_data_ptr<layer_data>(
- void *data_key, std::unordered_map<void *, layer_data *> &data_map);
+static uint64_t numObjs[NUM_OBJECT_TYPES] = {0};
+static uint64_t numTotalObjs = 0;
+static VkQueueFamilyProperties *queueInfo = NULL;
+static uint32_t queueCount = 0;
+template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);
//
// Internal Object Tracker Functions
//
-static void createDeviceRegisterExtensions(const VkDeviceCreateInfo* pCreateInfo, VkDevice device)
-{
+static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkLayerDispatchTable *pDisp = get_dispatch_table(object_tracker_device_table_map, device);
PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
- pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR) gpa(device, "vkCreateSwapchainKHR");
- pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR) gpa(device, "vkDestroySwapchainKHR");
- pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR) gpa(device, "vkGetSwapchainImagesKHR");
- pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR) gpa(device, "vkAcquireNextImageKHR");
- pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR) gpa(device, "vkQueuePresentKHR");
+ pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
+ pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
+ pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
+ pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
+ pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
my_device_data->wsi_enabled = false;
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
@@ -146,66 +136,70 @@ static void createDeviceRegisterExtensions(const VkDeviceCreateInfo* pCreateInfo
}
}
-static void createInstanceRegisterExtensions(const VkInstanceCreateInfo* pCreateInfo, VkInstance instance)
-{
+static void createInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
uint32_t i;
VkLayerInstanceDispatchTable *pDisp = get_dispatch_table(object_tracker_instance_table_map, instance);
PFN_vkGetInstanceProcAddr gpa = pDisp->GetInstanceProcAddr;
pDisp->DestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)gpa(instance, "vkDestroySurfaceKHR");
- pDisp->GetPhysicalDeviceSurfaceSupportKHR = (PFN_vkGetPhysicalDeviceSurfaceSupportKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
- pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
- pDisp->GetPhysicalDeviceSurfaceFormatsKHR = (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
- pDisp->GetPhysicalDeviceSurfacePresentModesKHR = (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR) gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");
+ pDisp->GetPhysicalDeviceSurfaceSupportKHR =
+ (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
+ pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR =
+ (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
+ pDisp->GetPhysicalDeviceSurfaceFormatsKHR =
+ (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
+ pDisp->GetPhysicalDeviceSurfacePresentModesKHR =
+ (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");
#if VK_USE_PLATFORM_WIN32_KHR
- pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR) gpa(instance, "vkCreateWin32SurfaceKHR");
- pDisp->GetPhysicalDeviceWin32PresentationSupportKHR = (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
+ pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)gpa(instance, "vkCreateWin32SurfaceKHR");
+ pDisp->GetPhysicalDeviceWin32PresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
- pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR) gpa(instance, "vkCreateXcbSurfaceKHR");
- pDisp->GetPhysicalDeviceXcbPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
+ pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)gpa(instance, "vkCreateXcbSurfaceKHR");
+ pDisp->GetPhysicalDeviceXcbPresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
- pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR) gpa(instance, "vkCreateXlibSurfaceKHR");
- pDisp->GetPhysicalDeviceXlibPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
+ pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)gpa(instance, "vkCreateXlibSurfaceKHR");
+ pDisp->GetPhysicalDeviceXlibPresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XLIB_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
- pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR) gpa(instance, "vkCreateMirSurfaceKHR");
- pDisp->GetPhysicalDeviceMirPresentationSupportKHR = (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
+ pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR)gpa(instance, "vkCreateMirSurfaceKHR");
+ pDisp->GetPhysicalDeviceMirPresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
- pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR) gpa(instance, "vkCreateWaylandSurfaceKHR");
- pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR = (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
+ pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)gpa(instance, "vkCreateWaylandSurfaceKHR");
+ pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_ANDROID_KHR
- pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR) gpa(instance, "vkCreateAndroidSurfaceKHR");
+ pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR)gpa(instance, "vkCreateAndroidSurfaceKHR");
#endif // VK_USE_PLATFORM_ANDROID_KHR
instanceExtMap[pDisp].wsi_enabled = false;
for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0)
instanceExtMap[pDisp].wsi_enabled = true;
-
}
}
// Indicate device or instance dispatch table type
-typedef enum _DispTableType
-{
+typedef enum _DispTableType {
DISP_TBL_TYPE_INSTANCE,
DISP_TBL_TYPE_DEVICE,
} DispTableType;
-debug_report_data *mdd(const void* object)
-{
+debug_report_data *mdd(const void *object) {
dispatch_key key = get_dispatch_key(object);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
return my_data->report_data;
}
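
mdd() here and the parallel mid() just below resolve the per-device or per-instance layer_data through the dispatch key, so any intercepted entry point can reach its debug_report_data. A hypothetical usage sketch (illustration only, not part of this commit):

static void sketch_report_from_entry_point(VkDevice device) {
    // Route an informational message through whatever callbacks were registered for this device's instance
    log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            reinterpret_cast<uint64_t>(device), __LINE__, OBJTRACK_NONE, "OBJTRACK",
            "Example message emitted from an intercepted entry point");
}
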
-debug_report_data *mid(VkInstance object)
-{
+debug_report_data *mid(VkInstance object) {
dispatch_key key = get_dispatch_key(object);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
return my_data->report_data;
@@ -213,7 +207,7 @@ debug_report_data *mid(VkInstance object)
// For each Queue's doubly linked-list of mem refs
typedef struct _OT_MEM_INFO {
- VkDeviceMemory mem;
+ VkDeviceMemory mem;
struct _OT_MEM_INFO *pNextMI;
struct _OT_MEM_INFO *pPrevMI;
@@ -221,51 +215,42 @@ typedef struct _OT_MEM_INFO {
// Track Queue information
typedef struct _OT_QUEUE_INFO {
- OT_MEM_INFO *pMemRefList;
- struct _OT_QUEUE_INFO *pNextQI;
- uint32_t queueNodeIndex;
- VkQueue queue;
- uint32_t refCount;
+ OT_MEM_INFO *pMemRefList;
+ struct _OT_QUEUE_INFO *pNextQI;
+ uint32_t queueNodeIndex;
+ VkQueue queue;
+ uint32_t refCount;
} OT_QUEUE_INFO;
// Global list of QueueInfo structures, one per queue
static OT_QUEUE_INFO *g_pQueueInfo = NULL;
// Convert an object type enum to an object type array index
-static uint32_t
-objTypeToIndex(
- uint32_t objType)
-{
+static uint32_t objTypeToIndex(uint32_t objType) {
uint32_t index = objType;
return index;
}
// Add new queue to head of global queue list
-static void
-addQueueInfo(
- uint32_t queueNodeIndex,
- VkQueue queue)
-{
+static void addQueueInfo(uint32_t queueNodeIndex, VkQueue queue) {
OT_QUEUE_INFO *pQueueInfo = new OT_QUEUE_INFO;
if (pQueueInfo != NULL) {
memset(pQueueInfo, 0, sizeof(OT_QUEUE_INFO));
- pQueueInfo->queue = queue;
+ pQueueInfo->queue = queue;
pQueueInfo->queueNodeIndex = queueNodeIndex;
- pQueueInfo->pNextQI = g_pQueueInfo;
- g_pQueueInfo = pQueueInfo;
- }
- else {
- log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
- "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
+ pQueueInfo->pNextQI = g_pQueueInfo;
+ g_pQueueInfo = pQueueInfo;
+ } else {
+ log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t>(queue),
+ __LINE__, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
+ "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
}
}
// Destroy memRef lists and free all memory
-static void
-destroyQueueMemRefLists(void)
-{
- OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
+static void destroyQueueMemRefLists(void) {
+ OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
OT_QUEUE_INFO *pDelQueueInfo = NULL;
while (pQueueInfo != NULL) {
OT_MEM_INFO *pMemInfo = pQueueInfo->pMemRefList;
@@ -275,38 +260,31 @@ destroyQueueMemRefLists(void)
delete pDelMemInfo;
}
pDelQueueInfo = pQueueInfo;
- pQueueInfo = pQueueInfo->pNextQI;
+ pQueueInfo = pQueueInfo->pNextQI;
delete pDelQueueInfo;
}
g_pQueueInfo = pQueueInfo;
}
-static void
-setGpuQueueInfoState(
- uint32_t count,
- void *pData)
-{
+static void setGpuQueueInfoState(uint32_t count, void *pData) {
queueCount = count;
- queueInfo = (VkQueueFamilyProperties*)realloc((void*)queueInfo, count * sizeof(VkQueueFamilyProperties));
+ queueInfo = (VkQueueFamilyProperties *)realloc((void *)queueInfo, count * sizeof(VkQueueFamilyProperties));
if (queueInfo != NULL) {
memcpy(queueInfo, pData, count * sizeof(VkQueueFamilyProperties));
}
}
// Check Queue type flags for selected queue operations
-static void
-validateQueueFlags(
- VkQueue queue,
- const char *function)
-{
+static void validateQueueFlags(VkQueue queue, const char *function) {
OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
while ((pQueueInfo != NULL) && (pQueueInfo->queue != queue)) {
pQueueInfo = pQueueInfo->pNextQI;
}
if (pQueueInfo != NULL) {
if ((queueInfo != NULL) && (queueInfo[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) == 0) {
- log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
- "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set", function);
+ log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
+ reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
+ "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set", function);
}
}
}
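
setGpuQueueInfoState() and validateQueueFlags() only work once the queueInfo array has been filled in, which explicit_GetPhysicalDeviceQueueFamilyProperties() further down takes care of. A hypothetical sketch of that flow, assuming the instance dispatch table exposes GetPhysicalDeviceQueueFamilyProperties as the layer tables do:

static void sketch_cache_queue_families(VkPhysicalDevice gpu, VkLayerInstanceDispatchTable *table) {
    uint32_t count = 0;
    table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, NULL);          // query the family count
    std::vector<VkQueueFamilyProperties> props(count);
    table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, props.data());  // then the properties
    setGpuQueueInfoState(count, props.data());                                 // cached for validateQueueFlags()
}
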
@@ -347,21 +325,16 @@ validate_status(
#endif
#include "vk_dispatch_table_helper.h"
-static void
-initObjectTracker(
- layer_data *my_data,
- const VkAllocationCallbacks *pAllocator)
-{
+static void initObjectTracker(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
uint32_t report_flags = 0;
uint32_t debug_action = 0;
FILE *log_output = NULL;
const char *option_str;
// initialize object_tracker options
report_flags = getLayerOptionFlags("lunarg_object_tracker.report_flags", 0);
- getLayerOptionEnum("lunarg_object_tracker.debug_action", (uint32_t *) &debug_action);
+ getLayerOptionEnum("lunarg_object_tracker.debug_action", (uint32_t *)&debug_action);
- if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
- {
+ if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG) {
option_str = getLayerOption("lunarg_object_tracker.log_filename");
log_output = getLayerLogOutput(option_str, "lunarg_object_tracker");
VkDebugReportCallbackCreateInfoEXT dbgInfo;
@@ -373,8 +346,7 @@ initObjectTracker(
layer_create_msg_callback(my_data->report_data, &dbgInfo, pAllocator, &my_data->logging_callback);
}
- if (!objLockInitialized)
- {
+ if (!objLockInitialized) {
// TODO/TBD: Need to delete this mutex sometime. How??? One
// suggestion is to call this during vkCreateInstance(), and then we
// can clean it up during vkDestroyInstance(). However, that requires
@@ -395,113 +367,124 @@ static void create_device(VkDevice dispatchable_object, VkDevice object, VkDebug
static void create_device(VkPhysicalDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType);
static VkBool32 validate_image(VkQueue dispatchable_object, VkImage object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
-static VkBool32 validate_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
-static VkBool32 validate_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
-static VkBool32 validate_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
-static VkBool32 validate_descriptor_set_layout(VkDevice dispatchable_object, VkDescriptorSetLayout object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
-static VkBool32 validate_command_pool(VkDevice dispatchable_object, VkCommandPool object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
-static VkBool32 validate_buffer(VkQueue dispatchable_object, VkBuffer object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
+static VkBool32 validate_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType,
+ bool null_allowed);
+static VkBool32 validate_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType,
+ bool null_allowed);
+static VkBool32 validate_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object, VkDebugReportObjectTypeEXT objType,
+ bool null_allowed);
+static VkBool32 validate_descriptor_set_layout(VkDevice dispatchable_object, VkDescriptorSetLayout object,
+ VkDebugReportObjectTypeEXT objType, bool null_allowed);
+static VkBool32 validate_command_pool(VkDevice dispatchable_object, VkCommandPool object, VkDebugReportObjectTypeEXT objType,
+ bool null_allowed);
+static VkBool32 validate_buffer(VkQueue dispatchable_object, VkBuffer object, VkDebugReportObjectTypeEXT objType,
+ bool null_allowed);
static void create_pipeline(VkDevice dispatchable_object, VkPipeline vkObj, VkDebugReportObjectTypeEXT objType);
-static VkBool32 validate_pipeline_cache(VkDevice dispatchable_object, VkPipelineCache object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
-static VkBool32 validate_render_pass(VkDevice dispatchable_object, VkRenderPass object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
-static VkBool32 validate_shader_module(VkDevice dispatchable_object, VkShaderModule object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
-static VkBool32 validate_pipeline_layout(VkDevice dispatchable_object, VkPipelineLayout object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
-static VkBool32 validate_pipeline(VkDevice dispatchable_object, VkPipeline object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
+static VkBool32 validate_pipeline_cache(VkDevice dispatchable_object, VkPipelineCache object, VkDebugReportObjectTypeEXT objType,
+ bool null_allowed);
+static VkBool32 validate_render_pass(VkDevice dispatchable_object, VkRenderPass object, VkDebugReportObjectTypeEXT objType,
+ bool null_allowed);
+static VkBool32 validate_shader_module(VkDevice dispatchable_object, VkShaderModule object, VkDebugReportObjectTypeEXT objType,
+ bool null_allowed);
+static VkBool32 validate_pipeline_layout(VkDevice dispatchable_object, VkPipelineLayout object, VkDebugReportObjectTypeEXT objType,
+ bool null_allowed);
+static VkBool32 validate_pipeline(VkDevice dispatchable_object, VkPipeline object, VkDebugReportObjectTypeEXT objType,
+ bool null_allowed);
static void destroy_command_pool(VkDevice dispatchable_object, VkCommandPool object);
static void destroy_command_buffer(VkCommandBuffer dispatchable_object, VkCommandBuffer object);
static void destroy_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object);
static void destroy_descriptor_set(VkDevice dispatchable_object, VkDescriptorSet object);
static void destroy_device_memory(VkDevice dispatchable_object, VkDeviceMemory object);
static void destroy_swapchain_khr(VkDevice dispatchable_object, VkSwapchainKHR object);
-static VkBool32 set_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType, ObjectStatusFlags status_flag);
-static VkBool32 reset_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType, ObjectStatusFlags status_flag);
+static VkBool32 set_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
+ ObjectStatusFlags status_flag);
+static VkBool32 reset_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
+ ObjectStatusFlags status_flag);
#if 0
static VkBool32 validate_status(VkDevice dispatchable_object, VkFence object, VkDebugReportObjectTypeEXT objType,
ObjectStatusFlags status_mask, ObjectStatusFlags status_flag, VkFlags msg_flags, OBJECT_TRACK_ERROR error_code,
const char *fail_msg);
#endif
-extern unordered_map<uint64_t, OBJTRACK_NODE*> VkPhysicalDeviceMap;
-extern unordered_map<uint64_t, OBJTRACK_NODE*> VkDeviceMap;
-extern unordered_map<uint64_t, OBJTRACK_NODE*> VkImageMap;
-extern unordered_map<uint64_t, OBJTRACK_NODE*> VkQueueMap;
-extern unordered_map<uint64_t, OBJTRACK_NODE*> VkDescriptorSetMap;
-extern unordered_map<uint64_t, OBJTRACK_NODE*> VkBufferMap;
-extern unordered_map<uint64_t, OBJTRACK_NODE*> VkFenceMap;
-extern unordered_map<uint64_t, OBJTRACK_NODE*> VkSemaphoreMap;
-extern unordered_map<uint64_t, OBJTRACK_NODE*> VkCommandPoolMap;
-extern unordered_map<uint64_t, OBJTRACK_NODE*> VkCommandBufferMap;
-extern unordered_map<uint64_t, OBJTRACK_NODE*> VkSwapchainKHRMap;
-extern unordered_map<uint64_t, OBJTRACK_NODE*> VkSurfaceKHRMap;
-
-static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType)
-{
- log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
- "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
- reinterpret_cast<uint64_t>(vkObj));
-
- OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
+extern unordered_map<uint64_t, OBJTRACK_NODE *> VkPhysicalDeviceMap;
+extern unordered_map<uint64_t, OBJTRACK_NODE *> VkDeviceMap;
+extern unordered_map<uint64_t, OBJTRACK_NODE *> VkImageMap;
+extern unordered_map<uint64_t, OBJTRACK_NODE *> VkQueueMap;
+extern unordered_map<uint64_t, OBJTRACK_NODE *> VkDescriptorSetMap;
+extern unordered_map<uint64_t, OBJTRACK_NODE *> VkBufferMap;
+extern unordered_map<uint64_t, OBJTRACK_NODE *> VkFenceMap;
+extern unordered_map<uint64_t, OBJTRACK_NODE *> VkSemaphoreMap;
+extern unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandPoolMap;
+extern unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandBufferMap;
+extern unordered_map<uint64_t, OBJTRACK_NODE *> VkSwapchainKHRMap;
+extern unordered_map<uint64_t, OBJTRACK_NODE *> VkSurfaceKHRMap;
+
+static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType) {
+ log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
+ OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
+ string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));
+
+ OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
pNewObjNode->objType = objType;
pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
- pNewObjNode->status = OBJSTATUS_NONE;
- pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
+ pNewObjNode->status = OBJSTATUS_NONE;
+ pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
VkPhysicalDeviceMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
uint32_t objIndex = objTypeToIndex(objType);
numObjs[objIndex]++;
numTotalObjs++;
}
-static void create_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR vkObj, VkDebugReportObjectTypeEXT objType)
-{
+static void create_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR vkObj, VkDebugReportObjectTypeEXT objType) {
// TODO: Add tracking of surface objects
- log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
- "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
- (uint64_t)(vkObj));
+ log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
+ "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
+ string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));
- OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
+ OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
pNewObjNode->objType = objType;
pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
- pNewObjNode->status = OBJSTATUS_NONE;
- pNewObjNode->vkObj = (uint64_t)(vkObj);
+ pNewObjNode->status = OBJSTATUS_NONE;
+ pNewObjNode->vkObj = (uint64_t)(vkObj);
VkSurfaceKHRMap[(uint64_t)vkObj] = pNewObjNode;
uint32_t objIndex = objTypeToIndex(objType);
numObjs[objIndex]++;
numTotalObjs++;
}
-static void destroy_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR object)
-{
+static void destroy_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR object) {
uint64_t object_handle = (uint64_t)(object);
if (VkSurfaceKHRMap.find(object_handle) != VkSurfaceKHRMap.end()) {
- OBJTRACK_NODE* pNode = VkSurfaceKHRMap[(uint64_t)object];
+ OBJTRACK_NODE *pNode = VkSurfaceKHRMap[(uint64_t)object];
uint32_t objIndex = objTypeToIndex(pNode->objType);
assert(numTotalObjs > 0);
numTotalObjs--;
assert(numObjs[objIndex] > 0);
numObjs[objIndex]--;
- log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE, "OBJTRACK",
- "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
- string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(object), numTotalObjs, numObjs[objIndex],
- string_VkDebugReportObjectTypeEXT(pNode->objType));
+ log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__,
+ OBJTRACK_NONE, "OBJTRACK",
+ "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
+ string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(object), numTotalObjs, numObjs[objIndex],
+ string_VkDebugReportObjectTypeEXT(pNode->objType));
delete pNode;
VkSurfaceKHRMap.erase(object_handle);
} else {
- log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT ) 0, object_handle, __LINE__, OBJTRACK_NONE, "OBJTRACK",
- "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
- object_handle);
+ log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__,
+ OBJTRACK_NONE, "OBJTRACK",
+ "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
}
}
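
create_surface_khr() and destroy_surface_khr() above follow the same bookkeeping pattern as every other tracked handle: one OBJTRACK_NODE per object in a per-type map, with numObjs[] and numTotalObjs kept in step. A condensed, hypothetical sketch of that pattern (not part of this commit):

static void sketch_track_create(unordered_map<uint64_t, OBJTRACK_NODE *> &obj_map, uint64_t handle, uint64_t owner,
                                VkDebugReportObjectTypeEXT objType) {
    OBJTRACK_NODE *pNode = new OBJTRACK_NODE;
    pNode->vkObj = handle;
    pNode->objType = objType;
    pNode->status = OBJSTATUS_NONE;
    pNode->belongsTo = owner;
    obj_map[handle] = pNode;
    numObjs[objTypeToIndex(objType)]++;
    numTotalObjs++;
}

static void sketch_track_destroy(unordered_map<uint64_t, OBJTRACK_NODE *> &obj_map, uint64_t handle) {
    auto it = obj_map.find(handle);
    if (it != obj_map.end()) {
        numObjs[objTypeToIndex(it->second->objType)]--;
        numTotalObjs--;
        delete it->second;
        obj_map.erase(it);
    }
}
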
-static void alloc_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer vkObj, VkDebugReportObjectTypeEXT objType, VkCommandBufferLevel level)
-{
- log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
- "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
- reinterpret_cast<uint64_t>(vkObj));
+static void alloc_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer vkObj,
+ VkDebugReportObjectTypeEXT objType, VkCommandBufferLevel level) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__, OBJTRACK_NONE,
+ "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
+ string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));
- OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
- pNewObjNode->objType = objType;
+ OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
+ pNewObjNode->objType = objType;
pNewObjNode->belongsTo = (uint64_t)device;
- pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
- pNewObjNode->parentObj = (uint64_t) commandPool;
+ pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
+ pNewObjNode->parentObj = (uint64_t)commandPool;
if (level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
pNewObjNode->status = OBJSTATUS_COMMAND_BUFFER_SECONDARY;
} else {
@@ -513,123 +496,122 @@ static void alloc_command_buffer(VkDevice device, VkCommandPool commandPool, VkC
numTotalObjs++;
}
-static void free_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer commandBuffer)
-{
+static void free_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer commandBuffer) {
uint64_t object_handle = reinterpret_cast<uint64_t>(commandBuffer);
if (VkCommandBufferMap.find(object_handle) != VkCommandBufferMap.end()) {
- OBJTRACK_NODE* pNode = VkCommandBufferMap[(uint64_t)commandBuffer];
-
- if (pNode->parentObj != (uint64_t)(commandPool)) {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_COMMAND_POOL_MISMATCH, "OBJTRACK",
- "FreeCommandBuffers is attempting to free Command Buffer 0x%" PRIxLEAST64 " belonging to Command Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
- reinterpret_cast<uint64_t>(commandBuffer), pNode->parentObj, (uint64_t)(commandPool));
- } else {
+ OBJTRACK_NODE *pNode = VkCommandBufferMap[(uint64_t)commandBuffer];
+
+ if (pNode->parentObj != (uint64_t)(commandPool)) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__,
+ OBJTRACK_COMMAND_POOL_MISMATCH, "OBJTRACK",
+ "FreeCommandBuffers is attempting to free Command Buffer 0x%" PRIxLEAST64
+ " belonging to Command Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
+ reinterpret_cast<uint64_t>(commandBuffer), pNode->parentObj, (uint64_t)(commandPool));
+ } else {
uint32_t objIndex = objTypeToIndex(pNode->objType);
assert(numTotalObjs > 0);
numTotalObjs--;
assert(numObjs[objIndex] > 0);
numObjs[objIndex]--;
- log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE, "OBJTRACK",
- "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
- string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t>(commandBuffer), numTotalObjs, numObjs[objIndex],
- string_VkDebugReportObjectTypeEXT(pNode->objType));
+ log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE,
+ "OBJTRACK", "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
+ string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t>(commandBuffer), numTotalObjs,
+ numObjs[objIndex], string_VkDebugReportObjectTypeEXT(pNode->objType));
delete pNode;
VkCommandBufferMap.erase(object_handle);
}
} else {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, object_handle, __LINE__, OBJTRACK_NONE, "OBJTRACK",
- "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
- object_handle);
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
+ "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
+ object_handle);
}
}
-static void alloc_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet vkObj, VkDebugReportObjectTypeEXT objType)
-{
+static void alloc_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet vkObj,
+ VkDebugReportObjectTypeEXT objType) {
log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
- "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
- (uint64_t)(vkObj));
+ "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
+ (uint64_t)(vkObj));
- OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
- pNewObjNode->objType = objType;
+ OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
+ pNewObjNode->objType = objType;
pNewObjNode->belongsTo = (uint64_t)device;
- pNewObjNode->status = OBJSTATUS_NONE;
- pNewObjNode->vkObj = (uint64_t)(vkObj);
- pNewObjNode->parentObj = (uint64_t) descriptorPool;
+ pNewObjNode->status = OBJSTATUS_NONE;
+ pNewObjNode->vkObj = (uint64_t)(vkObj);
+ pNewObjNode->parentObj = (uint64_t)descriptorPool;
VkDescriptorSetMap[(uint64_t)vkObj] = pNewObjNode;
uint32_t objIndex = objTypeToIndex(objType);
numObjs[objIndex]++;
numTotalObjs++;
}
-static void free_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet descriptorSet)
-{
+static void free_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet descriptorSet) {
uint64_t object_handle = (uint64_t)(descriptorSet);
if (VkDescriptorSetMap.find(object_handle) != VkDescriptorSetMap.end()) {
- OBJTRACK_NODE* pNode = VkDescriptorSetMap[(uint64_t)descriptorSet];
+ OBJTRACK_NODE *pNode = VkDescriptorSetMap[(uint64_t)descriptorSet];
if (pNode->parentObj != (uint64_t)(descriptorPool)) {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_DESCRIPTOR_POOL_MISMATCH, "OBJTRACK",
- "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64 " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
- (uint64_t)(descriptorSet), pNode->parentObj, (uint64_t)(descriptorPool));
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__,
+ OBJTRACK_DESCRIPTOR_POOL_MISMATCH, "OBJTRACK",
+ "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64
+ " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
+ (uint64_t)(descriptorSet), pNode->parentObj, (uint64_t)(descriptorPool));
} else {
uint32_t objIndex = objTypeToIndex(pNode->objType);
assert(numTotalObjs > 0);
numTotalObjs--;
assert(numObjs[objIndex] > 0);
numObjs[objIndex]--;
- log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE, "OBJTRACK",
- "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
- string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(descriptorSet), numTotalObjs, numObjs[objIndex],
- string_VkDebugReportObjectTypeEXT(pNode->objType));
+ log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE,
+ "OBJTRACK", "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
+ string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(descriptorSet), numTotalObjs, numObjs[objIndex],
+ string_VkDebugReportObjectTypeEXT(pNode->objType));
delete pNode;
VkDescriptorSetMap.erase(object_handle);
}
} else {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT) 0, object_handle, __LINE__, OBJTRACK_NONE, "OBJTRACK",
- "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
- object_handle);
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
+ "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
+ object_handle);
}
}
-static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType)
-{
- log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
- "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
- reinterpret_cast<uint64_t>(vkObj));
+static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType) {
+ log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
+ OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
+ string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));
- OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
+ OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
pNewObjNode->objType = objType;
pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
- pNewObjNode->status = OBJSTATUS_NONE;
- pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
+ pNewObjNode->status = OBJSTATUS_NONE;
+ pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
VkQueueMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
uint32_t objIndex = objTypeToIndex(objType);
numObjs[objIndex]++;
numTotalObjs++;
}
-static void create_swapchain_image_obj(VkDevice dispatchable_object, VkImage vkObj, VkSwapchainKHR swapchain)
-{
- log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t) vkObj, __LINE__, OBJTRACK_NONE, "OBJTRACK",
- "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, "SwapchainImage",
- (uint64_t)(vkObj));
-
- OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
- pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
- pNewObjNode->objType = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
- pNewObjNode->status = OBJSTATUS_NONE;
- pNewObjNode->vkObj = (uint64_t) vkObj;
- pNewObjNode->parentObj = (uint64_t) swapchain;
+static void create_swapchain_image_obj(VkDevice dispatchable_object, VkImage vkObj, VkSwapchainKHR swapchain) {
+ log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t)vkObj,
+ __LINE__, OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
+ "SwapchainImage", (uint64_t)(vkObj));
+
+ OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
+ pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
+ pNewObjNode->objType = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
+ pNewObjNode->status = OBJSTATUS_NONE;
+ pNewObjNode->vkObj = (uint64_t)vkObj;
+ pNewObjNode->parentObj = (uint64_t)swapchain;
swapchainImageMap[(uint64_t)(vkObj)] = pNewObjNode;
}
-static void create_device(VkInstance dispatchable_object, VkDevice vkObj, VkDebugReportObjectTypeEXT objType)
-{
- log_msg(mid(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
- "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
- (uint64_t)(vkObj));
+static void create_device(VkInstance dispatchable_object, VkDevice vkObj, VkDebugReportObjectTypeEXT objType) {
+ log_msg(mid(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
+ "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
+ string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));
- OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
+ OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
pNewObjNode->objType = objType;
pNewObjNode->status = OBJSTATUS_NONE;
@@ -643,17 +625,13 @@ static void create_device(VkInstance dispatchable_object, VkDevice vkObj, VkDebu
//
// Non-auto-generated API functions called by generated code
//
-VkResult
-explicit_CreateInstance(
- const VkInstanceCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkInstance *pInstance)
-{
+VkResult explicit_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
+ VkInstance *pInstance) {
VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
- PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance) fpGetInstanceProcAddr(NULL, "vkCreateInstance");
+ PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
if (fpCreateInstance == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -670,11 +648,8 @@ explicit_CreateInstance(
initInstanceTable(*pInstance, fpGetInstanceProcAddr, object_tracker_instance_table_map);
VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(object_tracker_instance_table_map, *pInstance);
- my_data->report_data = debug_report_create_instance(
- pInstanceTable,
- *pInstance,
- pCreateInfo->enabledExtensionCount,
- pCreateInfo->ppEnabledExtensionNames);
+ my_data->report_data = debug_report_create_instance(pInstanceTable, *pInstance, pCreateInfo->enabledExtensionCount,
+ pCreateInfo->ppEnabledExtensionNames);
initObjectTracker(my_data, pAllocator);
createInstanceRegisterExtensions(pCreateInfo, *pInstance);
@@ -684,12 +659,7 @@ explicit_CreateInstance(
return result;
}
-void
-explicit_GetPhysicalDeviceQueueFamilyProperties(
- VkPhysicalDevice gpu,
- uint32_t* pCount,
- VkQueueFamilyProperties* pProperties)
-{
+void explicit_GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice gpu, uint32_t *pCount, VkQueueFamilyProperties *pProperties) {
get_dispatch_table(object_tracker_instance_table_map, gpu)->GetPhysicalDeviceQueueFamilyProperties(gpu, pCount, pProperties);
loader_platform_thread_lock_mutex(&objLock);
@@ -698,20 +668,15 @@ explicit_GetPhysicalDeviceQueueFamilyProperties(
loader_platform_thread_unlock_mutex(&objLock);
}
-VkResult
-explicit_CreateDevice(
- VkPhysicalDevice gpu,
- const VkDeviceCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkDevice *pDevice)
-{
+VkResult explicit_CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
+ VkDevice *pDevice) {
loader_platform_thread_lock_mutex(&objLock);
VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
- PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice) fpGetInstanceProcAddr(NULL, "vkCreateDevice");
+ PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
if (fpCreateDevice == NULL) {
loader_platform_thread_unlock_mutex(&objLock);
return VK_ERROR_INITIALIZATION_FAILED;
@@ -735,7 +700,7 @@ explicit_CreateDevice(
createDeviceRegisterExtensions(pCreateInfo, *pDevice);
if (VkPhysicalDeviceMap.find((uint64_t)gpu) != VkPhysicalDeviceMap.end()) {
- OBJTRACK_NODE* pNewObjNode = VkPhysicalDeviceMap[(uint64_t)gpu];
+ OBJTRACK_NODE *pNewObjNode = VkPhysicalDeviceMap[(uint64_t)gpu];
create_device((VkInstance)pNewObjNode->belongsTo, *pDevice, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT);
}
@@ -743,15 +708,16 @@ explicit_CreateDevice(
return result;
}
-VkResult explicit_EnumeratePhysicalDevices(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices)
-{
+VkResult explicit_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
+ VkPhysicalDevice *pPhysicalDevices) {
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&objLock);
skipCall |= validate_instance(instance, instance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, false);
loader_platform_thread_unlock_mutex(&objLock);
if (skipCall)
return VK_ERROR_VALIDATION_FAILED_EXT;
- VkResult result = get_dispatch_table(object_tracker_instance_table_map, instance)->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
+ VkResult result = get_dispatch_table(object_tracker_instance_table_map, instance)
+ ->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
loader_platform_thread_lock_mutex(&objLock);
if (result == VK_SUCCESS) {
if (pPhysicalDevices) {
@@ -764,13 +730,7 @@ VkResult explicit_EnumeratePhysicalDevices(VkInstance instance, uint32_t* pPhysi
return result;
}
-void
-explicit_GetDeviceQueue(
- VkDevice device,
- uint32_t queueNodeIndex,
- uint32_t queueIndex,
- VkQueue *pQueue)
-{
+void explicit_GetDeviceQueue(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue *pQueue) {
loader_platform_thread_lock_mutex(&objLock);
validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
loader_platform_thread_unlock_mutex(&objLock);
@@ -783,15 +743,8 @@ explicit_GetDeviceQueue(
loader_platform_thread_unlock_mutex(&objLock);
}
-VkResult
-explicit_MapMemory(
- VkDevice device,
- VkDeviceMemory mem,
- VkDeviceSize offset,
- VkDeviceSize size,
- VkFlags flags,
- void **ppData)
-{
+VkResult explicit_MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
+ void **ppData) {
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&objLock);
skipCall |= set_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
@@ -800,16 +753,13 @@ explicit_MapMemory(
if (skipCall == VK_TRUE)
return VK_ERROR_VALIDATION_FAILED_EXT;
- VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);
+ VkResult result =
+ get_dispatch_table(object_tracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);
return result;
}
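
explicit_MapMemory() above shows the shape shared by all of the explicit_* wrappers: take objLock, run validation, release the lock, forward the call through the dispatch table, then record any tracking updates. A hypothetical skeleton for an entry point not covered in this diff (illustration only):

VkResult sketch_explicit_CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                       const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, device)->CreateSampler(device, pCreateInfo, pAllocator, pSampler);

    // A real wrapper would now record the new handle under objLock, mirroring the
    // create_*/alloc_* helpers above (e.g. a generated create_sampler()).
    return result;
}
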
-void
-explicit_UnmapMemory(
- VkDevice device,
- VkDeviceMemory mem)
-{
+void explicit_UnmapMemory(VkDevice device, VkDeviceMemory mem) {
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&objLock);
skipCall |= reset_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
@@ -821,13 +771,7 @@ explicit_UnmapMemory(
get_dispatch_table(object_tracker_device_table_map, device)->UnmapMemory(device, mem);
}
-VkResult
-explicit_QueueBindSparse(
- VkQueue queue,
- uint32_t bindInfoCount,
- const VkBindSparseInfo* pBindInfo,
- VkFence fence)
-{
+VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
loader_platform_thread_lock_mutex(&objLock);
validateQueueFlags(queue, "QueueBindSparse");
@@ -842,16 +786,13 @@ explicit_QueueBindSparse(
loader_platform_thread_unlock_mutex(&objLock);
- VkResult result = get_dispatch_table(object_tracker_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
+ VkResult result =
+ get_dispatch_table(object_tracker_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
return result;
}
-VkResult
-explicit_AllocateCommandBuffers(
- VkDevice device,
- const VkCommandBufferAllocateInfo *pAllocateInfo,
- VkCommandBuffer* pCommandBuffers)
-{
+VkResult explicit_AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
+ VkCommandBuffer *pCommandBuffers) {
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&objLock);
skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
@@ -862,42 +803,42 @@ explicit_AllocateCommandBuffers(
return VK_ERROR_VALIDATION_FAILED_EXT;
}
- VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->AllocateCommandBuffers(
- device, pAllocateInfo, pCommandBuffers);
+ VkResult result =
+ get_dispatch_table(object_tracker_device_table_map, device)->AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);
loader_platform_thread_lock_mutex(&objLock);
for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
- alloc_command_buffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, pAllocateInfo->level);
+ alloc_command_buffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ pAllocateInfo->level);
}
loader_platform_thread_unlock_mutex(&objLock);
return result;
}
-VkResult
-explicit_AllocateDescriptorSets(
- VkDevice device,
- const VkDescriptorSetAllocateInfo *pAllocateInfo,
- VkDescriptorSet *pDescriptorSets)
-{
+VkResult explicit_AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
+ VkDescriptorSet *pDescriptorSets) {
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&objLock);
skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
- skipCall |= validate_descriptor_pool(device, pAllocateInfo->descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
+ skipCall |=
+ validate_descriptor_pool(device, pAllocateInfo->descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
- skipCall |= validate_descriptor_set_layout(device, pAllocateInfo->pSetLayouts[i], VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, false);
+ skipCall |= validate_descriptor_set_layout(device, pAllocateInfo->pSetLayouts[i],
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, false);
}
loader_platform_thread_unlock_mutex(&objLock);
if (skipCall)
return VK_ERROR_VALIDATION_FAILED_EXT;
- VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->AllocateDescriptorSets(
- device, pAllocateInfo, pDescriptorSets);
+ VkResult result =
+ get_dispatch_table(object_tracker_device_table_map, device)->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&objLock);
for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
- alloc_descriptor_set(device, pAllocateInfo->descriptorPool, pDescriptorSets[i], VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT);
+ alloc_descriptor_set(device, pAllocateInfo->descriptorPool, pDescriptorSets[i],
+ VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT);
}
loader_platform_thread_unlock_mutex(&objLock);
}
@@ -905,46 +846,35 @@ explicit_AllocateDescriptorSets(
return result;
}
-void
-explicit_FreeCommandBuffers(
- VkDevice device,
- VkCommandPool commandPool,
- uint32_t commandBufferCount,
- const VkCommandBuffer *pCommandBuffers)
-{
+void explicit_FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
+ const VkCommandBuffer *pCommandBuffers) {
loader_platform_thread_lock_mutex(&objLock);
validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
loader_platform_thread_unlock_mutex(&objLock);
- get_dispatch_table(object_tracker_device_table_map, device)->FreeCommandBuffers(device,
- commandPool, commandBufferCount, pCommandBuffers);
+ get_dispatch_table(object_tracker_device_table_map, device)
+ ->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
loader_platform_thread_lock_mutex(&objLock);
- for (uint32_t i = 0; i < commandBufferCount; i++)
- {
+ for (uint32_t i = 0; i < commandBufferCount; i++) {
free_command_buffer(device, commandPool, *pCommandBuffers);
pCommandBuffers++;
}
loader_platform_thread_unlock_mutex(&objLock);
}
-void
-explicit_DestroySwapchainKHR(
- VkDevice device,
- VkSwapchainKHR swapchain,
- const VkAllocationCallbacks *pAllocator)
-{
+void explicit_DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
loader_platform_thread_lock_mutex(&objLock);
// A swapchain's images are implicitly deleted when the swapchain is deleted.
// Remove this swapchain's images from our map of such images.
- unordered_map<uint64_t, OBJTRACK_NODE*>::iterator itr = swapchainImageMap.begin();
+ unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = swapchainImageMap.begin();
while (itr != swapchainImageMap.end()) {
- OBJTRACK_NODE* pNode = (*itr).second;
+ OBJTRACK_NODE *pNode = (*itr).second;
if (pNode->parentObj == (uint64_t)(swapchain)) {
- swapchainImageMap.erase(itr++);
+ swapchainImageMap.erase(itr++);
} else {
- ++itr;
+ ++itr;
}
}
destroy_swapchain_khr(device, swapchain);
@@ -953,12 +883,7 @@ explicit_DestroySwapchainKHR(
get_dispatch_table(object_tracker_device_table_map, device)->DestroySwapchainKHR(device, swapchain, pAllocator);
}
-void
-explicit_FreeMemory(
- VkDevice device,
- VkDeviceMemory mem,
- const VkAllocationCallbacks* pAllocator)
-{
+void explicit_FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
loader_platform_thread_lock_mutex(&objLock);
validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
loader_platform_thread_unlock_mutex(&objLock);
@@ -970,34 +895,24 @@ explicit_FreeMemory(
loader_platform_thread_unlock_mutex(&objLock);
}
-VkResult
-explicit_FreeDescriptorSets(
- VkDevice device,
- VkDescriptorPool descriptorPool,
- uint32_t count,
- const VkDescriptorSet *pDescriptorSets)
-{
+VkResult explicit_FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
+ const VkDescriptorSet *pDescriptorSets) {
loader_platform_thread_lock_mutex(&objLock);
validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
loader_platform_thread_unlock_mutex(&objLock);
- VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
+ VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
+ ->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
loader_platform_thread_lock_mutex(&objLock);
- for (uint32_t i=0; i<count; i++)
- {
+ for (uint32_t i = 0; i < count; i++) {
free_descriptor_set(device, descriptorPool, *pDescriptorSets++);
}
loader_platform_thread_unlock_mutex(&objLock);
return result;
}
-void
-explicit_DestroyDescriptorPool(
- VkDevice device,
- VkDescriptorPool descriptorPool,
- const VkAllocationCallbacks *pAllocator)
-{
+void explicit_DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&objLock);
skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
@@ -1009,9 +924,9 @@ explicit_DestroyDescriptorPool(
// A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
// Remove this pool's descriptor sets from our descriptorSet map.
loader_platform_thread_lock_mutex(&objLock);
- unordered_map<uint64_t, OBJTRACK_NODE*>::iterator itr = VkDescriptorSetMap.begin();
+ unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkDescriptorSetMap.begin();
while (itr != VkDescriptorSetMap.end()) {
- OBJTRACK_NODE* pNode = (*itr).second;
+ OBJTRACK_NODE *pNode = (*itr).second;
auto del_itr = itr++;
if (pNode->parentObj == (uint64_t)(descriptorPool)) {
destroy_descriptor_set(device, (VkDescriptorSet)((*del_itr).first));
@@ -1022,12 +937,7 @@ explicit_DestroyDescriptorPool(
get_dispatch_table(object_tracker_device_table_map, device)->DestroyDescriptorPool(device, descriptorPool, pAllocator);
}
-void
-explicit_DestroyCommandPool(
- VkDevice device,
- VkCommandPool commandPool,
- const VkAllocationCallbacks *pAllocator)
-{
+void explicit_DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&objLock);
skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
@@ -1039,10 +949,10 @@ explicit_DestroyCommandPool(
loader_platform_thread_lock_mutex(&objLock);
// A CommandPool's command buffers are implicitly deleted when the pool is deleted.
// Remove this pool's cmdBuffers from our cmd buffer map.
- unordered_map<uint64_t, OBJTRACK_NODE*>::iterator itr = VkCommandBufferMap.begin();
- unordered_map<uint64_t, OBJTRACK_NODE*>::iterator del_itr;
+ unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkCommandBufferMap.begin();
+ unordered_map<uint64_t, OBJTRACK_NODE *>::iterator del_itr;
while (itr != VkCommandBufferMap.end()) {
- OBJTRACK_NODE* pNode = (*itr).second;
+ OBJTRACK_NODE *pNode = (*itr).second;
del_itr = itr++;
if (pNode->parentObj == (uint64_t)(commandPool)) {
destroy_command_buffer(reinterpret_cast<VkCommandBuffer>((*del_itr).first),
@@ -1054,13 +964,7 @@ explicit_DestroyCommandPool(
get_dispatch_table(object_tracker_device_table_map, device)->DestroyCommandPool(device, commandPool, pAllocator);
}
-VkResult
-explicit_GetSwapchainImagesKHR(
- VkDevice device,
- VkSwapchainKHR swapchain,
- uint32_t *pCount,
- VkImage *pSwapchainImages)
-{
+VkResult explicit_GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&objLock);
skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
@@ -1068,7 +972,8 @@ explicit_GetSwapchainImagesKHR(
if (skipCall)
return VK_ERROR_VALIDATION_FAILED_EXT;
- VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
+ VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
+ ->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
if (pSwapchainImages != NULL) {
loader_platform_thread_lock_mutex(&objLock);
@@ -1081,35 +986,33 @@ explicit_GetSwapchainImagesKHR(
}
// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
-VkResult
-explicit_CreateGraphicsPipelines(
- VkDevice device,
- VkPipelineCache pipelineCache,
- uint32_t createInfoCount,
- const VkGraphicsPipelineCreateInfo *pCreateInfos,
- const VkAllocationCallbacks *pAllocator,
- VkPipeline *pPipelines)
-{
+VkResult explicit_CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
+ const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipelines) {
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&objLock);
skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
if (pCreateInfos) {
- for (uint32_t idx0=0; idx0<createInfoCount; ++idx0) {
+ for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
if (pCreateInfos[idx0].basePipelineHandle) {
- skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
+ skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
}
if (pCreateInfos[idx0].layout) {
- skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
+ skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
}
if (pCreateInfos[idx0].pStages) {
- for (uint32_t idx1=0; idx1<pCreateInfos[idx0].stageCount; ++idx1) {
+ for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
if (pCreateInfos[idx0].pStages[idx1].module) {
- skipCall |= validate_shader_module(device, pCreateInfos[idx0].pStages[idx1].module, VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
+ skipCall |= validate_shader_module(device, pCreateInfos[idx0].pStages[idx1].module,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
}
}
}
if (pCreateInfos[idx0].renderPass) {
- skipCall |= validate_render_pass(device, pCreateInfos[idx0].renderPass, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, false);
+ skipCall |=
+ validate_render_pass(device, pCreateInfos[idx0].renderPass, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, false);
}
}
}
@@ -1119,7 +1022,8 @@ explicit_CreateGraphicsPipelines(
loader_platform_thread_unlock_mutex(&objLock);
if (skipCall)
return VK_ERROR_VALIDATION_FAILED_EXT;
- VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
+ VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
+ ->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
loader_platform_thread_lock_mutex(&objLock);
if (result == VK_SUCCESS) {
for (uint32_t idx2 = 0; idx2 < createInfoCount; ++idx2) {
@@ -1131,28 +1035,25 @@ explicit_CreateGraphicsPipelines(
}
// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
-VkResult
-explicit_CreateComputePipelines(
- VkDevice device,
- VkPipelineCache pipelineCache,
- uint32_t createInfoCount,
- const VkComputePipelineCreateInfo *pCreateInfos,
- const VkAllocationCallbacks *pAllocator,
- VkPipeline *pPipelines)
-{
+VkResult explicit_CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
+ const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipelines) {
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_lock_mutex(&objLock);
skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
if (pCreateInfos) {
- for (uint32_t idx0=0; idx0<createInfoCount; ++idx0) {
+ for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
if (pCreateInfos[idx0].basePipelineHandle) {
- skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
+ skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
}
if (pCreateInfos[idx0].layout) {
- skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
+ skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
+ VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
}
if (pCreateInfos[idx0].stage.module) {
- skipCall |= validate_shader_module(device, pCreateInfos[idx0].stage.module, VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
+ skipCall |= validate_shader_module(device, pCreateInfos[idx0].stage.module,
+ VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
}
}
}
@@ -1162,7 +1063,8 @@ explicit_CreateComputePipelines(
loader_platform_thread_unlock_mutex(&objLock);
if (skipCall)
return VK_ERROR_VALIDATION_FAILED_EXT;
- VkResult result = get_dispatch_table(object_tracker_device_table_map, device)->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
+ VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
+ ->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
loader_platform_thread_lock_mutex(&objLock);
if (result == VK_SUCCESS) {
for (uint32_t idx1 = 0; idx1 < createInfoCount; ++idx1) {
diff --git a/layers/param_checker.cpp b/layers/param_checker.cpp
index bf2cd5535..dd4d57259 100644
--- a/layers/param_checker.cpp
+++ b/layers/param_checker.cpp
@@ -57,23 +57,20 @@ struct layer_data {
debug_report_data *report_data;
std::vector<VkDebugReportCallbackEXT> logging_callback;
- //TODO: Split instance/device structs
- //Device Data
- //Map for queue family index to queue count
+ // TODO: Split instance/device structs
+ // Device Data
+ // Map for queue family index to queue count
std::unordered_map<uint32_t, uint32_t> queueFamilyIndexMap;
- layer_data() :
- report_data(nullptr)
- {};
+ layer_data() : report_data(nullptr){};
};
-static std::unordered_map<void*, layer_data*> layer_data_map;
+static std::unordered_map<void *, layer_data *> layer_data_map;
static device_table_map pc_device_table_map;
static instance_table_map pc_instance_table_map;
// "my instance data"
-debug_report_data *mid(VkInstance object)
-{
+debug_report_data *mid(VkInstance object) {
dispatch_key key = get_dispatch_key(object);
layer_data *data = get_my_data_ptr(key, layer_data_map);
#if DISPATCH_MAP_DEBUG
@@ -85,8 +82,7 @@ debug_report_data *mid(VkInstance object)
}
// "my device data"
-debug_report_data *mdd(void* object)
-{
+debug_report_data *mdd(void *object) {
dispatch_key key = get_dispatch_key(object);
layer_data *data = get_my_data_ptr(key, layer_data_map);
#if DISPATCH_MAP_DEBUG
@@ -96,17 +92,15 @@ debug_report_data *mdd(void* object)
return data->report_data;
}
-static void InitParamChecker(layer_data *data, const VkAllocationCallbacks *pAllocator)
-{
+static void InitParamChecker(layer_data *data, const VkAllocationCallbacks *pAllocator) {
VkDebugReportCallbackEXT callback;
uint32_t report_flags = getLayerOptionFlags("lunarg_param_checker.report_flags", 0);
uint32_t debug_action = 0;
- getLayerOptionEnum("lunarg_param_checker.debug_action", (uint32_t *) &debug_action);
- if(debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
- {
+ getLayerOptionEnum("lunarg_param_checker.debug_action", (uint32_t *)&debug_action);
+ if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG) {
FILE *log_output = NULL;
- const char* option_str = getLayerOption("lunarg_param_checker.log_filename");
+ const char *option_str = getLayerOption("lunarg_param_checker.log_filename");
log_output = getLayerLogOutput(option_str, "lunarg_param_checker");
VkDebugReportCallbackCreateInfoEXT dbgCreateInfo;
memset(&dbgCreateInfo, 0, sizeof(dbgCreateInfo));
@@ -132,17 +126,13 @@ static void InitParamChecker(layer_data *data, const VkAllocationCallbacks *pAll
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
- VkInstance instance,
- const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkDebugReportCallbackEXT* pMsgCallback)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
VkLayerInstanceDispatchTable *pTable = get_dispatch_table(pc_instance_table_map, instance);
- VkResult result = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
+ VkResult result = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
- if (result == VK_SUCCESS)
- {
+ if (result == VK_SUCCESS) {
layer_data *data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
result = layer_create_msg_callback(data->report_data, pCreateInfo, pAllocator, pMsgCallback);
}
@@ -150,11 +140,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
- VkInstance instance,
- VkDebugReportCallbackEXT msgCallback,
- const VkAllocationCallbacks *pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
+ VkDebugReportCallbackEXT msgCallback,
+ const VkAllocationCallbacks *pAllocator) {
VkLayerInstanceDispatchTable *pTable = get_dispatch_table(pc_instance_table_map, instance);
pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
@@ -162,264 +150,183 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
layer_destroy_msg_callback(data->report_data, msgCallback, pAllocator);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(
- VkInstance instance,
- VkDebugReportFlagsEXT flags,
- VkDebugReportObjectTypeEXT objType,
- uint64_t object,
- size_t location,
- int32_t msgCode,
- const char* pLayerPrefix,
- const char* pMsg)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
+ size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
VkLayerInstanceDispatchTable *pTable = get_dispatch_table(pc_instance_table_map, instance);
pTable->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}
-static const VkExtensionProperties instance_extensions[] = {
- {
- VK_EXT_DEBUG_REPORT_EXTENSION_NAME,
- VK_EXT_DEBUG_REPORT_SPEC_VERSION
- }
-};
+static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(
- const char *pLayerName,
- uint32_t *pCount,
- VkExtensionProperties* pProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
}
-static const VkLayerProperties pc_global_layers[] = {
- {
- "VK_LAYER_LUNARG_param_checker",
- VK_API_VERSION,
- 1,
- "LunarG Validation Layer",
- }
-};
+static const VkLayerProperties pc_global_layers[] = {{
+ "VK_LAYER_LUNARG_param_checker", VK_API_VERSION, 1, "LunarG Validation Layer",
+}};
+
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
+ return util_GetLayerProperties(ARRAY_SIZE(pc_global_layers), pc_global_layers, pCount, pProperties);
+}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(
- uint32_t *pCount,
- VkLayerProperties* pProperties)
-{
- return util_GetLayerProperties(ARRAY_SIZE(pc_global_layers),
- pc_global_layers,
- pCount, pProperties);
-}
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(
- VkPhysicalDevice physicalDevice,
- const char* pLayerName,
- uint32_t* pCount,
- VkExtensionProperties* pProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
+ const char *pLayerName, uint32_t *pCount,
+ VkExtensionProperties *pProperties) {
/* param_checker does not have any physical device extensions */
if (pLayerName == NULL) {
- return get_dispatch_table(pc_instance_table_map, physicalDevice)->EnumerateDeviceExtensionProperties(
- physicalDevice,
- NULL,
- pCount,
- pProperties);
+ return get_dispatch_table(pc_instance_table_map, physicalDevice)
+ ->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
} else {
return util_GetExtensionProperties(0, NULL, pCount, pProperties);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(
- VkPhysicalDevice physicalDevice,
- uint32_t* pCount,
- VkLayerProperties* pProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
/* param_checker's physical device layers are the same as global */
- return util_GetLayerProperties(ARRAY_SIZE(pc_global_layers), pc_global_layers,
- pCount, pProperties);
-}
-
-static
-std::string EnumeratorString(VkResult const& enumerator)
-{
- switch(enumerator)
- {
- case VK_RESULT_MAX_ENUM:
- {
- return "VK_RESULT_MAX_ENUM";
- break;
- }
- case VK_ERROR_LAYER_NOT_PRESENT:
- {
- return "VK_ERROR_LAYER_NOT_PRESENT";
- break;
- }
- case VK_ERROR_INCOMPATIBLE_DRIVER:
- {
- return "VK_ERROR_INCOMPATIBLE_DRIVER";
- break;
- }
- case VK_ERROR_MEMORY_MAP_FAILED:
- {
- return "VK_ERROR_MEMORY_MAP_FAILED";
- break;
- }
- case VK_INCOMPLETE:
- {
- return "VK_INCOMPLETE";
- break;
- }
- case VK_ERROR_OUT_OF_HOST_MEMORY:
- {
- return "VK_ERROR_OUT_OF_HOST_MEMORY";
- break;
- }
- case VK_ERROR_INITIALIZATION_FAILED:
- {
- return "VK_ERROR_INITIALIZATION_FAILED";
- break;
- }
- case VK_NOT_READY:
- {
- return "VK_NOT_READY";
- break;
- }
- case VK_ERROR_OUT_OF_DEVICE_MEMORY:
- {
- return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
- break;
- }
- case VK_EVENT_SET:
- {
- return "VK_EVENT_SET";
- break;
- }
- case VK_TIMEOUT:
- {
- return "VK_TIMEOUT";
- break;
- }
- case VK_EVENT_RESET:
- {
- return "VK_EVENT_RESET";
- break;
- }
- case VK_SUCCESS:
- {
- return "VK_SUCCESS";
- break;
- }
- case VK_ERROR_EXTENSION_NOT_PRESENT:
- {
- return "VK_ERROR_EXTENSION_NOT_PRESENT";
- break;
- }
- case VK_ERROR_DEVICE_LOST:
- {
- return "VK_ERROR_DEVICE_LOST";
- break;
- }
- default:
- {
- return "unrecognized enumerator";
- break;
- }
+ return util_GetLayerProperties(ARRAY_SIZE(pc_global_layers), pc_global_layers, pCount, pProperties);
+}
+
+static std::string EnumeratorString(VkResult const &enumerator) {
+ switch (enumerator) {
+ case VK_RESULT_MAX_ENUM: {
+ return "VK_RESULT_MAX_ENUM";
+ break;
+ }
+ case VK_ERROR_LAYER_NOT_PRESENT: {
+ return "VK_ERROR_LAYER_NOT_PRESENT";
+ break;
+ }
+ case VK_ERROR_INCOMPATIBLE_DRIVER: {
+ return "VK_ERROR_INCOMPATIBLE_DRIVER";
+ break;
+ }
+ case VK_ERROR_MEMORY_MAP_FAILED: {
+ return "VK_ERROR_MEMORY_MAP_FAILED";
+ break;
+ }
+ case VK_INCOMPLETE: {
+ return "VK_INCOMPLETE";
+ break;
+ }
+ case VK_ERROR_OUT_OF_HOST_MEMORY: {
+ return "VK_ERROR_OUT_OF_HOST_MEMORY";
+ break;
+ }
+ case VK_ERROR_INITIALIZATION_FAILED: {
+ return "VK_ERROR_INITIALIZATION_FAILED";
+ break;
+ }
+ case VK_NOT_READY: {
+ return "VK_NOT_READY";
+ break;
+ }
+ case VK_ERROR_OUT_OF_DEVICE_MEMORY: {
+ return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
+ break;
+ }
+ case VK_EVENT_SET: {
+ return "VK_EVENT_SET";
+ break;
+ }
+ case VK_TIMEOUT: {
+ return "VK_TIMEOUT";
+ break;
+ }
+ case VK_EVENT_RESET: {
+ return "VK_EVENT_RESET";
+ break;
+ }
+ case VK_SUCCESS: {
+ return "VK_SUCCESS";
+ break;
+ }
+ case VK_ERROR_EXTENSION_NOT_PRESENT: {
+ return "VK_ERROR_EXTENSION_NOT_PRESENT";
+ break;
+ }
+ case VK_ERROR_DEVICE_LOST: {
+ return "VK_ERROR_DEVICE_LOST";
+ break;
+ }
+ default: {
+ return "unrecognized enumerator";
+ break;
+ }
}
}
-static
-bool ValidateEnumerator(VkFormatFeatureFlagBits const& enumerator)
-{
+static bool ValidateEnumerator(VkFormatFeatureFlagBits const &enumerator) {
VkFormatFeatureFlagBits allFlags = (VkFormatFeatureFlagBits)(
- VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT |
- VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT |
- VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT |
- VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT |
- VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
- VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT |
- VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT |
- VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT |
- VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
- VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT |
- VK_FORMAT_FEATURE_BLIT_SRC_BIT |
- VK_FORMAT_FEATURE_BLIT_DST_BIT |
+ VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT | VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT |
+ VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT | VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT |
+ VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT | VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT |
+ VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT | VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT |
+ VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT |
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT);
- if(enumerator & (~allFlags))
- {
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkFormatFeatureFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkFormatFeatureFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT)
- {
+ if (enumerator & VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT) {
strings.push_back("VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT");
}
- if(enumerator & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)
- {
+ if (enumerator & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) {
strings.push_back("VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT");
}
- if(enumerator & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)
- {
+ if (enumerator & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) {
strings.push_back("VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT");
}
- if(enumerator & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT)
- {
+ if (enumerator & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT) {
strings.push_back("VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT");
}
- if(enumerator & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
- {
+ if (enumerator & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) {
strings.push_back("VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT");
}
- if(enumerator & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT)
- {
+ if (enumerator & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT) {
strings.push_back("VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT");
}
- if(enumerator & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT)
- {
+ if (enumerator & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) {
strings.push_back("VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT");
}
- if(enumerator & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)
- {
+ if (enumerator & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) {
strings.push_back("VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT");
}
- if(enumerator & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)
- {
+ if (enumerator & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) {
strings.push_back("VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT");
}
- if(enumerator & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)
- {
+ if (enumerator & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) {
strings.push_back("VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT");
}
- if(enumerator & VK_FORMAT_FEATURE_BLIT_SRC_BIT)
- {
+ if (enumerator & VK_FORMAT_FEATURE_BLIT_SRC_BIT) {
strings.push_back("VK_FORMAT_FEATURE_BLIT_SRC_BIT");
}
- if(enumerator & VK_FORMAT_FEATURE_BLIT_DST_BIT)
- {
+ if (enumerator & VK_FORMAT_FEATURE_BLIT_DST_BIT) {
strings.push_back("VK_FORMAT_FEATURE_BLIT_DST_BIT");
}
- if(enumerator & VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT)
- {
+ if (enumerator & VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT) {
strings.push_back("VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -427,74 +334,54 @@ std::string EnumeratorString(VkFormatFeatureFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkImageUsageFlagBits const& enumerator)
-{
- VkImageUsageFlagBits allFlags = (VkImageUsageFlagBits)(VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
- VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
- VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
- VK_IMAGE_USAGE_STORAGE_BIT |
- VK_IMAGE_USAGE_SAMPLED_BIT |
- VK_IMAGE_USAGE_TRANSFER_DST_BIT |
- VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT |
- VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
- if(enumerator & (~allFlags))
- {
+static bool ValidateEnumerator(VkImageUsageFlagBits const &enumerator) {
+ VkImageUsageFlagBits allFlags = (VkImageUsageFlagBits)(
+ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
+ VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
+ VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkImageUsageFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkImageUsageFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)
- {
+ if (enumerator & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) {
strings.push_back("VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT");
}
- if(enumerator & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)
- {
+ if (enumerator & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
strings.push_back("VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT");
}
- if(enumerator & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)
- {
+ if (enumerator & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
strings.push_back("VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT");
}
- if(enumerator & VK_IMAGE_USAGE_STORAGE_BIT)
- {
+ if (enumerator & VK_IMAGE_USAGE_STORAGE_BIT) {
strings.push_back("VK_IMAGE_USAGE_STORAGE_BIT");
}
- if(enumerator & VK_IMAGE_USAGE_SAMPLED_BIT)
- {
+ if (enumerator & VK_IMAGE_USAGE_SAMPLED_BIT) {
strings.push_back("VK_IMAGE_USAGE_SAMPLED_BIT");
}
- if(enumerator & VK_IMAGE_USAGE_TRANSFER_DST_BIT)
- {
+ if (enumerator & VK_IMAGE_USAGE_TRANSFER_DST_BIT) {
strings.push_back("VK_IMAGE_USAGE_TRANSFER_DST_BIT");
}
- if(enumerator & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT)
- {
+ if (enumerator & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) {
strings.push_back("VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT");
}
- if(enumerator & VK_IMAGE_USAGE_TRANSFER_SRC_BIT)
- {
+ if (enumerator & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) {
strings.push_back("VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -502,55 +389,40 @@ std::string EnumeratorString(VkImageUsageFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkQueueFlagBits const& enumerator)
-{
- VkQueueFlagBits allFlags = (VkQueueFlagBits)(
- VK_QUEUE_TRANSFER_BIT |
- VK_QUEUE_COMPUTE_BIT |
- VK_QUEUE_SPARSE_BINDING_BIT |
- VK_QUEUE_GRAPHICS_BIT);
- if(enumerator & (~allFlags))
- {
+static bool ValidateEnumerator(VkQueueFlagBits const &enumerator) {
+ VkQueueFlagBits allFlags =
+ (VkQueueFlagBits)(VK_QUEUE_TRANSFER_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_SPARSE_BINDING_BIT | VK_QUEUE_GRAPHICS_BIT);
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkQueueFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkQueueFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_QUEUE_TRANSFER_BIT)
- {
+ if (enumerator & VK_QUEUE_TRANSFER_BIT) {
strings.push_back("VK_QUEUE_TRANSFER_BIT");
}
- if(enumerator & VK_QUEUE_COMPUTE_BIT)
- {
+ if (enumerator & VK_QUEUE_COMPUTE_BIT) {
strings.push_back("VK_QUEUE_COMPUTE_BIT");
}
- if(enumerator & VK_QUEUE_SPARSE_BINDING_BIT)
- {
+ if (enumerator & VK_QUEUE_SPARSE_BINDING_BIT) {
strings.push_back("VK_QUEUE_SPARSE_BINDING_BIT");
}
- if(enumerator & VK_QUEUE_GRAPHICS_BIT)
- {
+ if (enumerator & VK_QUEUE_GRAPHICS_BIT) {
strings.push_back("VK_QUEUE_GRAPHICS_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -558,59 +430,44 @@ std::string EnumeratorString(VkQueueFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkMemoryPropertyFlagBits const& enumerator)
-{
- VkMemoryPropertyFlagBits allFlags = (VkMemoryPropertyFlagBits)(VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT |
- VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
- VK_MEMORY_PROPERTY_HOST_CACHED_BIT |
- VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
- if(enumerator & (~allFlags))
- {
+static bool ValidateEnumerator(VkMemoryPropertyFlagBits const &enumerator) {
+ VkMemoryPropertyFlagBits allFlags = (VkMemoryPropertyFlagBits)(
+ VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_CACHED_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkMemoryPropertyFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkMemoryPropertyFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT)
- {
+ if (enumerator & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) {
strings.push_back("VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT");
}
- if(enumerator & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
- {
+ if (enumerator & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
strings.push_back("VK_MEMORY_PROPERTY_HOST_COHERENT_BIT");
}
- if(enumerator & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
- {
+ if (enumerator & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
strings.push_back("VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT");
}
- if(enumerator & VK_MEMORY_PROPERTY_HOST_CACHED_BIT)
- {
+ if (enumerator & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) {
strings.push_back("VK_MEMORY_PROPERTY_HOST_CACHED_BIT");
}
- if(enumerator & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
- {
+ if (enumerator & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
strings.push_back("VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -618,39 +475,30 @@ std::string EnumeratorString(VkMemoryPropertyFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkMemoryHeapFlagBits const& enumerator)
-{
+static bool ValidateEnumerator(VkMemoryHeapFlagBits const &enumerator) {
VkMemoryHeapFlagBits allFlags = (VkMemoryHeapFlagBits)(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT);
- if(enumerator & (~allFlags))
- {
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkMemoryHeapFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkMemoryHeapFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)
- {
+ if (enumerator & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) {
strings.push_back("VK_MEMORY_HEAP_DEVICE_LOCAL_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -658,49 +506,38 @@ std::string EnumeratorString(VkMemoryHeapFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkSparseImageFormatFlagBits const& enumerator)
-{
- VkSparseImageFormatFlagBits allFlags = (VkSparseImageFormatFlagBits)(VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT |
- VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT |
- VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT);
- if(enumerator & (~allFlags))
- {
+static bool ValidateEnumerator(VkSparseImageFormatFlagBits const &enumerator) {
+ VkSparseImageFormatFlagBits allFlags =
+ (VkSparseImageFormatFlagBits)(VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT |
+ VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT | VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT);
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkSparseImageFormatFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkSparseImageFormatFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT)
- {
+ if (enumerator & VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT) {
strings.push_back("VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT");
}
- if(enumerator & VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT)
- {
+ if (enumerator & VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT) {
strings.push_back("VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT");
}
- if(enumerator & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT)
- {
+ if (enumerator & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) {
strings.push_back("VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -708,39 +545,30 @@ std::string EnumeratorString(VkSparseImageFormatFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkFenceCreateFlagBits const& enumerator)
-{
+static bool ValidateEnumerator(VkFenceCreateFlagBits const &enumerator) {
VkFenceCreateFlagBits allFlags = (VkFenceCreateFlagBits)(VK_FENCE_CREATE_SIGNALED_BIT);
- if(enumerator & (~allFlags))
- {
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkFenceCreateFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkFenceCreateFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_FENCE_CREATE_SIGNALED_BIT)
- {
+ if (enumerator & VK_FENCE_CREATE_SIGNALED_BIT) {
strings.push_back("VK_FENCE_CREATE_SIGNALED_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -748,89 +576,67 @@ std::string EnumeratorString(VkFenceCreateFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkQueryPipelineStatisticFlagBits const& enumerator)
-{
- VkQueryPipelineStatisticFlagBits allFlags = (VkQueryPipelineStatisticFlagBits)(VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT |
- VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT |
- VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT |
- VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT |
- VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT |
- VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT |
- VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT |
- VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT |
+static bool ValidateEnumerator(VkQueryPipelineStatisticFlagBits const &enumerator) {
+ VkQueryPipelineStatisticFlagBits allFlags = (VkQueryPipelineStatisticFlagBits)(
+ VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT | VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT |
+ VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT | VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT |
+ VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT | VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT |
+ VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT | VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT |
VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT |
VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT |
VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT);
- if(enumerator & (~allFlags))
- {
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkQueryPipelineStatisticFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkQueryPipelineStatisticFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT)
- {
+ if (enumerator & VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT) {
strings.push_back("VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT");
}
- if(enumerator & VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT)
- {
+ if (enumerator & VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT) {
strings.push_back("VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT");
}
- if(enumerator & VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT)
- {
+ if (enumerator & VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT) {
strings.push_back("VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT");
}
- if(enumerator & VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT)
- {
+ if (enumerator & VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT) {
strings.push_back("VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT");
}
- if(enumerator & VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT)
- {
+ if (enumerator & VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT) {
strings.push_back("VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT");
}
- if(enumerator & VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT)
- {
+ if (enumerator & VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT) {
strings.push_back("VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT");
}
- if(enumerator & VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT)
- {
+ if (enumerator & VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT) {
strings.push_back("VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT");
}
- if(enumerator & VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT)
- {
+ if (enumerator & VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT) {
strings.push_back("VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT");
}
- if(enumerator & VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT)
- {
+ if (enumerator & VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT) {
strings.push_back("VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT");
}
- if(enumerator & VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT)
- {
+ if (enumerator & VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT) {
strings.push_back("VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT");
}
- if(enumerator & VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT)
- {
+ if (enumerator & VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT) {
strings.push_back("VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -838,54 +644,40 @@ std::string EnumeratorString(VkQueryPipelineStatisticFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkQueryResultFlagBits const& enumerator)
-{
- VkQueryResultFlagBits allFlags = (VkQueryResultFlagBits)(VK_QUERY_RESULT_PARTIAL_BIT |
- VK_QUERY_RESULT_WITH_AVAILABILITY_BIT |
- VK_QUERY_RESULT_WAIT_BIT |
- VK_QUERY_RESULT_64_BIT);
- if(enumerator & (~allFlags))
- {
+static bool ValidateEnumerator(VkQueryResultFlagBits const &enumerator) {
+ VkQueryResultFlagBits allFlags = (VkQueryResultFlagBits)(VK_QUERY_RESULT_PARTIAL_BIT | VK_QUERY_RESULT_WITH_AVAILABILITY_BIT |
+ VK_QUERY_RESULT_WAIT_BIT | VK_QUERY_RESULT_64_BIT);
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkQueryResultFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkQueryResultFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_QUERY_RESULT_PARTIAL_BIT)
- {
+ if (enumerator & VK_QUERY_RESULT_PARTIAL_BIT) {
strings.push_back("VK_QUERY_RESULT_PARTIAL_BIT");
}
- if(enumerator & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
- {
+ if (enumerator & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
strings.push_back("VK_QUERY_RESULT_WITH_AVAILABILITY_BIT");
}
- if(enumerator & VK_QUERY_RESULT_WAIT_BIT)
- {
+ if (enumerator & VK_QUERY_RESULT_WAIT_BIT) {
strings.push_back("VK_QUERY_RESULT_WAIT_BIT");
}
- if(enumerator & VK_QUERY_RESULT_64_BIT)
- {
+ if (enumerator & VK_QUERY_RESULT_64_BIT) {
strings.push_back("VK_QUERY_RESULT_64_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -893,79 +685,57 @@ std::string EnumeratorString(VkQueryResultFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkBufferUsageFlagBits const& enumerator)
-{
- VkBufferUsageFlagBits allFlags = (VkBufferUsageFlagBits)(VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
- VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
- VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT |
- VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
- VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
- VK_BUFFER_USAGE_TRANSFER_DST_BIT |
- VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
- VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
- VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
- if(enumerator & (~allFlags))
- {
+static bool ValidateEnumerator(VkBufferUsageFlagBits const &enumerator) {
+ VkBufferUsageFlagBits allFlags = (VkBufferUsageFlagBits)(
+ VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT |
+ VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
+ VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkBufferUsageFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkBufferUsageFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_BUFFER_USAGE_VERTEX_BUFFER_BIT)
- {
+ if (enumerator & VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) {
strings.push_back("VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
}
- if(enumerator & VK_BUFFER_USAGE_INDEX_BUFFER_BIT)
- {
+ if (enumerator & VK_BUFFER_USAGE_INDEX_BUFFER_BIT) {
strings.push_back("VK_BUFFER_USAGE_INDEX_BUFFER_BIT");
}
- if(enumerator & VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT)
- {
+ if (enumerator & VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT) {
strings.push_back("VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT");
}
- if(enumerator & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)
- {
+ if (enumerator & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) {
strings.push_back("VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT");
}
- if(enumerator & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT)
- {
+ if (enumerator & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
strings.push_back("VK_BUFFER_USAGE_STORAGE_BUFFER_BIT");
}
- if(enumerator & VK_BUFFER_USAGE_TRANSFER_DST_BIT)
- {
+ if (enumerator & VK_BUFFER_USAGE_TRANSFER_DST_BIT) {
strings.push_back("VK_BUFFER_USAGE_TRANSFER_DST_BIT");
}
- if(enumerator & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT)
- {
+ if (enumerator & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) {
strings.push_back("VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT");
}
- if(enumerator & VK_BUFFER_USAGE_TRANSFER_SRC_BIT)
- {
+ if (enumerator & VK_BUFFER_USAGE_TRANSFER_SRC_BIT) {
strings.push_back("VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
}
- if(enumerator & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)
- {
+ if (enumerator & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) {
strings.push_back("VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -973,49 +743,37 @@ std::string EnumeratorString(VkBufferUsageFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkBufferCreateFlagBits const& enumerator)
-{
- VkBufferCreateFlagBits allFlags = (VkBufferCreateFlagBits)(VK_BUFFER_CREATE_SPARSE_ALIASED_BIT |
- VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT |
- VK_BUFFER_CREATE_SPARSE_BINDING_BIT);
- if(enumerator & (~allFlags))
- {
+static bool ValidateEnumerator(VkBufferCreateFlagBits const &enumerator) {
+ VkBufferCreateFlagBits allFlags = (VkBufferCreateFlagBits)(
+ VK_BUFFER_CREATE_SPARSE_ALIASED_BIT | VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT | VK_BUFFER_CREATE_SPARSE_BINDING_BIT);
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkBufferCreateFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkBufferCreateFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_BUFFER_CREATE_SPARSE_ALIASED_BIT)
- {
+ if (enumerator & VK_BUFFER_CREATE_SPARSE_ALIASED_BIT) {
strings.push_back("VK_BUFFER_CREATE_SPARSE_ALIASED_BIT");
}
- if(enumerator & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT)
- {
+ if (enumerator & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT) {
strings.push_back("VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT");
}
- if(enumerator & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
- {
+ if (enumerator & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
strings.push_back("VK_BUFFER_CREATE_SPARSE_BINDING_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -1023,59 +781,44 @@ std::string EnumeratorString(VkBufferCreateFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkImageCreateFlagBits const& enumerator)
-{
- VkImageCreateFlagBits allFlags = (VkImageCreateFlagBits)(VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT |
- VK_IMAGE_CREATE_SPARSE_ALIASED_BIT |
- VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT |
- VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT |
- VK_IMAGE_CREATE_SPARSE_BINDING_BIT);
- if(enumerator & (~allFlags))
- {
+static bool ValidateEnumerator(VkImageCreateFlagBits const &enumerator) {
+ VkImageCreateFlagBits allFlags = (VkImageCreateFlagBits)(
+ VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT | VK_IMAGE_CREATE_SPARSE_ALIASED_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT |
+ VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT);
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkImageCreateFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkImageCreateFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
- {
+ if (enumerator & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) {
strings.push_back("VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT");
}
- if(enumerator & VK_IMAGE_CREATE_SPARSE_ALIASED_BIT)
- {
+ if (enumerator & VK_IMAGE_CREATE_SPARSE_ALIASED_BIT) {
strings.push_back("VK_IMAGE_CREATE_SPARSE_ALIASED_BIT");
}
- if(enumerator & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT)
- {
+ if (enumerator & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) {
strings.push_back("VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT");
}
- if(enumerator & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT)
- {
+ if (enumerator & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
strings.push_back("VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT");
}
- if(enumerator & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)
- {
+ if (enumerator & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) {
strings.push_back("VK_IMAGE_CREATE_SPARSE_BINDING_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -1083,54 +826,40 @@ std::string EnumeratorString(VkImageCreateFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkColorComponentFlagBits const& enumerator)
-{
- VkColorComponentFlagBits allFlags = (VkColorComponentFlagBits)(VK_COLOR_COMPONENT_A_BIT |
- VK_COLOR_COMPONENT_B_BIT |
- VK_COLOR_COMPONENT_G_BIT |
- VK_COLOR_COMPONENT_R_BIT);
- if(enumerator & (~allFlags))
- {
+static bool ValidateEnumerator(VkColorComponentFlagBits const &enumerator) {
+ VkColorComponentFlagBits allFlags = (VkColorComponentFlagBits)(VK_COLOR_COMPONENT_A_BIT | VK_COLOR_COMPONENT_B_BIT |
+ VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_R_BIT);
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkColorComponentFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkColorComponentFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_COLOR_COMPONENT_A_BIT)
- {
+ if (enumerator & VK_COLOR_COMPONENT_A_BIT) {
strings.push_back("VK_COLOR_COMPONENT_A_BIT");
}
- if(enumerator & VK_COLOR_COMPONENT_B_BIT)
- {
+ if (enumerator & VK_COLOR_COMPONENT_B_BIT) {
strings.push_back("VK_COLOR_COMPONENT_B_BIT");
}
- if(enumerator & VK_COLOR_COMPONENT_G_BIT)
- {
+ if (enumerator & VK_COLOR_COMPONENT_G_BIT) {
strings.push_back("VK_COLOR_COMPONENT_G_BIT");
}
- if(enumerator & VK_COLOR_COMPONENT_R_BIT)
- {
+ if (enumerator & VK_COLOR_COMPONENT_R_BIT) {
strings.push_back("VK_COLOR_COMPONENT_R_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -1138,49 +867,37 @@ std::string EnumeratorString(VkColorComponentFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkPipelineCreateFlagBits const& enumerator)
-{
- VkPipelineCreateFlagBits allFlags = (VkPipelineCreateFlagBits)(VK_PIPELINE_CREATE_DERIVATIVE_BIT |
- VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT |
- VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT);
- if(enumerator & (~allFlags))
- {
+static bool ValidateEnumerator(VkPipelineCreateFlagBits const &enumerator) {
+ VkPipelineCreateFlagBits allFlags = (VkPipelineCreateFlagBits)(
+ VK_PIPELINE_CREATE_DERIVATIVE_BIT | VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT | VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT);
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkPipelineCreateFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkPipelineCreateFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_PIPELINE_CREATE_DERIVATIVE_BIT)
- {
+ if (enumerator & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
strings.push_back("VK_PIPELINE_CREATE_DERIVATIVE_BIT");
}
- if(enumerator & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)
- {
+ if (enumerator & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT) {
strings.push_back("VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT");
}
- if(enumerator & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT)
- {
+ if (enumerator & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT) {
strings.push_back("VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -1188,69 +905,50 @@ std::string EnumeratorString(VkPipelineCreateFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkShaderStageFlagBits const& enumerator)
-{
- VkShaderStageFlagBits allFlags = (VkShaderStageFlagBits)(VK_SHADER_STAGE_ALL |
- VK_SHADER_STAGE_FRAGMENT_BIT |
- VK_SHADER_STAGE_GEOMETRY_BIT |
- VK_SHADER_STAGE_COMPUTE_BIT |
- VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
- VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
- VK_SHADER_STAGE_VERTEX_BIT);
- if(enumerator & (~allFlags))
- {
+static bool ValidateEnumerator(VkShaderStageFlagBits const &enumerator) {
+ VkShaderStageFlagBits allFlags = (VkShaderStageFlagBits)(
+ VK_SHADER_STAGE_ALL | VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_COMPUTE_BIT |
+ VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_VERTEX_BIT);
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkShaderStageFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkShaderStageFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_SHADER_STAGE_ALL)
- {
+ if (enumerator & VK_SHADER_STAGE_ALL) {
strings.push_back("VK_SHADER_STAGE_ALL");
}
- if(enumerator & VK_SHADER_STAGE_FRAGMENT_BIT)
- {
+ if (enumerator & VK_SHADER_STAGE_FRAGMENT_BIT) {
strings.push_back("VK_SHADER_STAGE_FRAGMENT_BIT");
}
- if(enumerator & VK_SHADER_STAGE_GEOMETRY_BIT)
- {
+ if (enumerator & VK_SHADER_STAGE_GEOMETRY_BIT) {
strings.push_back("VK_SHADER_STAGE_GEOMETRY_BIT");
}
- if(enumerator & VK_SHADER_STAGE_COMPUTE_BIT)
- {
+ if (enumerator & VK_SHADER_STAGE_COMPUTE_BIT) {
strings.push_back("VK_SHADER_STAGE_COMPUTE_BIT");
}
- if(enumerator & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
- {
+ if (enumerator & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) {
strings.push_back("VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT");
}
- if(enumerator & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
- {
+ if (enumerator & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
strings.push_back("VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT");
}
- if(enumerator & VK_SHADER_STAGE_VERTEX_BIT)
- {
+ if (enumerator & VK_SHADER_STAGE_VERTEX_BIT) {
strings.push_back("VK_SHADER_STAGE_VERTEX_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -1258,120 +956,85 @@ std::string EnumeratorString(VkShaderStageFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkPipelineStageFlagBits const& enumerator)
-{
+static bool ValidateEnumerator(VkPipelineStageFlagBits const &enumerator) {
VkPipelineStageFlagBits allFlags = (VkPipelineStageFlagBits)(
- VK_PIPELINE_STAGE_ALL_COMMANDS_BIT |
- VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
- VK_PIPELINE_STAGE_HOST_BIT |
- VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
- VK_PIPELINE_STAGE_TRANSFER_BIT |
- VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
- VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
- VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
- VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
- VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
- VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
- VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
- VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
- VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
- VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
- VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
+ VK_PIPELINE_STAGE_ALL_COMMANDS_BIT | VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT | VK_PIPELINE_STAGE_HOST_BIT |
+ VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
+ VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+ VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
+ VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
- if(enumerator & (~allFlags))
- {
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkPipelineStageFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkPipelineStageFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) {
strings.push_back("VK_PIPELINE_STAGE_ALL_COMMANDS_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) {
strings.push_back("VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_HOST_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_HOST_BIT) {
strings.push_back("VK_PIPELINE_STAGE_HOST_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_TRANSFER_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_TRANSFER_BIT) {
strings.push_back("VK_PIPELINE_STAGE_TRANSFER_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT) {
strings.push_back("VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) {
strings.push_back("VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT) {
strings.push_back("VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT) {
strings.push_back("VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT) {
strings.push_back("VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT) {
strings.push_back("VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT) {
strings.push_back("VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_VERTEX_SHADER_BIT) {
strings.push_back("VK_PIPELINE_STAGE_VERTEX_SHADER_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT) {
strings.push_back("VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_VERTEX_INPUT_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_VERTEX_INPUT_BIT) {
strings.push_back("VK_PIPELINE_STAGE_VERTEX_INPUT_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT) {
strings.push_back("VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT) {
strings.push_back("VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT");
}
- if(enumerator & VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT)
- {
+ if (enumerator & VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT) {
strings.push_back("VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -1379,121 +1042,84 @@ std::string EnumeratorString(VkPipelineStageFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkAccessFlagBits const& enumerator)
-{
+static bool ValidateEnumerator(VkAccessFlagBits const &enumerator) {
VkAccessFlagBits allFlags = (VkAccessFlagBits)(
- VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
- VK_ACCESS_INDEX_READ_BIT |
- VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
- VK_ACCESS_UNIFORM_READ_BIT |
- VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
- VK_ACCESS_SHADER_READ_BIT |
- VK_ACCESS_SHADER_WRITE_BIT |
- VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
- VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
- VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
- VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
- VK_ACCESS_TRANSFER_READ_BIT |
- VK_ACCESS_TRANSFER_WRITE_BIT |
- VK_ACCESS_HOST_READ_BIT |
- VK_ACCESS_HOST_WRITE_BIT |
- VK_ACCESS_MEMORY_READ_BIT |
- VK_ACCESS_MEMORY_WRITE_BIT);
-
- if(enumerator & (~allFlags))
- {
+ VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
+ VK_ACCESS_UNIFORM_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT |
+ VK_ACCESS_HOST_READ_BIT | VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT);
+
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkAccessFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkAccessFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_ACCESS_INDIRECT_COMMAND_READ_BIT)
- {
+ if (enumerator & VK_ACCESS_INDIRECT_COMMAND_READ_BIT) {
strings.push_back("VK_ACCESS_INDIRECT_COMMAND_READ_BIT");
}
- if(enumerator & VK_ACCESS_INDEX_READ_BIT)
- {
+ if (enumerator & VK_ACCESS_INDEX_READ_BIT) {
strings.push_back("VK_ACCESS_INDEX_READ_BIT");
}
- if(enumerator & VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT)
- {
+ if (enumerator & VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT) {
strings.push_back("VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT");
}
- if(enumerator & VK_ACCESS_UNIFORM_READ_BIT)
- {
+ if (enumerator & VK_ACCESS_UNIFORM_READ_BIT) {
strings.push_back("VK_ACCESS_UNIFORM_READ_BIT");
}
- if(enumerator & VK_ACCESS_INPUT_ATTACHMENT_READ_BIT)
- {
+ if (enumerator & VK_ACCESS_INPUT_ATTACHMENT_READ_BIT) {
strings.push_back("VK_ACCESS_INPUT_ATTACHMENT_READ_BIT");
}
- if(enumerator & VK_ACCESS_SHADER_READ_BIT)
- {
+ if (enumerator & VK_ACCESS_SHADER_READ_BIT) {
strings.push_back("VK_ACCESS_SHADER_READ_BIT");
}
- if(enumerator & VK_ACCESS_SHADER_WRITE_BIT)
- {
+ if (enumerator & VK_ACCESS_SHADER_WRITE_BIT) {
strings.push_back("VK_ACCESS_SHADER_WRITE_BIT");
}
- if(enumerator & VK_ACCESS_COLOR_ATTACHMENT_READ_BIT)
- {
+ if (enumerator & VK_ACCESS_COLOR_ATTACHMENT_READ_BIT) {
strings.push_back("VK_ACCESS_COLOR_ATTACHMENT_READ_BIT");
}
- if(enumerator & VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT)
- {
+ if (enumerator & VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT) {
strings.push_back("VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT");
}
- if(enumerator & VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT)
- {
+ if (enumerator & VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT) {
strings.push_back("VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT");
}
- if(enumerator & VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT)
- {
+ if (enumerator & VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT) {
strings.push_back("VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT");
}
- if(enumerator & VK_ACCESS_TRANSFER_READ_BIT)
- {
+ if (enumerator & VK_ACCESS_TRANSFER_READ_BIT) {
strings.push_back("VK_ACCESS_TRANSFER_READ_BIT");
}
- if(enumerator & VK_ACCESS_TRANSFER_WRITE_BIT)
- {
+ if (enumerator & VK_ACCESS_TRANSFER_WRITE_BIT) {
strings.push_back("VK_ACCESS_TRANSFER_WRITE_BIT");
}
- if(enumerator & VK_ACCESS_HOST_READ_BIT)
- {
+ if (enumerator & VK_ACCESS_HOST_READ_BIT) {
strings.push_back("VK_ACCESS_HOST_READ_BIT");
}
- if(enumerator & VK_ACCESS_HOST_WRITE_BIT)
- {
+ if (enumerator & VK_ACCESS_HOST_WRITE_BIT) {
strings.push_back("VK_ACCESS_HOST_WRITE_BIT");
}
- if(enumerator & VK_ACCESS_MEMORY_READ_BIT)
- {
+ if (enumerator & VK_ACCESS_MEMORY_READ_BIT) {
strings.push_back("VK_ACCESS_MEMORY_READ_BIT");
}
- if(enumerator & VK_ACCESS_MEMORY_WRITE_BIT)
- {
+ if (enumerator & VK_ACCESS_MEMORY_WRITE_BIT) {
strings.push_back("VK_ACCESS_MEMORY_WRITE_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -1501,44 +1127,34 @@ std::string EnumeratorString(VkAccessFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkCommandPoolCreateFlagBits const& enumerator)
-{
- VkCommandPoolCreateFlagBits allFlags = (VkCommandPoolCreateFlagBits)(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT |
- VK_COMMAND_POOL_CREATE_TRANSIENT_BIT);
- if(enumerator & (~allFlags))
- {
+static bool ValidateEnumerator(VkCommandPoolCreateFlagBits const &enumerator) {
+ VkCommandPoolCreateFlagBits allFlags =
+ (VkCommandPoolCreateFlagBits)(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT | VK_COMMAND_POOL_CREATE_TRANSIENT_BIT);
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkCommandPoolCreateFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkCommandPoolCreateFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)
- {
+ if (enumerator & VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT) {
strings.push_back("VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT");
}
- if(enumerator & VK_COMMAND_POOL_CREATE_TRANSIENT_BIT)
- {
+ if (enumerator & VK_COMMAND_POOL_CREATE_TRANSIENT_BIT) {
strings.push_back("VK_COMMAND_POOL_CREATE_TRANSIENT_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -1546,39 +1162,30 @@ std::string EnumeratorString(VkCommandPoolCreateFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkCommandPoolResetFlagBits const& enumerator)
-{
+static bool ValidateEnumerator(VkCommandPoolResetFlagBits const &enumerator) {
VkCommandPoolResetFlagBits allFlags = (VkCommandPoolResetFlagBits)(VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT);
- if(enumerator & (~allFlags))
- {
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkCommandPoolResetFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkCommandPoolResetFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT)
- {
+ if (enumerator & VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT) {
strings.push_back("VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -1586,49 +1193,38 @@ std::string EnumeratorString(VkCommandPoolResetFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkCommandBufferUsageFlags const& enumerator)
-{
- VkCommandBufferUsageFlags allFlags = (VkCommandBufferUsageFlags)(VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT |
- VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
- VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT);
- if(enumerator & (~allFlags))
- {
+static bool ValidateEnumerator(VkCommandBufferUsageFlags const &enumerator) {
+ VkCommandBufferUsageFlags allFlags =
+ (VkCommandBufferUsageFlags)(VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT |
+ VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT);
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkCommandBufferUsageFlags const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkCommandBufferUsageFlags const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)
- {
+ if (enumerator & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
strings.push_back("VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT");
}
- if(enumerator & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT)
- {
+ if (enumerator & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) {
strings.push_back("VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT");
}
- if(enumerator & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)
- {
+ if (enumerator & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
strings.push_back("VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -1636,39 +1232,30 @@ std::string EnumeratorString(VkCommandBufferUsageFlags const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkCommandBufferResetFlagBits const& enumerator)
-{
+static bool ValidateEnumerator(VkCommandBufferResetFlagBits const &enumerator) {
VkCommandBufferResetFlagBits allFlags = (VkCommandBufferResetFlagBits)(VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT);
- if(enumerator & (~allFlags))
- {
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkCommandBufferResetFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkCommandBufferResetFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT)
- {
+ if (enumerator & VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT) {
strings.push_back("VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -1676,54 +1263,40 @@ std::string EnumeratorString(VkCommandBufferResetFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkImageAspectFlagBits const& enumerator)
-{
- VkImageAspectFlagBits allFlags = (VkImageAspectFlagBits)(VK_IMAGE_ASPECT_METADATA_BIT |
- VK_IMAGE_ASPECT_STENCIL_BIT |
- VK_IMAGE_ASPECT_DEPTH_BIT |
- VK_IMAGE_ASPECT_COLOR_BIT);
- if(enumerator & (~allFlags))
- {
+static bool ValidateEnumerator(VkImageAspectFlagBits const &enumerator) {
+ VkImageAspectFlagBits allFlags = (VkImageAspectFlagBits)(VK_IMAGE_ASPECT_METADATA_BIT | VK_IMAGE_ASPECT_STENCIL_BIT |
+ VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_COLOR_BIT);
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkImageAspectFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkImageAspectFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_IMAGE_ASPECT_METADATA_BIT)
- {
+ if (enumerator & VK_IMAGE_ASPECT_METADATA_BIT) {
strings.push_back("VK_IMAGE_ASPECT_METADATA_BIT");
}
- if(enumerator & VK_IMAGE_ASPECT_STENCIL_BIT)
- {
+ if (enumerator & VK_IMAGE_ASPECT_STENCIL_BIT) {
strings.push_back("VK_IMAGE_ASPECT_STENCIL_BIT");
}
- if(enumerator & VK_IMAGE_ASPECT_DEPTH_BIT)
- {
+ if (enumerator & VK_IMAGE_ASPECT_DEPTH_BIT) {
strings.push_back("VK_IMAGE_ASPECT_DEPTH_BIT");
}
- if(enumerator & VK_IMAGE_ASPECT_COLOR_BIT)
- {
+ if (enumerator & VK_IMAGE_ASPECT_COLOR_BIT) {
strings.push_back("VK_IMAGE_ASPECT_COLOR_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -1731,39 +1304,30 @@ std::string EnumeratorString(VkImageAspectFlagBits const& enumerator)
return enumeratorString;
}
-static
-bool ValidateEnumerator(VkQueryControlFlagBits const& enumerator)
-{
+static bool ValidateEnumerator(VkQueryControlFlagBits const &enumerator) {
VkQueryControlFlagBits allFlags = (VkQueryControlFlagBits)(VK_QUERY_CONTROL_PRECISE_BIT);
- if(enumerator & (~allFlags))
- {
+ if (enumerator & (~allFlags)) {
return false;
}
return true;
}
-static
-std::string EnumeratorString(VkQueryControlFlagBits const& enumerator)
-{
- if(!ValidateEnumerator(enumerator))
- {
+static std::string EnumeratorString(VkQueryControlFlagBits const &enumerator) {
+ if (!ValidateEnumerator(enumerator)) {
return "unrecognized enumerator";
}
std::vector<std::string> strings;
- if(enumerator & VK_QUERY_CONTROL_PRECISE_BIT)
- {
+ if (enumerator & VK_QUERY_CONTROL_PRECISE_BIT) {
strings.push_back("VK_QUERY_CONTROL_PRECISE_BIT");
}
std::string enumeratorString;
- for(auto const& string : strings)
- {
+ for (auto const &string : strings) {
enumeratorString += string;
- if(string != strings.back())
- {
+ if (string != strings.back()) {
enumeratorString += '|';
}
}
@@ -1773,9 +1337,7 @@ std::string EnumeratorString(VkQueryControlFlagBits const& enumerator)
static const int MaxParamCheckerStringLength = 256;
-static
-VkBool32 validate_string(layer_data *my_data, const char *apiName, const char *stringName, const char *validateString)
-{
+static VkBool32 validate_string(layer_data *my_data, const char *apiName, const char *stringName, const char *validateString) {
VkBool32 skipCall = VK_FALSE;
VkStringErrorFlags result = vk_string_validate(MaxParamCheckerStringLength, validateString);
@@ -1783,29 +1345,23 @@ VkBool32 validate_string(layer_data *my_data, const char *apiName, const char *s
if (result == VK_STRING_ERROR_NONE) {
return skipCall;
} else if (result & VK_STRING_ERROR_LENGTH) {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "%s: string %s exceeds max length %d", apiName, stringName, MaxParamCheckerStringLength);
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
+ "PARAMCHECK", "%s: string %s exceeds max length %d", apiName, stringName, MaxParamCheckerStringLength);
} else if (result & VK_STRING_ERROR_BAD_DATA) {
- skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "%s: string %s contains invalid characters or is badly formed", apiName, stringName);
+ skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
+ "PARAMCHECK", "%s: string %s contains invalid characters or is badly formed", apiName, stringName);
}
return skipCall;
}
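
validate_string() above delegates the actual scan to vk_string_validate(), which reports either a length violation or bad data. A simplified, self-contained stand-in for that kind of bounded scan, assuming a deliberately strict "printable ASCII only" rule for illustration (the real helper's rules live in vk_layer_utils and may be more permissive, e.g. UTF-8 aware; CheckString is a hypothetical name):

#include <cstddef>

enum StringCheckResult { kStringOk, kStringTooLong, kStringBadData };

// Scan at most maxLength bytes looking for a terminator; reject control characters.
static StringCheckResult CheckString(const char *s, size_t maxLength) {
    for (size_t i = 0; i < maxLength; ++i) {
        unsigned char c = static_cast<unsigned char>(s[i]);
        if (c == '\0')
            return kStringOk;          // terminated within the allowed length
        if (c < 0x20 || c == 0x7f)
            return kStringBadData;     // control character: badly formed for this sketch
    }
    return kStringTooLong;             // no terminator within maxLength bytes
}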
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(
- const VkInstanceCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkInstance* pInstance)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkLayerInstanceCreateInfo *chain_info =
- get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
+ VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
- PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr =
- chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
- PFN_vkCreateInstance fpCreateInstance =
- (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
+ PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
+ PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
if (fpCreateInstance == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -1818,48 +1374,36 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(
return result;
}
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
- VkLayerInstanceDispatchTable *pTable = initInstanceTable(
- *pInstance, fpGetInstanceProcAddr, pc_instance_table_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
+ VkLayerInstanceDispatchTable *pTable = initInstanceTable(*pInstance, fpGetInstanceProcAddr, pc_instance_table_map);
- my_data->report_data = debug_report_create_instance(
- pTable, *pInstance, pCreateInfo->enabledExtensionCount,
- pCreateInfo->ppEnabledExtensionNames);
+ my_data->report_data =
+ debug_report_create_instance(pTable, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
InitParamChecker(my_data, pAllocator);
// Ordinarily we'd check these before calling down the chain, but none of the layer
// support is in place until now, if we survive we can report the issue now.
- layer_data *my_instance_data =
- get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
+ layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
- param_check_vkCreateInstance(my_instance_data->report_data, pCreateInfo,
- pAllocator, pInstance);
+ param_check_vkCreateInstance(my_instance_data->report_data, pCreateInfo, pAllocator, pInstance);
if (pCreateInfo->pApplicationInfo) {
if (pCreateInfo->pApplicationInfo->pApplicationName) {
- validate_string(
- my_instance_data, "vkCreateInstance()",
- "VkInstanceCreateInfo->VkApplicationInfo->pApplicationName",
- pCreateInfo->pApplicationInfo->pApplicationName);
+ validate_string(my_instance_data, "vkCreateInstance()", "VkInstanceCreateInfo->VkApplicationInfo->pApplicationName",
+ pCreateInfo->pApplicationInfo->pApplicationName);
}
if (pCreateInfo->pApplicationInfo->pEngineName) {
- validate_string(
- my_instance_data, "vkCreateInstance()",
- "VkInstanceCreateInfo->VkApplicationInfo->pEngineName",
- pCreateInfo->pApplicationInfo->pEngineName);
+ validate_string(my_instance_data, "vkCreateInstance()", "VkInstanceCreateInfo->VkApplicationInfo->pEngineName",
+ pCreateInfo->pApplicationInfo->pEngineName);
}
}
return result;
}
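
The vkCreateInstance hunk above also shows the ordering constraint the in-code comment mentions: the layer has to call down the chain before it can set up its own report_data, so parameter problems in the create info can only be logged after the instance exists. A condensed sketch of that ordering, with the chain-info plumbing abstracted away (NextLayerCreateInstance is a stand-in for the pointer fetched via pfnNextGetInstanceProcAddr, not a real symbol):

#include <vulkan/vulkan.h>

static VkResult InterceptCreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                                        const VkAllocationCallbacks *pAllocator,
                                        VkInstance *pInstance,
                                        PFN_vkCreateInstance NextLayerCreateInstance) {
    // 1. Call down the chain first; layer state needs a real VkInstance handle.
    VkResult result = NextLayerCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS)
        return result;

    // 2. Only now can the dispatch table, debug reporting, and parameter checks
    //    for pCreateInfo be set up and any issues reported.
    return result;
}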
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(
- VkInstance instance,
- const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
// Grab the key before the instance is destroyed.
dispatch_key key = get_dispatch_key(instance);
VkBool32 skipCall = VK_FALSE;
@@ -1869,16 +1413,13 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(
skipCall |= param_check_vkDestroyInstance(my_data->report_data, pAllocator);
if (skipCall == VK_FALSE) {
- VkLayerInstanceDispatchTable *pTable =
- get_dispatch_table(pc_instance_table_map, instance);
+ VkLayerInstanceDispatchTable *pTable = get_dispatch_table(pc_instance_table_map, instance);
pTable->DestroyInstance(instance, pAllocator);
// Clean up logging callback, if any
while (my_data->logging_callback.size() > 0) {
- VkDebugReportCallbackEXT callback =
- my_data->logging_callback.back();
- layer_destroy_msg_callback(my_data->report_data, callback,
- pAllocator);
+ VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
+ layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
my_data->logging_callback.pop_back();
}
@@ -1889,39 +1430,30 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(
}
}
-bool PostEnumeratePhysicalDevices(
- VkInstance instance,
- uint32_t* pPhysicalDeviceCount,
- VkPhysicalDevice* pPhysicalDevices,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostEnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices,
+ VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkEnumeratePhysicalDevices parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mid(instance), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mid(instance), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices(
- VkInstance instance,
- uint32_t* pPhysicalDeviceCount,
- VkPhysicalDevice* pPhysicalDevices)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkEnumeratePhysicalDevices(
- my_data->report_data,
- pPhysicalDeviceCount,
- pPhysicalDevices);
+ skipCall |= param_check_vkEnumeratePhysicalDevices(my_data->report_data, pPhysicalDeviceCount, pPhysicalDevices);
if (skipCall == VK_FALSE) {
- result = get_dispatch_table(pc_instance_table_map, instance)->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
+ result = get_dispatch_table(pc_instance_table_map, instance)
+ ->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
PostEnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices, result);
}
@@ -1929,170 +1461,125 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures(
- VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceFeatures* pFeatures)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures *pFeatures) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkGetPhysicalDeviceFeatures(
- my_data->report_data,
- pFeatures);
+ skipCall |= param_check_vkGetPhysicalDeviceFeatures(my_data->report_data, pFeatures);
if (skipCall == VK_FALSE) {
get_dispatch_table(pc_instance_table_map, physicalDevice)->GetPhysicalDeviceFeatures(physicalDevice, pFeatures);
}
}
-bool PostGetPhysicalDeviceFormatProperties(
- VkPhysicalDevice physicalDevice,
- VkFormat format,
- VkFormatProperties* pFormatProperties)
-{
+bool PostGetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format,
+ VkFormatProperties *pFormatProperties) {
- if(format < VK_FORMAT_BEGIN_RANGE ||
- format > VK_FORMAT_END_RANGE)
- {
+ if (format < VK_FORMAT_BEGIN_RANGE || format > VK_FORMAT_END_RANGE) {
log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkGetPhysicalDeviceFormatProperties parameter, VkFormat format, is an unrecognized enumerator");
+ "vkGetPhysicalDeviceFormatProperties parameter, VkFormat format, is an unrecognized enumerator");
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties(
- VkPhysicalDevice physicalDevice,
- VkFormat format,
- VkFormatProperties* pFormatProperties)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties *pFormatProperties) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
assert(my_data != NULL);
-
- skipCall |= param_check_vkGetPhysicalDeviceFormatProperties(
- my_data->report_data,
- format,
- pFormatProperties);
+ skipCall |= param_check_vkGetPhysicalDeviceFormatProperties(my_data->report_data, format, pFormatProperties);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_instance_table_map, physicalDevice)->GetPhysicalDeviceFormatProperties(physicalDevice, format, pFormatProperties);
+ get_dispatch_table(pc_instance_table_map, physicalDevice)
+ ->GetPhysicalDeviceFormatProperties(physicalDevice, format, pFormatProperties);
PostGetPhysicalDeviceFormatProperties(physicalDevice, format, pFormatProperties);
}
}
-bool PostGetPhysicalDeviceImageFormatProperties(
- VkPhysicalDevice physicalDevice,
- VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags,
- VkImageFormatProperties* pImageFormatProperties,
- VkResult result)
-{
+bool PostGetPhysicalDeviceImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type,
+ VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags,
+ VkImageFormatProperties *pImageFormatProperties, VkResult result) {
- if(format < VK_FORMAT_BEGIN_RANGE ||
- format > VK_FORMAT_END_RANGE)
- {
+ if (format < VK_FORMAT_BEGIN_RANGE || format > VK_FORMAT_END_RANGE) {
log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkGetPhysicalDeviceImageFormatProperties parameter, VkFormat format, is an unrecognized enumerator");
+ "vkGetPhysicalDeviceImageFormatProperties parameter, VkFormat format, is an unrecognized enumerator");
return false;
}
- if(type < VK_IMAGE_TYPE_BEGIN_RANGE ||
- type > VK_IMAGE_TYPE_END_RANGE)
- {
+ if (type < VK_IMAGE_TYPE_BEGIN_RANGE || type > VK_IMAGE_TYPE_END_RANGE) {
log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkGetPhysicalDeviceImageFormatProperties parameter, VkImageType type, is an unrecognized enumerator");
+ "vkGetPhysicalDeviceImageFormatProperties parameter, VkImageType type, is an unrecognized enumerator");
return false;
}
- if(tiling < VK_IMAGE_TILING_BEGIN_RANGE ||
- tiling > VK_IMAGE_TILING_END_RANGE)
- {
+ if (tiling < VK_IMAGE_TILING_BEGIN_RANGE || tiling > VK_IMAGE_TILING_END_RANGE) {
log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkGetPhysicalDeviceImageFormatProperties parameter, VkImageTiling tiling, is an unrecognized enumerator");
+ "vkGetPhysicalDeviceImageFormatProperties parameter, VkImageTiling tiling, is an unrecognized enumerator");
return false;
}
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkGetPhysicalDeviceImageFormatProperties parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "%s", reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties(
- VkPhysicalDevice physicalDevice,
- VkFormat format,
- VkImageType type,
- VkImageTiling tiling,
- VkImageUsageFlags usage,
- VkImageCreateFlags flags,
- VkImageFormatProperties* pImageFormatProperties)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkGetPhysicalDeviceImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling,
+ VkImageUsageFlags usage, VkImageCreateFlags flags,
+ VkImageFormatProperties *pImageFormatProperties) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkGetPhysicalDeviceImageFormatProperties(
- my_data->report_data,
- format,
- type,
- tiling,
- usage,
- flags,
- pImageFormatProperties);
+ skipCall |= param_check_vkGetPhysicalDeviceImageFormatProperties(my_data->report_data, format, type, tiling, usage, flags,
+ pImageFormatProperties);
if (skipCall == VK_FALSE) {
- result = get_dispatch_table(pc_instance_table_map, physicalDevice)->GetPhysicalDeviceImageFormatProperties(physicalDevice, format, type, tiling, usage, flags, pImageFormatProperties);
+ result = get_dispatch_table(pc_instance_table_map, physicalDevice)
+ ->GetPhysicalDeviceImageFormatProperties(physicalDevice, format, type, tiling, usage, flags,
+ pImageFormatProperties);
- PostGetPhysicalDeviceImageFormatProperties(physicalDevice, format, type, tiling, usage, flags, pImageFormatProperties, result);
+ PostGetPhysicalDeviceImageFormatProperties(physicalDevice, format, type, tiling, usage, flags, pImageFormatProperties,
+ result);
}
return result;
}
-bool PostGetPhysicalDeviceProperties(
- VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceProperties* pProperties)
-{
-
- if(pProperties != nullptr)
- {
- if(pProperties->deviceType < VK_PHYSICAL_DEVICE_TYPE_BEGIN_RANGE ||
- pProperties->deviceType > VK_PHYSICAL_DEVICE_TYPE_END_RANGE)
- {
- log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkGetPhysicalDeviceProperties parameter, VkPhysicalDeviceType pProperties->deviceType, is an unrecognized enumerator");
- return false;
- }
+bool PostGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties *pProperties) {
+ if (pProperties != nullptr) {
+ if (pProperties->deviceType < VK_PHYSICAL_DEVICE_TYPE_BEGIN_RANGE ||
+ pProperties->deviceType > VK_PHYSICAL_DEVICE_TYPE_END_RANGE) {
+ log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkGetPhysicalDeviceProperties parameter, VkPhysicalDeviceType pProperties->deviceType, is an unrecognized "
+ "enumerator");
+ return false;
+ }
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties(
- VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceProperties* pProperties)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties *pProperties) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkGetPhysicalDeviceProperties(
- my_data->report_data,
- pProperties);
+ skipCall |= param_check_vkGetPhysicalDeviceProperties(my_data->report_data, pProperties);
if (skipCall == VK_FALSE) {
get_dispatch_table(pc_instance_table_map, physicalDevice)->GetPhysicalDeviceProperties(physicalDevice, pProperties);
@@ -2101,72 +1588,77 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties(
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties(
- VkPhysicalDevice physicalDevice,
- uint32_t* pQueueFamilyPropertyCount,
- VkQueueFamilyProperties* pQueueFamilyProperties)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties *pQueueFamilyProperties) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkGetPhysicalDeviceQueueFamilyProperties(
- my_data->report_data,
- pQueueFamilyPropertyCount,
- pQueueFamilyProperties);
+ skipCall |= param_check_vkGetPhysicalDeviceQueueFamilyProperties(my_data->report_data, pQueueFamilyPropertyCount,
+ pQueueFamilyProperties);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_instance_table_map, physicalDevice)->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);
+ get_dispatch_table(pc_instance_table_map, physicalDevice)
+ ->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties(
- VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceMemoryProperties* pMemoryProperties)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkGetPhysicalDeviceMemoryProperties(
- my_data->report_data,
- pMemoryProperties);
+ skipCall |= param_check_vkGetPhysicalDeviceMemoryProperties(my_data->report_data, pMemoryProperties);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_instance_table_map, physicalDevice)->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
+ get_dispatch_table(pc_instance_table_map, physicalDevice)
+ ->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
}
}
-void validateDeviceCreateInfo(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const std::vector<VkQueueFamilyProperties> properties) {
+void validateDeviceCreateInfo(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
+ const std::vector<VkQueueFamilyProperties> properties) {
std::unordered_set<uint32_t> set;
for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; ++i) {
if (set.count(pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex)) {
log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "VkDeviceCreateInfo parameter, uint32_t pQueueCreateInfos[%d]->queueFamilyIndex, is not unique within this structure.", i);
+ "VkDeviceCreateInfo parameter, uint32_t pQueueCreateInfos[%d]->queueFamilyIndex, is not unique within this "
+ "structure.",
+ i);
} else {
set.insert(pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex);
}
if (pCreateInfo->pQueueCreateInfos[i].queueCount == 0) {
log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "VkDeviceCreateInfo parameter, uint32_t pQueueCreateInfos[%d]->queueCount, cannot be zero.", i);
+ "VkDeviceCreateInfo parameter, uint32_t pQueueCreateInfos[%d]->queueCount, cannot be zero.", i);
}
for (uint32_t j = 0; j < pCreateInfo->pQueueCreateInfos[i].queueCount; ++j) {
- if (pCreateInfo->pQueueCreateInfos[i].pQueuePriorities[j] < 0.f || pCreateInfo->pQueueCreateInfos[i].pQueuePriorities[j] > 1.f) {
- log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "VkDeviceCreateInfo parameter, uint32_t pQueueCreateInfos[%d]->pQueuePriorities[%d], must be between 0 and 1. Actual value is %f", i, j, pCreateInfo->pQueueCreateInfos[i].pQueuePriorities[j]);
+ if (pCreateInfo->pQueueCreateInfos[i].pQueuePriorities[j] < 0.f ||
+ pCreateInfo->pQueueCreateInfos[i].pQueuePriorities[j] > 1.f) {
+ log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
+ "PARAMCHECK", "VkDeviceCreateInfo parameter, uint32_t pQueueCreateInfos[%d]->pQueuePriorities[%d], must be "
+ "between 0 and 1. Actual value is %f",
+ i, j, pCreateInfo->pQueueCreateInfos[i].pQueuePriorities[j]);
}
}
if (pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex >= properties.size()) {
log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "VkDeviceCreateInfo parameter, uint32_t pQueueCreateInfos[%d]->queueFamilyIndex cannot be more than the number of queue families.", i);
- } else if (pCreateInfo->pQueueCreateInfos[i].queueCount > properties[pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex].queueCount) {
+ "VkDeviceCreateInfo parameter, uint32_t pQueueCreateInfos[%d]->queueFamilyIndex cannot be more than the number "
+ "of queue families.",
+ i);
+ } else if (pCreateInfo->pQueueCreateInfos[i].queueCount >
+ properties[pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex].queueCount) {
log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "VkDeviceCreateInfo parameter, uint32_t pQueueCreateInfos[%d]->queueCount cannot be more than the number of queues for the given family index.", i);
+ "VkDeviceCreateInfo parameter, uint32_t pQueueCreateInfos[%d]->queueCount cannot be more than the number of "
+ "queues for the given family index.",
+ i);
}
}
}
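
validateDeviceCreateInfo() interleaves its checks with log_msg() calls. A hypothetical pure-function version of the same rules, which returns the violated rules instead of logging them and is therefore easy to unit test in isolation (CheckQueueCreateInfos is an illustration-only name, not part of the layer):

#include <vulkan/vulkan.h>
#include <string>
#include <unordered_set>
#include <vector>

static std::vector<std::string> CheckQueueCreateInfos(const std::vector<VkDeviceQueueCreateInfo> &infos,
                                                      const std::vector<VkQueueFamilyProperties> &families) {
    std::vector<std::string> errors;
    std::unordered_set<uint32_t> seen;
    for (auto const &info : infos) {
        if (!seen.insert(info.queueFamilyIndex).second)
            errors.push_back("queueFamilyIndex is not unique within pQueueCreateInfos");
        if (info.queueCount == 0)
            errors.push_back("queueCount cannot be zero");
        for (uint32_t j = 0; j < info.queueCount; ++j) {
            float p = info.pQueuePriorities[j];
            if (p < 0.f || p > 1.f)
                errors.push_back("pQueuePriorities entries must be between 0 and 1");
        }
        if (info.queueFamilyIndex >= families.size())
            errors.push_back("queueFamilyIndex exceeds the number of queue families");
        else if (info.queueCount > families[info.queueFamilyIndex].queueCount)
            errors.push_back("queueCount exceeds the queues available in that family");
    }
    return errors;
}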
-void storeCreateDeviceData(VkDevice device, const VkDeviceCreateInfo* pCreateInfo) {
+void storeCreateDeviceData(VkDevice device, const VkDeviceCreateInfo *pCreateInfo) {
layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; ++i) {
my_device_data->queueFamilyIndexMap.insert(
@@ -2174,12 +1666,9 @@ void storeCreateDeviceData(VkDevice device, const VkDeviceCreateInfo* pCreateInf
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(
- VkPhysicalDevice physicalDevice,
- const VkDeviceCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDevice* pDevice)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice physicalDevice,
+ const VkDeviceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
/*
* NOTE: We do not validate physicalDevice or any dispatchable
* object as the first parameter. We couldn't get here if it was wrong!
@@ -2189,8 +1678,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(
VkBool32 skipCall = VK_FALSE;
layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
- skipCall |= param_check_vkCreateDevice(my_instance_data->report_data,
- pCreateInfo, pAllocator, pDevice);
+ skipCall |= param_check_vkCreateDevice(my_instance_data->report_data, pCreateInfo, pAllocator, pDevice);
if ((pCreateInfo->enabledLayerCount > 0) && (pCreateInfo->ppEnabledLayerNames != NULL)) {
for (auto i = 0; i < pCreateInfo->enabledLayerCount; i++) {
@@ -2211,7 +1699,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
- PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice) fpGetInstanceProcAddr(NULL, "vkCreateDevice");
+ PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
if (fpCreateDevice == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -2229,9 +1717,11 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(
initDeviceTable(*pDevice, fpGetDeviceProcAddr, pc_device_table_map);
uint32_t count;
- get_dispatch_table(pc_instance_table_map, physicalDevice)->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count, nullptr);
+ get_dispatch_table(pc_instance_table_map, physicalDevice)
+ ->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count, nullptr);
std::vector<VkQueueFamilyProperties> properties(count);
- get_dispatch_table(pc_instance_table_map, physicalDevice)->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count, &properties[0]);
+ get_dispatch_table(pc_instance_table_map, physicalDevice)
+ ->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, &count, &properties[0]);
validateDeviceCreateInfo(physicalDevice, pCreateInfo, properties);
storeCreateDeviceData(*pDevice, pCreateInfo);
@@ -2240,10 +1730,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(
- VkDevice device,
- const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
dispatch_key key = get_dispatch_key(device);
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
@@ -2258,47 +1745,37 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(
fprintf(stderr, "Device: %p, key: %p\n", device, key);
#endif
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyDevice(device, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroyDevice(device, pAllocator);
pc_device_table_map.erase(key);
}
}
-bool PreGetDeviceQueue(
- VkDevice device,
- uint32_t queueFamilyIndex,
- uint32_t queueIndex)
-{
+bool PreGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex) {
layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
auto queue_data = my_device_data->queueFamilyIndexMap.find(queueFamilyIndex);
if (queue_data == my_device_data->queueFamilyIndexMap.end()) {
log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "VkGetDeviceQueue parameter, uint32_t queueFamilyIndex %d, must have been given when the device was created.", queueFamilyIndex);
+ "VkGetDeviceQueue parameter, uint32_t queueFamilyIndex %d, must have been given when the device was created.",
+ queueFamilyIndex);
return false;
}
if (queue_data->second <= queueIndex) {
log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "VkGetDeviceQueue parameter, uint32_t queueIndex %d, must be less than the number of queues given when the device was created.", queueIndex);
+ "VkGetDeviceQueue parameter, uint32_t queueIndex %d, must be less than the number of queues given when the device "
+ "was created.",
+ queueIndex);
return false;
}
return true;
}
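
The bookkeeping behind PreGetDeviceQueue() is a map from queue family index to the queue count requested at vkCreateDevice time (queueFamilyIndexMap above). Reduced to a predicate, and assuming that map layout, the check might read (QueueRequestIsValid is a hypothetical helper name):

#include <cstdint>
#include <unordered_map>

static bool QueueRequestIsValid(const std::unordered_map<uint32_t, uint32_t> &queueFamilyIndexMap,
                                uint32_t queueFamilyIndex, uint32_t queueIndex) {
    auto it = queueFamilyIndexMap.find(queueFamilyIndex);
    if (it == queueFamilyIndexMap.end())
        return false;                   // family was never requested at device creation
    return queueIndex < it->second;     // index must be below the requested queue count
}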
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(
- VkDevice device,
- uint32_t queueFamilyIndex,
- uint32_t queueIndex,
- VkQueue* pQueue)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkGetDeviceQueue(
- my_data->report_data,
- queueFamilyIndex,
- queueIndex,
- pQueue);
+ skipCall |= param_check_vkGetDeviceQueue(my_data->report_data, queueFamilyIndex, queueIndex, pQueue);
if (skipCall == VK_FALSE) {
PreGetDeviceQueue(device, queueFamilyIndex, queueIndex);
@@ -2307,38 +1784,25 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(
}
}
-bool PostQueueSubmit(
- VkQueue queue,
- uint32_t commandBufferCount,
- VkFence fence,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostQueueSubmit(VkQueue queue, uint32_t commandBufferCount, VkFence fence, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkQueueSubmit parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
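
PostQueueSubmit() and the Post*() helpers that follow share one convention: any VkResult below VK_SUCCESS (i.e. any negative error code) is reported, while success and informational codes pass through. A generic form of that check, assuming stderr output in place of the layer's log_msg()/EnumeratorString() plumbing (ReportFailure is an illustration-only name):

#include <vulkan/vulkan.h>
#include <cstdio>
#include <string>

static bool ReportFailure(const char *apiName, VkResult result) {
    if (result >= VK_SUCCESS)          // VK_SUCCESS and positive status codes are not errors
        return true;
    std::string reason = std::string(apiName) + " returned VkResult " + std::to_string(result);
    std::fprintf(stderr, "PARAMCHECK: %s\n", reason.c_str());
    return false;
}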
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(
- VkQueue queue,
- uint32_t submitCount,
- const VkSubmitInfo* pSubmits,
- VkFence fence)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkQueueSubmit(
- my_data->report_data,
- submitCount,
- pSubmits,
- fence);
+ skipCall |= param_check_vkQueueSubmit(my_data->report_data, submitCount, pSubmits, fence);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, queue)->QueueSubmit(queue, submitCount, pSubmits, fence);
@@ -2349,24 +1813,19 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit(
return result;
}
-bool PostQueueWaitIdle(
- VkQueue queue,
- VkResult result)
-{
+bool PostQueueWaitIdle(VkQueue queue, VkResult result) {
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkQueueWaitIdle parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(
- VkQueue queue)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) {
VkResult result = get_dispatch_table(pc_device_table_map, queue)->QueueWaitIdle(queue);
PostQueueWaitIdle(queue, result);
@@ -2374,24 +1833,19 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(
return result;
}
-bool PostDeviceWaitIdle(
- VkDevice device,
- VkResult result)
-{
+bool PostDeviceWaitIdle(VkDevice device, VkResult result) {
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkDeviceWaitIdle parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(
- VkDevice device)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) {
VkResult result = get_dispatch_table(pc_device_table_map, device)->DeviceWaitIdle(device);
PostDeviceWaitIdle(device, result);
@@ -2399,37 +1853,25 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(
return result;
}
-bool PostAllocateMemory(
- VkDevice device,
- VkDeviceMemory* pMemory,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostAllocateMemory(VkDevice device, VkDeviceMemory *pMemory, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkAllocateMemory parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(
- VkDevice device,
- const VkMemoryAllocateInfo* pAllocateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDeviceMemory* pMemory)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkAllocateMemory(
- my_data->report_data,
- pAllocateInfo,
- pAllocator,
- pMemory);
+ skipCall |= param_check_vkAllocateMemory(my_data->report_data, pAllocateInfo, pAllocator, pMemory);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, device)->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
@@ -2441,61 +1883,38 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkFreeMemory(VkDevice device, VkDeviceMemory memory,
- const VkAllocationCallbacks *pAllocator) {
+vkFreeMemory(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |=
- param_check_vkFreeMemory(my_data->report_data, memory, pAllocator);
+ skipCall |= param_check_vkFreeMemory(my_data->report_data, memory, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->FreeMemory(device, memory, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->FreeMemory(device, memory, pAllocator);
}
}
-bool PostMapMemory(
- VkDevice device,
- VkDeviceMemory mem,
- VkDeviceSize offset,
- VkDeviceSize size,
- VkMemoryMapFlags flags,
- void** ppData,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags,
+ void **ppData, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkMapMemory parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory(
- VkDevice device,
- VkDeviceMemory memory,
- VkDeviceSize offset,
- VkDeviceSize size,
- VkMemoryMapFlags flags,
- void** ppData)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkMapMemory(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void **ppData) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkMapMemory(
- my_data->report_data,
- memory,
- offset,
- size,
- flags,
- ppData);
+ skipCall |= param_check_vkMapMemory(my_data->report_data, memory, offset, size, flags, ppData);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, device)->MapMemory(device, memory, offset, size, flags, ppData);
@@ -2506,37 +1925,26 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory(
return result;
}
-bool PostFlushMappedMemoryRanges(
- VkDevice device,
- uint32_t memoryRangeCount,
- VkResult result)
-{
-
+bool PostFlushMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount, VkResult result) {
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkFlushMappedMemoryRanges parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges(
- VkDevice device,
- uint32_t memoryRangeCount,
- const VkMappedMemoryRange* pMemoryRanges)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkFlushMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange *pMemoryRanges) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkFlushMappedMemoryRanges(
- my_data->report_data,
- memoryRangeCount,
- pMemoryRanges);
+ skipCall |= param_check_vkFlushMappedMemoryRanges(my_data->report_data, memoryRangeCount, pMemoryRanges);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, device)->FlushMappedMemoryRanges(device, memoryRangeCount, pMemoryRanges);
@@ -2547,40 +1955,30 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges(
return result;
}
-bool PostInvalidateMappedMemoryRanges(
- VkDevice device,
- uint32_t memoryRangeCount,
- VkResult result)
-{
-
+bool PostInvalidateMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount, VkResult result) {
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkInvalidateMappedMemoryRanges parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges(
- VkDevice device,
- uint32_t memoryRangeCount,
- const VkMappedMemoryRange* pMemoryRanges)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange *pMemoryRanges) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkInvalidateMappedMemoryRanges(
- my_data->report_data,
- memoryRangeCount,
- pMemoryRanges);
+ skipCall |= param_check_vkInvalidateMappedMemoryRanges(my_data->report_data, memoryRangeCount, pMemoryRanges);
if (skipCall == VK_FALSE) {
- result = get_dispatch_table(pc_device_table_map, device)->InvalidateMappedMemoryRanges(device, memoryRangeCount, pMemoryRanges);
+ result =
+ get_dispatch_table(pc_device_table_map, device)->InvalidateMappedMemoryRanges(device, memoryRangeCount, pMemoryRanges);
PostInvalidateMappedMemoryRanges(device, memoryRangeCount, result);
}
@@ -2588,52 +1986,33 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment(
- VkDevice device,
- VkDeviceMemory memory,
- VkDeviceSize* pCommittedMemoryInBytes)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory memory, VkDeviceSize *pCommittedMemoryInBytes) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkGetDeviceMemoryCommitment(
- my_data->report_data,
- memory,
- pCommittedMemoryInBytes);
+ skipCall |= param_check_vkGetDeviceMemoryCommitment(my_data->report_data, memory, pCommittedMemoryInBytes);
if (skipCall == VK_FALSE) {
get_dispatch_table(pc_device_table_map, device)->GetDeviceMemoryCommitment(device, memory, pCommittedMemoryInBytes);
}
}
-bool PostBindBufferMemory(
- VkDevice device,
- VkBuffer buffer,
- VkDeviceMemory mem,
- VkDeviceSize memoryOffset,
- VkResult result)
-{
-
-
+bool PostBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset, VkResult result) {
-
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkBindBufferMemory parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory(
- VkDevice device,
- VkBuffer buffer,
- VkDeviceMemory mem,
- VkDeviceSize memoryOffset)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
VkResult result = get_dispatch_table(pc_device_table_map, device)->BindBufferMemory(device, buffer, mem, memoryOffset);
PostBindBufferMemory(device, buffer, mem, memoryOffset, result);
@@ -2641,33 +2020,20 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory(
return result;
}
-bool PostBindImageMemory(
- VkDevice device,
- VkImage image,
- VkDeviceMemory mem,
- VkDeviceSize memoryOffset,
- VkResult result)
-{
-
-
+bool PostBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset, VkResult result) {
-
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkBindImageMemory parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(
- VkDevice device,
- VkImage image,
- VkDeviceMemory mem,
- VkDeviceSize memoryOffset)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
VkResult result = get_dispatch_table(pc_device_table_map, device)->BindImageMemory(device, image, mem, memoryOffset);
PostBindImageMemory(device, image, mem, memoryOffset, result);
@@ -2675,57 +2041,41 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements(
- VkDevice device,
- VkBuffer buffer,
- VkMemoryRequirements* pMemoryRequirements)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkGetBufferMemoryRequirements(
- my_data->report_data,
- buffer,
- pMemoryRequirements);
+ skipCall |= param_check_vkGetBufferMemoryRequirements(my_data->report_data, buffer, pMemoryRequirements);
if (skipCall == VK_FALSE) {
get_dispatch_table(pc_device_table_map, device)->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements(
- VkDevice device,
- VkImage image,
- VkMemoryRequirements* pMemoryRequirements)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkGetImageMemoryRequirements(
- my_data->report_data,
- image,
- pMemoryRequirements);
+ skipCall |= param_check_vkGetImageMemoryRequirements(my_data->report_data, image, pMemoryRequirements);
if (skipCall == VK_FALSE) {
get_dispatch_table(pc_device_table_map, device)->GetImageMemoryRequirements(device, image, pMemoryRequirements);
}
}
-bool PostGetImageSparseMemoryRequirements(
- VkDevice device,
- VkImage image,
- uint32_t* pNumRequirements,
- VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
-{
- if(pSparseMemoryRequirements != nullptr)
- {
+bool PostGetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t *pNumRequirements,
+ VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
+ if (pSparseMemoryRequirements != nullptr) {
if ((pSparseMemoryRequirements->formatProperties.aspectMask &
- (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0)
- {
+ (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT |
+ VK_IMAGE_ASPECT_METADATA_BIT)) == 0) {
log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkGetImageSparseMemoryRequirements parameter, VkImageAspect pSparseMemoryRequirements->formatProperties.aspectMask, is an unrecognized enumerator");
+ "vkGetImageSparseMemoryRequirements parameter, VkImageAspect "
+ "pSparseMemoryRequirements->formatProperties.aspectMask, is an unrecognized enumerator");
return false;
}
}
@@ -2733,73 +2083,52 @@ bool PostGetImageSparseMemoryRequirements(
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements(
- VkDevice device,
- VkImage image,
- uint32_t* pSparseMemoryRequirementCount,
- VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t *pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkGetImageSparseMemoryRequirements(
- my_data->report_data,
- image,
- pSparseMemoryRequirementCount,
- pSparseMemoryRequirements);
+ skipCall |= param_check_vkGetImageSparseMemoryRequirements(my_data->report_data, image, pSparseMemoryRequirementCount,
+ pSparseMemoryRequirements);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)->GetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
+ get_dispatch_table(pc_device_table_map, device)
+ ->GetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
PostGetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
}
}
-bool PostGetPhysicalDeviceSparseImageFormatProperties(
- VkPhysicalDevice physicalDevice,
- VkFormat format,
- VkImageType type,
- VkSampleCountFlagBits samples,
- VkImageUsageFlags usage,
- VkImageTiling tiling,
- uint32_t* pNumProperties,
- VkSparseImageFormatProperties* pProperties)
-{
+bool PostGetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type,
+ VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling,
+ uint32_t *pNumProperties, VkSparseImageFormatProperties *pProperties) {
- if(format < VK_FORMAT_BEGIN_RANGE ||
- format > VK_FORMAT_END_RANGE)
- {
+ if (format < VK_FORMAT_BEGIN_RANGE || format > VK_FORMAT_END_RANGE) {
log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkGetPhysicalDeviceSparseImageFormatProperties parameter, VkFormat format, is an unrecognized enumerator");
+ "vkGetPhysicalDeviceSparseImageFormatProperties parameter, VkFormat format, is an unrecognized enumerator");
return false;
}
- if(type < VK_IMAGE_TYPE_BEGIN_RANGE ||
- type > VK_IMAGE_TYPE_END_RANGE)
- {
+ if (type < VK_IMAGE_TYPE_BEGIN_RANGE || type > VK_IMAGE_TYPE_END_RANGE) {
log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkGetPhysicalDeviceSparseImageFormatProperties parameter, VkImageType type, is an unrecognized enumerator");
+ "vkGetPhysicalDeviceSparseImageFormatProperties parameter, VkImageType type, is an unrecognized enumerator");
return false;
}
-
-
- if(tiling < VK_IMAGE_TILING_BEGIN_RANGE ||
- tiling > VK_IMAGE_TILING_END_RANGE)
- {
+ if (tiling < VK_IMAGE_TILING_BEGIN_RANGE || tiling > VK_IMAGE_TILING_END_RANGE) {
log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkGetPhysicalDeviceSparseImageFormatProperties parameter, VkImageTiling tiling, is an unrecognized enumerator");
+ "vkGetPhysicalDeviceSparseImageFormatProperties parameter, VkImageTiling tiling, is an unrecognized enumerator");
return false;
}
- if(pProperties != nullptr)
- {
- if ((pProperties->aspectMask &
- (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0)
- {
+ if (pProperties != nullptr) {
+ if ((pProperties->aspectMask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT |
+ VK_IMAGE_ASPECT_METADATA_BIT)) == 0) {
log_msg(mdd(physicalDevice), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkGetPhysicalDeviceSparseImageFormatProperties parameter, VkImageAspect pProperties->aspectMask, is an unrecognized enumerator");
+ "vkGetPhysicalDeviceSparseImageFormatProperties parameter, VkImageAspect pProperties->aspectMask, is an "
+ "unrecognized enumerator");
return false;
}
}
@@ -2807,73 +2136,47 @@ bool PostGetPhysicalDeviceSparseImageFormatProperties(
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties(
- VkPhysicalDevice physicalDevice,
- VkFormat format,
- VkImageType type,
- VkSampleCountFlagBits samples,
- VkImageUsageFlags usage,
- VkImageTiling tiling,
- uint32_t* pPropertyCount,
- VkSparseImageFormatProperties* pProperties)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type,
+ VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling,
+ uint32_t *pPropertyCount, VkSparseImageFormatProperties *pProperties) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkGetPhysicalDeviceSparseImageFormatProperties(
- my_data->report_data,
- format,
- type,
- samples,
- usage,
- tiling,
- pPropertyCount,
- pProperties);
+ skipCall |= param_check_vkGetPhysicalDeviceSparseImageFormatProperties(my_data->report_data, format, type, samples, usage,
+ tiling, pPropertyCount, pProperties);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_instance_table_map, physicalDevice)->GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling, pPropertyCount, pProperties);
+ get_dispatch_table(pc_instance_table_map, physicalDevice)
+ ->GetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling, pPropertyCount,
+ pProperties);
- PostGetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling, pPropertyCount, pProperties);
+ PostGetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling, pPropertyCount,
+ pProperties);
}
}
-bool PostQueueBindSparse(
- VkQueue queue,
- uint32_t bindInfoCount,
- const VkBindSparseInfo* pBindInfo,
- VkFence fence,
- VkResult result)
-{
-
-
+bool PostQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence, VkResult result) {
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkQueueBindSparse parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(
- VkQueue queue,
- uint32_t bindInfoCount,
- const VkBindSparseInfo* pBindInfo,
- VkFence fence)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkQueueBindSparse(
- my_data->report_data,
- bindInfoCount,
- pBindInfo,
- fence);
+ skipCall |= param_check_vkQueueBindSparse(my_data->report_data, bindInfoCount, pBindInfo, fence);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
@@ -2884,37 +2187,25 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse(
return result;
}
-bool PostCreateFence(
- VkDevice device,
- VkFence* pFence,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateFence(VkDevice device, VkFence *pFence, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateFence parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence(
- VkDevice device,
- const VkFenceCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkFence* pFence)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateFence(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pFence);
+ skipCall |= param_check_vkCreateFence(my_data->report_data, pCreateInfo, pAllocator, pFence);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, device)->CreateFence(device, pCreateInfo, pAllocator, pFence);
@@ -2925,54 +2216,37 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyFence(VkDevice device, VkFence fence,
- const VkAllocationCallbacks *pAllocator) {
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |=
- param_check_vkDestroyFence(my_data->report_data, fence, pAllocator);
+ skipCall |= param_check_vkDestroyFence(my_data->report_data, fence, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyFence(device, fence, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroyFence(device, fence, pAllocator);
}
}
-bool PostResetFences(
- VkDevice device,
- uint32_t fenceCount,
- VkResult result)
-{
-
+bool PostResetFences(VkDevice device, uint32_t fenceCount, VkResult result) {
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkResetFences parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(
- VkDevice device,
- uint32_t fenceCount,
- const VkFence* pFences)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkResetFences(
- my_data->report_data,
- fenceCount,
- pFences);
+ skipCall |= param_check_vkResetFences(my_data->report_data, fenceCount, pFences);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, device)->ResetFences(device, fenceCount, pFences);
@@ -2983,27 +2257,19 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(
return result;
}
-bool PostGetFenceStatus(
- VkDevice device,
- VkFence fence,
- VkResult result)
-{
-
+bool PostGetFenceStatus(VkDevice device, VkFence fence, VkResult result) {
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkGetFenceStatus parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(
- VkDevice device,
- VkFence fence)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) {
VkResult result = get_dispatch_table(pc_device_table_map, device)->GetFenceStatus(device, fence);
PostGetFenceStatus(device, fence, result);
@@ -3011,45 +2277,26 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(
return result;
}
-bool PostWaitForFences(
- VkDevice device,
- uint32_t fenceCount,
- VkBool32 waitAll,
- uint64_t timeout,
- VkResult result)
-{
+bool PostWaitForFences(VkDevice device, uint32_t fenceCount, VkBool32 waitAll, uint64_t timeout, VkResult result) {
-
-
-
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkWaitForFences parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences(
- VkDevice device,
- uint32_t fenceCount,
- const VkFence* pFences,
- VkBool32 waitAll,
- uint64_t timeout)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkWaitForFences(
- my_data->report_data,
- fenceCount,
- pFences,
- waitAll,
- timeout);
+ skipCall |= param_check_vkWaitForFences(my_data->report_data, fenceCount, pFences, waitAll, timeout);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, device)->WaitForFences(device, fenceCount, pFences, waitAll, timeout);
@@ -3060,37 +2307,25 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences(
return result;
}
-bool PostCreateSemaphore(
- VkDevice device,
- VkSemaphore* pSemaphore,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateSemaphore(VkDevice device, VkSemaphore *pSemaphore, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateSemaphore parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(
- VkDevice device,
- const VkSemaphoreCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSemaphore* pSemaphore)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateSemaphore(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pSemaphore);
+ skipCall |= param_check_vkCreateSemaphore(my_data->report_data, pCreateInfo, pAllocator, pSemaphore);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, device)->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
@@ -3102,53 +2337,37 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroySemaphore(VkDevice device, VkSemaphore semaphore,
- const VkAllocationCallbacks *pAllocator) {
+vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkDestroySemaphore(my_data->report_data, semaphore,
- pAllocator);
+ skipCall |= param_check_vkDestroySemaphore(my_data->report_data, semaphore, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroySemaphore(device, semaphore, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroySemaphore(device, semaphore, pAllocator);
}
}
-bool PostCreateEvent(
- VkDevice device,
- VkEvent* pEvent,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateEvent(VkDevice device, VkEvent *pEvent, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateEvent parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent(
- VkDevice device,
- const VkEventCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkEvent* pEvent)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateEvent(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pEvent);
+ skipCall |= param_check_vkCreateEvent(my_data->report_data, pCreateInfo, pAllocator, pEvent);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, device)->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
@@ -3159,44 +2378,31 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyEvent(VkDevice device, VkEvent event,
- const VkAllocationCallbacks *pAllocator) {
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |=
- param_check_vkDestroyEvent(my_data->report_data, event, pAllocator);
+ skipCall |= param_check_vkDestroyEvent(my_data->report_data, event, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyEvent(device, event, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroyEvent(device, event, pAllocator);
}
}
-bool PostGetEventStatus(
- VkDevice device,
- VkEvent event,
- VkResult result)
-{
-
+bool PostGetEventStatus(VkDevice device, VkEvent event, VkResult result) {
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkGetEventStatus parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus(
- VkDevice device,
- VkEvent event)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus(VkDevice device, VkEvent event) {
VkResult result = get_dispatch_table(pc_device_table_map, device)->GetEventStatus(device, event);
PostGetEventStatus(device, event, result);
@@ -3204,27 +2410,19 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus(
return result;
}
-bool PostSetEvent(
- VkDevice device,
- VkEvent event,
- VkResult result)
-{
-
+bool PostSetEvent(VkDevice device, VkEvent event, VkResult result) {
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkSetEvent parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(
- VkDevice device,
- VkEvent event)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
VkResult result = get_dispatch_table(pc_device_table_map, device)->SetEvent(device, event);
PostSetEvent(device, event, result);
@@ -3232,27 +2430,19 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(
return result;
}
-bool PostResetEvent(
- VkDevice device,
- VkEvent event,
- VkResult result)
-{
+bool PostResetEvent(VkDevice device, VkEvent event, VkResult result) {
-
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkResetEvent parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent(
- VkDevice device,
- VkEvent event)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent(VkDevice device, VkEvent event) {
VkResult result = get_dispatch_table(pc_device_table_map, device)->ResetEvent(device, event);
PostResetEvent(device, event, result);
@@ -3260,55 +2450,37 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent(
return result;
}
-bool PreCreateQueryPool(
- VkDevice device,
- const VkQueryPoolCreateInfo* pCreateInfo)
-{
- if(pCreateInfo != nullptr)
- {
- if(pCreateInfo->queryType < VK_QUERY_TYPE_BEGIN_RANGE ||
- pCreateInfo->queryType > VK_QUERY_TYPE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateQueryPool parameter, VkQueryType pCreateInfo->queryType, is an unrecognized enumerator");
- return false;
- }
+bool PreCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo) {
+ if (pCreateInfo != nullptr) {
+ if (pCreateInfo->queryType < VK_QUERY_TYPE_BEGIN_RANGE || pCreateInfo->queryType > VK_QUERY_TYPE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateQueryPool parameter, VkQueryType pCreateInfo->queryType, is an unrecognized enumerator");
+ return false;
+ }
}
return true;
}
-bool PostCreateQueryPool(
- VkDevice device,
- VkQueryPool* pQueryPool,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateQueryPool(VkDevice device, VkQueryPool *pQueryPool, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateQueryPool parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(
- VkDevice device,
- const VkQueryPoolCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkQueryPool* pQueryPool)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateQueryPool(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pQueryPool);
+ skipCall |= param_check_vkCreateQueryPool(my_data->report_data, pCreateInfo, pAllocator, pQueryPool);
if (skipCall == VK_FALSE) {
PreCreateQueryPool(device, pCreateInfo);
@@ -3322,70 +2494,44 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool,
- const VkAllocationCallbacks *pAllocator) {
+vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkDestroyQueryPool(my_data->report_data, queryPool,
- pAllocator);
+ skipCall |= param_check_vkDestroyQueryPool(my_data->report_data, queryPool, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyQueryPool(device, queryPool, pAllocator);
- }
-}
-
-bool PostGetQueryPoolResults(
- VkDevice device,
- VkQueryPool queryPool,
- uint32_t firstQuery,
- uint32_t queryCount,
- size_t dataSize,
- void* pData,
- VkDeviceSize stride,
- VkQueryResultFlags flags,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+ get_dispatch_table(pc_device_table_map, device)->DestroyQueryPool(device, queryPool, pAllocator);
+ }
+}
+
+bool PostGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize,
+ void *pData, VkDeviceSize stride, VkQueryResultFlags flags, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkGetQueryPoolResults parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(
- VkDevice device,
- VkQueryPool queryPool,
- uint32_t firstQuery,
- uint32_t queryCount,
- size_t dataSize,
- void* pData,
- VkDeviceSize stride,
- VkQueryResultFlags flags)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
+ uint32_t queryCount, size_t dataSize, void *pData,
+ VkDeviceSize stride, VkQueryResultFlags flags) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkGetQueryPoolResults(
- my_data->report_data,
- queryPool,
- firstQuery,
- queryCount,
- dataSize,
- pData,
- stride,
- flags);
+ skipCall |=
+ param_check_vkGetQueryPoolResults(my_data->report_data, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
if (skipCall == VK_FALSE) {
- result = get_dispatch_table(pc_device_table_map, device)->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
+ result = get_dispatch_table(pc_device_table_map, device)
+ ->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
PostGetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags, result);
}
@@ -3393,55 +2539,37 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(
return result;
}
-bool PreCreateBuffer(
- VkDevice device,
- const VkBufferCreateInfo* pCreateInfo)
-{
- if(pCreateInfo != nullptr)
- {
- if(pCreateInfo->sharingMode < VK_SHARING_MODE_BEGIN_RANGE ||
- pCreateInfo->sharingMode > VK_SHARING_MODE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateBuffer parameter, VkSharingMode pCreateInfo->sharingMode, is an unrecognized enumerator");
- return false;
- }
+bool PreCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo) {
+ if (pCreateInfo != nullptr) {
+ if (pCreateInfo->sharingMode < VK_SHARING_MODE_BEGIN_RANGE || pCreateInfo->sharingMode > VK_SHARING_MODE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateBuffer parameter, VkSharingMode pCreateInfo->sharingMode, is an unrecognized enumerator");
+ return false;
+ }
}
return true;
}
-bool PostCreateBuffer(
- VkDevice device,
- VkBuffer* pBuffer,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateBuffer(VkDevice device, VkBuffer *pBuffer, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateBuffer parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(
- VkDevice device,
- const VkBufferCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkBuffer* pBuffer)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateBuffer(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pBuffer);
+ skipCall |= param_check_vkCreateBuffer(my_data->report_data, pCreateInfo, pAllocator, pBuffer);
if (skipCall == VK_FALSE) {
PreCreateBuffer(device, pCreateInfo);
@@ -3455,71 +2583,49 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyBuffer(VkDevice device, VkBuffer buffer,
- const VkAllocationCallbacks *pAllocator) {
+vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |=
- param_check_vkDestroyBuffer(my_data->report_data, buffer, pAllocator);
+ skipCall |= param_check_vkDestroyBuffer(my_data->report_data, buffer, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyBuffer(device, buffer, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroyBuffer(device, buffer, pAllocator);
}
}
-bool PreCreateBufferView(
- VkDevice device,
- const VkBufferViewCreateInfo* pCreateInfo)
-{
- if(pCreateInfo != nullptr)
- {
- if(pCreateInfo->format < VK_FORMAT_BEGIN_RANGE ||
- pCreateInfo->format > VK_FORMAT_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateBufferView parameter, VkFormat pCreateInfo->format, is an unrecognized enumerator");
- return false;
- }
+bool PreCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo) {
+ if (pCreateInfo != nullptr) {
+ if (pCreateInfo->format < VK_FORMAT_BEGIN_RANGE || pCreateInfo->format > VK_FORMAT_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateBufferView parameter, VkFormat pCreateInfo->format, is an unrecognized enumerator");
+ return false;
+ }
}
return true;
}
-bool PostCreateBufferView(
- VkDevice device,
- VkBufferView* pView,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateBufferView(VkDevice device, VkBufferView *pView, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateBufferView parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(
- VkDevice device,
- const VkBufferViewCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkBufferView* pView)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateBufferView(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pView);
+ skipCall |= param_check_vkCreateBufferView(my_data->report_data, pCreateInfo, pAllocator, pView);
if (skipCall == VK_FALSE) {
PreCreateBufferView(device, pCreateInfo);
@@ -3533,92 +2639,64 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyBufferView(VkDevice device, VkBufferView bufferView,
- const VkAllocationCallbacks *pAllocator) {
+vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkDestroyBufferView(my_data->report_data,
- bufferView, pAllocator);
+ skipCall |= param_check_vkDestroyBufferView(my_data->report_data, bufferView, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyBufferView(device, bufferView, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroyBufferView(device, bufferView, pAllocator);
}
}
-bool PreCreateImage(
- VkDevice device,
- const VkImageCreateInfo* pCreateInfo)
-{
- if(pCreateInfo != nullptr)
- {
- if(pCreateInfo->imageType < VK_IMAGE_TYPE_BEGIN_RANGE ||
- pCreateInfo->imageType > VK_IMAGE_TYPE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateImage parameter, VkImageType pCreateInfo->imageType, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->format < VK_FORMAT_BEGIN_RANGE ||
- pCreateInfo->format > VK_FORMAT_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateImage parameter, VkFormat pCreateInfo->format, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->tiling < VK_IMAGE_TILING_BEGIN_RANGE ||
- pCreateInfo->tiling > VK_IMAGE_TILING_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateImage parameter, VkImageTiling pCreateInfo->tiling, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->sharingMode < VK_SHARING_MODE_BEGIN_RANGE ||
- pCreateInfo->sharingMode > VK_SHARING_MODE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateImage parameter, VkSharingMode pCreateInfo->sharingMode, is an unrecognized enumerator");
- return false;
- }
+bool PreCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo) {
+ if (pCreateInfo != nullptr) {
+ if (pCreateInfo->imageType < VK_IMAGE_TYPE_BEGIN_RANGE || pCreateInfo->imageType > VK_IMAGE_TYPE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateImage parameter, VkImageType pCreateInfo->imageType, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->format < VK_FORMAT_BEGIN_RANGE || pCreateInfo->format > VK_FORMAT_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateImage parameter, VkFormat pCreateInfo->format, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->tiling < VK_IMAGE_TILING_BEGIN_RANGE || pCreateInfo->tiling > VK_IMAGE_TILING_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateImage parameter, VkImageTiling pCreateInfo->tiling, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->sharingMode < VK_SHARING_MODE_BEGIN_RANGE || pCreateInfo->sharingMode > VK_SHARING_MODE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateImage parameter, VkSharingMode pCreateInfo->sharingMode, is an unrecognized enumerator");
+ return false;
+ }
}
return true;
}
-bool PostCreateImage(
- VkDevice device,
- VkImage* pImage,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateImage(VkDevice device, VkImage *pImage, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateImage parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(
- VkDevice device,
- const VkImageCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkImage* pImage)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateImage(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pImage);
+ skipCall |= param_check_vkCreateImage(my_data->report_data, pCreateInfo, pAllocator, pImage);
if (skipCall == VK_FALSE) {
PreCreateImage(device, pCreateInfo);
@@ -3631,56 +2709,38 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyImage(VkDevice device, VkImage image,
- const VkAllocationCallbacks *pAllocator) {
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |=
- param_check_vkDestroyImage(my_data->report_data, image, pAllocator);
+ skipCall |= param_check_vkDestroyImage(my_data->report_data, image, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyImage(device, image, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroyImage(device, image, pAllocator);
}
}
-bool PreGetImageSubresourceLayout(
- VkDevice device,
- const VkImageSubresource* pSubresource)
-{
- if(pSubresource != nullptr)
- {
- if ((pSubresource->aspectMask &
- (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkGetImageSubresourceLayout parameter, VkImageAspect pSubresource->aspectMask, is an unrecognized enumerator");
- return false;
- }
+bool PreGetImageSubresourceLayout(VkDevice device, const VkImageSubresource *pSubresource) {
+ if (pSubresource != nullptr) {
+ if ((pSubresource->aspectMask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT |
+ VK_IMAGE_ASPECT_METADATA_BIT)) == 0) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkGetImageSubresourceLayout parameter, VkImageAspect pSubresource->aspectMask, is an unrecognized enumerator");
+ return false;
+ }
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout(
- VkDevice device,
- VkImage image,
- const VkImageSubresource* pSubresource,
- VkSubresourceLayout* pLayout)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource, VkSubresourceLayout *pLayout) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkGetImageSubresourceLayout(
- my_data->report_data,
- image,
- pSubresource,
- pLayout);
+ skipCall |= param_check_vkGetImageSubresourceLayout(my_data->report_data, image, pSubresource, pLayout);
if (skipCall == VK_FALSE) {
PreGetImageSubresourceLayout(device, pSubresource);
@@ -3689,90 +2749,66 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout(
}
}
-bool PreCreateImageView(
- VkDevice device,
- const VkImageViewCreateInfo* pCreateInfo)
-{
- if(pCreateInfo != nullptr)
- {
- if(pCreateInfo->viewType < VK_IMAGE_VIEW_TYPE_BEGIN_RANGE ||
- pCreateInfo->viewType > VK_IMAGE_VIEW_TYPE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateImageView parameter, VkImageViewType pCreateInfo->viewType, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->format < VK_FORMAT_BEGIN_RANGE ||
- pCreateInfo->format > VK_FORMAT_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateImageView parameter, VkFormat pCreateInfo->format, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->components.r < VK_COMPONENT_SWIZZLE_BEGIN_RANGE ||
- pCreateInfo->components.r > VK_COMPONENT_SWIZZLE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateImageView parameter, VkComponentSwizzle pCreateInfo->components.r, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->components.g < VK_COMPONENT_SWIZZLE_BEGIN_RANGE ||
- pCreateInfo->components.g > VK_COMPONENT_SWIZZLE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateImageView parameter, VkComponentSwizzle pCreateInfo->components.g, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->components.b < VK_COMPONENT_SWIZZLE_BEGIN_RANGE ||
- pCreateInfo->components.b > VK_COMPONENT_SWIZZLE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateImageView parameter, VkComponentSwizzle pCreateInfo->components.b, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->components.a < VK_COMPONENT_SWIZZLE_BEGIN_RANGE ||
- pCreateInfo->components.a > VK_COMPONENT_SWIZZLE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateImageView parameter, VkComponentSwizzle pCreateInfo->components.a, is an unrecognized enumerator");
- return false;
- }
+bool PreCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo) {
+ if (pCreateInfo != nullptr) {
+ if (pCreateInfo->viewType < VK_IMAGE_VIEW_TYPE_BEGIN_RANGE || pCreateInfo->viewType > VK_IMAGE_VIEW_TYPE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateImageView parameter, VkImageViewType pCreateInfo->viewType, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->format < VK_FORMAT_BEGIN_RANGE || pCreateInfo->format > VK_FORMAT_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateImageView parameter, VkFormat pCreateInfo->format, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->components.r < VK_COMPONENT_SWIZZLE_BEGIN_RANGE ||
+ pCreateInfo->components.r > VK_COMPONENT_SWIZZLE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateImageView parameter, VkComponentSwizzle pCreateInfo->components.r, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->components.g < VK_COMPONENT_SWIZZLE_BEGIN_RANGE ||
+ pCreateInfo->components.g > VK_COMPONENT_SWIZZLE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateImageView parameter, VkComponentSwizzle pCreateInfo->components.g, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->components.b < VK_COMPONENT_SWIZZLE_BEGIN_RANGE ||
+ pCreateInfo->components.b > VK_COMPONENT_SWIZZLE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateImageView parameter, VkComponentSwizzle pCreateInfo->components.b, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->components.a < VK_COMPONENT_SWIZZLE_BEGIN_RANGE ||
+ pCreateInfo->components.a > VK_COMPONENT_SWIZZLE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateImageView parameter, VkComponentSwizzle pCreateInfo->components.a, is an unrecognized enumerator");
+ return false;
+ }
}
return true;
}
-bool PostCreateImageView(
- VkDevice device,
- VkImageView* pView,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateImageView(VkDevice device, VkImageView *pView, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateImageView parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(
- VkDevice device,
- const VkImageViewCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkImageView* pView)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateImageView(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pView);
+ skipCall |= param_check_vkCreateImageView(my_data->report_data, pCreateInfo, pAllocator, pView);
if (skipCall == VK_FALSE) {
PreCreateImageView(device, pCreateInfo);
@@ -3786,55 +2822,42 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyImageView(VkDevice device, VkImageView imageView,
- const VkAllocationCallbacks *pAllocator) {
+vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkDestroyImageView(my_data->report_data, imageView,
- pAllocator);
+ skipCall |= param_check_vkDestroyImageView(my_data->report_data, imageView, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyImageView(device, imageView, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroyImageView(device, imageView, pAllocator);
}
}
-bool PostCreateShaderModule(
- VkDevice device,
- VkShaderModule* pShaderModule,
- VkResult result)
-{
- if(result < VK_SUCCESS) {
+bool PostCreateShaderModule(VkDevice device, VkShaderModule *pShaderModule, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateShaderModule parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(
- VkDevice device,
- const VkShaderModuleCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkShaderModule* pShaderModule)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkShaderModule *pShaderModule) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateShaderModule(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pShaderModule);
+ skipCall |= param_check_vkCreateShaderModule(my_data->report_data, pCreateInfo, pAllocator, pShaderModule);
if (skipCall == VK_FALSE) {
- result = get_dispatch_table(pc_device_table_map, device)->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
+ result =
+ get_dispatch_table(pc_device_table_map, device)->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
PostCreateShaderModule(device, pShaderModule, result);
}
@@ -3843,56 +2866,42 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
- const VkAllocationCallbacks *pAllocator) {
+vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkDestroyShaderModule(my_data->report_data,
- shaderModule, pAllocator);
+ skipCall |= param_check_vkDestroyShaderModule(my_data->report_data, shaderModule, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyShaderModule(device, shaderModule, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroyShaderModule(device, shaderModule, pAllocator);
}
}
-bool PostCreatePipelineCache(
- VkDevice device,
- VkPipelineCache* pPipelineCache,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreatePipelineCache(VkDevice device, VkPipelineCache *pPipelineCache, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreatePipelineCache parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(
- VkDevice device,
- const VkPipelineCacheCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkPipelineCache* pPipelineCache)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkPipelineCache *pPipelineCache) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreatePipelineCache(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pPipelineCache);
+ skipCall |= param_check_vkCreatePipelineCache(my_data->report_data, pCreateInfo, pAllocator, pPipelineCache);
if (skipCall == VK_FALSE) {
- result = get_dispatch_table(pc_device_table_map, device)->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
+ result =
+ get_dispatch_table(pc_device_table_map, device)->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
PostCreatePipelineCache(device, pPipelineCache, result);
}
@@ -3901,55 +2910,37 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
- const VkAllocationCallbacks *pAllocator) {
+vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkDestroyPipelineCache(my_data->report_data,
- pipelineCache, pAllocator);
+ skipCall |= param_check_vkDestroyPipelineCache(my_data->report_data, pipelineCache, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyPipelineCache(device, pipelineCache, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroyPipelineCache(device, pipelineCache, pAllocator);
}
}
-bool PostGetPipelineCacheData(
- VkDevice device,
- VkPipelineCache pipelineCache,
- size_t* pDataSize,
- void* pData,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkGetPipelineCacheData parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData(
- VkDevice device,
- VkPipelineCache pipelineCache,
- size_t* pDataSize,
- void* pData)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkGetPipelineCacheData(
- my_data->report_data,
- pipelineCache,
- pDataSize,
- pData);
+ skipCall |= param_check_vkGetPipelineCacheData(my_data->report_data, pipelineCache, pDataSize, pData);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, device)->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
@@ -3960,41 +2951,26 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData(
return result;
}
-bool PostMergePipelineCaches(
- VkDevice device,
- VkPipelineCache dstCache,
- uint32_t srcCacheCount,
- VkResult result)
-{
-
+bool PostMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, VkResult result) {
-
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkMergePipelineCaches parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches(
- VkDevice device,
- VkPipelineCache dstCache,
- uint32_t srcCacheCount,
- const VkPipelineCache* pSrcCaches)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkMergePipelineCaches(
- my_data->report_data,
- dstCache,
- srcCacheCount,
- pSrcCaches);
+ skipCall |= param_check_vkMergePipelineCaches(my_data->report_data, dstCache, srcCacheCount, pSrcCaches);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, device)->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
@@ -4005,275 +2981,250 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches(
return result;
}
-bool PreCreateGraphicsPipelines(
- VkDevice device,
- const VkGraphicsPipelineCreateInfo* pCreateInfos)
-{
+bool PreCreateGraphicsPipelines(VkDevice device, const VkGraphicsPipelineCreateInfo *pCreateInfos) {
layer_data *data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
// TODO: Handle count
- if(pCreateInfos != nullptr)
- {
- if(pCreateInfos->flags | VK_PIPELINE_CREATE_DERIVATIVE_BIT)
- {
- if(pCreateInfos->basePipelineIndex != -1)
- {
- if(pCreateInfos->basePipelineHandle != VK_NULL_HANDLE)
- {
+ if (pCreateInfos != nullptr) {
+ if (pCreateInfos->flags | VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
+ if (pCreateInfos->basePipelineIndex != -1) {
+ if (pCreateInfos->basePipelineHandle != VK_NULL_HANDLE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, pCreateInfos->basePipelineHandle, must be VK_NULL_HANDLE if "
+ "pCreateInfos->flags "
+ "contains the VK_PIPELINE_CREATE_DERIVATIVE_BIT flag and pCreateInfos->basePipelineIndex is not -1");
+ return false;
+ }
+ }
+
+ if (pCreateInfos->basePipelineHandle != VK_NULL_HANDLE) {
+ if (pCreateInfos->basePipelineIndex != -1) {
+ log_msg(
+ mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, pCreateInfos->basePipelineIndex, must be -1 if pCreateInfos->flags "
+ "contains the VK_PIPELINE_CREATE_DERIVATIVE_BIT flag and pCreateInfos->basePipelineHandle is not "
+ "VK_NULL_HANDLE");
+ return false;
+ }
+ }
+ }
+
+ if (pCreateInfos->pVertexInputState != nullptr) {
+ if (pCreateInfos->pVertexInputState->pVertexBindingDescriptions != nullptr) {
+ if (pCreateInfos->pVertexInputState->pVertexBindingDescriptions->inputRate < VK_VERTEX_INPUT_RATE_BEGIN_RANGE ||
+ pCreateInfos->pVertexInputState->pVertexBindingDescriptions->inputRate > VK_VERTEX_INPUT_RATE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkVertexInputRate "
+ "pCreateInfos->pVertexInputState->pVertexBindingDescriptions->inputRate, is an unrecognized "
+ "enumerator");
+ return false;
+ }
+ }
+ if (pCreateInfos->pVertexInputState->pVertexAttributeDescriptions != nullptr) {
+ if (pCreateInfos->pVertexInputState->pVertexAttributeDescriptions->format < VK_FORMAT_BEGIN_RANGE ||
+ pCreateInfos->pVertexInputState->pVertexAttributeDescriptions->format > VK_FORMAT_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkFormat "
+ "pCreateInfos->pVertexInputState->pVertexAttributeDescriptions->format, is an unrecognized enumerator");
+ return false;
+ }
+ }
+ }
+ if (pCreateInfos->pInputAssemblyState != nullptr) {
+ if (pCreateInfos->pInputAssemblyState->topology < VK_PRIMITIVE_TOPOLOGY_BEGIN_RANGE ||
+ pCreateInfos->pInputAssemblyState->topology > VK_PRIMITIVE_TOPOLOGY_END_RANGE) {
log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, pCreateInfos->basePipelineHandle, must be VK_NULL_HANDLE if pCreateInfos->flags "
- "contains the VK_PIPELINE_CREATE_DERIVATIVE_BIT flag and pCreateInfos->basePipelineIndex is not -1");
+ "vkCreateGraphicsPipelines parameter, VkPrimitiveTopology pCreateInfos->pInputAssemblyState->topology, is "
+ "an unrecognized enumerator");
return false;
}
}
-
- if(pCreateInfos->basePipelineHandle != VK_NULL_HANDLE)
- {
- if(pCreateInfos->basePipelineIndex != -1)
- {
+ if (pCreateInfos->pRasterizationState != nullptr) {
+ if (pCreateInfos->pRasterizationState->polygonMode < VK_POLYGON_MODE_BEGIN_RANGE ||
+ pCreateInfos->pRasterizationState->polygonMode > VK_POLYGON_MODE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkPolygonMode pCreateInfos->pRasterizationState->polygonMode, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pRasterizationState->cullMode & ~VK_CULL_MODE_FRONT_AND_BACK) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkCullMode pCreateInfos->pRasterizationState->cullMode, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pRasterizationState->frontFace < VK_FRONT_FACE_BEGIN_RANGE ||
+ pCreateInfos->pRasterizationState->frontFace > VK_FRONT_FACE_END_RANGE) {
log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, pCreateInfos->basePipelineIndex, must be -1 if pCreateInfos->flags "
- "contains the VK_PIPELINE_CREATE_DERIVATIVE_BIT flag and pCreateInfos->basePipelineHandle is not VK_NULL_HANDLE");
+ "vkCreateGraphicsPipelines parameter, VkFrontFace pCreateInfos->pRasterizationState->frontFace, is an "
+ "unrecognized enumerator");
return false;
}
}
- }
-
- if(pCreateInfos->pVertexInputState != nullptr)
- {
- if(pCreateInfos->pVertexInputState->pVertexBindingDescriptions != nullptr)
- {
- if(pCreateInfos->pVertexInputState->pVertexBindingDescriptions->inputRate < VK_VERTEX_INPUT_RATE_BEGIN_RANGE ||
- pCreateInfos->pVertexInputState->pVertexBindingDescriptions->inputRate > VK_VERTEX_INPUT_RATE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkVertexInputRate pCreateInfos->pVertexInputState->pVertexBindingDescriptions->inputRate, is an unrecognized enumerator");
- return false;
- }
- }
- if(pCreateInfos->pVertexInputState->pVertexAttributeDescriptions != nullptr)
- {
- if(pCreateInfos->pVertexInputState->pVertexAttributeDescriptions->format < VK_FORMAT_BEGIN_RANGE ||
- pCreateInfos->pVertexInputState->pVertexAttributeDescriptions->format > VK_FORMAT_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkFormat pCreateInfos->pVertexInputState->pVertexAttributeDescriptions->format, is an unrecognized enumerator");
- return false;
- }
- }
- }
- if(pCreateInfos->pInputAssemblyState != nullptr)
- {
- if(pCreateInfos->pInputAssemblyState->topology < VK_PRIMITIVE_TOPOLOGY_BEGIN_RANGE ||
- pCreateInfos->pInputAssemblyState->topology > VK_PRIMITIVE_TOPOLOGY_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkPrimitiveTopology pCreateInfos->pInputAssemblyState->topology, is an unrecognized enumerator");
- return false;
- }
- }
- if(pCreateInfos->pRasterizationState != nullptr)
- {
- if(pCreateInfos->pRasterizationState->polygonMode < VK_POLYGON_MODE_BEGIN_RANGE ||
- pCreateInfos->pRasterizationState->polygonMode > VK_POLYGON_MODE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkPolygonMode pCreateInfos->pRasterizationState->polygonMode, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pRasterizationState->cullMode & ~VK_CULL_MODE_FRONT_AND_BACK)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkCullMode pCreateInfos->pRasterizationState->cullMode, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pRasterizationState->frontFace < VK_FRONT_FACE_BEGIN_RANGE ||
- pCreateInfos->pRasterizationState->frontFace > VK_FRONT_FACE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkFrontFace pCreateInfos->pRasterizationState->frontFace, is an unrecognized enumerator");
- return false;
- }
- }
- if(pCreateInfos->pDepthStencilState != nullptr)
- {
- if(pCreateInfos->pDepthStencilState->depthCompareOp < VK_COMPARE_OP_BEGIN_RANGE ||
- pCreateInfos->pDepthStencilState->depthCompareOp > VK_COMPARE_OP_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkCompareOp pCreateInfos->pDepthStencilState->depthCompareOp, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pDepthStencilState->front.failOp < VK_STENCIL_OP_BEGIN_RANGE ||
- pCreateInfos->pDepthStencilState->front.failOp > VK_STENCIL_OP_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkStencilOp pCreateInfos->pDepthStencilState->front.failOp, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pDepthStencilState->front.passOp < VK_STENCIL_OP_BEGIN_RANGE ||
- pCreateInfos->pDepthStencilState->front.passOp > VK_STENCIL_OP_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkStencilOp pCreateInfos->pDepthStencilState->front.passOp, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pDepthStencilState->front.depthFailOp < VK_STENCIL_OP_BEGIN_RANGE ||
- pCreateInfos->pDepthStencilState->front.depthFailOp > VK_STENCIL_OP_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkStencilOp pCreateInfos->pDepthStencilState->front.depthFailOp, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pDepthStencilState->front.compareOp < VK_COMPARE_OP_BEGIN_RANGE ||
- pCreateInfos->pDepthStencilState->front.compareOp > VK_COMPARE_OP_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkCompareOp pCreateInfos->pDepthStencilState->front.compareOp, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pDepthStencilState->back.failOp < VK_STENCIL_OP_BEGIN_RANGE ||
- pCreateInfos->pDepthStencilState->back.failOp > VK_STENCIL_OP_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkStencilOp pCreateInfos->pDepthStencilState->back.failOp, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pDepthStencilState->back.passOp < VK_STENCIL_OP_BEGIN_RANGE ||
- pCreateInfos->pDepthStencilState->back.passOp > VK_STENCIL_OP_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkStencilOp pCreateInfos->pDepthStencilState->back.passOp, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pDepthStencilState->back.depthFailOp < VK_STENCIL_OP_BEGIN_RANGE ||
- pCreateInfos->pDepthStencilState->back.depthFailOp > VK_STENCIL_OP_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkStencilOp pCreateInfos->pDepthStencilState->back.depthFailOp, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pDepthStencilState->back.compareOp < VK_COMPARE_OP_BEGIN_RANGE ||
- pCreateInfos->pDepthStencilState->back.compareOp > VK_COMPARE_OP_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkCompareOp pCreateInfos->pDepthStencilState->back.compareOp, is an unrecognized enumerator");
- return false;
- }
- }
- if(pCreateInfos->pColorBlendState != nullptr)
- {
- if (pCreateInfos->pColorBlendState->logicOpEnable == VK_TRUE &&
- (pCreateInfos->pColorBlendState->logicOp < VK_LOGIC_OP_BEGIN_RANGE ||
- pCreateInfos->pColorBlendState->logicOp > VK_LOGIC_OP_END_RANGE)) {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkLogicOp pCreateInfos->pColorBlendState->logicOp, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pColorBlendState->pAttachments != nullptr && pCreateInfos->pColorBlendState->pAttachments->blendEnable == VK_TRUE)
- {
- if(pCreateInfos->pColorBlendState->pAttachments->srcColorBlendFactor < VK_BLEND_FACTOR_BEGIN_RANGE ||
- pCreateInfos->pColorBlendState->pAttachments->srcColorBlendFactor > VK_BLEND_FACTOR_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkBlendFactor pCreateInfos->pColorBlendState->pAttachments->srcColorBlendFactor, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pColorBlendState->pAttachments->dstColorBlendFactor < VK_BLEND_FACTOR_BEGIN_RANGE ||
- pCreateInfos->pColorBlendState->pAttachments->dstColorBlendFactor > VK_BLEND_FACTOR_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkBlendFactor pCreateInfos->pColorBlendState->pAttachments->dstColorBlendFactor, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pColorBlendState->pAttachments->colorBlendOp < VK_BLEND_OP_BEGIN_RANGE ||
- pCreateInfos->pColorBlendState->pAttachments->colorBlendOp > VK_BLEND_OP_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkBlendOp pCreateInfos->pColorBlendState->pAttachments->colorBlendOp, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pColorBlendState->pAttachments->srcAlphaBlendFactor < VK_BLEND_FACTOR_BEGIN_RANGE ||
- pCreateInfos->pColorBlendState->pAttachments->srcAlphaBlendFactor > VK_BLEND_FACTOR_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkBlendFactor pCreateInfos->pColorBlendState->pAttachments->srcAlphaBlendFactor, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pColorBlendState->pAttachments->dstAlphaBlendFactor < VK_BLEND_FACTOR_BEGIN_RANGE ||
- pCreateInfos->pColorBlendState->pAttachments->dstAlphaBlendFactor > VK_BLEND_FACTOR_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkBlendFactor pCreateInfos->pColorBlendState->pAttachments->dstAlphaBlendFactor, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfos->pColorBlendState->pAttachments->alphaBlendOp < VK_BLEND_OP_BEGIN_RANGE ||
- pCreateInfos->pColorBlendState->pAttachments->alphaBlendOp > VK_BLEND_OP_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkBlendOp pCreateInfos->pColorBlendState->pAttachments->alphaBlendOp, is an unrecognized enumerator");
- return false;
- }
- }
- }
- if(pCreateInfos->renderPass == VK_NULL_HANDLE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateGraphicsPipelines parameter, VkRenderPass pCreateInfos->renderPass, is null pointer");
- }
-
- int i = 0;
- for (auto j = 0; j < pCreateInfos[i].stageCount; j++) {
- validate_string(data, "vkCreateGraphicsPipelines()", "pCreateInfos[i].pStages[j].pName", pCreateInfos[i].pStages[j].pName);
- }
+ if (pCreateInfos->pDepthStencilState != nullptr) {
+ if (pCreateInfos->pDepthStencilState->depthCompareOp < VK_COMPARE_OP_BEGIN_RANGE ||
+ pCreateInfos->pDepthStencilState->depthCompareOp > VK_COMPARE_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkCompareOp pCreateInfos->pDepthStencilState->depthCompareOp, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pDepthStencilState->front.failOp < VK_STENCIL_OP_BEGIN_RANGE ||
+ pCreateInfos->pDepthStencilState->front.failOp > VK_STENCIL_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkStencilOp pCreateInfos->pDepthStencilState->front.failOp, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pDepthStencilState->front.passOp < VK_STENCIL_OP_BEGIN_RANGE ||
+ pCreateInfos->pDepthStencilState->front.passOp > VK_STENCIL_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkStencilOp pCreateInfos->pDepthStencilState->front.passOp, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pDepthStencilState->front.depthFailOp < VK_STENCIL_OP_BEGIN_RANGE ||
+ pCreateInfos->pDepthStencilState->front.depthFailOp > VK_STENCIL_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkStencilOp pCreateInfos->pDepthStencilState->front.depthFailOp, is "
+ "an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pDepthStencilState->front.compareOp < VK_COMPARE_OP_BEGIN_RANGE ||
+ pCreateInfos->pDepthStencilState->front.compareOp > VK_COMPARE_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkCompareOp pCreateInfos->pDepthStencilState->front.compareOp, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pDepthStencilState->back.failOp < VK_STENCIL_OP_BEGIN_RANGE ||
+ pCreateInfos->pDepthStencilState->back.failOp > VK_STENCIL_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkStencilOp pCreateInfos->pDepthStencilState->back.failOp, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pDepthStencilState->back.passOp < VK_STENCIL_OP_BEGIN_RANGE ||
+ pCreateInfos->pDepthStencilState->back.passOp > VK_STENCIL_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkStencilOp pCreateInfos->pDepthStencilState->back.passOp, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pDepthStencilState->back.depthFailOp < VK_STENCIL_OP_BEGIN_RANGE ||
+ pCreateInfos->pDepthStencilState->back.depthFailOp > VK_STENCIL_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkStencilOp pCreateInfos->pDepthStencilState->back.depthFailOp, is "
+ "an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pDepthStencilState->back.compareOp < VK_COMPARE_OP_BEGIN_RANGE ||
+ pCreateInfos->pDepthStencilState->back.compareOp > VK_COMPARE_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkCompareOp pCreateInfos->pDepthStencilState->back.compareOp, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ }
+ if (pCreateInfos->pColorBlendState != nullptr) {
+ if (pCreateInfos->pColorBlendState->logicOpEnable == VK_TRUE &&
+ (pCreateInfos->pColorBlendState->logicOp < VK_LOGIC_OP_BEGIN_RANGE ||
+ pCreateInfos->pColorBlendState->logicOp > VK_LOGIC_OP_END_RANGE)) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkLogicOp pCreateInfos->pColorBlendState->logicOp, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pColorBlendState->pAttachments != nullptr &&
+ pCreateInfos->pColorBlendState->pAttachments->blendEnable == VK_TRUE) {
+ if (pCreateInfos->pColorBlendState->pAttachments->srcColorBlendFactor < VK_BLEND_FACTOR_BEGIN_RANGE ||
+ pCreateInfos->pColorBlendState->pAttachments->srcColorBlendFactor > VK_BLEND_FACTOR_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkBlendFactor "
+ "pCreateInfos->pColorBlendState->pAttachments->srcColorBlendFactor, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pColorBlendState->pAttachments->dstColorBlendFactor < VK_BLEND_FACTOR_BEGIN_RANGE ||
+ pCreateInfos->pColorBlendState->pAttachments->dstColorBlendFactor > VK_BLEND_FACTOR_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkBlendFactor "
+ "pCreateInfos->pColorBlendState->pAttachments->dstColorBlendFactor, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pColorBlendState->pAttachments->colorBlendOp < VK_BLEND_OP_BEGIN_RANGE ||
+ pCreateInfos->pColorBlendState->pAttachments->colorBlendOp > VK_BLEND_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkBlendOp "
+ "pCreateInfos->pColorBlendState->pAttachments->colorBlendOp, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pColorBlendState->pAttachments->srcAlphaBlendFactor < VK_BLEND_FACTOR_BEGIN_RANGE ||
+ pCreateInfos->pColorBlendState->pAttachments->srcAlphaBlendFactor > VK_BLEND_FACTOR_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkBlendFactor "
+ "pCreateInfos->pColorBlendState->pAttachments->srcAlphaBlendFactor, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pColorBlendState->pAttachments->dstAlphaBlendFactor < VK_BLEND_FACTOR_BEGIN_RANGE ||
+ pCreateInfos->pColorBlendState->pAttachments->dstAlphaBlendFactor > VK_BLEND_FACTOR_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkBlendFactor "
+ "pCreateInfos->pColorBlendState->pAttachments->dstAlphaBlendFactor, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfos->pColorBlendState->pAttachments->alphaBlendOp < VK_BLEND_OP_BEGIN_RANGE ||
+ pCreateInfos->pColorBlendState->pAttachments->alphaBlendOp > VK_BLEND_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkBlendOp "
+ "pCreateInfos->pColorBlendState->pAttachments->alphaBlendOp, is an unrecognized enumerator");
+ return false;
+ }
+ }
+ }
+ if (pCreateInfos->renderPass == VK_NULL_HANDLE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateGraphicsPipelines parameter, VkRenderPass pCreateInfos->renderPass, is null pointer");
+ }
+ int i = 0;
+ for (auto j = 0; j < pCreateInfos[i].stageCount; j++) {
+ validate_string(data, "vkCreateGraphicsPipelines()", "pCreateInfos[i].pStages[j].pName",
+ pCreateInfos[i].pStages[j].pName);
+ }
}
return true;
}
-bool PostCreateGraphicsPipelines(
- VkDevice device,
- VkPipelineCache pipelineCache,
- uint32_t count,
- VkPipeline* pPipelines,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, VkPipeline *pPipelines,
+ VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateGraphicsPipelines parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines(
- VkDevice device,
- VkPipelineCache pipelineCache,
- uint32_t createInfoCount,
- const VkGraphicsPipelineCreateInfo* pCreateInfos,
- const VkAllocationCallbacks* pAllocator,
- VkPipeline* pPipelines)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
+ const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipelines) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateGraphicsPipelines(
- my_data->report_data,
- pipelineCache,
- createInfoCount,
- pCreateInfos,
- pAllocator,
- pPipelines);
+ skipCall |= param_check_vkCreateGraphicsPipelines(my_data->report_data, pipelineCache, createInfoCount, pCreateInfos,
+ pAllocator, pPipelines);
if (skipCall == VK_FALSE) {
PreCreateGraphicsPipelines(device, pCreateInfos);
- result = get_dispatch_table(pc_device_table_map, device)->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
+ result = get_dispatch_table(pc_device_table_map, device)
+ ->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
PostCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pPipelines, result);
}
@@ -4281,64 +3232,47 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines(
return result;
}
-bool PreCreateComputePipelines(
- VkDevice device,
- const VkComputePipelineCreateInfo* pCreateInfos)
-{
+bool PreCreateComputePipelines(VkDevice device, const VkComputePipelineCreateInfo *pCreateInfos) {
layer_data *data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- if(pCreateInfos != nullptr)
- {
- // TODO: Handle count!
- int i = 0;
- validate_string(data, "vkCreateComputePipelines()", "pCreateInfos[i].stage.pName", pCreateInfos[i].stage.pName);
+ if (pCreateInfos != nullptr) {
+ // TODO: Handle count!
+ int i = 0;
+ validate_string(data, "vkCreateComputePipelines()", "pCreateInfos[i].stage.pName", pCreateInfos[i].stage.pName);
}
return true;
}
-bool PostCreateComputePipelines(
- VkDevice device,
- VkPipelineCache pipelineCache,
- uint32_t count,
- VkPipeline* pPipelines,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, VkPipeline *pPipelines,
+ VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateComputePipelines parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines(
- VkDevice device,
- VkPipelineCache pipelineCache,
- uint32_t createInfoCount,
- const VkComputePipelineCreateInfo* pCreateInfos,
- const VkAllocationCallbacks* pAllocator,
- VkPipeline* pPipelines)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
+ const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipelines) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateComputePipelines(
- my_data->report_data,
- pipelineCache,
- createInfoCount,
- pCreateInfos,
- pAllocator,
- pPipelines);
+ skipCall |= param_check_vkCreateComputePipelines(my_data->report_data, pipelineCache, createInfoCount, pCreateInfos, pAllocator,
+ pPipelines);
if (skipCall == VK_FALSE) {
PreCreateComputePipelines(device, pCreateInfos);
- result = get_dispatch_table(pc_device_table_map, device)->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
+ result = get_dispatch_table(pc_device_table_map, device)
+ ->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
PostCreateComputePipelines(device, pipelineCache, createInfoCount, pPipelines, result);
}
@@ -4347,56 +3281,42 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyPipeline(VkDevice device, VkPipeline pipeline,
- const VkAllocationCallbacks *pAllocator) {
+vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkDestroyPipeline(my_data->report_data, pipeline,
- pAllocator);
+ skipCall |= param_check_vkDestroyPipeline(my_data->report_data, pipeline, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyPipeline(device, pipeline, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroyPipeline(device, pipeline, pAllocator);
}
}
-bool PostCreatePipelineLayout(
- VkDevice device,
- VkPipelineLayout* pPipelineLayout,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreatePipelineLayout(VkDevice device, VkPipelineLayout *pPipelineLayout, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreatePipelineLayout parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(
- VkDevice device,
- const VkPipelineLayoutCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkPipelineLayout* pPipelineLayout)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
+ VkPipelineLayout *pPipelineLayout) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreatePipelineLayout(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pPipelineLayout);
+ skipCall |= param_check_vkCreatePipelineLayout(my_data->report_data, pCreateInfo, pAllocator, pPipelineLayout);
if (skipCall == VK_FALSE) {
- result = get_dispatch_table(pc_device_table_map, device)->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
+ result =
+ get_dispatch_table(pc_device_table_map, device)->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
PostCreatePipelineLayout(device, pPipelineLayout, result);
}
@@ -4405,141 +3325,105 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
- const VkAllocationCallbacks *pAllocator) {
+vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkDestroyPipelineLayout(my_data->report_data,
- pipelineLayout, pAllocator);
+ skipCall |= param_check_vkDestroyPipelineLayout(my_data->report_data, pipelineLayout, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
}
}
-bool PreCreateSampler(
- VkDevice device,
- const VkSamplerCreateInfo* pCreateInfo)
-{
- if(pCreateInfo != nullptr)
- {
- if(pCreateInfo->magFilter < VK_FILTER_BEGIN_RANGE ||
- pCreateInfo->magFilter > VK_FILTER_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateSampler parameter, VkFilter pCreateInfo->magFilter, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->minFilter < VK_FILTER_BEGIN_RANGE ||
- pCreateInfo->minFilter > VK_FILTER_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateSampler parameter, VkFilter pCreateInfo->minFilter, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->mipmapMode < VK_SAMPLER_MIPMAP_MODE_BEGIN_RANGE ||
- pCreateInfo->mipmapMode > VK_SAMPLER_MIPMAP_MODE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateSampler parameter, VkSamplerMipmapMode pCreateInfo->mipmapMode, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->addressModeU < VK_SAMPLER_ADDRESS_MODE_BEGIN_RANGE ||
- pCreateInfo->addressModeU > VK_SAMPLER_ADDRESS_MODE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateSampler parameter, VkTexAddress pCreateInfo->addressModeU, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->addressModeV < VK_SAMPLER_ADDRESS_MODE_BEGIN_RANGE ||
- pCreateInfo->addressModeV > VK_SAMPLER_ADDRESS_MODE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateSampler parameter, VkTexAddress pCreateInfo->addressModeV, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->addressModeW < VK_SAMPLER_ADDRESS_MODE_BEGIN_RANGE ||
- pCreateInfo->addressModeW > VK_SAMPLER_ADDRESS_MODE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateSampler parameter, VkTexAddress pCreateInfo->addressModeW, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->anisotropyEnable > VK_TRUE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateSampler parameter, VkBool32 pCreateInfo->anisotropyEnable, is an unrecognized boolean");
- return false;
- }
- if(pCreateInfo->compareEnable > VK_TRUE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateSampler parameter, VkBool32 pCreateInfo->compareEnable, is an unrecognized boolean");
- return false;
- }
- if(pCreateInfo->compareEnable)
- {
- if(pCreateInfo->compareOp < VK_COMPARE_OP_BEGIN_RANGE ||
- pCreateInfo->compareOp > VK_COMPARE_OP_END_RANGE)
- {
+bool PreCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo) {
+ if (pCreateInfo != nullptr) {
+ if (pCreateInfo->magFilter < VK_FILTER_BEGIN_RANGE || pCreateInfo->magFilter > VK_FILTER_END_RANGE) {
log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateSampler parameter, VkCompareOp pCreateInfo->compareOp, is an unrecognized enumerator");
+ "vkCreateSampler parameter, VkFilter pCreateInfo->magFilter, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->minFilter < VK_FILTER_BEGIN_RANGE || pCreateInfo->minFilter > VK_FILTER_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateSampler parameter, VkFilter pCreateInfo->minFilter, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->mipmapMode < VK_SAMPLER_MIPMAP_MODE_BEGIN_RANGE ||
+ pCreateInfo->mipmapMode > VK_SAMPLER_MIPMAP_MODE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateSampler parameter, VkSamplerMipmapMode pCreateInfo->mipmapMode, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->addressModeU < VK_SAMPLER_ADDRESS_MODE_BEGIN_RANGE ||
+ pCreateInfo->addressModeU > VK_SAMPLER_ADDRESS_MODE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateSampler parameter, VkTexAddress pCreateInfo->addressModeU, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->addressModeV < VK_SAMPLER_ADDRESS_MODE_BEGIN_RANGE ||
+ pCreateInfo->addressModeV > VK_SAMPLER_ADDRESS_MODE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateSampler parameter, VkTexAddress pCreateInfo->addressModeV, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->addressModeW < VK_SAMPLER_ADDRESS_MODE_BEGIN_RANGE ||
+ pCreateInfo->addressModeW > VK_SAMPLER_ADDRESS_MODE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateSampler parameter, VkTexAddress pCreateInfo->addressModeW, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->anisotropyEnable > VK_TRUE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateSampler parameter, VkBool32 pCreateInfo->anisotropyEnable, is an unrecognized boolean");
+ return false;
+ }
+ if (pCreateInfo->compareEnable > VK_TRUE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateSampler parameter, VkBool32 pCreateInfo->compareEnable, is an unrecognized boolean");
+ return false;
+ }
+ if (pCreateInfo->compareEnable) {
+ if (pCreateInfo->compareOp < VK_COMPARE_OP_BEGIN_RANGE || pCreateInfo->compareOp > VK_COMPARE_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateSampler parameter, VkCompareOp pCreateInfo->compareOp, is an unrecognized enumerator");
+ return false;
+ }
+ }
+ if (pCreateInfo->borderColor < VK_BORDER_COLOR_BEGIN_RANGE || pCreateInfo->borderColor > VK_BORDER_COLOR_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateSampler parameter, VkBorderColor pCreateInfo->borderColor, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->unnormalizedCoordinates > VK_TRUE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateSampler parameter, VkBool32 pCreateInfo->unnormalizedCoordinates, is an unrecognized boolean");
return false;
}
- }
- if(pCreateInfo->borderColor < VK_BORDER_COLOR_BEGIN_RANGE ||
- pCreateInfo->borderColor > VK_BORDER_COLOR_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateSampler parameter, VkBorderColor pCreateInfo->borderColor, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->unnormalizedCoordinates > VK_TRUE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateSampler parameter, VkBool32 pCreateInfo->unnormalizedCoordinates, is an unrecognized boolean");
- return false;
- }
}
return true;
}
-bool PostCreateSampler(
- VkDevice device,
- VkSampler* pSampler,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateSampler(VkDevice device, VkSampler *pSampler, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateSampler parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(
- VkDevice device,
- const VkSamplerCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSampler* pSampler)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateSampler(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pSampler);
+ skipCall |= param_check_vkCreateSampler(my_data->report_data, pCreateInfo, pAllocator, pSampler);
if (skipCall == VK_FALSE) {
PreCreateSampler(device, pCreateInfo);
@@ -4553,79 +3437,60 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroySampler(VkDevice device, VkSampler sampler,
- const VkAllocationCallbacks *pAllocator) {
+vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |=
- param_check_vkDestroySampler(my_data->report_data, sampler, pAllocator);
+ skipCall |= param_check_vkDestroySampler(my_data->report_data, sampler, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroySampler(device, sampler, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroySampler(device, sampler, pAllocator);
}
}
-bool PreCreateDescriptorSetLayout(
- VkDevice device,
- const VkDescriptorSetLayoutCreateInfo* pCreateInfo)
-{
- if(pCreateInfo != nullptr)
- {
- if(pCreateInfo->pBindings != nullptr)
- {
- if(pCreateInfo->pBindings->descriptorType < VK_DESCRIPTOR_TYPE_BEGIN_RANGE ||
- pCreateInfo->pBindings->descriptorType > VK_DESCRIPTOR_TYPE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateDescriptorSetLayout parameter, VkDescriptorType pCreateInfo->pBindings->descriptorType, is an unrecognized enumerator");
- return false;
- }
- }
+bool PreCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo) {
+ if (pCreateInfo != nullptr) {
+ if (pCreateInfo->pBindings != nullptr) {
+ if (pCreateInfo->pBindings->descriptorType < VK_DESCRIPTOR_TYPE_BEGIN_RANGE ||
+ pCreateInfo->pBindings->descriptorType > VK_DESCRIPTOR_TYPE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateDescriptorSetLayout parameter, VkDescriptorType pCreateInfo->pBindings->descriptorType, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ }
}
return true;
}
-bool PostCreateDescriptorSetLayout(
- VkDevice device,
- VkDescriptorSetLayout* pSetLayout,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout *pSetLayout, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateDescriptorSetLayout parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout(
- VkDevice device,
- const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDescriptorSetLayout* pSetLayout)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateDescriptorSetLayout(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pSetLayout);
+ skipCall |= param_check_vkCreateDescriptorSetLayout(my_data->report_data, pCreateInfo, pAllocator, pSetLayout);
if (skipCall == VK_FALSE) {
PreCreateDescriptorSetLayout(device, pCreateInfo);
- result = get_dispatch_table(pc_device_table_map, device)->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
+ result =
+ get_dispatch_table(pc_device_table_map, device)->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
PostCreateDescriptorSetLayout(device, pSetLayout, result);
}
@@ -4634,85 +3499,63 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyDescriptorSetLayout(VkDevice device,
- VkDescriptorSetLayout descriptorSetLayout,
- const VkAllocationCallbacks *pAllocator) {
+vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkDestroyDescriptorSetLayout(
- my_data->report_data, descriptorSetLayout, pAllocator);
+ skipCall |= param_check_vkDestroyDescriptorSetLayout(my_data->report_data, descriptorSetLayout, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyDescriptorSetLayout(device, descriptorSetLayout,
- pAllocator);
- }
-}
-
-bool PreCreateDescriptorPool(
- VkDevice device,
- const VkDescriptorPoolCreateInfo* pCreateInfo)
-{
- if(pCreateInfo != nullptr)
- {
- if(pCreateInfo->pPoolSizes != nullptr)
- {
- if(pCreateInfo->pPoolSizes->type < VK_DESCRIPTOR_TYPE_BEGIN_RANGE ||
- pCreateInfo->pPoolSizes->type > VK_DESCRIPTOR_TYPE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateDescriptorPool parameter, VkDescriptorType pCreateInfo->pTypeCount->type, is an unrecognized enumerator");
- return false;
- }
+ get_dispatch_table(pc_device_table_map, device)->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
}
+}
+
+bool PreCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo) {
+ if (pCreateInfo != nullptr) {
+ if (pCreateInfo->pPoolSizes != nullptr) {
+ if (pCreateInfo->pPoolSizes->type < VK_DESCRIPTOR_TYPE_BEGIN_RANGE ||
+ pCreateInfo->pPoolSizes->type > VK_DESCRIPTOR_TYPE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+                    "vkCreateDescriptorPool parameter, VkDescriptorType pCreateInfo->pPoolSizes->type, is an unrecognized "
+ "enumerator");
+ return false;
+ }
+ }
}
return true;
}
-bool PostCreateDescriptorPool(
- VkDevice device,
- uint32_t maxSets,
- VkDescriptorPool* pDescriptorPool,
- VkResult result)
-{
+bool PostCreateDescriptorPool(VkDevice device, uint32_t maxSets, VkDescriptorPool *pDescriptorPool, VkResult result) {
/* TODOVV: How do we validate maxSets? Probably belongs in the limits layer? */
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateDescriptorPool parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool(
- VkDevice device,
- const VkDescriptorPoolCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDescriptorPool* pDescriptorPool)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
+ VkDescriptorPool *pDescriptorPool) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateDescriptorPool(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pDescriptorPool);
+ skipCall |= param_check_vkCreateDescriptorPool(my_data->report_data, pCreateInfo, pAllocator, pDescriptorPool);
if (skipCall == VK_FALSE) {
PreCreateDescriptorPool(device, pCreateInfo);
- result = get_dispatch_table(pc_device_table_map, device)->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
+ result =
+ get_dispatch_table(pc_device_table_map, device)->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
PostCreateDescriptorPool(device, pCreateInfo->maxSets, pDescriptorPool, result);
}
@@ -4721,44 +3564,32 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
- const VkAllocationCallbacks *pAllocator) {
+vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkDestroyDescriptorPool(my_data->report_data,
- descriptorPool, pAllocator);
+ skipCall |= param_check_vkDestroyDescriptorPool(my_data->report_data, descriptorPool, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyDescriptorPool(device, descriptorPool, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroyDescriptorPool(device, descriptorPool, pAllocator);
}
}
-bool PostResetDescriptorPool(
- VkDevice device,
- VkDescriptorPool descriptorPool,
- VkResult result)
-{
-
+bool PostResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkResult result) {
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkResetDescriptorPool parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool(
- VkDevice device,
- VkDescriptorPool descriptorPool,
- VkDescriptorPoolResetFlags flags)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
VkResult result = get_dispatch_table(pc_device_table_map, device)->ResetDescriptorPool(device, descriptorPool, flags);
PostResetDescriptorPool(device, descriptorPool, result);
@@ -4766,85 +3597,62 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool(
return result;
}
-bool PostAllocateDescriptorSets(
- VkDevice device,
- VkDescriptorPool descriptorPool,
- uint32_t count,
- VkDescriptorSet* pDescriptorSets,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostAllocateDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, VkDescriptorSet *pDescriptorSets,
+ VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkAllocateDescriptorSets parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets(
- VkDevice device,
- const VkDescriptorSetAllocateInfo* pAllocateInfo,
- VkDescriptorSet* pDescriptorSets)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkAllocateDescriptorSets(
- my_data->report_data,
- pAllocateInfo,
- pDescriptorSets);
+ skipCall |= param_check_vkAllocateDescriptorSets(my_data->report_data, pAllocateInfo, pDescriptorSets);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, device)->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
- PostAllocateDescriptorSets(device, pAllocateInfo->descriptorPool, pAllocateInfo->descriptorSetCount, pDescriptorSets, result);
+ PostAllocateDescriptorSets(device, pAllocateInfo->descriptorPool, pAllocateInfo->descriptorSetCount, pDescriptorSets,
+ result);
}
return result;
}
-bool PostFreeDescriptorSets(
- VkDevice device,
- VkDescriptorPool descriptorPool,
- uint32_t count,
- VkResult result)
-{
-
-
+bool PostFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, VkResult result) {
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkFreeDescriptorSets parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets(
- VkDevice device,
- VkDescriptorPool descriptorPool,
- uint32_t descriptorSetCount,
- const VkDescriptorSet* pDescriptorSets)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool,
+ uint32_t descriptorSetCount,
+ const VkDescriptorSet *pDescriptorSets) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkFreeDescriptorSets(
- my_data->report_data,
- descriptorPool,
- descriptorSetCount,
- pDescriptorSets);
+ skipCall |= param_check_vkFreeDescriptorSets(my_data->report_data, descriptorPool, descriptorSetCount, pDescriptorSets);
if (skipCall == VK_FALSE) {
- result = get_dispatch_table(pc_device_table_map, device)->FreeDescriptorSets(device, descriptorPool, descriptorSetCount, pDescriptorSets);
+ result = get_dispatch_table(pc_device_table_map, device)
+ ->FreeDescriptorSets(device, descriptorPool, descriptorSetCount, pDescriptorSets);
PostFreeDescriptorSets(device, descriptorPool, descriptorSetCount, result);
}
@@ -4852,94 +3660,71 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets(
return result;
}
-bool PreUpdateDescriptorSets(
- VkDevice device,
- const VkWriteDescriptorSet* pDescriptorWrites,
- const VkCopyDescriptorSet* pDescriptorCopies)
-{
- if(pDescriptorWrites != nullptr)
- {
- if(pDescriptorWrites->descriptorType < VK_DESCRIPTOR_TYPE_BEGIN_RANGE ||
- pDescriptorWrites->descriptorType > VK_DESCRIPTOR_TYPE_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkUpdateDescriptorSets parameter, VkDescriptorType pDescriptorWrites->descriptorType, is an unrecognized enumerator");
- return false;
- }
- /* TODO: Validate other parts of pImageInfo, pBufferInfo, pTexelBufferView? */
- /* TODO: This test should probably only be done if descriptorType is correct type of descriptor */
- if(pDescriptorWrites->pImageInfo != nullptr)
- {
- if (((pDescriptorWrites->pImageInfo->imageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (pDescriptorWrites->pImageInfo->imageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (pDescriptorWrites->pImageInfo->imageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkUpdateDescriptorSets parameter, VkImageLayout pDescriptorWrites->pDescriptors->imageLayout, is an unrecognized enumerator");
- return false;
- }
- }
+bool PreUpdateDescriptorSets(VkDevice device, const VkWriteDescriptorSet *pDescriptorWrites,
+ const VkCopyDescriptorSet *pDescriptorCopies) {
+ if (pDescriptorWrites != nullptr) {
+ if (pDescriptorWrites->descriptorType < VK_DESCRIPTOR_TYPE_BEGIN_RANGE ||
+ pDescriptorWrites->descriptorType > VK_DESCRIPTOR_TYPE_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkUpdateDescriptorSets parameter, VkDescriptorType pDescriptorWrites->descriptorType, is an unrecognized "
+ "enumerator");
+ return false;
+ }
+ /* TODO: Validate other parts of pImageInfo, pBufferInfo, pTexelBufferView? */
+ /* TODO: This test should probably only be done if descriptorType is correct type of descriptor */
+ if (pDescriptorWrites->pImageInfo != nullptr) {
+ if (((pDescriptorWrites->pImageInfo->imageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
+ (pDescriptorWrites->pImageInfo->imageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (pDescriptorWrites->pImageInfo->imageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkUpdateDescriptorSets parameter, VkImageLayout pDescriptorWrites->pDescriptors->imageLayout, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ }
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets(
- VkDevice device,
- uint32_t descriptorWriteCount,
- const VkWriteDescriptorSet* pDescriptorWrites,
- uint32_t descriptorCopyCount,
- const VkCopyDescriptorSet* pDescriptorCopies)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
+ uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkUpdateDescriptorSets(
- my_data->report_data,
- descriptorWriteCount,
- pDescriptorWrites,
- descriptorCopyCount,
- pDescriptorCopies);
+ skipCall |= param_check_vkUpdateDescriptorSets(my_data->report_data, descriptorWriteCount, pDescriptorWrites,
+ descriptorCopyCount, pDescriptorCopies);
if (skipCall == VK_FALSE) {
PreUpdateDescriptorSets(device, pDescriptorWrites, pDescriptorCopies);
- get_dispatch_table(pc_device_table_map, device)->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
+ get_dispatch_table(pc_device_table_map, device)
+ ->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
}
}
-bool PostCreateFramebuffer(
- VkDevice device,
- VkFramebuffer* pFramebuffer,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateFramebuffer(VkDevice device, VkFramebuffer *pFramebuffer, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateFramebuffer parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(
- VkDevice device,
- const VkFramebufferCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkFramebuffer* pFramebuffer)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkFramebuffer *pFramebuffer) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateFramebuffer(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pFramebuffer);
+ skipCall |= param_check_vkCreateFramebuffer(my_data->report_data, pCreateInfo, pAllocator, pFramebuffer);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, device)->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
@@ -4951,170 +3736,145 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer,
- const VkAllocationCallbacks *pAllocator) {
+vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkDestroyFramebuffer(my_data->report_data,
- framebuffer, pAllocator);
+ skipCall |= param_check_vkDestroyFramebuffer(my_data->report_data, framebuffer, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyFramebuffer(device, framebuffer, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroyFramebuffer(device, framebuffer, pAllocator);
}
}
-bool PreCreateRenderPass(
- VkDevice device,
- const VkRenderPassCreateInfo* pCreateInfo)
-{
- if(pCreateInfo != nullptr)
- {
- if(pCreateInfo->pAttachments != nullptr)
- {
- if(pCreateInfo->pAttachments->format < VK_FORMAT_BEGIN_RANGE ||
- pCreateInfo->pAttachments->format > VK_FORMAT_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateRenderPass parameter, VkFormat pCreateInfo->pAttachments->format, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->pAttachments->loadOp < VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE ||
- pCreateInfo->pAttachments->loadOp > VK_ATTACHMENT_LOAD_OP_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateRenderPass parameter, VkAttachmentLoadOp pCreateInfo->pAttachments->loadOp, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->pAttachments->storeOp < VK_ATTACHMENT_STORE_OP_BEGIN_RANGE ||
- pCreateInfo->pAttachments->storeOp > VK_ATTACHMENT_STORE_OP_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateRenderPass parameter, VkAttachmentStoreOp pCreateInfo->pAttachments->storeOp, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->pAttachments->stencilLoadOp < VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE ||
- pCreateInfo->pAttachments->stencilLoadOp > VK_ATTACHMENT_LOAD_OP_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateRenderPass parameter, VkAttachmentLoadOp pCreateInfo->pAttachments->stencilLoadOp, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->pAttachments->stencilStoreOp < VK_ATTACHMENT_STORE_OP_BEGIN_RANGE ||
- pCreateInfo->pAttachments->stencilStoreOp > VK_ATTACHMENT_STORE_OP_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateRenderPass parameter, VkAttachmentStoreOp pCreateInfo->pAttachments->stencilStoreOp, is an unrecognized enumerator");
- return false;
- }
- if (((pCreateInfo->pAttachments->initialLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (pCreateInfo->pAttachments->initialLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (pCreateInfo->pAttachments->initialLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateRenderPass parameter, VkImageLayout pCreateInfo->pAttachments->initialLayout, is an unrecognized enumerator");
- return false;
- }
- if (((pCreateInfo->pAttachments->initialLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (pCreateInfo->pAttachments->initialLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (pCreateInfo->pAttachments->initialLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateRenderPass parameter, VkImageLayout pCreateInfo->pAttachments->finalLayout, is an unrecognized enumerator");
- return false;
- }
- }
- if(pCreateInfo->pSubpasses != nullptr)
- {
- if(pCreateInfo->pSubpasses->pipelineBindPoint < VK_PIPELINE_BIND_POINT_BEGIN_RANGE ||
- pCreateInfo->pSubpasses->pipelineBindPoint > VK_PIPELINE_BIND_POINT_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateRenderPass parameter, VkPipelineBindPoint pCreateInfo->pSubpasses->pipelineBindPoint, is an unrecognized enumerator");
- return false;
- }
- if(pCreateInfo->pSubpasses->pInputAttachments != nullptr)
- {
- if (((pCreateInfo->pSubpasses->pInputAttachments->layout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (pCreateInfo->pSubpasses->pInputAttachments->layout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (pCreateInfo->pSubpasses->pInputAttachments->layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateRenderPass parameter, VkImageLayout pCreateInfo->pSubpasses->pInputAttachments->layout, is an unrecognized enumerator");
- return false;
- }
- }
- if(pCreateInfo->pSubpasses->pColorAttachments != nullptr)
- {
- if (((pCreateInfo->pSubpasses->pColorAttachments->layout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (pCreateInfo->pSubpasses->pColorAttachments->layout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (pCreateInfo->pSubpasses->pColorAttachments->layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateRenderPass parameter, VkImageLayout pCreateInfo->pSubpasses->pColorAttachments->layout, is an unrecognized enumerator");
- return false;
- }
- }
- if(pCreateInfo->pSubpasses->pResolveAttachments != nullptr)
- {
- if (((pCreateInfo->pSubpasses->pResolveAttachments->layout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (pCreateInfo->pSubpasses->pResolveAttachments->layout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (pCreateInfo->pSubpasses->pResolveAttachments->layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateRenderPass parameter, VkImageLayout pCreateInfo->pSubpasses->pResolveAttachments->layout, is an unrecognized enumerator");
- return false;
- }
- }
- if(pCreateInfo->pSubpasses->pDepthStencilAttachment &&
- ((pCreateInfo->pSubpasses->pDepthStencilAttachment->layout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (pCreateInfo->pSubpasses->pDepthStencilAttachment->layout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (pCreateInfo->pSubpasses->pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCreateRenderPass parameter, VkImageLayout pCreateInfo->pSubpasses->pDepthStencilAttachment->layout, is an unrecognized enumerator");
- return false;
- }
- }
+bool PreCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
+ if (pCreateInfo != nullptr) {
+ if (pCreateInfo->pAttachments != nullptr) {
+ if (pCreateInfo->pAttachments->format < VK_FORMAT_BEGIN_RANGE ||
+ pCreateInfo->pAttachments->format > VK_FORMAT_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateRenderPass parameter, VkFormat pCreateInfo->pAttachments->format, is an unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->pAttachments->loadOp < VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE ||
+ pCreateInfo->pAttachments->loadOp > VK_ATTACHMENT_LOAD_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateRenderPass parameter, VkAttachmentLoadOp pCreateInfo->pAttachments->loadOp, is an unrecognized "
+ "enumerator");
+ return false;
+ }
+ if (pCreateInfo->pAttachments->storeOp < VK_ATTACHMENT_STORE_OP_BEGIN_RANGE ||
+ pCreateInfo->pAttachments->storeOp > VK_ATTACHMENT_STORE_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateRenderPass parameter, VkAttachmentStoreOp pCreateInfo->pAttachments->storeOp, is an unrecognized "
+ "enumerator");
+ return false;
+ }
+ if (pCreateInfo->pAttachments->stencilLoadOp < VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE ||
+ pCreateInfo->pAttachments->stencilLoadOp > VK_ATTACHMENT_LOAD_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateRenderPass parameter, VkAttachmentLoadOp pCreateInfo->pAttachments->stencilLoadOp, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->pAttachments->stencilStoreOp < VK_ATTACHMENT_STORE_OP_BEGIN_RANGE ||
+ pCreateInfo->pAttachments->stencilStoreOp > VK_ATTACHMENT_STORE_OP_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateRenderPass parameter, VkAttachmentStoreOp pCreateInfo->pAttachments->stencilStoreOp, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ if (((pCreateInfo->pAttachments->initialLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
+ (pCreateInfo->pAttachments->initialLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (pCreateInfo->pAttachments->initialLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateRenderPass parameter, VkImageLayout pCreateInfo->pAttachments->initialLayout, is an unrecognized "
+ "enumerator");
+ return false;
+ }
+            if (((pCreateInfo->pAttachments->finalLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
+                 (pCreateInfo->pAttachments->finalLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+                (pCreateInfo->pAttachments->finalLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateRenderPass parameter, VkImageLayout pCreateInfo->pAttachments->finalLayout, is an unrecognized "
+ "enumerator");
+ return false;
+ }
+ }
+ if (pCreateInfo->pSubpasses != nullptr) {
+ if (pCreateInfo->pSubpasses->pipelineBindPoint < VK_PIPELINE_BIND_POINT_BEGIN_RANGE ||
+ pCreateInfo->pSubpasses->pipelineBindPoint > VK_PIPELINE_BIND_POINT_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateRenderPass parameter, VkPipelineBindPoint pCreateInfo->pSubpasses->pipelineBindPoint, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ if (pCreateInfo->pSubpasses->pInputAttachments != nullptr) {
+ if (((pCreateInfo->pSubpasses->pInputAttachments->layout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
+ (pCreateInfo->pSubpasses->pInputAttachments->layout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (pCreateInfo->pSubpasses->pInputAttachments->layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateRenderPass parameter, VkImageLayout pCreateInfo->pSubpasses->pInputAttachments->layout, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ }
+ if (pCreateInfo->pSubpasses->pColorAttachments != nullptr) {
+ if (((pCreateInfo->pSubpasses->pColorAttachments->layout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
+ (pCreateInfo->pSubpasses->pColorAttachments->layout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (pCreateInfo->pSubpasses->pColorAttachments->layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateRenderPass parameter, VkImageLayout pCreateInfo->pSubpasses->pColorAttachments->layout, is an "
+ "unrecognized enumerator");
+ return false;
+ }
+ }
+ if (pCreateInfo->pSubpasses->pResolveAttachments != nullptr) {
+ if (((pCreateInfo->pSubpasses->pResolveAttachments->layout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
+ (pCreateInfo->pSubpasses->pResolveAttachments->layout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (pCreateInfo->pSubpasses->pResolveAttachments->layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateRenderPass parameter, VkImageLayout pCreateInfo->pSubpasses->pResolveAttachments->layout, is "
+ "an unrecognized enumerator");
+ return false;
+ }
+ }
+ if (pCreateInfo->pSubpasses->pDepthStencilAttachment &&
+ ((pCreateInfo->pSubpasses->pDepthStencilAttachment->layout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
+ (pCreateInfo->pSubpasses->pDepthStencilAttachment->layout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (pCreateInfo->pSubpasses->pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCreateRenderPass parameter, VkImageLayout pCreateInfo->pSubpasses->pDepthStencilAttachment->layout, is "
+ "an unrecognized enumerator");
+ return false;
+ }
+ }
}
return true;
}
-bool PostCreateRenderPass(
- VkDevice device,
- VkRenderPass* pRenderPass,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateRenderPass(VkDevice device, VkRenderPass *pRenderPass, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateRenderPass parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(
- VkDevice device,
- const VkRenderPassCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkRenderPass* pRenderPass)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkRenderPass *pRenderPass) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateRenderPass(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pRenderPass);
+ skipCall |= param_check_vkCreateRenderPass(my_data->report_data, pCreateInfo, pAllocator, pRenderPass);
if (skipCall == VK_FALSE) {
PreCreateRenderPass(device, pCreateInfo);
@@ -5128,72 +3888,51 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass,
- const VkAllocationCallbacks *pAllocator) {
+vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkDestroyRenderPass(my_data->report_data,
- renderPass, pAllocator);
+ skipCall |= param_check_vkDestroyRenderPass(my_data->report_data, renderPass, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyRenderPass(device, renderPass, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroyRenderPass(device, renderPass, pAllocator);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity(
- VkDevice device,
- VkRenderPass renderPass,
- VkExtent2D* pGranularity)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetRenderAreaGranularity(VkDevice device, VkRenderPass renderPass, VkExtent2D *pGranularity) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkGetRenderAreaGranularity(
- my_data->report_data,
- renderPass,
- pGranularity);
+ skipCall |= param_check_vkGetRenderAreaGranularity(my_data->report_data, renderPass, pGranularity);
if (skipCall == VK_FALSE) {
get_dispatch_table(pc_device_table_map, device)->GetRenderAreaGranularity(device, renderPass, pGranularity);
}
}
-bool PostCreateCommandPool(
- VkDevice device,
- VkCommandPool* pCommandPool,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateCommandPool(VkDevice device, VkCommandPool *pCommandPool, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkCreateCommandPool parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(
- VkDevice device,
- const VkCommandPoolCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkCommandPool* pCommandPool)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkCommandPool *pCommandPool) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCreateCommandPool(
- my_data->report_data,
- pCreateInfo,
- pAllocator,
- pCommandPool);
+ skipCall |= param_check_vkCreateCommandPool(my_data->report_data, pCreateInfo, pAllocator, pCommandPool);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, device)->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
@@ -5205,46 +3944,32 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool,
- const VkAllocationCallbacks *pAllocator) {
+vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkDestroyCommandPool(my_data->report_data,
- commandPool, pAllocator);
+ skipCall |= param_check_vkDestroyCommandPool(my_data->report_data, commandPool, pAllocator);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, device)
- ->DestroyCommandPool(device, commandPool, pAllocator);
+ get_dispatch_table(pc_device_table_map, device)->DestroyCommandPool(device, commandPool, pAllocator);
}
}
-bool PostResetCommandPool(
- VkDevice device,
- VkCommandPool commandPool,
- VkCommandPoolResetFlags flags,
- VkResult result)
-{
+bool PostResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags, VkResult result) {
-
-
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkResetCommandPool parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(
- VkDevice device,
- VkCommandPool commandPool,
- VkCommandPoolResetFlags flags)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
VkResult result = get_dispatch_table(pc_device_table_map, device)->ResetCommandPool(device, commandPool, flags);
PostResetCommandPool(device, commandPool, flags, result);
@@ -5252,53 +3977,37 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool(
return result;
}
-bool PreCreateCommandBuffer(
- VkDevice device,
- const VkCommandBufferAllocateInfo* pCreateInfo)
-{
- if(pCreateInfo != nullptr)
- {
- if(pCreateInfo->level < VK_COMMAND_BUFFER_LEVEL_BEGIN_RANGE ||
- pCreateInfo->level > VK_COMMAND_BUFFER_LEVEL_END_RANGE)
- {
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkAllocateCommandBuffers parameter, VkCommandBufferLevel pCreateInfo->level, is an unrecognized enumerator");
- return false;
- }
+bool PreCreateCommandBuffer(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo) {
+ if (pCreateInfo != nullptr) {
+ if (pCreateInfo->level < VK_COMMAND_BUFFER_LEVEL_BEGIN_RANGE || pCreateInfo->level > VK_COMMAND_BUFFER_LEVEL_END_RANGE) {
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkAllocateCommandBuffers parameter, VkCommandBufferLevel pCreateInfo->level, is an unrecognized enumerator");
+ return false;
+ }
}
return true;
}
-bool PostCreateCommandBuffer(
- VkDevice device,
- VkCommandBuffer* pCommandBuffer,
- VkResult result)
-{
- if(result < VK_SUCCESS)
- {
+bool PostCreateCommandBuffer(VkDevice device, VkCommandBuffer *pCommandBuffer, VkResult result) {
+ if (result < VK_SUCCESS) {
std::string reason = "vkAllocateCommandBuffers parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s",
+ reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(
- VkDevice device,
- const VkCommandBufferAllocateInfo* pAllocateInfo,
- VkCommandBuffer* pCommandBuffers)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo, VkCommandBuffer *pCommandBuffers) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkAllocateCommandBuffers(
- my_data->report_data,
- pAllocateInfo,
- pCommandBuffers);
+ skipCall |= param_check_vkAllocateCommandBuffers(my_data->report_data, pAllocateInfo, pCommandBuffers);
if (skipCall == VK_FALSE) {
PreCreateCommandBuffer(device, pAllocateInfo);
@@ -5311,52 +4020,41 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool,
- uint32_t commandBufferCount,
- const VkCommandBuffer *pCommandBuffers) {
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer *pCommandBuffers) {
VkBool32 skipCall = VK_FALSE;
- layer_data *my_data =
- get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkFreeCommandBuffers(
- my_data->report_data, commandPool, commandBufferCount, pCommandBuffers);
+ skipCall |= param_check_vkFreeCommandBuffers(my_data->report_data, commandPool, commandBufferCount, pCommandBuffers);
if (skipCall == VK_FALSE) {
get_dispatch_table(pc_device_table_map, device)
- ->FreeCommandBuffers(device, commandPool, commandBufferCount,
- pCommandBuffers);
+ ->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}
}
-bool PostBeginCommandBuffer(
- VkCommandBuffer commandBuffer,
- VkResult result)
-{
+bool PostBeginCommandBuffer(VkCommandBuffer commandBuffer, VkResult result) {
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkBeginCommandBuffer parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "%s", reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(
- VkCommandBuffer commandBuffer,
- const VkCommandBufferBeginInfo* pBeginInfo)
-{
- VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
+ VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkBeginCommandBuffer(
- my_data->report_data,
- pBeginInfo);
+ skipCall |= param_check_vkBeginCommandBuffer(my_data->report_data, pBeginInfo);
if (skipCall == VK_FALSE) {
result = get_dispatch_table(pc_device_table_map, commandBuffer)->BeginCommandBuffer(commandBuffer, pBeginInfo);
@@ -5367,24 +4065,19 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer(
return result;
}
-bool PostEndCommandBuffer(
- VkCommandBuffer commandBuffer,
- VkResult result)
-{
+bool PostEndCommandBuffer(VkCommandBuffer commandBuffer, VkResult result) {
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkEndCommandBuffer parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "%s", reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(
- VkCommandBuffer commandBuffer)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
VkResult result = get_dispatch_table(pc_device_table_map, commandBuffer)->EndCommandBuffer(commandBuffer);
PostEndCommandBuffer(commandBuffer, result);
@@ -5392,27 +4085,20 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(
return result;
}
-bool PostResetCommandBuffer(
- VkCommandBuffer commandBuffer,
- VkCommandBufferResetFlags flags,
- VkResult result)
-{
-
+bool PostResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags, VkResult result) {
- if(result < VK_SUCCESS)
- {
+ if (result < VK_SUCCESS) {
std::string reason = "vkResetCommandBuffer parameter, VkResult result, is " + EnumeratorString(result);
- log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK", "%s", reason.c_str());
+ log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "%s", reason.c_str());
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer(
- VkCommandBuffer commandBuffer,
- VkCommandBufferResetFlags flags)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
VkResult result = get_dispatch_table(pc_device_table_map, commandBuffer)->ResetCommandBuffer(commandBuffer, flags);
PostResetCommandBuffer(commandBuffer, flags, result);
@@ -5420,234 +4106,165 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer(
return result;
}
-bool PostCmdBindPipeline(
- VkCommandBuffer commandBuffer,
- VkPipelineBindPoint pipelineBindPoint,
- VkPipeline pipeline)
-{
+bool PostCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
- if(pipelineBindPoint < VK_PIPELINE_BIND_POINT_BEGIN_RANGE ||
- pipelineBindPoint > VK_PIPELINE_BIND_POINT_END_RANGE)
- {
+ if (pipelineBindPoint < VK_PIPELINE_BIND_POINT_BEGIN_RANGE || pipelineBindPoint > VK_PIPELINE_BIND_POINT_END_RANGE) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdBindPipeline parameter, VkPipelineBindPoint pipelineBindPoint, is an unrecognized enumerator");
+ "vkCmdBindPipeline parameter, VkPipelineBindPoint pipelineBindPoint, is an unrecognized enumerator");
return false;
}
-
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline(
- VkCommandBuffer commandBuffer,
- VkPipelineBindPoint pipelineBindPoint,
- VkPipeline pipeline)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
PostCmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdSetViewport(
- my_data->report_data,
- firstViewport,
- viewportCount,
- pViewports);
+ skipCall |= param_check_vkCmdSetViewport(my_data->report_data, firstViewport, viewportCount, pViewports);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdSetScissor(
- my_data->report_data,
- firstScissor,
- scissorCount,
- pScissors);
+ skipCall |= param_check_vkCmdSetScissor(my_data->report_data, firstScissor, scissorCount, pScissors);
if (skipCall == VK_FALSE) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdSetLineWidth(commandBuffer, lineWidth);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor)
-{
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4])
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdSetBlendConstants(
- my_data->report_data,
- blendConstants);
+ skipCall |= param_check_vkCmdSetBlendConstants(my_data->report_data, blendConstants);
if (skipCall == VK_FALSE) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdSetBlendConstants(commandBuffer, blendConstants);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdSetStencilReference(commandBuffer, faceMask, reference);
}
-bool PostCmdBindDescriptorSets(
- VkCommandBuffer commandBuffer,
- VkPipelineBindPoint pipelineBindPoint,
- VkPipelineLayout layout,
- uint32_t firstSet,
- uint32_t setCount,
- uint32_t dynamicOffsetCount)
-{
+bool PostCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
+ uint32_t firstSet, uint32_t setCount, uint32_t dynamicOffsetCount) {
- if(pipelineBindPoint < VK_PIPELINE_BIND_POINT_BEGIN_RANGE ||
- pipelineBindPoint > VK_PIPELINE_BIND_POINT_END_RANGE)
- {
+ if (pipelineBindPoint < VK_PIPELINE_BIND_POINT_BEGIN_RANGE || pipelineBindPoint > VK_PIPELINE_BIND_POINT_END_RANGE) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdBindDescriptorSets parameter, VkPipelineBindPoint pipelineBindPoint, is an unrecognized enumerator");
+ "vkCmdBindDescriptorSets parameter, VkPipelineBindPoint pipelineBindPoint, is an unrecognized enumerator");
return false;
}
-
-
-
-
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets(
- VkCommandBuffer commandBuffer,
- VkPipelineBindPoint pipelineBindPoint,
- VkPipelineLayout layout,
- uint32_t firstSet,
- uint32_t descriptorSetCount,
- const VkDescriptorSet* pDescriptorSets,
- uint32_t dynamicOffsetCount,
- const uint32_t* pDynamicOffsets)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
+ uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet *pDescriptorSets,
+ uint32_t dynamicOffsetCount, const uint32_t *pDynamicOffsets) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdBindDescriptorSets(
- my_data->report_data,
- pipelineBindPoint,
- layout,
- firstSet,
- descriptorSetCount,
- pDescriptorSets,
- dynamicOffsetCount,
- pDynamicOffsets);
+ skipCall |= param_check_vkCmdBindDescriptorSets(my_data->report_data, pipelineBindPoint, layout, firstSet, descriptorSetCount,
+ pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, descriptorSetCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, descriptorSetCount, pDescriptorSets,
+ dynamicOffsetCount, pDynamicOffsets);
PostCmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, descriptorSetCount, dynamicOffsetCount);
}
}
-bool PostCmdBindIndexBuffer(
- VkCommandBuffer commandBuffer,
- VkBuffer buffer,
- VkDeviceSize offset,
- VkIndexType indexType)
-{
-
-
+bool PostCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
- if(indexType < VK_INDEX_TYPE_BEGIN_RANGE ||
- indexType > VK_INDEX_TYPE_END_RANGE)
- {
+ if (indexType < VK_INDEX_TYPE_BEGIN_RANGE || indexType > VK_INDEX_TYPE_END_RANGE) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdBindIndexBuffer parameter, VkIndexType indexType, is an unrecognized enumerator");
+ "vkCmdBindIndexBuffer parameter, VkIndexType indexType, is an unrecognized enumerator");
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer(
- VkCommandBuffer commandBuffer,
- VkBuffer buffer,
- VkDeviceSize offset,
- VkIndexType indexType)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
PostCmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(
- VkCommandBuffer commandBuffer,
- uint32_t firstBinding,
- uint32_t bindingCount,
- const VkBuffer* pBuffers,
- const VkDeviceSize* pOffsets)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
+ uint32_t bindingCount, const VkBuffer *pBuffers,
+ const VkDeviceSize *pOffsets) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdBindVertexBuffers(
- my_data->report_data,
- firstBinding,
- bindingCount,
- pBuffers,
- pOffsets);
+ skipCall |= param_check_vkCmdBindVertexBuffers(my_data->report_data, firstBinding, bindingCount, pBuffers, pOffsets);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}
}
-bool PreCmdDraw(
- VkCommandBuffer commandBuffer,
- uint32_t vertexCount,
- uint32_t instanceCount,
- uint32_t firstVertex,
- uint32_t firstInstance)
-{
+bool PreCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex,
+ uint32_t firstInstance) {
if (vertexCount == 0) {
// TODO: Verify against Valid Usage section. I don't see a non-zero vertexCount listed, may need to add that and make
// this an error or leave as is.
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdDraw parameter, uint32_t vertexCount, is 0");
+ "vkCmdDraw parameter, uint32_t vertexCount, is 0");
return false;
}
@@ -5655,867 +4272,553 @@ bool PreCmdDraw(
// TODO: Verify against Valid Usage section. I don't see a non-zero instanceCount listed, may need to add that and make
// this an error or leave as is.
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdDraw parameter, uint32_t instanceCount, is 0");
+ "vkCmdDraw parameter, uint32_t instanceCount, is 0");
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(
- VkCommandBuffer commandBuffer,
- uint32_t vertexCount,
- uint32_t instanceCount,
- uint32_t firstVertex,
- uint32_t firstInstance)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
+ uint32_t firstVertex, uint32_t firstInstance) {
PreCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(
- VkCommandBuffer commandBuffer,
- uint32_t indexCount,
- uint32_t instanceCount,
- uint32_t firstIndex,
- int32_t vertexOffset,
- uint32_t firstInstance)
-{
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
+ uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
+ uint32_t firstInstance) {
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect(
- VkCommandBuffer commandBuffer,
- VkBuffer buffer,
- VkDeviceSize offset,
- uint32_t count,
- uint32_t stride)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect(
- VkCommandBuffer commandBuffer,
- VkBuffer buffer,
- VkDeviceSize offset,
- uint32_t count,
- uint32_t stride)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(
- VkCommandBuffer commandBuffer,
- uint32_t x,
- uint32_t y,
- uint32_t z)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdDispatch(commandBuffer, x, y, z);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect(
- VkCommandBuffer commandBuffer,
- VkBuffer buffer,
- VkDeviceSize offset)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdDispatchIndirect(commandBuffer, buffer, offset);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(
- VkCommandBuffer commandBuffer,
- VkBuffer srcBuffer,
- VkBuffer dstBuffer,
- uint32_t regionCount,
- const VkBufferCopy* pRegions)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
+ uint32_t regionCount, const VkBufferCopy *pRegions) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdCopyBuffer(
- my_data->report_data,
- srcBuffer,
- dstBuffer,
- regionCount,
- pRegions);
+ skipCall |= param_check_vkCmdCopyBuffer(my_data->report_data, srcBuffer, dstBuffer, regionCount, pRegions);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}
}
-bool PreCmdCopyImage(
- VkCommandBuffer commandBuffer,
- const VkImageCopy* pRegions)
-{
- if(pRegions != nullptr)
- {
- if ((pRegions->srcSubresource.aspectMask &
- (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0)
- {
- log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdCopyImage parameter, VkImageAspect pRegions->srcSubresource.aspectMask, is an unrecognized enumerator");
- return false;
- }
- if ((pRegions->dstSubresource.aspectMask &
- (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0)
- {
- log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdCopyImage parameter, VkImageAspect pRegions->dstSubresource.aspectMask, is an unrecognized enumerator");
- return false;
- }
+bool PreCmdCopyImage(VkCommandBuffer commandBuffer, const VkImageCopy *pRegions) {
+ if (pRegions != nullptr) {
+ if ((pRegions->srcSubresource.aspectMask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0) {
+ log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCmdCopyImage parameter, VkImageAspect pRegions->srcSubresource.aspectMask, is an unrecognized enumerator");
+ return false;
+ }
+ if ((pRegions->dstSubresource.aspectMask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0) {
+ log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCmdCopyImage parameter, VkImageAspect pRegions->dstSubresource.aspectMask, is an unrecognized enumerator");
+ return false;
+ }
}
return true;
}
-bool PostCmdCopyImage(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount)
-{
- if (((srcImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (srcImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (srcImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
+bool PostCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount) {
+ if (((srcImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) || (srcImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (srcImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdCopyImage parameter, VkImageLayout srcImageLayout, is an unrecognized enumerator");
+ "vkCmdCopyImage parameter, VkImageLayout srcImageLayout, is an unrecognized enumerator");
return false;
}
-
- if (((dstImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (dstImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (dstImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
+ if (((dstImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) || (dstImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (dstImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdCopyImage parameter, VkImageLayout dstImageLayout, is an unrecognized enumerator");
+ "vkCmdCopyImage parameter, VkImageLayout dstImageLayout, is an unrecognized enumerator");
return false;
}
-
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount,
- const VkImageCopy* pRegions)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdCopyImage(
- my_data->report_data,
- srcImage,
- srcImageLayout,
- dstImage,
- dstImageLayout,
- regionCount,
- pRegions);
+ skipCall |=
+ param_check_vkCmdCopyImage(my_data->report_data, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
if (skipCall == VK_FALSE) {
PreCmdCopyImage(commandBuffer, pRegions);
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
PostCmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount);
}
}
-bool PreCmdBlitImage(
- VkCommandBuffer commandBuffer,
- const VkImageBlit* pRegions)
-{
- if(pRegions != nullptr)
- {
- if ((pRegions->srcSubresource.aspectMask &
- (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0)
- {
- log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdCopyImage parameter, VkImageAspect pRegions->srcSubresource.aspectMask, is an unrecognized enumerator");
- return false;
- }
- if ((pRegions->dstSubresource.aspectMask &
- (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0)
- {
- log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdCopyImage parameter, VkImageAspect pRegions->dstSubresource.aspectMask, is an unrecognized enumerator");
- return false;
- }
+bool PreCmdBlitImage(VkCommandBuffer commandBuffer, const VkImageBlit *pRegions) {
+ if (pRegions != nullptr) {
+ if ((pRegions->srcSubresource.aspectMask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0) {
+ log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+                    "vkCmdBlitImage parameter, VkImageAspect pRegions->srcSubresource.aspectMask, is an unrecognized enumerator");
+ return false;
+ }
+ if ((pRegions->dstSubresource.aspectMask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0) {
+ log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+                    "vkCmdBlitImage parameter, VkImageAspect pRegions->dstSubresource.aspectMask, is an unrecognized enumerator");
+ return false;
+ }
}
return true;
}
-bool PostCmdBlitImage(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount,
- VkFilter filter)
-{
+bool PostCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, VkFilter filter) {
-
- if (((srcImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (srcImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (srcImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
+ if (((srcImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) || (srcImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (srcImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdBlitImage parameter, VkImageLayout srcImageLayout, is an unrecognized enumerator");
+ "vkCmdBlitImage parameter, VkImageLayout srcImageLayout, is an unrecognized enumerator");
return false;
}
-
- if (((dstImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (dstImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (dstImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
+ if (((dstImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) || (dstImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (dstImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdBlitImage parameter, VkImageLayout dstImageLayout, is an unrecognized enumerator");
+ "vkCmdBlitImage parameter, VkImageLayout dstImageLayout, is an unrecognized enumerator");
return false;
}
-
- if(filter < VK_FILTER_BEGIN_RANGE ||
- filter > VK_FILTER_END_RANGE)
- {
+ if (filter < VK_FILTER_BEGIN_RANGE || filter > VK_FILTER_END_RANGE) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdBlitImage parameter, VkFilter filter, is an unrecognized enumerator");
+ "vkCmdBlitImage parameter, VkFilter filter, is an unrecognized enumerator");
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount,
- const VkImageBlit* pRegions,
- VkFilter filter)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdBlitImage(
- my_data->report_data,
- srcImage,
- srcImageLayout,
- dstImage,
- dstImageLayout,
- regionCount,
- pRegions,
- filter);
+ skipCall |= param_check_vkCmdBlitImage(my_data->report_data, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
+ pRegions, filter);
if (skipCall == VK_FALSE) {
PreCmdBlitImage(commandBuffer, pRegions);
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter);
PostCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, filter);
}
}
-bool PreCmdCopyBufferToImage(
- VkCommandBuffer commandBuffer,
- const VkBufferImageCopy* pRegions)
-{
- if(pRegions != nullptr)
- {
- if ((pRegions->imageSubresource.aspectMask &
- (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0)
- {
- log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdCopyBufferToImage parameter, VkImageAspect pRegions->imageSubresource.aspectMask, is an unrecognized enumerator");
- return false;
- }
+bool PreCmdCopyBufferToImage(VkCommandBuffer commandBuffer, const VkBufferImageCopy *pRegions) {
+ if (pRegions != nullptr) {
+ if ((pRegions->imageSubresource.aspectMask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0) {
+ log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCmdCopyBufferToImage parameter, VkImageAspect pRegions->imageSubresource.aspectMask, is an unrecognized "
+ "enumerator");
+ return false;
+ }
}
return true;
}
-bool PostCmdCopyBufferToImage(
- VkCommandBuffer commandBuffer,
- VkBuffer srcBuffer,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount)
-{
-
+bool PostCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout,
+ uint32_t regionCount) {
-
- if (((dstImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (dstImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (dstImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
+ if (((dstImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) || (dstImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (dstImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdCopyBufferToImage parameter, VkImageLayout dstImageLayout, is an unrecognized enumerator");
+ "vkCmdCopyBufferToImage parameter, VkImageLayout dstImageLayout, is an unrecognized enumerator");
return false;
}
-
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(
- VkCommandBuffer commandBuffer,
- VkBuffer srcBuffer,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount,
- const VkBufferImageCopy* pRegions)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
+ VkImage dstImage, VkImageLayout dstImageLayout,
+ uint32_t regionCount, const VkBufferImageCopy *pRegions) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdCopyBufferToImage(
- my_data->report_data,
- srcBuffer,
- dstImage,
- dstImageLayout,
- regionCount,
- pRegions);
+ skipCall |=
+ param_check_vkCmdCopyBufferToImage(my_data->report_data, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
if (skipCall == VK_FALSE) {
PreCmdCopyBufferToImage(commandBuffer, pRegions);
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
PostCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount);
}
}
-bool PreCmdCopyImageToBuffer(
- VkCommandBuffer commandBuffer,
- const VkBufferImageCopy* pRegions)
-{
- if(pRegions != nullptr)
- {
- if ((pRegions->imageSubresource.aspectMask &
- (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0)
- {
- log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdCopyImageToBuffer parameter, VkImageAspect pRegions->imageSubresource.aspectMask, is an unrecognized enumerator");
- return false;
- }
+bool PreCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, const VkBufferImageCopy *pRegions) {
+ if (pRegions != nullptr) {
+ if ((pRegions->imageSubresource.aspectMask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0) {
+ log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCmdCopyImageToBuffer parameter, VkImageAspect pRegions->imageSubresource.aspectMask, is an unrecognized "
+ "enumerator");
+ return false;
+ }
}
return true;
}
-bool PostCmdCopyImageToBuffer(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkBuffer dstBuffer,
- uint32_t regionCount)
-{
+bool PostCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer,
+ uint32_t regionCount) {
-
- if (((srcImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (srcImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (srcImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
+ if (((srcImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) || (srcImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (srcImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdCopyImageToBuffer parameter, VkImageLayout srcImageLayout, is an unrecognized enumerator");
+ "vkCmdCopyImageToBuffer parameter, VkImageLayout srcImageLayout, is an unrecognized enumerator");
return false;
}
-
-
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkBuffer dstBuffer,
- uint32_t regionCount,
- const VkBufferImageCopy* pRegions)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
+ VkImageLayout srcImageLayout, VkBuffer dstBuffer,
+ uint32_t regionCount, const VkBufferImageCopy *pRegions) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdCopyImageToBuffer(
- my_data->report_data,
- srcImage,
- srcImageLayout,
- dstBuffer,
- regionCount,
- pRegions);
+ skipCall |=
+ param_check_vkCmdCopyImageToBuffer(my_data->report_data, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
if (skipCall == VK_FALSE) {
PreCmdCopyImageToBuffer(commandBuffer, pRegions);
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
PostCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(
- VkCommandBuffer commandBuffer,
- VkBuffer dstBuffer,
- VkDeviceSize dstOffset,
- VkDeviceSize dataSize,
- const uint32_t* pData)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
+ VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdUpdateBuffer(
- my_data->report_data,
- dstBuffer,
- dstOffset,
- dataSize,
- pData);
+ skipCall |= param_check_vkCmdUpdateBuffer(my_data->report_data, dstBuffer, dstOffset, dataSize, pData);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer(
- VkCommandBuffer commandBuffer,
- VkBuffer dstBuffer,
- VkDeviceSize dstOffset,
- VkDeviceSize size,
- uint32_t data)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}
-bool PostCmdClearColorImage(
- VkCommandBuffer commandBuffer,
- VkImage image,
- VkImageLayout imageLayout,
- uint32_t rangeCount)
-{
-
+bool PostCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, uint32_t rangeCount) {
- if (((imageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (imageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (imageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
+ if (((imageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) || (imageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (imageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdClearColorImage parameter, VkImageLayout imageLayout, is an unrecognized enumerator");
+ "vkCmdClearColorImage parameter, VkImageLayout imageLayout, is an unrecognized enumerator");
return false;
}
-
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(
- VkCommandBuffer commandBuffer,
- VkImage image,
- VkImageLayout imageLayout,
- const VkClearColorValue* pColor,
- uint32_t rangeCount,
- const VkImageSubresourceRange* pRanges)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
+ VkImageLayout imageLayout, const VkClearColorValue *pColor,
+ uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdClearColorImage(
- my_data->report_data,
- image,
- imageLayout,
- pColor,
- rangeCount,
- pRanges);
+ skipCall |= param_check_vkCmdClearColorImage(my_data->report_data, image, imageLayout, pColor, rangeCount, pRanges);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
PostCmdClearColorImage(commandBuffer, image, imageLayout, rangeCount);
}
}
-bool PostCmdClearDepthStencilImage(
- VkCommandBuffer commandBuffer,
- VkImage image,
- VkImageLayout imageLayout,
- const VkClearDepthStencilValue* pDepthStencil,
- uint32_t rangeCount)
-{
-
+bool PostCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
+ const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount) {
- if (((imageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (imageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (imageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
+ if (((imageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) || (imageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (imageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdClearDepthStencilImage parameter, VkImageLayout imageLayout, is an unrecognized enumerator");
+ "vkCmdClearDepthStencilImage parameter, VkImageLayout imageLayout, is an unrecognized enumerator");
return false;
}
-
-
-
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage(
- VkCommandBuffer commandBuffer,
- VkImage image,
- VkImageLayout imageLayout,
- const VkClearDepthStencilValue* pDepthStencil,
- uint32_t rangeCount,
- const VkImageSubresourceRange* pRanges)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
+ const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
+ const VkImageSubresourceRange *pRanges) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdClearDepthStencilImage(
- my_data->report_data,
- image,
- imageLayout,
- pDepthStencil,
- rangeCount,
- pRanges);
+ skipCall |=
+ param_check_vkCmdClearDepthStencilImage(my_data->report_data, image, imageLayout, pDepthStencil, rangeCount, pRanges);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
PostCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(
- VkCommandBuffer commandBuffer,
- uint32_t attachmentCount,
- const VkClearAttachment* pAttachments,
- uint32_t rectCount,
- const VkClearRect* pRects)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
+ const VkClearAttachment *pAttachments, uint32_t rectCount,
+ const VkClearRect *pRects) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdClearAttachments(
- my_data->report_data,
- attachmentCount,
- pAttachments,
- rectCount,
- pRects);
+ skipCall |= param_check_vkCmdClearAttachments(my_data->report_data, attachmentCount, pAttachments, rectCount, pRects);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}
}
-bool PreCmdResolveImage(
- VkCommandBuffer commandBuffer,
- const VkImageResolve* pRegions)
-{
- if(pRegions != nullptr)
- {
- if ((pRegions->srcSubresource.aspectMask &
- (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0)
- {
- log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdResolveImage parameter, VkImageAspect pRegions->srcSubresource.aspectMask, is an unrecognized enumerator");
- return false;
- }
- if ((pRegions->dstSubresource.aspectMask &
- (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0)
- {
- log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdResolveImage parameter, VkImageAspect pRegions->dstSubresource.aspectMask, is an unrecognized enumerator");
- return false;
- }
+bool PreCmdResolveImage(VkCommandBuffer commandBuffer, const VkImageResolve *pRegions) {
+ if (pRegions != nullptr) {
+ if ((pRegions->srcSubresource.aspectMask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0) {
+ log_msg(
+ mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCmdResolveImage parameter, VkImageAspect pRegions->srcSubresource.aspectMask, is an unrecognized enumerator");
+ return false;
+ }
+ if ((pRegions->dstSubresource.aspectMask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT |
+ VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT)) == 0) {
+ log_msg(
+ mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "vkCmdResolveImage parameter, VkImageAspect pRegions->dstSubresource.aspectMask, is an unrecognized enumerator");
+ return false;
+ }
}
return true;
}
-bool PostCmdResolveImage(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount)
-{
-
+bool PostCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount) {
- if (((srcImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (srcImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (srcImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
+ if (((srcImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) || (srcImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (srcImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdResolveImage parameter, VkImageLayout srcImageLayout, is an unrecognized enumerator");
+ "vkCmdResolveImage parameter, VkImageLayout srcImageLayout, is an unrecognized enumerator");
return false;
}
-
- if (((dstImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) ||
- (dstImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
- (dstImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR))
- {
+ if (((dstImageLayout < VK_IMAGE_LAYOUT_BEGIN_RANGE) || (dstImageLayout > VK_IMAGE_LAYOUT_END_RANGE)) &&
+ (dstImageLayout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdResolveImage parameter, VkImageLayout dstImageLayout, is an unrecognized enumerator");
+ "vkCmdResolveImage parameter, VkImageLayout dstImageLayout, is an unrecognized enumerator");
return false;
}
-
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage(
- VkCommandBuffer commandBuffer,
- VkImage srcImage,
- VkImageLayout srcImageLayout,
- VkImage dstImage,
- VkImageLayout dstImageLayout,
- uint32_t regionCount,
- const VkImageResolve* pRegions)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdResolveImage(
- my_data->report_data,
- srcImage,
- srcImageLayout,
- dstImage,
- dstImageLayout,
- regionCount,
- pRegions);
+ skipCall |= param_check_vkCmdResolveImage(my_data->report_data, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
+ pRegions);
if (skipCall == VK_FALSE) {
PreCmdResolveImage(commandBuffer, pRegions);
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
PostCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent(
- VkCommandBuffer commandBuffer,
- VkEvent event,
- VkPipelineStageFlags stageMask)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdSetEvent(commandBuffer, event, stageMask);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent(
- VkCommandBuffer commandBuffer,
- VkEvent event,
- VkPipelineStageFlags stageMask)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdResetEvent(commandBuffer, event, stageMask);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents(
- VkCommandBuffer commandBuffer,
- uint32_t eventCount,
- const VkEvent *pEvents,
- VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags dstStageMask,
- uint32_t memoryBarrierCount,
- const VkMemoryBarrier *pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount,
- const VkBufferMemoryBarrier *pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount,
- const VkImageMemoryBarrier *pImageMemoryBarriers)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdWaitEvents(
- my_data->report_data,
- eventCount,
- pEvents,
- srcStageMask,
- dstStageMask,
- memoryBarrierCount,
- pMemoryBarriers,
- bufferMemoryBarrierCount,
- pBufferMemoryBarriers,
- imageMemoryBarrierCount,
- pImageMemoryBarriers);
+ skipCall |= param_check_vkCmdWaitEvents(my_data->report_data, eventCount, pEvents, srcStageMask, dstStageMask,
+ memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
+ imageMemoryBarrierCount, pImageMemoryBarriers);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
- }
-}
-
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier(
- VkCommandBuffer commandBuffer,
- VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags dstStageMask,
- VkDependencyFlags dependencyFlags,
- uint32_t memoryBarrierCount,
- const VkMemoryBarrier *pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount,
- const VkBufferMemoryBarrier *pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount,
- const VkImageMemoryBarrier *pImageMemoryBarriers)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
+ bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
+ }
+}
+
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
+ VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdPipelineBarrier(
- my_data->report_data,
- srcStageMask,
- dstStageMask,
- dependencyFlags,
- memoryBarrierCount,
- pMemoryBarriers,
- bufferMemoryBarrierCount,
- pBufferMemoryBarriers,
- imageMemoryBarrierCount,
- pImageMemoryBarriers);
+ skipCall |= param_check_vkCmdPipelineBarrier(my_data->report_data, srcStageMask, dstStageMask, dependencyFlags,
+ memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
+ pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers,
+ bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery(
- VkCommandBuffer commandBuffer,
- VkQueryPool queryPool,
- uint32_t slot,
- VkQueryControlFlags flags)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkQueryControlFlags flags) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(
- VkCommandBuffer commandBuffer,
- VkQueryPool queryPool,
- uint32_t slot)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdEndQuery(commandBuffer, queryPool, slot);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool(
- VkCommandBuffer commandBuffer,
- VkQueryPool queryPool,
- uint32_t firstQuery,
- uint32_t queryCount)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
}
-bool PostCmdWriteTimestamp(
- VkCommandBuffer commandBuffer,
- VkPipelineStageFlagBits pipelineStage,
- VkQueryPool queryPool,
- uint32_t slot)
-{
+bool PostCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool,
+ uint32_t slot) {
ValidateEnumerator(pipelineStage);
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp(
- VkCommandBuffer commandBuffer,
- VkPipelineStageFlagBits pipelineStage,
- VkQueryPool queryPool,
- uint32_t slot)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
PostCmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults(
- VkCommandBuffer commandBuffer,
- VkQueryPool queryPool,
- uint32_t firstQuery,
- uint32_t queryCount,
- VkBuffer dstBuffer,
- VkDeviceSize dstOffset,
- VkDeviceSize stride,
- VkQueryResultFlags flags)
-{
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride, flags);
-}
-
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(
- VkCommandBuffer commandBuffer,
- VkPipelineLayout layout,
- VkShaderStageFlags stageFlags,
- uint32_t offset,
- uint32_t size,
- const void* pValues)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
+ VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride, flags);
+}
+
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
+ VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
+ const void *pValues) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdPushConstants(
- my_data->report_data,
- layout,
- stageFlags,
- offset,
- size,
- pValues);
+ skipCall |= param_check_vkCmdPushConstants(my_data->report_data, layout, stageFlags, offset, size, pValues);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}
}
-bool PostCmdBeginRenderPass(
- VkCommandBuffer commandBuffer,
- VkSubpassContents contents)
-{
+bool PostCmdBeginRenderPass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
- if(contents < VK_SUBPASS_CONTENTS_BEGIN_RANGE ||
- contents > VK_SUBPASS_CONTENTS_END_RANGE)
- {
+ if (contents < VK_SUBPASS_CONTENTS_BEGIN_RANGE || contents > VK_SUBPASS_CONTENTS_END_RANGE) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdBeginRenderPass parameter, VkSubpassContents contents, is an unrecognized enumerator");
+ "vkCmdBeginRenderPass parameter, VkSubpassContents contents, is an unrecognized enumerator");
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(
- VkCommandBuffer commandBuffer,
- const VkRenderPassBeginInfo* pRenderPassBegin,
- VkSubpassContents contents)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdBeginRenderPass(
- my_data->report_data,
- pRenderPassBegin,
- contents);
+ skipCall |= param_check_vkCmdBeginRenderPass(my_data->report_data, pRenderPassBegin, contents);
if (skipCall == VK_FALSE) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
@@ -6524,58 +4827,42 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass(
}
}
-bool PostCmdNextSubpass(
- VkCommandBuffer commandBuffer,
- VkSubpassContents contents)
-{
+bool PostCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
- if(contents < VK_SUBPASS_CONTENTS_BEGIN_RANGE ||
- contents > VK_SUBPASS_CONTENTS_END_RANGE)
- {
+ if (contents < VK_SUBPASS_CONTENTS_BEGIN_RANGE || contents > VK_SUBPASS_CONTENTS_END_RANGE) {
log_msg(mdd(commandBuffer), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
- "vkCmdNextSubpass parameter, VkSubpassContents contents, is an unrecognized enumerator");
+ "vkCmdNextSubpass parameter, VkSubpassContents contents, is an unrecognized enumerator");
return false;
}
return true;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(
- VkCommandBuffer commandBuffer,
- VkSubpassContents contents)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdNextSubpass(commandBuffer, contents);
PostCmdNextSubpass(commandBuffer, contents);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(
- VkCommandBuffer commandBuffer)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
get_dispatch_table(pc_device_table_map, commandBuffer)->CmdEndRenderPass(commandBuffer);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands(
- VkCommandBuffer commandBuffer,
- uint32_t commandBufferCount,
- const VkCommandBuffer* pCommandBuffers)
-{
- VkBool32 skipCall = VK_FALSE;
- layer_data* my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
+ VkBool32 skipCall = VK_FALSE;
+ layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
assert(my_data != NULL);
- skipCall |= param_check_vkCmdExecuteCommands(
- my_data->report_data,
- commandBufferCount,
- pCommandBuffers);
+ skipCall |= param_check_vkCmdExecuteCommands(my_data->report_data, commandBufferCount, pCommandBuffers);
if (skipCall == VK_FALSE) {
- get_dispatch_table(pc_device_table_map, commandBuffer)->CmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
+ get_dispatch_table(pc_device_table_map, commandBuffer)
+ ->CmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char* funcName)
-{
+VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char *funcName) {
layer_data *data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
if (validate_string(data, "vkGetDeviceProcAddr()", "funcName", funcName) == VK_TRUE) {
@@ -6583,77 +4870,77 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkD
}
if (!strcmp(funcName, "vkGetDeviceProcAddr"))
- return (PFN_vkVoidFunction) vkGetDeviceProcAddr;
+ return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
if (!strcmp(funcName, "vkDestroyDevice"))
- return (PFN_vkVoidFunction) vkDestroyDevice;
+ return (PFN_vkVoidFunction)vkDestroyDevice;
if (!strcmp(funcName, "vkGetDeviceQueue"))
- return (PFN_vkVoidFunction) vkGetDeviceQueue;
+ return (PFN_vkVoidFunction)vkGetDeviceQueue;
if (!strcmp(funcName, "vkQueueSubmit"))
- return (PFN_vkVoidFunction) vkQueueSubmit;
+ return (PFN_vkVoidFunction)vkQueueSubmit;
if (!strcmp(funcName, "vkQueueWaitIdle"))
- return (PFN_vkVoidFunction) vkQueueWaitIdle;
+ return (PFN_vkVoidFunction)vkQueueWaitIdle;
if (!strcmp(funcName, "vkDeviceWaitIdle"))
- return (PFN_vkVoidFunction) vkDeviceWaitIdle;
+ return (PFN_vkVoidFunction)vkDeviceWaitIdle;
if (!strcmp(funcName, "vkAllocateMemory"))
- return (PFN_vkVoidFunction) vkAllocateMemory;
+ return (PFN_vkVoidFunction)vkAllocateMemory;
if (!strcmp(funcName, "vkFreeMemory"))
return (PFN_vkVoidFunction)vkFreeMemory;
if (!strcmp(funcName, "vkMapMemory"))
- return (PFN_vkVoidFunction) vkMapMemory;
+ return (PFN_vkVoidFunction)vkMapMemory;
if (!strcmp(funcName, "vkFlushMappedMemoryRanges"))
- return (PFN_vkVoidFunction) vkFlushMappedMemoryRanges;
+ return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges;
if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges"))
- return (PFN_vkVoidFunction) vkInvalidateMappedMemoryRanges;
+ return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges;
if (!strcmp(funcName, "vkCreateFence"))
- return (PFN_vkVoidFunction) vkCreateFence;
+ return (PFN_vkVoidFunction)vkCreateFence;
if (!strcmp(funcName, "vkDestroyFence"))
return (PFN_vkVoidFunction)vkDestroyFence;
if (!strcmp(funcName, "vkResetFences"))
- return (PFN_vkVoidFunction) vkResetFences;
+ return (PFN_vkVoidFunction)vkResetFences;
if (!strcmp(funcName, "vkGetFenceStatus"))
- return (PFN_vkVoidFunction) vkGetFenceStatus;
+ return (PFN_vkVoidFunction)vkGetFenceStatus;
if (!strcmp(funcName, "vkWaitForFences"))
- return (PFN_vkVoidFunction) vkWaitForFences;
+ return (PFN_vkVoidFunction)vkWaitForFences;
if (!strcmp(funcName, "vkCreateSemaphore"))
- return (PFN_vkVoidFunction) vkCreateSemaphore;
+ return (PFN_vkVoidFunction)vkCreateSemaphore;
if (!strcmp(funcName, "vkDestroySemaphore"))
return (PFN_vkVoidFunction)vkDestroySemaphore;
if (!strcmp(funcName, "vkCreateEvent"))
- return (PFN_vkVoidFunction) vkCreateEvent;
+ return (PFN_vkVoidFunction)vkCreateEvent;
if (!strcmp(funcName, "vkDestroyEvent"))
return (PFN_vkVoidFunction)vkDestroyEvent;
if (!strcmp(funcName, "vkGetEventStatus"))
- return (PFN_vkVoidFunction) vkGetEventStatus;
+ return (PFN_vkVoidFunction)vkGetEventStatus;
if (!strcmp(funcName, "vkSetEvent"))
- return (PFN_vkVoidFunction) vkSetEvent;
+ return (PFN_vkVoidFunction)vkSetEvent;
if (!strcmp(funcName, "vkResetEvent"))
- return (PFN_vkVoidFunction) vkResetEvent;
+ return (PFN_vkVoidFunction)vkResetEvent;
if (!strcmp(funcName, "vkCreateQueryPool"))
- return (PFN_vkVoidFunction) vkCreateQueryPool;
+ return (PFN_vkVoidFunction)vkCreateQueryPool;
if (!strcmp(funcName, "vkDestroyQueryPool"))
return (PFN_vkVoidFunction)vkDestroyQueryPool;
if (!strcmp(funcName, "vkGetQueryPoolResults"))
- return (PFN_vkVoidFunction) vkGetQueryPoolResults;
+ return (PFN_vkVoidFunction)vkGetQueryPoolResults;
if (!strcmp(funcName, "vkCreateBuffer"))
- return (PFN_vkVoidFunction) vkCreateBuffer;
+ return (PFN_vkVoidFunction)vkCreateBuffer;
if (!strcmp(funcName, "vkDestroyBuffer"))
return (PFN_vkVoidFunction)vkDestroyBuffer;
if (!strcmp(funcName, "vkCreateBufferView"))
- return (PFN_vkVoidFunction) vkCreateBufferView;
+ return (PFN_vkVoidFunction)vkCreateBufferView;
if (!strcmp(funcName, "vkDestroyBufferView"))
return (PFN_vkVoidFunction)vkDestroyBufferView;
if (!strcmp(funcName, "vkCreateImage"))
- return (PFN_vkVoidFunction) vkCreateImage;
+ return (PFN_vkVoidFunction)vkCreateImage;
if (!strcmp(funcName, "vkDestroyImage"))
return (PFN_vkVoidFunction)vkDestroyImage;
if (!strcmp(funcName, "vkGetImageSubresourceLayout"))
- return (PFN_vkVoidFunction) vkGetImageSubresourceLayout;
+ return (PFN_vkVoidFunction)vkGetImageSubresourceLayout;
if (!strcmp(funcName, "vkCreateImageView"))
- return (PFN_vkVoidFunction) vkCreateImageView;
+ return (PFN_vkVoidFunction)vkCreateImageView;
if (!strcmp(funcName, "vkDestroyImageView"))
return (PFN_vkVoidFunction)vkDestroyImageView;
if (!strcmp(funcName, "vkCreateShaderModule"))
- return (PFN_vkVoidFunction) vkCreateShaderModule;
+ return (PFN_vkVoidFunction)vkCreateShaderModule;
if (!strcmp(funcName, "vkDestroyShaderModule"))
return (PFN_vkVoidFunction)vkDestroyShaderModule;
if (!strcmp(funcName, "vkCreatePipelineCache"))
@@ -6665,121 +4952,121 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkD
if (!strcmp(funcName, "vkMergePipelineCaches"))
return (PFN_vkVoidFunction)vkMergePipelineCaches;
if (!strcmp(funcName, "vkCreateGraphicsPipelines"))
- return (PFN_vkVoidFunction) vkCreateGraphicsPipelines;
+ return (PFN_vkVoidFunction)vkCreateGraphicsPipelines;
if (!strcmp(funcName, "vkCreateComputePipelines"))
- return (PFN_vkVoidFunction) vkCreateComputePipelines;
+ return (PFN_vkVoidFunction)vkCreateComputePipelines;
if (!strcmp(funcName, "vkDestroyPipeline"))
return (PFN_vkVoidFunction)vkDestroyPipeline;
if (!strcmp(funcName, "vkCreatePipelineLayout"))
- return (PFN_vkVoidFunction) vkCreatePipelineLayout;
+ return (PFN_vkVoidFunction)vkCreatePipelineLayout;
if (!strcmp(funcName, "vkDestroyPipelineLayout"))
return (PFN_vkVoidFunction)vkDestroyPipelineLayout;
if (!strcmp(funcName, "vkCreateSampler"))
- return (PFN_vkVoidFunction) vkCreateSampler;
+ return (PFN_vkVoidFunction)vkCreateSampler;
if (!strcmp(funcName, "vkDestroySampler"))
return (PFN_vkVoidFunction)vkDestroySampler;
if (!strcmp(funcName, "vkCreateDescriptorSetLayout"))
- return (PFN_vkVoidFunction) vkCreateDescriptorSetLayout;
+ return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout;
if (!strcmp(funcName, "vkDestroyDescriptorSetLayout"))
return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout;
if (!strcmp(funcName, "vkCreateDescriptorPool"))
- return (PFN_vkVoidFunction) vkCreateDescriptorPool;
+ return (PFN_vkVoidFunction)vkCreateDescriptorPool;
if (!strcmp(funcName, "vkDestroyDescriptorPool"))
return (PFN_vkVoidFunction)vkDestroyDescriptorPool;
if (!strcmp(funcName, "vkResetDescriptorPool"))
- return (PFN_vkVoidFunction) vkResetDescriptorPool;
+ return (PFN_vkVoidFunction)vkResetDescriptorPool;
if (!strcmp(funcName, "vkAllocateDescriptorSets"))
- return (PFN_vkVoidFunction) vkAllocateDescriptorSets;
+ return (PFN_vkVoidFunction)vkAllocateDescriptorSets;
if (!strcmp(funcName, "vkCmdSetViewport"))
- return (PFN_vkVoidFunction) vkCmdSetViewport;
+ return (PFN_vkVoidFunction)vkCmdSetViewport;
if (!strcmp(funcName, "vkCmdSetScissor"))
- return (PFN_vkVoidFunction) vkCmdSetScissor;
+ return (PFN_vkVoidFunction)vkCmdSetScissor;
if (!strcmp(funcName, "vkCmdSetLineWidth"))
- return (PFN_vkVoidFunction) vkCmdSetLineWidth;
+ return (PFN_vkVoidFunction)vkCmdSetLineWidth;
if (!strcmp(funcName, "vkCmdSetDepthBias"))
- return (PFN_vkVoidFunction) vkCmdSetDepthBias;
+ return (PFN_vkVoidFunction)vkCmdSetDepthBias;
if (!strcmp(funcName, "vkCmdSetBlendConstants"))
- return (PFN_vkVoidFunction) vkCmdSetBlendConstants;
+ return (PFN_vkVoidFunction)vkCmdSetBlendConstants;
if (!strcmp(funcName, "vkCmdSetDepthBounds"))
- return (PFN_vkVoidFunction) vkCmdSetDepthBounds;
+ return (PFN_vkVoidFunction)vkCmdSetDepthBounds;
if (!strcmp(funcName, "vkCmdSetStencilCompareMask"))
- return (PFN_vkVoidFunction) vkCmdSetStencilCompareMask;
+ return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask;
if (!strcmp(funcName, "vkCmdSetStencilWriteMask"))
- return (PFN_vkVoidFunction) vkCmdSetStencilWriteMask;
+ return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask;
if (!strcmp(funcName, "vkCmdSetStencilReference"))
- return (PFN_vkVoidFunction) vkCmdSetStencilReference;
+ return (PFN_vkVoidFunction)vkCmdSetStencilReference;
if (!strcmp(funcName, "vkAllocateCommandBuffers"))
- return (PFN_vkVoidFunction) vkAllocateCommandBuffers;
+ return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
if (!strcmp(funcName, "vkFreeCommandBuffers"))
return (PFN_vkVoidFunction)vkFreeCommandBuffers;
if (!strcmp(funcName, "vkBeginCommandBuffer"))
- return (PFN_vkVoidFunction) vkBeginCommandBuffer;
+ return (PFN_vkVoidFunction)vkBeginCommandBuffer;
if (!strcmp(funcName, "vkEndCommandBuffer"))
- return (PFN_vkVoidFunction) vkEndCommandBuffer;
+ return (PFN_vkVoidFunction)vkEndCommandBuffer;
if (!strcmp(funcName, "vkResetCommandBuffer"))
- return (PFN_vkVoidFunction) vkResetCommandBuffer;
+ return (PFN_vkVoidFunction)vkResetCommandBuffer;
if (!strcmp(funcName, "vkCmdBindPipeline"))
- return (PFN_vkVoidFunction) vkCmdBindPipeline;
+ return (PFN_vkVoidFunction)vkCmdBindPipeline;
if (!strcmp(funcName, "vkCmdBindDescriptorSets"))
- return (PFN_vkVoidFunction) vkCmdBindDescriptorSets;
+ return (PFN_vkVoidFunction)vkCmdBindDescriptorSets;
if (!strcmp(funcName, "vkCmdBindVertexBuffers"))
- return (PFN_vkVoidFunction) vkCmdBindVertexBuffers;
+ return (PFN_vkVoidFunction)vkCmdBindVertexBuffers;
if (!strcmp(funcName, "vkCmdBindIndexBuffer"))
- return (PFN_vkVoidFunction) vkCmdBindIndexBuffer;
+ return (PFN_vkVoidFunction)vkCmdBindIndexBuffer;
if (!strcmp(funcName, "vkCmdDraw"))
- return (PFN_vkVoidFunction) vkCmdDraw;
+ return (PFN_vkVoidFunction)vkCmdDraw;
if (!strcmp(funcName, "vkCmdDrawIndexed"))
- return (PFN_vkVoidFunction) vkCmdDrawIndexed;
+ return (PFN_vkVoidFunction)vkCmdDrawIndexed;
if (!strcmp(funcName, "vkCmdDrawIndirect"))
- return (PFN_vkVoidFunction) vkCmdDrawIndirect;
+ return (PFN_vkVoidFunction)vkCmdDrawIndirect;
if (!strcmp(funcName, "vkCmdDrawIndexedIndirect"))
- return (PFN_vkVoidFunction) vkCmdDrawIndexedIndirect;
+ return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect;
if (!strcmp(funcName, "vkCmdDispatch"))
- return (PFN_vkVoidFunction) vkCmdDispatch;
+ return (PFN_vkVoidFunction)vkCmdDispatch;
if (!strcmp(funcName, "vkCmdDispatchIndirect"))
- return (PFN_vkVoidFunction) vkCmdDispatchIndirect;
+ return (PFN_vkVoidFunction)vkCmdDispatchIndirect;
if (!strcmp(funcName, "vkCmdCopyBuffer"))
- return (PFN_vkVoidFunction) vkCmdCopyBuffer;
+ return (PFN_vkVoidFunction)vkCmdCopyBuffer;
if (!strcmp(funcName, "vkCmdCopyImage"))
- return (PFN_vkVoidFunction) vkCmdCopyImage;
+ return (PFN_vkVoidFunction)vkCmdCopyImage;
if (!strcmp(funcName, "vkCmdBlitImage"))
- return (PFN_vkVoidFunction) vkCmdBlitImage;
+ return (PFN_vkVoidFunction)vkCmdBlitImage;
if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
- return (PFN_vkVoidFunction) vkCmdCopyBufferToImage;
+ return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
- return (PFN_vkVoidFunction) vkCmdCopyImageToBuffer;
+ return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
if (!strcmp(funcName, "vkCmdUpdateBuffer"))
- return (PFN_vkVoidFunction) vkCmdUpdateBuffer;
+ return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
if (!strcmp(funcName, "vkCmdFillBuffer"))
- return (PFN_vkVoidFunction) vkCmdFillBuffer;
+ return (PFN_vkVoidFunction)vkCmdFillBuffer;
if (!strcmp(funcName, "vkCmdClearColorImage"))
- return (PFN_vkVoidFunction) vkCmdClearColorImage;
+ return (PFN_vkVoidFunction)vkCmdClearColorImage;
if (!strcmp(funcName, "vkCmdResolveImage"))
- return (PFN_vkVoidFunction) vkCmdResolveImage;
+ return (PFN_vkVoidFunction)vkCmdResolveImage;
if (!strcmp(funcName, "vkCmdSetEvent"))
- return (PFN_vkVoidFunction) vkCmdSetEvent;
+ return (PFN_vkVoidFunction)vkCmdSetEvent;
if (!strcmp(funcName, "vkCmdResetEvent"))
- return (PFN_vkVoidFunction) vkCmdResetEvent;
+ return (PFN_vkVoidFunction)vkCmdResetEvent;
if (!strcmp(funcName, "vkCmdWaitEvents"))
- return (PFN_vkVoidFunction) vkCmdWaitEvents;
+ return (PFN_vkVoidFunction)vkCmdWaitEvents;
if (!strcmp(funcName, "vkCmdPipelineBarrier"))
- return (PFN_vkVoidFunction) vkCmdPipelineBarrier;
+ return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
if (!strcmp(funcName, "vkCmdBeginQuery"))
- return (PFN_vkVoidFunction) vkCmdBeginQuery;
+ return (PFN_vkVoidFunction)vkCmdBeginQuery;
if (!strcmp(funcName, "vkCmdEndQuery"))
- return (PFN_vkVoidFunction) vkCmdEndQuery;
+ return (PFN_vkVoidFunction)vkCmdEndQuery;
if (!strcmp(funcName, "vkCmdResetQueryPool"))
- return (PFN_vkVoidFunction) vkCmdResetQueryPool;
+ return (PFN_vkVoidFunction)vkCmdResetQueryPool;
if (!strcmp(funcName, "vkCmdWriteTimestamp"))
- return (PFN_vkVoidFunction) vkCmdWriteTimestamp;
+ return (PFN_vkVoidFunction)vkCmdWriteTimestamp;
if (!strcmp(funcName, "vkCmdCopyQueryPoolResults"))
- return (PFN_vkVoidFunction) vkCmdCopyQueryPoolResults;
+ return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults;
if (!strcmp(funcName, "vkCreateFramebuffer"))
- return (PFN_vkVoidFunction) vkCreateFramebuffer;
+ return (PFN_vkVoidFunction)vkCreateFramebuffer;
if (!strcmp(funcName, "vkDestroyFramebuffer"))
return (PFN_vkVoidFunction)vkDestroyFramebuffer;
if (!strcmp(funcName, "vkCreateRenderPass"))
- return (PFN_vkVoidFunction) vkCreateRenderPass;
+ return (PFN_vkVoidFunction)vkCreateRenderPass;
if (!strcmp(funcName, "vkDestroyRenderPass"))
return (PFN_vkVoidFunction)vkDestroyRenderPass;
if (!strcmp(funcName, "vkGetRenderAreaGranularity"))
@@ -6789,9 +5076,9 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkD
if (!strcmp(funcName, "vkDestroyCommandPool"))
return (PFN_vkVoidFunction)vkDestroyCommandPool;
if (!strcmp(funcName, "vkCmdBeginRenderPass"))
- return (PFN_vkVoidFunction) vkCmdBeginRenderPass;
+ return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
if (!strcmp(funcName, "vkCmdNextSubpass"))
- return (PFN_vkVoidFunction) vkCmdNextSubpass;
+ return (PFN_vkVoidFunction)vkCmdNextSubpass;
if (device == NULL) {
return NULL;
@@ -6802,32 +5089,31 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkD
return get_dispatch_table(pc_device_table_map, device)->GetDeviceProcAddr(device, funcName);
}
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char* funcName)
-{
+VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
if (!strcmp(funcName, "vkGetInstanceProcAddr"))
- return (PFN_vkVoidFunction) vkGetInstanceProcAddr;
+ return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
if (!strcmp(funcName, "vkCreateInstance"))
- return (PFN_vkVoidFunction) vkCreateInstance;
+ return (PFN_vkVoidFunction)vkCreateInstance;
if (!strcmp(funcName, "vkDestroyInstance"))
- return (PFN_vkVoidFunction) vkDestroyInstance;
+ return (PFN_vkVoidFunction)vkDestroyInstance;
if (!strcmp(funcName, "vkCreateDevice"))
- return (PFN_vkVoidFunction) vkCreateDevice;
+ return (PFN_vkVoidFunction)vkCreateDevice;
if (!strcmp(funcName, "vkEnumeratePhysicalDevices"))
- return (PFN_vkVoidFunction) vkEnumeratePhysicalDevices;
+ return (PFN_vkVoidFunction)vkEnumeratePhysicalDevices;
if (!strcmp(funcName, "vkGetPhysicalDeviceProperties"))
- return (PFN_vkVoidFunction) vkGetPhysicalDeviceProperties;
+ return (PFN_vkVoidFunction)vkGetPhysicalDeviceProperties;
if (!strcmp(funcName, "vkGetPhysicalDeviceFeatures"))
- return (PFN_vkVoidFunction) vkGetPhysicalDeviceFeatures;
+ return (PFN_vkVoidFunction)vkGetPhysicalDeviceFeatures;
if (!strcmp(funcName, "vkGetPhysicalDeviceFormatProperties"))
- return (PFN_vkVoidFunction) vkGetPhysicalDeviceFormatProperties;
+ return (PFN_vkVoidFunction)vkGetPhysicalDeviceFormatProperties;
if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
- return (PFN_vkVoidFunction) vkEnumerateInstanceLayerProperties;
+ return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
- return (PFN_vkVoidFunction) vkEnumerateInstanceExtensionProperties;
+ return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
- return (PFN_vkVoidFunction) vkEnumerateDeviceLayerProperties;
+ return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
- return (PFN_vkVoidFunction) vkEnumerateDeviceExtensionProperties;
+ return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;
if (instance == NULL) {
return NULL;
@@ -6836,7 +5122,7 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(V
layer_data *data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
PFN_vkVoidFunction fptr = debug_report_get_instance_proc_addr(data->report_data, funcName);
- if(fptr)
+ if (fptr)
return fptr;
if (get_dispatch_table(pc_instance_table_map, instance)->GetInstanceProcAddr == NULL)
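The long strcmp chains above implement the layer's GetProcAddr contract: names that the param_checker layer intercepts resolve to the layer's own wrappers, and anything else falls through to the next dispatch table in the chain. As a rough usage sketch (hypothetical, not part of this patch; it assumes device and commandBuffer are valid handles obtained elsewhere), a caller resolves and invokes one of these intercepted device-level entry points like so:

    /* Illustrative only. vkGetDeviceProcAddr here is the layer's version shown
     * above, so the pointer returned for an intercepted name is the layer's
     * wrapper; unrecognized names come from the next layer or the driver. */
    PFN_vkCmdDraw pfnCmdDraw = (PFN_vkCmdDraw)vkGetDeviceProcAddr(device, "vkCmdDraw");
    if (pfnCmdDraw) {
        pfnCmdDraw(commandBuffer, 3, 1, 0, 0); /* 3 vertices, 1 instance */
    }
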
diff --git a/layers/param_checker_utils.h b/layers/param_checker_utils.h
index d6058c82b..e62fada7b 100644
--- a/layers/param_checker_utils.h
+++ b/layers/param_checker_utils.h
@@ -46,22 +46,14 @@
* @param value Pointer to validate.
* @return Boolean value indicating that the call should be skipped.
*/
-static
-VkBool32 validate_required_pointer(
- debug_report_data* report_data,
- const char* apiName,
- const char* parameterName,
- const void* value)
-{
+static VkBool32 validate_required_pointer(debug_report_data *report_data, const char *apiName, const char *parameterName,
+ const void *value) {
VkBool32 skipCall = VK_FALSE;
if (value == NULL) {
- skipCall |= log_msg(
- report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
- "PARAMCHECK", "%s: required parameter %s specified as NULL",
- apiName, parameterName);
- }
+ skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "%s: required parameter %s specified as NULL", apiName, parameterName);
+ }
return skipCall;
}
@@ -87,31 +79,18 @@ VkBool32 validate_required_pointer(
* @return Boolean value indicating that the call should be skipped.
*/
template <typename T>
-VkBool32 validate_array(
- debug_report_data* report_data,
- const char* apiName,
- const char* countName,
- const char* arrayName,
- const T* count,
- const void* array,
- VkBool32 countPtrRequired,
- VkBool32 countValueRequired,
- VkBool32 arrayRequired)
-{
+VkBool32 validate_array(debug_report_data *report_data, const char *apiName, const char *countName, const char *arrayName,
+ const T *count, const void *array, VkBool32 countPtrRequired, VkBool32 countValueRequired,
+ VkBool32 arrayRequired) {
VkBool32 skipCall = VK_FALSE;
if (count == NULL) {
if (countPtrRequired == VK_TRUE) {
- skipCall |= log_msg(
- report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
- "PARAMCHECK", "%s: required parameter %s specified as NULL",
- apiName, countName);
+ skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
+ "PARAMCHECK", "%s: required parameter %s specified as NULL", apiName, countName);
}
} else {
- skipCall |= validate_array(
- report_data, apiName, countName, arrayName, (*count), array,
- countValueRequired, arrayRequired);
+ skipCall |= validate_array(report_data, apiName, countName, arrayName, (*count), array, countValueRequired, arrayRequired);
}
return skipCall;
@@ -135,35 +114,21 @@ VkBool32 validate_array(
* @return Boolean value indicating that the call should be skipped.
*/
template <typename T>
-VkBool32 validate_array(
- debug_report_data* report_data,
- const char* apiName,
- const char* countName,
- const char* arrayName,
- T count,
- const void* array,
- VkBool32 countRequired,
- VkBool32 arrayRequired)
-{
+VkBool32 validate_array(debug_report_data *report_data, const char *apiName, const char *countName, const char *arrayName, T count,
+ const void *array, VkBool32 countRequired, VkBool32 arrayRequired) {
VkBool32 skipCall = VK_FALSE;
// Count parameters not tagged as optional cannot be 0
if ((count == 0) && (countRequired == VK_TRUE)) {
- skipCall |= log_msg(
- report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
- "PARAMCHECK", "%s: value of %s must be greater than 0",
- apiName, countName);
+ skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "%s: value of %s must be greater than 0", apiName, countName);
}
// Array parameters not tagged as optional cannot be NULL,
// unless the count is 0
if ((array == NULL) && (arrayRequired == VK_TRUE) && (count != 0)) {
- skipCall |= log_msg(
- report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
- "PARAMCHECK", "%s: required parameter %s specified as NULL",
- apiName, arrayName);
+ skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "%s: required parameter %s specified as NULL", apiName, arrayName);
}
return skipCall;
@@ -182,30 +147,18 @@ VkBool32 validate_array(
* @return Boolean value indicating that the call should be skipped.
*/
template <typename T>
-VkBool32 validate_struct_type(
- debug_report_data* report_data,
- const char* apiName,
- const char* parameterName,
- const char* sTypeName,
- const T* value,
- VkStructureType sType,
- VkBool32 required)
-{
+VkBool32 validate_struct_type(debug_report_data *report_data, const char *apiName, const char *parameterName, const char *sTypeName,
+ const T *value, VkStructureType sType, VkBool32 required) {
VkBool32 skipCall = VK_FALSE;
if (value == NULL) {
if (required == VK_TRUE) {
- skipCall |= log_msg(
- report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
- "PARAMCHECK", "%s: required parameter %s specified as NULL",
- apiName, parameterName);
+ skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
+ "PARAMCHECK", "%s: required parameter %s specified as NULL", apiName, parameterName);
}
} else if (value->sType != sType) {
- skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
- "PARAMCHECK", "%s: parameter %s->sType must be %s",
- apiName, parameterName, sTypeName);
+ skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1, "PARAMCHECK",
+ "%s: parameter %s->sType must be %s", apiName, parameterName, sTypeName);
}
return skipCall;
@@ -233,33 +186,20 @@ VkBool32 validate_struct_type(
* @return Boolean value indicating that the call should be skipped.
*/
template <typename T>
-VkBool32 validate_struct_type_array(
- debug_report_data* report_data,
- const char* apiName,
- const char* countName,
- const char* arrayName,
- const char* sTypeName,
- const uint32_t* count,
- const T* array,
- VkStructureType sType,
- VkBool32 countPtrRequired,
- VkBool32 countValueRequired,
- VkBool32 arrayRequired)
-{
+VkBool32 validate_struct_type_array(debug_report_data *report_data, const char *apiName, const char *countName,
+ const char *arrayName, const char *sTypeName, const uint32_t *count, const T *array,
+ VkStructureType sType, VkBool32 countPtrRequired, VkBool32 countValueRequired,
+ VkBool32 arrayRequired) {
VkBool32 skipCall = VK_FALSE;
if (count == NULL) {
if (countPtrRequired == VK_TRUE) {
- skipCall |= log_msg(
- report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
- "PARAMCHECK", "%s: required parameter %s specified as NULL",
- apiName, countName);
+ skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
+ "PARAMCHECK", "%s: required parameter %s specified as NULL", apiName, countName);
}
} else {
- skipCall |= validate_struct_type_array(
- report_data, apiName, countName, arrayName, sTypeName,
- (*count), array, sType, countValueRequired, arrayRequired);
+ skipCall |= validate_struct_type_array(report_data, apiName, countName, arrayName, sTypeName, (*count), array, sType,
+ countValueRequired, arrayRequired);
}
return skipCall;
@@ -285,48 +225,30 @@ VkBool32 validate_struct_type_array(
* @return Boolean value indicating that the call should be skipped.
*/
template <typename T>
-VkBool32 validate_struct_type_array(
- debug_report_data* report_data,
- const char* apiName,
- const char* countName,
- const char* arrayName,
- const char* sTypeName,
- uint32_t count,
- const T* array,
- VkStructureType sType,
- VkBool32 countRequired,
- VkBool32 arrayRequired)
-{
+VkBool32 validate_struct_type_array(debug_report_data *report_data, const char *apiName, const char *countName,
+ const char *arrayName, const char *sTypeName, uint32_t count, const T *array,
+ VkStructureType sType, VkBool32 countRequired, VkBool32 arrayRequired) {
VkBool32 skipCall = VK_FALSE;
if ((count == 0) || (array == NULL)) {
// Count parameters not tagged as optional cannot be 0
if ((count == 0) && (countRequired == VK_TRUE)) {
- skipCall |= log_msg(
- report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
- "PARAMCHECK", "%s: parameter %s must be greater than 0",
- apiName, countName);
+ skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
+ "PARAMCHECK", "%s: parameter %s must be greater than 0", apiName, countName);
}
// Array parameters not tagged as optional cannot be NULL,
// unless the count is 0
if ((array == NULL) && (arrayRequired == VK_TRUE) && (count != 0)) {
- skipCall |= log_msg(
- report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
- "PARAMCHECK", "%s: required parameter %s specified as NULL",
- apiName, arrayName);
+ skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
+ "PARAMCHECK", "%s: required parameter %s specified as NULL", apiName, arrayName);
}
} else {
// Verify that all structs in the array have the correct type
for (uint32_t i = 0; i < count; ++i) {
if (array[i].sType != sType) {
- skipCall |= log_msg(
- report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
- "PARAMCHECK", "%s: parameter %s[%d].sType must be %s",
- apiName, arrayName, i, sTypeName);
+ skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
+ "PARAMCHECK", "%s: parameter %s[%d].sType must be %s", apiName, arrayName, i, sTypeName);
}
}
}
@@ -352,47 +274,30 @@ VkBool32 validate_struct_type_array(
* @param arrayRequired The 'array' parameter may not be NULL when true.
* @return Boolean value indicating that the call should be skipped.
*/
-static
-VkBool32 validate_string_array(
- debug_report_data* report_data,
- const char* apiName,
- const char* countName,
- const char* arrayName,
- uint32_t count,
- const char* const* array,
- VkBool32 countRequired,
- VkBool32 arrayRequired)
-{
+static VkBool32 validate_string_array(debug_report_data *report_data, const char *apiName, const char *countName,
+ const char *arrayName, uint32_t count, const char *const *array, VkBool32 countRequired,
+ VkBool32 arrayRequired) {
VkBool32 skipCall = VK_FALSE;
if ((count == 0) || (array == NULL)) {
// Count parameters not tagged as optional cannot be 0
if ((count == 0) && (countRequired == VK_TRUE)) {
- skipCall |= log_msg(
- report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
- "PARAMCHECK", "%s: parameter %s must be greater than 0",
- apiName, countName);
+ skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
+ "PARAMCHECK", "%s: parameter %s must be greater than 0", apiName, countName);
}
// Array parameters not tagged as optional cannot be NULL,
// unless the count is 0
if ((array == NULL) && (arrayRequired == VK_TRUE) && (count != 0)) {
- skipCall |= log_msg(
- report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
- "PARAMCHECK", "%s: required parameter %s specified as NULL",
- apiName, arrayName);
+ skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
+ "PARAMCHECK", "%s: required parameter %s specified as NULL", apiName, arrayName);
}
} else {
         // Verify that strings in the array are not NULL
for (uint32_t i = 0; i < count; ++i) {
if (array[i] == NULL) {
- skipCall |= log_msg(
- report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
- "PARAMCHECK", "%s: required parameter %s[%d] specified as NULL",
- apiName, arrayName, i);
+ skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__, 1,
+ "PARAMCHECK", "%s: required parameter %s[%d] specified as NULL", apiName, arrayName, i);
}
}
}
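Every helper in this header follows the same pattern: on a violation it emits a PARAMCHECK error through log_msg and folds the result into a VkBool32 skip flag, which the generated param_check_* functions in param_checker.cpp OR together before deciding whether to call down the chain. A minimal sketch of such a caller (hypothetical; the function name and parameters are invented for illustration and are not part of this patch):

    /* Illustrative only. Assumes the validate_* helpers above and a valid
     * debug_report_data pointer are in scope. */
    static VkBool32 param_check_vkCmdExampleHypothetical(debug_report_data *report_data, uint32_t barrierCount,
                                                         const VkMemoryBarrier *pBarriers) {
        VkBool32 skipCall = VK_FALSE;
        /* barrierCount may be 0, but a non-zero count requires a non-NULL array. */
        skipCall |= validate_array(report_data, "vkCmdExampleHypothetical", "barrierCount", "pBarriers", barrierCount,
                                   pBarriers, VK_FALSE, VK_TRUE);
        return skipCall;
    }
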
diff --git a/layers/swapchain.cpp b/layers/swapchain.cpp
index 282a271a7..a61449fbf 100644
--- a/layers/swapchain.cpp
+++ b/layers/swapchain.cpp
@@ -40,37 +40,22 @@ static loader_platform_thread_mutex globalLock;
// The following is for logging error messages:
static std::unordered_map<void *, layer_data *> layer_data_map;
-template layer_data *get_my_data_ptr<layer_data>(
- void *data_key,
- std::unordered_map<void *, layer_data *> &data_map);
-
-static const VkExtensionProperties instance_extensions[] = {
- {
- VK_EXT_DEBUG_REPORT_EXTENSION_NAME,
- VK_EXT_DEBUG_REPORT_SPEC_VERSION
- }
-};
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(
- const char *pLayerName,
- uint32_t *pCount,
- VkExtensionProperties* pProperties)
-{
- return util_GetExtensionProperties(ARRAY_SIZE(instance_extensions),
- instance_extensions, pCount,
- pProperties);
-}
+template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);
+
+static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
-vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
- const char *pLayerName, uint32_t *pCount,
- VkExtensionProperties *pProperties) {
+vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
+ return util_GetExtensionProperties(ARRAY_SIZE(instance_extensions), instance_extensions, pCount, pProperties);
+}
+
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
+ const char *pLayerName, uint32_t *pCount,
+ VkExtensionProperties *pProperties) {
if (pLayerName == NULL) {
dispatch_key key = get_dispatch_key(physicalDevice);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
- return my_data->instance_dispatch_table
- ->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount,
- pProperties);
+ return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
} else {
return util_GetExtensionProperties(0, nullptr, pCount, pProperties);
}
@@ -80,36 +65,31 @@ static const VkLayerProperties swapchain_layers[] = {{
"VK_LAYER_LUNARG_swapchain", VK_API_VERSION, 1, "LunarG Validation Layer",
}};
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(
- uint32_t *pCount,
- VkLayerProperties* pProperties)
-{
- return util_GetLayerProperties(ARRAY_SIZE(swapchain_layers),
- swapchain_layers, pCount, pProperties);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
+ return util_GetLayerProperties(ARRAY_SIZE(swapchain_layers), swapchain_layers, pCount, pProperties);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(
- VkPhysicalDevice physicalDevice, uint32_t *pCount,
- VkLayerProperties *pProperties) {
- return util_GetLayerProperties(ARRAY_SIZE(swapchain_layers),
- swapchain_layers, pCount, pProperties);
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
+ return util_GetLayerProperties(ARRAY_SIZE(swapchain_layers), swapchain_layers, pCount, pProperties);
}
-static void createDeviceRegisterExtensions(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, VkDevice device)
-{
+static void createDeviceRegisterExtensions(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
+ VkDevice device) {
uint32_t i;
- layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
+ layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
- VkLayerDispatchTable *pDisp = my_device_data->device_dispatch_table;
- PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
+ VkLayerDispatchTable *pDisp = my_device_data->device_dispatch_table;
+ PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
- pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR) gpa(device, "vkCreateSwapchainKHR");
- pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR) gpa(device, "vkDestroySwapchainKHR");
- pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR) gpa(device, "vkGetSwapchainImagesKHR");
- pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR) gpa(device, "vkAcquireNextImageKHR");
- pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR) gpa(device, "vkQueuePresentKHR");
- pDisp->GetDeviceQueue = (PFN_vkGetDeviceQueue) gpa(device, "vkGetDeviceQueue");
+ pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
+ pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
+ pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
+ pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
+ pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
+ pDisp->GetDeviceQueue = (PFN_vkGetDeviceQueue)gpa(device, "vkGetDeviceQueue");
SwpPhysicalDevice *pPhysicalDevice = &my_instance_data->physicalDeviceMap[physicalDevice];
if (pPhysicalDevice) {
@@ -119,7 +99,7 @@ static void createDeviceRegisterExtensions(VkPhysicalDevice physicalDevice, cons
// TBD: Should we leave error in (since Swapchain really needs this
// link)?
log_msg(my_instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
- (uint64_t)physicalDevice , __LINE__, SWAPCHAIN_INVALID_HANDLE, "Swapchain",
+ (uint64_t)physicalDevice, __LINE__, SWAPCHAIN_INVALID_HANDLE, "Swapchain",
"vkCreateDevice() called with a non-valid VkPhysicalDevice.");
}
my_device_data->deviceMap[device].device = device;
@@ -136,40 +116,48 @@ static void createDeviceRegisterExtensions(VkPhysicalDevice physicalDevice, cons
}
}
-static void createInstanceRegisterExtensions(const VkInstanceCreateInfo* pCreateInfo, VkInstance instance)
-{
+static void createInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
uint32_t i;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
- VkLayerInstanceDispatchTable *pDisp = my_data->instance_dispatch_table;
+ VkLayerInstanceDispatchTable *pDisp = my_data->instance_dispatch_table;
PFN_vkGetInstanceProcAddr gpa = pDisp->GetInstanceProcAddr;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
- pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR) gpa(instance, "vkCreateAndroidSurfaceKHR");
+ pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR)gpa(instance, "vkCreateAndroidSurfaceKHR");
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
- pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR) gpa(instance, "vkCreateMirSurfaceKHR");
- pDisp->GetPhysicalDeviceMirPresentationSupportKHR = (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
+ pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR)gpa(instance, "vkCreateMirSurfaceKHR");
+ pDisp->GetPhysicalDeviceMirPresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
- pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR) gpa(instance, "vkCreateWaylandSurfaceKHR");
- pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR = (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
+ pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)gpa(instance, "vkCreateWaylandSurfaceKHR");
+ pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
- pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR) gpa(instance, "vkCreateWin32SurfaceKHR");
- pDisp->GetPhysicalDeviceWin32PresentationSupportKHR = (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
+ pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)gpa(instance, "vkCreateWin32SurfaceKHR");
+ pDisp->GetPhysicalDeviceWin32PresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
- pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR) gpa(instance, "vkCreateXcbSurfaceKHR");
- pDisp->GetPhysicalDeviceXcbPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
+ pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)gpa(instance, "vkCreateXcbSurfaceKHR");
+ pDisp->GetPhysicalDeviceXcbPresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
- pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR) gpa(instance, "vkCreateXlibSurfaceKHR");
- pDisp->GetPhysicalDeviceXlibPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
+ pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)gpa(instance, "vkCreateXlibSurfaceKHR");
+ pDisp->GetPhysicalDeviceXlibPresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XLIB_KHR
- pDisp->DestroySurfaceKHR = (PFN_vkDestroySurfaceKHR) gpa(instance, "vkDestroySurfaceKHR");
- pDisp->GetPhysicalDeviceSurfaceSupportKHR = (PFN_vkGetPhysicalDeviceSurfaceSupportKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
- pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
- pDisp->GetPhysicalDeviceSurfaceFormatsKHR = (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
- pDisp->GetPhysicalDeviceSurfacePresentModesKHR = (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR) gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");
+ pDisp->DestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)gpa(instance, "vkDestroySurfaceKHR");
+ pDisp->GetPhysicalDeviceSurfaceSupportKHR =
+ (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
+ pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR =
+ (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
+ pDisp->GetPhysicalDeviceSurfaceFormatsKHR =
+ (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
+ pDisp->GetPhysicalDeviceSurfacePresentModesKHR =
+ (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");
// Remember this instance, and whether the VK_KHR_surface extension
// was enabled for it:
@@ -194,7 +182,6 @@ static void createInstanceRegisterExtensions(const VkInstanceCreateInfo* pCreate
my_data->instanceMap[instance].xlibSurfaceExtensionEnabled = false;
#endif // VK_USE_PLATFORM_XLIB_KHR
-
// Record whether the WSI instance extension was enabled for this
// VkInstance. No need to check if the extension was advertised by
// vkEnumerateInstanceExtensionProperties(), since the loader handles that.
@@ -206,8 +193,7 @@ static void createInstanceRegisterExtensions(const VkInstanceCreateInfo* pCreate
#ifdef VK_USE_PLATFORM_ANDROID_KHR
if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME) == 0) {
- my_data->instanceMap[instance].androidSurfaceExtensionEnabled =
- true;
+ my_data->instanceMap[instance].androidSurfaceExtensionEnabled = true;
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
@@ -219,8 +205,7 @@ static void createInstanceRegisterExtensions(const VkInstanceCreateInfo* pCreate
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME) == 0) {
- my_data->instanceMap[instance].waylandSurfaceExtensionEnabled =
- true;
+ my_data->instanceMap[instance].waylandSurfaceExtensionEnabled = true;
}
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
@@ -245,8 +230,7 @@ static void createInstanceRegisterExtensions(const VkInstanceCreateInfo* pCreate
}
#include "vk_dispatch_table_helper.h"
-static void initSwapchain(layer_data *my_data, const VkAllocationCallbacks *pAllocator)
-{
+static void initSwapchain(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
uint32_t report_flags = 0;
uint32_t debug_action = 0;
FILE *log_output = NULL;
@@ -255,10 +239,9 @@ static void initSwapchain(layer_data *my_data, const VkAllocationCallbacks *pAll
// Initialize swapchain options:
report_flags = getLayerOptionFlags("lunarg_swapchain.report_flags", 0);
- getLayerOptionEnum("lunarg_swapchain.debug_action", (uint32_t *) &debug_action);
+ getLayerOptionEnum("lunarg_swapchain.debug_action", (uint32_t *)&debug_action);
- if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
- {
+ if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG) {
// Turn on logging, since it was requested:
option_str = getLayerOption("lunarg_swapchain.log_filename");
log_output = getLayerLogOutput(option_str, "lunarg_swapchain");
@@ -268,10 +251,7 @@ static void initSwapchain(layer_data *my_data, const VkAllocationCallbacks *pAll
dbgInfo.pfnCallback = log_callback;
dbgInfo.pUserData = log_output;
dbgInfo.flags = report_flags;
- layer_create_msg_callback(my_data->report_data,
- &dbgInfo,
- pAllocator,
- &callback);
+ layer_create_msg_callback(my_data->report_data, &dbgInfo, pAllocator, &callback);
my_data->logging_callback.push_back(callback);
}
if (debug_action & VK_DBG_LAYER_ACTION_DEBUG_OUTPUT) {
@@ -284,45 +264,39 @@ static void initSwapchain(layer_data *my_data, const VkAllocationCallbacks *pAll
layer_create_msg_callback(my_data->report_data, &dbgInfo, pAllocator, &callback);
my_data->logging_callback.push_back(callback);
}
- if (!globalLockInitialized)
- {
+ if (!globalLockInitialized) {
loader_platform_thread_create_mutex(&globalLock);
globalLockInitialized = 1;
}
}
-static const char *surfaceTransformStr(VkSurfaceTransformFlagBitsKHR value)
-{
+static const char *surfaceTransformStr(VkSurfaceTransformFlagBitsKHR value) {
// Return a string corresponding to the value:
return string_VkSurfaceTransformFlagBitsKHR(value);
}
-static const char *surfaceCompositeAlphaStr(VkCompositeAlphaFlagBitsKHR value)
-{
+static const char *surfaceCompositeAlphaStr(VkCompositeAlphaFlagBitsKHR value) {
// Return a string corresponding to the value:
return string_VkCompositeAlphaFlagBitsKHR(value);
}
-static const char *presentModeStr(VkPresentModeKHR value)
-{
+static const char *presentModeStr(VkPresentModeKHR value) {
// Return a string corresponding to the value:
return string_VkPresentModeKHR(value);
}
-static const char *sharingModeStr(VkSharingMode value)
-{
+static const char *sharingModeStr(VkSharingMode value) {
// Return a string corresponding to the value:
return string_VkSharingMode(value);
}
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
- PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance) fpGetInstanceProcAddr(NULL, "vkCreateInstance");
+ PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
if (fpCreateInstance == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -339,11 +313,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstance
my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);
- my_data->report_data = debug_report_create_instance(
- my_data->instance_dispatch_table,
- *pInstance,
- pCreateInfo->enabledExtensionCount,
- pCreateInfo->ppEnabledExtensionNames);
+ my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
+ pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
// Call the following function after my_data is initialized:
createInstanceRegisterExtensions(pCreateInfo, *pInstance);
@@ -352,8 +323,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstance
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
dispatch_key key = get_dispatch_key(instance);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
SwpInstance *pInstance = &(my_data->instanceMap[instance]);
@@ -367,15 +337,13 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance
if (pInstance) {
// Delete all of the SwpPhysicalDevice's, SwpSurface's, and the
// SwpInstance associated with this instance:
- for (auto it = pInstance->physicalDevices.begin() ;
- it != pInstance->physicalDevices.end() ; it++) {
+ for (auto it = pInstance->physicalDevices.begin(); it != pInstance->physicalDevices.end(); it++) {
// Free memory that was allocated for/by this SwpPhysicalDevice:
SwpPhysicalDevice *pPhysicalDevice = it->second;
if (pPhysicalDevice) {
if (pPhysicalDevice->pDevice) {
- LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, instance, "VkInstance",
- SWAPCHAIN_DEL_OBJECT_BEFORE_CHILDREN,
+ LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, instance, "VkInstance", SWAPCHAIN_DEL_OBJECT_BEFORE_CHILDREN,
"%s() called before all of its associated "
"VkDevices were destroyed.",
__FUNCTION__);
@@ -388,14 +356,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance
// are simply pointed to by the SwpInstance):
my_data->physicalDeviceMap.erase(it->second->physicalDevice);
}
- for (auto it = pInstance->surfaces.begin() ;
- it != pInstance->surfaces.end() ; it++) {
+ for (auto it = pInstance->surfaces.begin(); it != pInstance->surfaces.end(); it++) {
// Free memory that was allocated for/by this SwpPhysicalDevice:
SwpSurface *pSurface = it->second;
if (pSurface) {
- LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, instance, "VkInstance",
- SWAPCHAIN_DEL_OBJECT_BEFORE_CHILDREN,
+ LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, instance, "VkInstance", SWAPCHAIN_DEL_OBJECT_BEFORE_CHILDREN,
"%s() called before all of its associated "
"VkSurfaceKHRs were destroyed.",
__FUNCTION__);
@@ -424,38 +390,29 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance
}
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties(
- VkPhysicalDevice physicalDevice,
- uint32_t* pQueueFamilyPropertyCount,
- VkQueueFamilyProperties* pQueueFamilyProperties)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties *pQueueFamilyProperties) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
// Call down the call chain:
- my_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
- physicalDevice,
- pQueueFamilyPropertyCount,
- pQueueFamilyProperties);
+ my_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount,
+ pQueueFamilyProperties);
// Record the result of this query:
loader_platform_thread_lock_mutex(&globalLock);
SwpPhysicalDevice *pPhysicalDevice = &my_data->physicalDeviceMap[physicalDevice];
- if (pPhysicalDevice &&
- pQueueFamilyPropertyCount && !pQueueFamilyProperties) {
+ if (pPhysicalDevice && pQueueFamilyPropertyCount && !pQueueFamilyProperties) {
pPhysicalDevice->gotQueueFamilyPropertyCount = true;
- pPhysicalDevice->numOfQueueFamilies =
- *pQueueFamilyPropertyCount;
+ pPhysicalDevice->numOfQueueFamilies = *pQueueFamilyPropertyCount;
}
loader_platform_thread_unlock_mutex(&globalLock);
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR(
- VkInstance instance,
- const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSurfaceKHR* pSurface)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
VkResult result = VK_SUCCESS;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
@@ -464,37 +421,27 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR(
// Validate that the platform extension was enabled:
if (pInstance && !pInstance->androidSurfaceExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
- pInstance,
- "VkInstance",
- SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkInstance.",
- __FUNCTION__, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME);
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, pInstance, "VkInstance", SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
+ "%s() called even though the %s extension was not enabled for this VkInstance.", __FUNCTION__,
+ VK_KHR_ANDROID_SURFACE_EXTENSION_NAME);
}
if (!pCreateInfo) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo");
} else {
if (pCreateInfo->sType != VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR) {
- skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo",
+ skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo",
"VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR");
}
if (pCreateInfo->pNext != NULL) {
- skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo");
+ skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo");
}
}
if (VK_FALSE == skipCall) {
// Call down the call chain:
loader_platform_thread_unlock_mutex(&globalLock);
- result = my_data->instance_dispatch_table->CreateAndroidSurfaceKHR(
- instance, pCreateInfo, pAllocator, pSurface);
+ result = my_data->instance_dispatch_table->CreateAndroidSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
loader_platform_thread_lock_mutex(&globalLock);
// Obtain this pointer again after locking:
@@ -503,8 +450,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR(
// Record the VkSurfaceKHR returned by the ICD:
my_data->surfaceMap[*pSurface].surface = *pSurface;
my_data->surfaceMap[*pSurface].pInstance = pInstance;
- my_data->surfaceMap[*pSurface].usedAllocatorToCreate =
- (pAllocator != NULL);
+ my_data->surfaceMap[*pSurface].usedAllocatorToCreate = (pAllocator != NULL);
my_data->surfaceMap[*pSurface].numQueueFamilyIndexSupport = 0;
my_data->surfaceMap[*pSurface].pQueueFamilyIndexSupport = NULL;
// Point to the associated SwpInstance:
@@ -519,12 +465,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR(
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateMirSurfaceKHR(
- VkInstance instance,
- const VkMirSurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSurfaceKHR* pSurface)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator,
+ VkSurfaceKHR *pSurface) {
VkResult result = VK_SUCCESS;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
@@ -533,37 +476,27 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateMirSurfaceKHR(
// Validate that the platform extension was enabled:
if (pInstance && !pInstance->mirSurfaceExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
- pInstance,
- "VkInstance",
- SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkInstance.",
- __FUNCTION__, VK_KHR_MIR_SURFACE_EXTENSION_NAME);
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, pInstance, "VkInstance", SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
+ "%s() called even though the %s extension was not enabled for this VkInstance.", __FUNCTION__,
+ VK_KHR_MIR_SURFACE_EXTENSION_NAME);
}
if (!pCreateInfo) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo");
} else {
if (pCreateInfo->sType != VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR) {
- skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo",
+ skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo",
"VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR");
}
if (pCreateInfo->pNext != NULL) {
- skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo");
+ skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo");
}
}
if (VK_FALSE == skipCall) {
// Call down the call chain:
loader_platform_thread_unlock_mutex(&globalLock);
- result = my_data->instance_dispatch_table->CreateMirSurfaceKHR(
- instance, pCreateInfo, pAllocator, pSurface);
+ result = my_data->instance_dispatch_table->CreateMirSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
loader_platform_thread_lock_mutex(&globalLock);
// Obtain this pointer again after locking:
@@ -572,8 +505,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateMirSurfaceKHR(
// Record the VkSurfaceKHR returned by the ICD:
my_data->surfaceMap[*pSurface].surface = *pSurface;
my_data->surfaceMap[*pSurface].pInstance = pInstance;
- my_data->surfaceMap[*pSurface].usedAllocatorToCreate =
- (pAllocator != NULL);
+ my_data->surfaceMap[*pSurface].usedAllocatorToCreate = (pAllocator != NULL);
my_data->surfaceMap[*pSurface].numQueueFamilyIndexSupport = 0;
my_data->surfaceMap[*pSurface].pQueueFamilyIndexSupport = NULL;
// Point to the associated SwpInstance:
@@ -586,11 +518,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateMirSurfaceKHR(
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceMirPresentationSupportKHR(
- VkPhysicalDevice physicalDevice,
- uint32_t queueFamilyIndex,
- MirConnection* connection)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceMirPresentationSupportKHR(VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ MirConnection *connection) {
VkBool32 result = VK_FALSE;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
@@ -598,41 +528,32 @@ VK_LAYER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceMirPresentatio
SwpPhysicalDevice *pPhysicalDevice = &my_data->physicalDeviceMap[physicalDevice];
// Validate that the platform extension was enabled:
- if (pPhysicalDevice && pPhysicalDevice->pInstance &&
- !pPhysicalDevice->pInstance->mirSurfaceExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
- pPhysicalDevice->pInstance,
- "VkInstance",
+ if (pPhysicalDevice && pPhysicalDevice->pInstance && !pPhysicalDevice->pInstance->mirSurfaceExtensionEnabled) {
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, pPhysicalDevice->pInstance, "VkInstance",
SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkInstance.",
- __FUNCTION__, VK_KHR_MIR_SURFACE_EXTENSION_NAME);
+ "%s() called even though the %s extension was not enabled for this VkInstance.", __FUNCTION__,
+ VK_KHR_MIR_SURFACE_EXTENSION_NAME);
}
- if (pPhysicalDevice->gotQueueFamilyPropertyCount &&
- (queueFamilyIndex >= pPhysicalDevice->numOfQueueFamilies)) {
- skipCall |= LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
- pPhysicalDevice,
- "VkPhysicalDevice",
- queueFamilyIndex,
- pPhysicalDevice->numOfQueueFamilies);
+ if (pPhysicalDevice->gotQueueFamilyPropertyCount && (queueFamilyIndex >= pPhysicalDevice->numOfQueueFamilies)) {
+ skipCall |=
+ LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, pPhysicalDevice,
+ "VkPhysicalDevice", queueFamilyIndex, pPhysicalDevice->numOfQueueFamilies);
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
// Call down the call chain:
- result = my_data->instance_dispatch_table->GetPhysicalDeviceMirPresentationSupportKHR(
- physicalDevice, queueFamilyIndex, connection);
+ result = my_data->instance_dispatch_table->GetPhysicalDeviceMirPresentationSupportKHR(physicalDevice, queueFamilyIndex,
+ connection);
}
return result;
}
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR(
- VkInstance instance,
- const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSurfaceKHR* pSurface)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
VkResult result = VK_SUCCESS;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
@@ -641,37 +562,27 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR(
// Validate that the platform extension was enabled:
if (pInstance && !pInstance->waylandSurfaceExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
- pInstance,
- "VkInstance",
- SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkInstance.",
- __FUNCTION__, VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME);
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, pInstance, "VkInstance", SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
+ "%s() called even though the %s extension was not enabled for this VkInstance.", __FUNCTION__,
+ VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME);
}
if (!pCreateInfo) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo");
} else {
if (pCreateInfo->sType != VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR) {
- skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo",
+ skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo",
"VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR");
}
if (pCreateInfo->pNext != NULL) {
- skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo");
+ skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo");
}
}
if (VK_FALSE == skipCall) {
// Call down the call chain:
loader_platform_thread_unlock_mutex(&globalLock);
- result = my_data->instance_dispatch_table->CreateWaylandSurfaceKHR(
- instance, pCreateInfo, pAllocator, pSurface);
+ result = my_data->instance_dispatch_table->CreateWaylandSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
loader_platform_thread_lock_mutex(&globalLock);
// Obtain this pointer again after locking:
@@ -680,8 +591,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR(
// Record the VkSurfaceKHR returned by the ICD:
my_data->surfaceMap[*pSurface].surface = *pSurface;
my_data->surfaceMap[*pSurface].pInstance = pInstance;
- my_data->surfaceMap[*pSurface].usedAllocatorToCreate =
- (pAllocator != NULL);
+ my_data->surfaceMap[*pSurface].usedAllocatorToCreate = (pAllocator != NULL);
my_data->surfaceMap[*pSurface].numQueueFamilyIndexSupport = 0;
my_data->surfaceMap[*pSurface].pQueueFamilyIndexSupport = NULL;
// Point to the associated SwpInstance:
@@ -694,11 +604,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR(
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWaylandPresentationSupportKHR(
- VkPhysicalDevice physicalDevice,
- uint32_t queueFamilyIndex,
- struct wl_display* display)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ struct wl_display *display) {
VkBool32 result = VK_FALSE;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
@@ -706,41 +614,32 @@ VK_LAYER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWaylandPresent
SwpPhysicalDevice *pPhysicalDevice = &my_data->physicalDeviceMap[physicalDevice];
// Validate that the platform extension was enabled:
- if (pPhysicalDevice && pPhysicalDevice->pInstance &&
- !pPhysicalDevice->pInstance->waylandSurfaceExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
- pPhysicalDevice->pInstance,
- "VkInstance",
+ if (pPhysicalDevice && pPhysicalDevice->pInstance && !pPhysicalDevice->pInstance->waylandSurfaceExtensionEnabled) {
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, pPhysicalDevice->pInstance, "VkInstance",
SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkInstance.",
- __FUNCTION__, VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME);
+ "%s() called even though the %s extension was not enabled for this VkInstance.", __FUNCTION__,
+ VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME);
}
- if (pPhysicalDevice->gotQueueFamilyPropertyCount &&
- (queueFamilyIndex >= pPhysicalDevice->numOfQueueFamilies)) {
- skipCall |= LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
- pPhysicalDevice,
- "VkPhysicalDevice",
- queueFamilyIndex,
- pPhysicalDevice->numOfQueueFamilies);
+ if (pPhysicalDevice->gotQueueFamilyPropertyCount && (queueFamilyIndex >= pPhysicalDevice->numOfQueueFamilies)) {
+ skipCall |=
+ LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, pPhysicalDevice,
+ "VkPhysicalDevice", queueFamilyIndex, pPhysicalDevice->numOfQueueFamilies);
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
// Call down the call chain:
- result = my_data->instance_dispatch_table->GetPhysicalDeviceWaylandPresentationSupportKHR(
- physicalDevice, queueFamilyIndex, display);
+ result = my_data->instance_dispatch_table->GetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex,
+ display);
}
return result;
}
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(
- VkInstance instance,
- const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSurfaceKHR* pSurface)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
VkResult result = VK_SUCCESS;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
@@ -749,37 +648,27 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(
// Validate that the platform extension was enabled:
if (pInstance && !pInstance->win32SurfaceExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
- pInstance,
- "VkInstance",
- SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkInstance.",
- __FUNCTION__, VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, pInstance, "VkInstance", SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
+ "%s() called even though the %s extension was not enabled for this VkInstance.", __FUNCTION__,
+ VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
}
if (!pCreateInfo) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo");
} else {
if (pCreateInfo->sType != VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR) {
- skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo",
+ skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo",
"VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR");
}
if (pCreateInfo->pNext != NULL) {
- skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo");
+ skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo");
}
}
if (VK_FALSE == skipCall) {
// Call down the call chain:
loader_platform_thread_unlock_mutex(&globalLock);
- result = my_data->instance_dispatch_table->CreateWin32SurfaceKHR(
- instance, pCreateInfo, pAllocator, pSurface);
+ result = my_data->instance_dispatch_table->CreateWin32SurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
loader_platform_thread_lock_mutex(&globalLock);
// Obtain this pointer again after locking:
@@ -788,8 +677,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(
// Record the VkSurfaceKHR returned by the ICD:
my_data->surfaceMap[*pSurface].surface = *pSurface;
my_data->surfaceMap[*pSurface].pInstance = pInstance;
- my_data->surfaceMap[*pSurface].usedAllocatorToCreate =
- (pAllocator != NULL);
+ my_data->surfaceMap[*pSurface].usedAllocatorToCreate = (pAllocator != NULL);
my_data->surfaceMap[*pSurface].numQueueFamilyIndexSupport = 0;
my_data->surfaceMap[*pSurface].pQueueFamilyIndexSupport = NULL;
// Point to the associated SwpInstance:
@@ -802,10 +690,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR(
- VkPhysicalDevice physicalDevice,
- uint32_t queueFamilyIndex)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL
+vkGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex) {
VkBool32 result = VK_FALSE;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
@@ -813,41 +699,31 @@ VK_LAYER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32Presentat
SwpPhysicalDevice *pPhysicalDevice = &my_data->physicalDeviceMap[physicalDevice];
// Validate that the platform extension was enabled:
- if (pPhysicalDevice && pPhysicalDevice->pInstance &&
- !pPhysicalDevice->pInstance->win32SurfaceExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
- pPhysicalDevice->pInstance,
- "VkInstance",
+ if (pPhysicalDevice && pPhysicalDevice->pInstance && !pPhysicalDevice->pInstance->win32SurfaceExtensionEnabled) {
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, pPhysicalDevice->pInstance, "VkInstance",
SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkInstance.",
- __FUNCTION__, VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
+ "%s() called even though the %s extension was not enabled for this VkInstance.", __FUNCTION__,
+ VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
}
- if (pPhysicalDevice->gotQueueFamilyPropertyCount &&
- (queueFamilyIndex >= pPhysicalDevice->numOfQueueFamilies)) {
- skipCall |= LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
- pPhysicalDevice,
- "VkPhysicalDevice",
- queueFamilyIndex,
- pPhysicalDevice->numOfQueueFamilies);
+ if (pPhysicalDevice->gotQueueFamilyPropertyCount && (queueFamilyIndex >= pPhysicalDevice->numOfQueueFamilies)) {
+ skipCall |=
+ LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, pPhysicalDevice,
+ "VkPhysicalDevice", queueFamilyIndex, pPhysicalDevice->numOfQueueFamilies);
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
// Call down the call chain:
- result = my_data->instance_dispatch_table->GetPhysicalDeviceWin32PresentationSupportKHR(
- physicalDevice, queueFamilyIndex);
+ result = my_data->instance_dispatch_table->GetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex);
}
return result;
}
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(
- VkInstance instance,
- const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSurfaceKHR* pSurface)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator,
+ VkSurfaceKHR *pSurface) {
VkResult result = VK_SUCCESS;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
@@ -856,37 +732,27 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(
// Validate that the platform extension was enabled:
if (pInstance && !pInstance->xcbSurfaceExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
- pInstance,
- "VkInstance",
- SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkInstance.",
- __FUNCTION__, VK_KHR_XCB_SURFACE_EXTENSION_NAME);
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, pInstance, "VkInstance", SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
+ "%s() called even though the %s extension was not enabled for this VkInstance.", __FUNCTION__,
+ VK_KHR_XCB_SURFACE_EXTENSION_NAME);
}
if (!pCreateInfo) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo");
} else {
if (pCreateInfo->sType != VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR) {
- skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo",
+ skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo",
"VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR");
}
if (pCreateInfo->pNext != NULL) {
- skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo");
+ skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo");
}
}
if (VK_FALSE == skipCall) {
// Call down the call chain:
loader_platform_thread_unlock_mutex(&globalLock);
- result = my_data->instance_dispatch_table->CreateXcbSurfaceKHR(
- instance, pCreateInfo, pAllocator, pSurface);
+ result = my_data->instance_dispatch_table->CreateXcbSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
loader_platform_thread_lock_mutex(&globalLock);
// Obtain this pointer again after locking:
@@ -895,8 +761,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(
// Record the VkSurfaceKHR returned by the ICD:
my_data->surfaceMap[*pSurface].surface = *pSurface;
my_data->surfaceMap[*pSurface].pInstance = pInstance;
- my_data->surfaceMap[*pSurface].usedAllocatorToCreate =
- (pAllocator != NULL);
+ my_data->surfaceMap[*pSurface].usedAllocatorToCreate = (pAllocator != NULL);
my_data->surfaceMap[*pSurface].numQueueFamilyIndexSupport = 0;
my_data->surfaceMap[*pSurface].pQueueFamilyIndexSupport = NULL;
// Point to the associated SwpInstance:
@@ -909,12 +774,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR(
- VkPhysicalDevice physicalDevice,
- uint32_t queueFamilyIndex,
- xcb_connection_t* connection,
- xcb_visualid_t visual_id)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL
+vkGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
+ xcb_connection_t *connection, xcb_visualid_t visual_id) {
VkBool32 result = VK_FALSE;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
@@ -922,41 +784,32 @@ VK_LAYER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentatio
SwpPhysicalDevice *pPhysicalDevice = &my_data->physicalDeviceMap[physicalDevice];
// Validate that the platform extension was enabled:
- if (pPhysicalDevice && pPhysicalDevice->pInstance &&
- !pPhysicalDevice->pInstance->xcbSurfaceExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
- pPhysicalDevice->pInstance,
- "VkInstance",
+ if (pPhysicalDevice && pPhysicalDevice->pInstance && !pPhysicalDevice->pInstance->xcbSurfaceExtensionEnabled) {
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, pPhysicalDevice->pInstance, "VkInstance",
SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkInstance.",
- __FUNCTION__, VK_KHR_XCB_SURFACE_EXTENSION_NAME);
+ "%s() called even though the %s extension was not enabled for this VkInstance.", __FUNCTION__,
+ VK_KHR_XCB_SURFACE_EXTENSION_NAME);
}
- if (pPhysicalDevice->gotQueueFamilyPropertyCount &&
- (queueFamilyIndex >= pPhysicalDevice->numOfQueueFamilies)) {
- skipCall |= LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
- pPhysicalDevice,
- "VkPhysicalDevice",
- queueFamilyIndex,
- pPhysicalDevice->numOfQueueFamilies);
+ if (pPhysicalDevice->gotQueueFamilyPropertyCount && (queueFamilyIndex >= pPhysicalDevice->numOfQueueFamilies)) {
+ skipCall |=
+ LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, pPhysicalDevice,
+ "VkPhysicalDevice", queueFamilyIndex, pPhysicalDevice->numOfQueueFamilies);
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
// Call down the call chain:
- result = my_data->instance_dispatch_table->GetPhysicalDeviceXcbPresentationSupportKHR(
- physicalDevice, queueFamilyIndex, connection, visual_id);
+ result = my_data->instance_dispatch_table->GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex,
+ connection, visual_id);
}
return result;
}
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(
- VkInstance instance,
- const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSurfaceKHR* pSurface)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator,
+ VkSurfaceKHR *pSurface) {
VkResult result = VK_SUCCESS;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
@@ -965,37 +818,27 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(
// Validate that the platform extension was enabled:
if (pInstance && !pInstance->xlibSurfaceExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
- pInstance,
- "VkInstance",
- SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkInstance.",
- __FUNCTION__, VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, pInstance, "VkInstance", SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
+ "%s() called even though the %s extension was not enabled for this VkInstance.", __FUNCTION__,
+ VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
}
if (!pCreateInfo) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo");
} else {
if (pCreateInfo->sType != VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR) {
- skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo",
+ skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo",
"VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR");
}
if (pCreateInfo->pNext != NULL) {
- skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo");
+ skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo");
}
}
if (VK_FALSE == skipCall) {
// Call down the call chain:
loader_platform_thread_unlock_mutex(&globalLock);
- result = my_data->instance_dispatch_table->CreateXlibSurfaceKHR(
- instance, pCreateInfo, pAllocator, pSurface);
+ result = my_data->instance_dispatch_table->CreateXlibSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
loader_platform_thread_lock_mutex(&globalLock);
// Obtain this pointer again after locking:
@@ -1004,8 +847,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(
// Record the VkSurfaceKHR returned by the ICD:
my_data->surfaceMap[*pSurface].surface = *pSurface;
my_data->surfaceMap[*pSurface].pInstance = pInstance;
- my_data->surfaceMap[*pSurface].usedAllocatorToCreate =
- (pAllocator != NULL);
+ my_data->surfaceMap[*pSurface].usedAllocatorToCreate = (pAllocator != NULL);
my_data->surfaceMap[*pSurface].numQueueFamilyIndexSupport = 0;
my_data->surfaceMap[*pSurface].pQueueFamilyIndexSupport = NULL;
// Point to the associated SwpInstance:
@@ -1018,12 +860,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR(
- VkPhysicalDevice physicalDevice,
- uint32_t queueFamilyIndex,
- Display* dpy,
- VisualID visualID)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ Display *dpy, VisualID visualID) {
VkBool32 result = VK_FALSE;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
@@ -1031,36 +870,30 @@ VK_LAYER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentati
SwpPhysicalDevice *pPhysicalDevice = &my_data->physicalDeviceMap[physicalDevice];
// Validate that the platform extension was enabled:
- if (pPhysicalDevice && pPhysicalDevice->pInstance &&
- !pPhysicalDevice->pInstance->xlibSurfaceExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
- pPhysicalDevice->pInstance,
- "VkInstance",
+ if (pPhysicalDevice && pPhysicalDevice->pInstance && !pPhysicalDevice->pInstance->xlibSurfaceExtensionEnabled) {
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, pPhysicalDevice->pInstance, "VkInstance",
SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkInstance.",
- __FUNCTION__, VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
+ "%s() called even though the %s extension was not enabled for this VkInstance.", __FUNCTION__,
+ VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
}
- if (pPhysicalDevice->gotQueueFamilyPropertyCount &&
- (queueFamilyIndex >= pPhysicalDevice->numOfQueueFamilies)) {
- skipCall |= LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
- pPhysicalDevice,
- "VkPhysicalDevice",
- queueFamilyIndex,
- pPhysicalDevice->numOfQueueFamilies);
+ if (pPhysicalDevice->gotQueueFamilyPropertyCount && (queueFamilyIndex >= pPhysicalDevice->numOfQueueFamilies)) {
+ skipCall |=
+ LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, pPhysicalDevice,
+ "VkPhysicalDevice", queueFamilyIndex, pPhysicalDevice->numOfQueueFamilies);
}
loader_platform_thread_unlock_mutex(&globalLock);
if (VK_FALSE == skipCall) {
// Call down the call chain:
- result = my_data->instance_dispatch_table->GetPhysicalDeviceXlibPresentationSupportKHR(
- physicalDevice, queueFamilyIndex, dpy, visualID);
+ result = my_data->instance_dispatch_table->GetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex,
+ dpy, visualID);
}
return result;
}
#endif // VK_USE_PLATFORM_XLIB_KHR
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
@@ -1073,14 +906,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(VkInstance insta
pSurface->pInstance->surfaces.erase(surface);
}
if (!pSurface->swapchains.empty()) {
- LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, instance, "VkInstance",
- SWAPCHAIN_DEL_OBJECT_BEFORE_CHILDREN,
+ LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, instance, "VkInstance", SWAPCHAIN_DEL_OBJECT_BEFORE_CHILDREN,
"%s() called before all of its associated "
"VkSwapchainKHRs were destroyed.",
__FUNCTION__);
// Empty and then delete all SwpSwapchain's
- for (auto it = pSurface->swapchains.begin() ;
- it != pSurface->swapchains.end() ; it++) {
+ for (auto it = pSurface->swapchains.begin(); it != pSurface->swapchains.end(); it++) {
// Delete all SwpImage's
it->second->images.clear();
// In case the swapchain's device hasn't been destroyed yet
@@ -1094,8 +925,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(VkInstance insta
pSurface->swapchains.clear();
}
if ((pAllocator != NULL) != pSurface->usedAllocatorToCreate) {
- LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, instance, "VkInstance",
- SWAPCHAIN_INCOMPATIBLE_ALLOCATOR,
+ LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, instance, "VkInstance", SWAPCHAIN_INCOMPATIBLE_ALLOCATOR,
"%s() called with incompatible pAllocator from when "
"the object was created.",
__FUNCTION__);
@@ -1106,28 +936,24 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR(VkInstance insta
if (VK_FALSE == skipCall) {
// Call down the call chain:
- my_data->instance_dispatch_table->DestroySurfaceKHR(
- instance, surface, pAllocator);
+ my_data->instance_dispatch_table->DestroySurfaceKHR(instance, surface, pAllocator);
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkEnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices) {
VkResult result = VK_SUCCESS;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
// Call down the call chain:
- result = my_data->instance_dispatch_table->EnumeratePhysicalDevices(
- instance, pPhysicalDeviceCount, pPhysicalDevices);
+ result = my_data->instance_dispatch_table->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
loader_platform_thread_lock_mutex(&globalLock);
SwpInstance *pInstance = &(my_data->instanceMap[instance]);
- if ((result == VK_SUCCESS) && pInstance && pPhysicalDevices &&
- (*pPhysicalDeviceCount > 0)) {
+ if ((result == VK_SUCCESS) && pInstance && pPhysicalDevices && (*pPhysicalDeviceCount > 0)) {
// Record the VkPhysicalDevices returned by the ICD:
for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
- my_data->physicalDeviceMap[pPhysicalDevices[i]].physicalDevice =
- pPhysicalDevices[i];
+ my_data->physicalDeviceMap[pPhysicalDevices[i]].physicalDevice = pPhysicalDevices[i];
my_data->physicalDeviceMap[pPhysicalDevices[i]].pInstance = pInstance;
my_data->physicalDeviceMap[pPhysicalDevices[i]].pDevice = NULL;
my_data->physicalDeviceMap[pPhysicalDevices[i]].gotQueueFamilyPropertyCount = false;
@@ -1138,8 +964,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices(VkInst
my_data->physicalDeviceMap[pPhysicalDevices[i]].pPresentModes = NULL;
// Point to the associated SwpInstance:
if (pInstance) {
- pInstance->physicalDevices[pPhysicalDevices[i]] =
- &my_data->physicalDeviceMap[pPhysicalDevices[i]];
+ pInstance->physicalDevices[pPhysicalDevices[i]] = &my_data->physicalDeviceMap[pPhysicalDevices[i]];
}
}
}
@@ -1147,14 +972,15 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices(VkInst
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice physicalDevice,
+ const VkDeviceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
- PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice) fpGetInstanceProcAddr(NULL, "vkCreateDevice");
+ PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
if (fpCreateDevice == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -1182,8 +1008,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice p
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
dispatch_key key = get_dispatch_key(device);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
@@ -1199,14 +1024,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, cons
pDevice->pPhysicalDevice->pDevice = NULL;
}
if (!pDevice->swapchains.empty()) {
- LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_DEL_OBJECT_BEFORE_CHILDREN,
+ LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice", SWAPCHAIN_DEL_OBJECT_BEFORE_CHILDREN,
"%s() called before all of its associated "
"VkSwapchainKHRs were destroyed.",
__FUNCTION__);
// Empty and then delete all SwpSwapchain's
- for (auto it = pDevice->swapchains.begin() ;
- it != pDevice->swapchains.end() ; it++) {
+ for (auto it = pDevice->swapchains.begin(); it != pDevice->swapchains.end(); it++) {
// Delete all SwpImage's
it->second->images.clear();
// In case the swapchain's surface hasn't been destroyed yet
@@ -1226,12 +1049,9 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, cons
loader_platform_thread_unlock_mutex(&globalLock);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR(
- VkPhysicalDevice physicalDevice,
- uint32_t queueFamilyIndex,
- VkSurfaceKHR surface,
- VkBool32* pSupported)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex, VkSurfaceKHR surface,
+ VkBool32 *pSupported) {
VkResult result = VK_SUCCESS;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
@@ -1239,44 +1059,32 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupport
SwpPhysicalDevice *pPhysicalDevice = &my_data->physicalDeviceMap[physicalDevice];
// Validate that the surface extension was enabled:
- if (pPhysicalDevice && pPhysicalDevice->pInstance &&
- !pPhysicalDevice->pInstance->surfaceExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
- pPhysicalDevice->pInstance,
- "VkInstance",
+ if (pPhysicalDevice && pPhysicalDevice->pInstance && !pPhysicalDevice->pInstance->surfaceExtensionEnabled) {
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, pPhysicalDevice->pInstance, "VkInstance",
SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkInstance.",
- __FUNCTION__, VK_KHR_SURFACE_EXTENSION_NAME);
+ "%s() called even though the %s extension was not enabled for this VkInstance.", __FUNCTION__,
+ VK_KHR_SURFACE_EXTENSION_NAME);
}
if (!pPhysicalDevice->gotQueueFamilyPropertyCount) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
- pPhysicalDevice,
- "VkPhysicalDevice",
- SWAPCHAIN_DID_NOT_QUERY_QUEUE_FAMILIES,
- "%s() called before calling the "
- "vkGetPhysicalDeviceQueueFamilyProperties "
- "function.",
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, pPhysicalDevice, "VkPhysicalDevice",
+ SWAPCHAIN_DID_NOT_QUERY_QUEUE_FAMILIES, "%s() called before calling the "
+ "vkGetPhysicalDeviceQueueFamilyProperties "
+ "function.",
__FUNCTION__);
- } else if (pPhysicalDevice->gotQueueFamilyPropertyCount &&
- (queueFamilyIndex >= pPhysicalDevice->numOfQueueFamilies)) {
- skipCall |= LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
- pPhysicalDevice,
- "VkPhysicalDevice",
- queueFamilyIndex,
- pPhysicalDevice->numOfQueueFamilies);
+ } else if (pPhysicalDevice->gotQueueFamilyPropertyCount && (queueFamilyIndex >= pPhysicalDevice->numOfQueueFamilies)) {
+ skipCall |=
+ LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, pPhysicalDevice,
+ "VkPhysicalDevice", queueFamilyIndex, pPhysicalDevice->numOfQueueFamilies);
}
if (!pSupported) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
- physicalDevice,
- "pSupported");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, physicalDevice, "pSupported");
}
if (VK_FALSE == skipCall) {
// Call down the call chain:
loader_platform_thread_unlock_mutex(&globalLock);
- result = my_data->instance_dispatch_table->GetPhysicalDeviceSurfaceSupportKHR(
- physicalDevice, queueFamilyIndex, surface,
- pSupported);
+ result = my_data->instance_dispatch_table->GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface,
+ pSupported);
loader_platform_thread_lock_mutex(&globalLock);
// Obtain this pointer again after locking:
@@ -1284,24 +1092,20 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupport
if ((result == VK_SUCCESS) && pSupported && pPhysicalDevice) {
// Record the result of this query:
SwpInstance *pInstance = pPhysicalDevice->pInstance;
- SwpSurface *pSurface =
- (pInstance) ? pInstance->surfaces[surface] : NULL;
+ SwpSurface *pSurface = (pInstance) ? pInstance->surfaces[surface] : NULL;
if (pSurface) {
pPhysicalDevice->supportedSurfaces[surface] = pSurface;
if (!pSurface->numQueueFamilyIndexSupport) {
if (pPhysicalDevice->gotQueueFamilyPropertyCount) {
- pSurface->pQueueFamilyIndexSupport = (VkBool32 *)
- malloc(pPhysicalDevice->numOfQueueFamilies *
- sizeof(VkBool32));
+ pSurface->pQueueFamilyIndexSupport =
+ (VkBool32 *)malloc(pPhysicalDevice->numOfQueueFamilies * sizeof(VkBool32));
if (pSurface->pQueueFamilyIndexSupport != NULL) {
- pSurface->numQueueFamilyIndexSupport =
- pPhysicalDevice->numOfQueueFamilies;
+ pSurface->numQueueFamilyIndexSupport = pPhysicalDevice->numOfQueueFamilies;
}
}
}
if (pSurface->numQueueFamilyIndexSupport) {
- pSurface->pQueueFamilyIndexSupport[queueFamilyIndex] =
- *pSupported;
+ pSurface->pQueueFamilyIndexSupport[queueFamilyIndex] = *pSupported;
}
}
}
@@ -1312,11 +1116,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupport
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
- VkPhysicalDevice physicalDevice,
- VkSurfaceKHR surface,
- VkSurfaceCapabilitiesKHR* pSurfaceCapabilities)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
+ VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
VkResult result = VK_SUCCESS;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
@@ -1324,26 +1126,21 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabil
SwpPhysicalDevice *pPhysicalDevice = &my_data->physicalDeviceMap[physicalDevice];
// Validate that the surface extension was enabled:
- if (pPhysicalDevice && pPhysicalDevice->pInstance &&
- !pPhysicalDevice->pInstance->surfaceExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
- pPhysicalDevice->pInstance,
- "VkInstance",
+ if (pPhysicalDevice && pPhysicalDevice->pInstance && !pPhysicalDevice->pInstance->surfaceExtensionEnabled) {
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, pPhysicalDevice->pInstance, "VkInstance",
SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkInstance.",
- __FUNCTION__, VK_KHR_SURFACE_EXTENSION_NAME);
+ "%s() called even though the %s extension was not enabled for this VkInstance.", __FUNCTION__,
+ VK_KHR_SURFACE_EXTENSION_NAME);
}
if (!pSurfaceCapabilities) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
- physicalDevice,
- "pSurfaceCapabilities");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, physicalDevice, "pSurfaceCapabilities");
}
if (VK_FALSE == skipCall) {
// Call down the call chain:
loader_platform_thread_unlock_mutex(&globalLock);
- result = my_data->instance_dispatch_table->GetPhysicalDeviceSurfaceCapabilitiesKHR(
- physicalDevice, surface, pSurfaceCapabilities);
+ result = my_data->instance_dispatch_table->GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface,
+ pSurfaceCapabilities);
loader_platform_thread_lock_mutex(&globalLock);
// Obtain this pointer again after locking:
@@ -1351,7 +1148,7 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabil
if ((result == VK_SUCCESS) && pPhysicalDevice) {
// Record the result of this query:
pPhysicalDevice->gotSurfaceCapabilities = true;
-// FIXME: NEED TO COPY THIS DATA, BECAUSE pSurfaceCapabilities POINTS TO APP-ALLOCATED DATA
+ // FIXME: NEED TO COPY THIS DATA, BECAUSE pSurfaceCapabilities POINTS TO APP-ALLOCATED DATA
pPhysicalDevice->surfaceCapabilities = *pSurfaceCapabilities;
}
loader_platform_thread_unlock_mutex(&globalLock);
@@ -1361,12 +1158,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabil
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR(
- VkPhysicalDevice physicalDevice,
- VkSurfaceKHR surface,
- uint32_t* pSurfaceFormatCount,
- VkSurfaceFormatKHR* pSurfaceFormats)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pSurfaceFormatCount,
+ VkSurfaceFormatKHR *pSurfaceFormats) {
VkResult result = VK_SUCCESS;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
@@ -1374,54 +1168,40 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormats
SwpPhysicalDevice *pPhysicalDevice = &my_data->physicalDeviceMap[physicalDevice];
// Validate that the surface extension was enabled:
- if (pPhysicalDevice && pPhysicalDevice->pInstance &&
- !pPhysicalDevice->pInstance->surfaceExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
- pPhysicalDevice->pInstance,
- "VkInstance",
+ if (pPhysicalDevice && pPhysicalDevice->pInstance && !pPhysicalDevice->pInstance->surfaceExtensionEnabled) {
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, pPhysicalDevice->pInstance, "VkInstance",
SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkInstance.",
- __FUNCTION__, VK_KHR_SURFACE_EXTENSION_NAME);
+ "%s() called even though the %s extension was not enabled for this VkInstance.", __FUNCTION__,
+ VK_KHR_SURFACE_EXTENSION_NAME);
}
if (!pSurfaceFormatCount) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
- physicalDevice,
- "pSurfaceFormatCount");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, physicalDevice, "pSurfaceFormatCount");
}
if (VK_FALSE == skipCall) {
// Call down the call chain:
loader_platform_thread_unlock_mutex(&globalLock);
- result = my_data->instance_dispatch_table->GetPhysicalDeviceSurfaceFormatsKHR(
- physicalDevice, surface, pSurfaceFormatCount, pSurfaceFormats);
+ result = my_data->instance_dispatch_table->GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
+ pSurfaceFormats);
loader_platform_thread_lock_mutex(&globalLock);
// Obtain this pointer again after locking:
pPhysicalDevice = &my_data->physicalDeviceMap[physicalDevice];
- if ((result == VK_SUCCESS) && pPhysicalDevice && !pSurfaceFormats &&
- pSurfaceFormatCount) {
+ if ((result == VK_SUCCESS) && pPhysicalDevice && !pSurfaceFormats && pSurfaceFormatCount) {
// Record the result of this preliminary query:
pPhysicalDevice->surfaceFormatCount = *pSurfaceFormatCount;
- }
- else if ((result == VK_SUCCESS) && pPhysicalDevice && pSurfaceFormats &&
- pSurfaceFormatCount) {
+ } else if ((result == VK_SUCCESS) && pPhysicalDevice && pSurfaceFormats && pSurfaceFormatCount) {
// Compare the preliminary value of *pSurfaceFormatCount with the
// value this time:
if (*pSurfaceFormatCount > pPhysicalDevice->surfaceFormatCount) {
- LOG_ERROR_INVALID_COUNT(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
- physicalDevice,
- "pSurfaceFormatCount",
- "pSurfaceFormats",
- *pSurfaceFormatCount,
- pPhysicalDevice->surfaceFormatCount);
- }
- else if (*pSurfaceFormatCount > 0) {
+ LOG_ERROR_INVALID_COUNT(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, physicalDevice, "pSurfaceFormatCount",
+ "pSurfaceFormats", *pSurfaceFormatCount, pPhysicalDevice->surfaceFormatCount);
+ } else if (*pSurfaceFormatCount > 0) {
// Record the result of this query:
pPhysicalDevice->surfaceFormatCount = *pSurfaceFormatCount;
- pPhysicalDevice->pSurfaceFormats = (VkSurfaceFormatKHR *)
- malloc(*pSurfaceFormatCount * sizeof(VkSurfaceFormatKHR));
+ pPhysicalDevice->pSurfaceFormats = (VkSurfaceFormatKHR *)malloc(*pSurfaceFormatCount * sizeof(VkSurfaceFormatKHR));
if (pPhysicalDevice->pSurfaceFormats) {
- for (uint32_t i = 0 ; i < *pSurfaceFormatCount ; i++) {
+ for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
pPhysicalDevice->pSurfaceFormats[i] = pSurfaceFormats[i];
}
} else {
@@ -1436,12 +1216,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormats
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR(
- VkPhysicalDevice physicalDevice,
- VkSurfaceKHR surface,
- uint32_t* pPresentModeCount,
- VkPresentModeKHR* pPresentModes)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t *pPresentModeCount,
+ VkPresentModeKHR *pPresentModes) {
VkResult result = VK_SUCCESS;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
@@ -1449,54 +1226,40 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresent
SwpPhysicalDevice *pPhysicalDevice = &my_data->physicalDeviceMap[physicalDevice];
// Validate that the surface extension was enabled:
- if (pPhysicalDevice && pPhysicalDevice->pInstance &&
- !pPhysicalDevice->pInstance->surfaceExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
- pPhysicalDevice->pInstance,
- "VkInstance",
+ if (pPhysicalDevice && pPhysicalDevice->pInstance && !pPhysicalDevice->pInstance->surfaceExtensionEnabled) {
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, pPhysicalDevice->pInstance, "VkInstance",
SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkInstance.",
- __FUNCTION__, VK_KHR_SURFACE_EXTENSION_NAME);
+ "%s() called even though the %s extension was not enabled for this VkInstance.", __FUNCTION__,
+ VK_KHR_SURFACE_EXTENSION_NAME);
}
if (!pPresentModeCount) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
- physicalDevice,
- "pPresentModeCount");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, physicalDevice, "pPresentModeCount");
}
if (VK_FALSE == skipCall) {
// Call down the call chain:
loader_platform_thread_unlock_mutex(&globalLock);
- result = my_data->instance_dispatch_table->GetPhysicalDeviceSurfacePresentModesKHR(
- physicalDevice, surface, pPresentModeCount, pPresentModes);
+ result = my_data->instance_dispatch_table->GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface,
+ pPresentModeCount, pPresentModes);
loader_platform_thread_lock_mutex(&globalLock);
// Obtain this pointer again after locking:
pPhysicalDevice = &my_data->physicalDeviceMap[physicalDevice];
- if ((result == VK_SUCCESS) && pPhysicalDevice && !pPresentModes &&
- pPresentModeCount) {
+ if ((result == VK_SUCCESS) && pPhysicalDevice && !pPresentModes && pPresentModeCount) {
// Record the result of this preliminary query:
pPhysicalDevice->presentModeCount = *pPresentModeCount;
- }
- else if ((result == VK_SUCCESS) && pPhysicalDevice && pPresentModes &&
- pPresentModeCount) {
+ } else if ((result == VK_SUCCESS) && pPhysicalDevice && pPresentModes && pPresentModeCount) {
// Compare the preliminary value of *pPresentModeCount with the
// value this time:
if (*pPresentModeCount > pPhysicalDevice->presentModeCount) {
- LOG_ERROR_INVALID_COUNT(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
- physicalDevice,
- "pPresentModeCount",
- "pPresentModes",
- *pPresentModeCount,
- pPhysicalDevice->presentModeCount);
- }
- else if (*pPresentModeCount > 0) {
+ LOG_ERROR_INVALID_COUNT(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, physicalDevice, "pPresentModeCount",
+ "pPresentModes", *pPresentModeCount, pPhysicalDevice->presentModeCount);
+ } else if (*pPresentModeCount > 0) {
// Record the result of this query:
pPhysicalDevice->presentModeCount = *pPresentModeCount;
- pPhysicalDevice->pPresentModes = (VkPresentModeKHR *)
- malloc(*pPresentModeCount * sizeof(VkPresentModeKHR));
+ pPhysicalDevice->pPresentModes = (VkPresentModeKHR *)malloc(*pPresentModeCount * sizeof(VkPresentModeKHR));
if (pPhysicalDevice->pPresentModes) {
- for (uint32_t i = 0 ; i < *pPresentModeCount ; i++) {
+ for (uint32_t i = 0; i < *pPresentModeCount; i++) {
pPhysicalDevice->pPresentModes[i] = pPresentModes[i];
}
} else {
@@ -1514,13 +1277,10 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresent
// This function does the up-front validation work for vkCreateSwapchainKHR(),
// and returns VK_TRUE if a logging callback indicates that the call down the
// chain should be skipped:
-static VkBool32 validateCreateSwapchainKHR(
- VkDevice device,
- const VkSwapchainCreateInfoKHR* pCreateInfo,
- VkSwapchainKHR* pSwapchain)
-{
-// TODO: Validate cases of re-creating a swapchain (the current code
-// assumes a new swapchain is being created).
+static VkBool32 validateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
+ VkSwapchainKHR *pSwapchain) {
+ // TODO: Validate cases of re-creating a swapchain (the current code
+ // assumes a new swapchain is being created).
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
char fn[] = "vkCreateSwapchainKHR";
@@ -1528,32 +1288,23 @@ static VkBool32 validateCreateSwapchainKHR(
// Validate that the swapchain extension was enabled:
if (pDevice && !pDevice->swapchainExtensionEnabled) {
- return LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkDevice.",
- fn, VK_KHR_SWAPCHAIN_EXTENSION_NAME );
+ return LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice", SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
+ "%s() called even though the %s extension was not enabled for this VkDevice.", fn,
+ VK_KHR_SWAPCHAIN_EXTENSION_NAME);
}
if (!pCreateInfo) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo");
} else {
if (pCreateInfo->sType != VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR) {
- skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo",
+ skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo",
"VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR");
}
if (pCreateInfo->pNext != NULL) {
- skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pCreateInfo");
+ skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pCreateInfo");
}
}
if (!pSwapchain) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pSwapchain");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pSwapchain");
}
// Keep around a useful pointer to pPhysicalDevice:
@@ -1563,13 +1314,10 @@ static VkBool32 validateCreateSwapchainKHR(
// vkGetPhysicalDeviceQueueFamilyProperties
if (pPhysicalDevice && pPhysicalDevice->gotQueueFamilyPropertyCount) {
for (auto i = 0; i < pCreateInfo->queueFamilyIndexCount; i++) {
- if (pCreateInfo->pQueueFamilyIndices[i] >=
- pPhysicalDevice->numOfQueueFamilies) {
- skipCall |= LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(
- VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
- pPhysicalDevice, "VkPhysicalDevice",
- pCreateInfo->pQueueFamilyIndices[i],
- pPhysicalDevice->numOfQueueFamilies);
+ if (pCreateInfo->pQueueFamilyIndices[i] >= pPhysicalDevice->numOfQueueFamilies) {
+ skipCall |= LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, pPhysicalDevice,
+ "VkPhysicalDevice", pCreateInfo->pQueueFamilyIndices[i],
+ pPhysicalDevice->numOfQueueFamilies);
}
}
}
@@ -1577,8 +1325,7 @@ static VkBool32 validateCreateSwapchainKHR(
// Validate pCreateInfo values with the results of
// vkGetPhysicalDeviceSurfaceCapabilitiesKHR():
if (!pPhysicalDevice || !pPhysicalDevice->gotSurfaceCapabilities) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_CREATE_SWAP_WITHOUT_QUERY,
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice", SWAPCHAIN_CREATE_SWAP_WITHOUT_QUERY,
"%s() called before calling "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR().",
fn);
@@ -1586,12 +1333,9 @@ static VkBool32 validateCreateSwapchainKHR(
// Validate pCreateInfo->surface to make sure that
// vkGetPhysicalDeviceSurfaceSupportKHR() reported this as a supported
// surface:
- SwpSurface *pSurface =
- ((pPhysicalDevice) ?
- pPhysicalDevice->supportedSurfaces[pCreateInfo->surface] : NULL);
+ SwpSurface *pSurface = ((pPhysicalDevice) ? pPhysicalDevice->supportedSurfaces[pCreateInfo->surface] : NULL);
if (!pSurface) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_CREATE_UNSUPPORTED_SURFACE,
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice", SWAPCHAIN_CREATE_UNSUPPORTED_SURFACE,
"%s() called with pCreateInfo->surface that "
"was not returned by "
"vkGetPhysicalDeviceSurfaceSupportKHR() "
@@ -1603,18 +1347,13 @@ static VkBool32 validateCreateSwapchainKHR(
// VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
VkSurfaceCapabilitiesKHR *pCapabilities = &pPhysicalDevice->surfaceCapabilities;
if ((pCreateInfo->minImageCount < pCapabilities->minImageCount) ||
- ((pCapabilities->maxImageCount > 0) &&
- (pCreateInfo->minImageCount > pCapabilities->maxImageCount))) {
+ ((pCapabilities->maxImageCount > 0) && (pCreateInfo->minImageCount > pCapabilities->maxImageCount))) {
skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_CREATE_SWAP_BAD_MIN_IMG_COUNT,
- "%s() called with pCreateInfo->minImageCount "
- "= %d, which is outside the bounds returned "
- "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. "
- "minImageCount = %d, maxImageCount = %d).",
- fn,
- pCreateInfo->minImageCount,
- pCapabilities->minImageCount,
- pCapabilities->maxImageCount);
+ SWAPCHAIN_CREATE_SWAP_BAD_MIN_IMG_COUNT, "%s() called with pCreateInfo->minImageCount "
+ "= %d, which is outside the bounds returned "
+ "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. "
+ "minImageCount = %d, maxImageCount = %d).",
+ fn, pCreateInfo->minImageCount, pCapabilities->minImageCount, pCapabilities->maxImageCount);
}
// Validate pCreateInfo->imageExtent against
// VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
@@ -1623,43 +1362,32 @@ static VkBool32 validateCreateSwapchainKHR(
(pCreateInfo->imageExtent.width > pCapabilities->maxImageExtent.width) ||
(pCreateInfo->imageExtent.height < pCapabilities->minImageExtent.height) ||
(pCreateInfo->imageExtent.height > pCapabilities->maxImageExtent.height))) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_CREATE_SWAP_OUT_OF_BOUNDS_EXTENTS,
- "%s() called with pCreateInfo->imageExtent = "
- "(%d,%d), which is outside the bounds "
- "returned by vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): "
- "currentExtent = (%d,%d), minImageExtent = "
- "(%d,%d), maxImageExtent = (%d,%d).",
- fn,
- pCreateInfo->imageExtent.width,
- pCreateInfo->imageExtent.height,
- pCapabilities->currentExtent.width,
- pCapabilities->currentExtent.height,
- pCapabilities->minImageExtent.width,
- pCapabilities->minImageExtent.height,
- pCapabilities->maxImageExtent.width,
- pCapabilities->maxImageExtent.height);
+ skipCall |= LOG_ERROR(
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice", SWAPCHAIN_CREATE_SWAP_OUT_OF_BOUNDS_EXTENTS,
+ "%s() called with pCreateInfo->imageExtent = "
+ "(%d,%d), which is outside the bounds "
+ "returned by vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): "
+ "currentExtent = (%d,%d), minImageExtent = "
+ "(%d,%d), maxImageExtent = (%d,%d).",
+ fn, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, pCapabilities->currentExtent.width,
+ pCapabilities->currentExtent.height, pCapabilities->minImageExtent.width, pCapabilities->minImageExtent.height,
+ pCapabilities->maxImageExtent.width, pCapabilities->maxImageExtent.height);
}
if ((pCapabilities->currentExtent.width != -1) &&
((pCreateInfo->imageExtent.width != pCapabilities->currentExtent.width) ||
(pCreateInfo->imageExtent.height != pCapabilities->currentExtent.height))) {
skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_CREATE_SWAP_EXTENTS_NO_MATCH_WIN,
- "%s() called with pCreateInfo->imageExtent = "
- "(%d,%d), which is not equal to the "
- "currentExtent = (%d,%d) returned by "
- "vkGetPhysicalDeviceSurfaceCapabilitiesKHR().",
- fn,
- pCreateInfo->imageExtent.width,
- pCreateInfo->imageExtent.height,
- pCapabilities->currentExtent.width,
- pCapabilities->currentExtent.height);
+ SWAPCHAIN_CREATE_SWAP_EXTENTS_NO_MATCH_WIN, "%s() called with pCreateInfo->imageExtent = "
+ "(%d,%d), which is not equal to the "
+ "currentExtent = (%d,%d) returned by "
+ "vkGetPhysicalDeviceSurfaceCapabilitiesKHR().",
+ fn, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
+ pCapabilities->currentExtent.width, pCapabilities->currentExtent.height);
}
// Validate pCreateInfo->preTransform has one bit set (1st two
// lines of if-statement), which bit is also set in
// VkSurfaceCapabilitiesKHR::supportedTransforms (3rd line of if-statement):
- if (!pCreateInfo->preTransform ||
- (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
+ if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
!(pCreateInfo->preTransform & pCapabilities->supportedTransforms)) {
// This is an error situation; one for which we'd like to give
// the developer a helpful, multi-line error message. Build it
@@ -1668,34 +1396,27 @@ static VkBool32 validateCreateSwapchainKHR(
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s() called with a non-supported "
- "pCreateInfo->preTransform (i.e. %s). "
- "Supported values are:\n",
- fn,
- surfaceTransformStr(pCreateInfo->preTransform));
+ "pCreateInfo->preTransform (i.e. %s). "
+ "Supported values are:\n",
+ fn, surfaceTransformStr(pCreateInfo->preTransform));
errorString += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1 << i) & pCapabilities->supportedTransforms) {
- const char *newStr =
- surfaceTransformStr((VkSurfaceTransformFlagBitsKHR) (1 << i));
+ const char *newStr = surfaceTransformStr((VkSurfaceTransformFlagBitsKHR)(1 << i));
sprintf(str, " %s\n", newStr);
errorString += str;
}
}
// Log the message that we've built up:
- skipCall |= debug_report_log_msg(my_data->report_data,
- VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- (uint64_t) device, __LINE__,
- SWAPCHAIN_CREATE_SWAP_BAD_PRE_TRANSFORM,
- LAYER_NAME,
- errorString.c_str());
+ skipCall |= debug_report_log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, (uint64_t)device, __LINE__,
+ SWAPCHAIN_CREATE_SWAP_BAD_PRE_TRANSFORM, LAYER_NAME, errorString.c_str());
}
// Validate pCreateInfo->compositeAlpha has one bit set (1st two
// lines of if-statement), which bit is also set in
// VkSurfaceCapabilitiesKHR::supportedCompositeAlpha (3rd line of if-statement):
- if (!pCreateInfo->compositeAlpha ||
- (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
+ if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
!((pCreateInfo->compositeAlpha) & pCapabilities->supportedCompositeAlpha)) {
// This is an error situation; one for which we'd like to give
// the developer a helpful, multi-line error message. Build it
@@ -1704,62 +1425,47 @@ static VkBool32 validateCreateSwapchainKHR(
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s() called with a non-supported "
- "pCreateInfo->compositeAlpha (i.e. %s). "
- "Supported values are:\n",
- fn,
- surfaceCompositeAlphaStr(pCreateInfo->compositeAlpha));
+ "pCreateInfo->compositeAlpha (i.e. %s). "
+ "Supported values are:\n",
+ fn, surfaceCompositeAlphaStr(pCreateInfo->compositeAlpha));
errorString += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1 << i) & pCapabilities->supportedCompositeAlpha) {
- const char *newStr =
- surfaceCompositeAlphaStr((VkCompositeAlphaFlagBitsKHR) (1 << i));
+ const char *newStr = surfaceCompositeAlphaStr((VkCompositeAlphaFlagBitsKHR)(1 << i));
sprintf(str, " %s\n", newStr);
errorString += str;
}
}
// Log the message that we've built up:
- skipCall |= debug_report_log_msg(my_data->report_data,
- VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- (uint64_t) device, 0,
- SWAPCHAIN_CREATE_SWAP_BAD_COMPOSITE_ALPHA,
- LAYER_NAME,
- errorString.c_str());
+ skipCall |= debug_report_log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, (uint64_t)device, 0,
+ SWAPCHAIN_CREATE_SWAP_BAD_COMPOSITE_ALPHA, LAYER_NAME, errorString.c_str());
}
// Validate pCreateInfo->imageArraySize against
// VkSurfaceCapabilitiesKHR::maxImageArraySize:
- if ((pCreateInfo->imageArrayLayers < 1) ||
- (pCreateInfo->imageArrayLayers > pCapabilities->maxImageArrayLayers)) {
+ if ((pCreateInfo->imageArrayLayers < 1) || (pCreateInfo->imageArrayLayers > pCapabilities->maxImageArrayLayers)) {
skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_CREATE_SWAP_BAD_IMG_ARRAY_SIZE,
- "%s() called with a non-supported "
- "pCreateInfo->imageArraySize (i.e. %d). "
- "Minimum value is 1, maximum value is %d.",
- fn,
- pCreateInfo->imageArrayLayers,
- pCapabilities->maxImageArrayLayers);
+ SWAPCHAIN_CREATE_SWAP_BAD_IMG_ARRAY_SIZE, "%s() called with a non-supported "
+ "pCreateInfo->imageArraySize (i.e. %d). "
+ "Minimum value is 1, maximum value is %d.",
+ fn, pCreateInfo->imageArrayLayers, pCapabilities->maxImageArrayLayers);
}
// Validate pCreateInfo->imageUsage against
// VkSurfaceCapabilitiesKHR::supportedUsageFlags:
- if (pCreateInfo->imageUsage !=
- (pCreateInfo->imageUsage & pCapabilities->supportedUsageFlags)) {
+ if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & pCapabilities->supportedUsageFlags)) {
skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_CREATE_SWAP_BAD_IMG_USAGE_FLAGS,
- "%s() called with a non-supported "
- "pCreateInfo->imageUsage (i.e. 0x%08x)."
- " Supported flag bits are 0x%08x.",
- fn,
- pCreateInfo->imageUsage,
- pCapabilities->supportedUsageFlags);
+ SWAPCHAIN_CREATE_SWAP_BAD_IMG_USAGE_FLAGS, "%s() called with a non-supported "
+ "pCreateInfo->imageUsage (i.e. 0x%08x)."
+ " Supported flag bits are 0x%08x.",
+ fn, pCreateInfo->imageUsage, pCapabilities->supportedUsageFlags);
}
}
// Validate pCreateInfo values with the results of
// vkGetPhysicalDeviceSurfaceFormatsKHR():
if (!pPhysicalDevice || !pPhysicalDevice->surfaceFormatCount) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_CREATE_SWAP_WITHOUT_QUERY,
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice", SWAPCHAIN_CREATE_SWAP_WITHOUT_QUERY,
"%s() called before calling "
"vkGetPhysicalDeviceSurfaceFormatsKHR().",
fn);
@@ -1769,7 +1475,7 @@ static VkBool32 validateCreateSwapchainKHR(
bool foundFormat = false;
bool foundColorSpace = false;
bool foundMatch = false;
- for (uint32_t i = 0 ; i < pPhysicalDevice->surfaceFormatCount ; i++) {
+ for (uint32_t i = 0; i < pPhysicalDevice->surfaceFormatCount; i++) {
if (pCreateInfo->imageFormat == pPhysicalDevice->pSurfaceFormats[i].format) {
// Validate pCreateInfo->imageColorSpace against
// VkSurfaceFormatKHR::colorSpace:
@@ -1787,30 +1493,23 @@ static VkBool32 validateCreateSwapchainKHR(
if (!foundMatch) {
if (!foundFormat) {
if (!foundColorSpace) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device,
- "VkDevice",
- SWAPCHAIN_CREATE_SWAP_BAD_IMG_FMT_CLR_SP,
- "%s() called with neither a "
- "supported pCreateInfo->imageFormat "
- "(i.e. %d) nor a supported "
- "pCreateInfo->imageColorSpace "
- "(i.e. %d).",
- fn,
- pCreateInfo->imageFormat,
- pCreateInfo->imageColorSpace);
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
+ SWAPCHAIN_CREATE_SWAP_BAD_IMG_FMT_CLR_SP, "%s() called with neither a "
+ "supported pCreateInfo->imageFormat "
+ "(i.e. %d) nor a supported "
+ "pCreateInfo->imageColorSpace "
+ "(i.e. %d).",
+ fn, pCreateInfo->imageFormat, pCreateInfo->imageColorSpace);
} else {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device,
- "VkDevice",
- SWAPCHAIN_CREATE_SWAP_BAD_IMG_FORMAT,
- "%s() called with a non-supported "
- "pCreateInfo->imageFormat (i.e. %d).",
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
+ SWAPCHAIN_CREATE_SWAP_BAD_IMG_FORMAT, "%s() called with a non-supported "
+ "pCreateInfo->imageFormat (i.e. %d).",
fn, pCreateInfo->imageFormat);
}
} else if (!foundColorSpace) {
skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_CREATE_SWAP_BAD_IMG_COLOR_SPACE,
- "%s() called with a non-supported "
- "pCreateInfo->imageColorSpace (i.e. %d).",
+ SWAPCHAIN_CREATE_SWAP_BAD_IMG_COLOR_SPACE, "%s() called with a non-supported "
+ "pCreateInfo->imageColorSpace (i.e. %d).",
fn, pCreateInfo->imageColorSpace);
}
}
@@ -1819,8 +1518,7 @@ static VkBool32 validateCreateSwapchainKHR(
// Validate pCreateInfo values with the results of
// vkGetPhysicalDeviceSurfacePresentModesKHR():
if (!pPhysicalDevice || !pPhysicalDevice->presentModeCount) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_CREATE_SWAP_WITHOUT_QUERY,
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice", SWAPCHAIN_CREATE_SWAP_WITHOUT_QUERY,
"%s() called before calling "
"vkGetPhysicalDeviceSurfacePresentModesKHR().",
fn);
@@ -1828,7 +1526,7 @@ static VkBool32 validateCreateSwapchainKHR(
// Validate pCreateInfo->presentMode against
// vkGetPhysicalDeviceSurfacePresentModesKHR():
bool foundMatch = false;
- for (uint32_t i = 0 ; i < pPhysicalDevice->presentModeCount ; i++) {
+ for (uint32_t i = 0; i < pPhysicalDevice->presentModeCount; i++) {
if (pPhysicalDevice->pPresentModes[i] == pCreateInfo->presentMode) {
foundMatch = true;
break;
@@ -1836,49 +1534,37 @@ static VkBool32 validateCreateSwapchainKHR(
}
if (!foundMatch) {
skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_CREATE_SWAP_BAD_PRESENT_MODE,
- "%s() called with a non-supported "
- "pCreateInfo->presentMode (i.e. %s).",
- fn,
- presentModeStr(pCreateInfo->presentMode));
+ SWAPCHAIN_CREATE_SWAP_BAD_PRESENT_MODE, "%s() called with a non-supported "
+ "pCreateInfo->presentMode (i.e. %s).",
+ fn, presentModeStr(pCreateInfo->presentMode));
}
}
// Validate pCreateInfo->imageSharingMode and related values:
if (pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) {
- if ((pCreateInfo->queueFamilyIndexCount <= 1) ||
- !pCreateInfo->pQueueFamilyIndices) {
+ if ((pCreateInfo->queueFamilyIndexCount <= 1) || !pCreateInfo->pQueueFamilyIndices) {
skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_CREATE_SWAP_BAD_SHARING_VALUES,
- "%s() called with a supported "
- "pCreateInfo->sharingMode of (i.e. %s),"
- "but with a bad value(s) for "
- "pCreateInfo->queueFamilyIndexCount or "
- "pCreateInfo->pQueueFamilyIndices).",
- fn,
- sharingModeStr(pCreateInfo->imageSharingMode));
+ SWAPCHAIN_CREATE_SWAP_BAD_SHARING_VALUES, "%s() called with a supported "
+ "pCreateInfo->sharingMode of (i.e. %s),"
+ "but with a bad value(s) for "
+ "pCreateInfo->queueFamilyIndexCount or "
+ "pCreateInfo->pQueueFamilyIndices).",
+ fn, sharingModeStr(pCreateInfo->imageSharingMode));
}
} else if (pCreateInfo->imageSharingMode != VK_SHARING_MODE_EXCLUSIVE) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_CREATE_SWAP_BAD_SHARING_MODE,
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice", SWAPCHAIN_CREATE_SWAP_BAD_SHARING_MODE,
"%s() called with a non-supported "
"pCreateInfo->imageSharingMode (i.e. %s).",
- fn,
- sharingModeStr(pCreateInfo->imageSharingMode));
+ fn, sharingModeStr(pCreateInfo->imageSharingMode));
}
// Validate pCreateInfo->clipped:
- if (pCreateInfo &&
- (pCreateInfo->clipped != VK_FALSE) &&
- (pCreateInfo->clipped != VK_TRUE)) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device, "VkDevice",
- SWAPCHAIN_BAD_BOOL,
+ if (pCreateInfo && (pCreateInfo->clipped != VK_FALSE) && (pCreateInfo->clipped != VK_TRUE)) {
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice", SWAPCHAIN_BAD_BOOL,
"%s() called with a VkBool32 value that is "
"neither VK_TRUE nor VK_FALSE, but has the "
"numeric value of %d.",
- fn,
- pCreateInfo->clipped);
+ fn, pCreateInfo->clipped);
}
// Validate pCreateInfo->oldSwapchain:
@@ -1886,50 +1572,40 @@ static VkBool32 validateCreateSwapchainKHR(
SwpSwapchain *pOldSwapchain = &my_data->swapchainMap[pCreateInfo->oldSwapchain];
if (pOldSwapchain) {
if (device != pOldSwapchain->pDevice->device) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device, "VkDevice",
- SWAPCHAIN_DESTROY_SWAP_DIFF_DEVICE,
- "%s() called with a different VkDevice "
- "than the VkSwapchainKHR was created with.",
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
+ SWAPCHAIN_DESTROY_SWAP_DIFF_DEVICE, "%s() called with a different VkDevice "
+ "than the VkSwapchainKHR was created with.",
__FUNCTION__);
}
if (pCreateInfo->surface != pOldSwapchain->pSurface->surface) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device, "VkDevice",
- SWAPCHAIN_CREATE_SWAP_DIFF_SURFACE,
- "%s() called with pCreateInfo->oldSwapchain "
- "that has a different VkSurfaceKHR than "
- "pCreateInfo->surface.",
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
+ SWAPCHAIN_CREATE_SWAP_DIFF_SURFACE, "%s() called with pCreateInfo->oldSwapchain "
+ "that has a different VkSurfaceKHR than "
+ "pCreateInfo->surface.",
fn);
}
} else {
// TBD: Leave this in (not sure object_track will check this)?
- skipCall |= LOG_ERROR_NON_VALID_OBJ(VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
- pCreateInfo->oldSwapchain,
- "VkSwapchainKHR");
+ skipCall |=
+ LOG_ERROR_NON_VALID_OBJ(VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, pCreateInfo->oldSwapchain, "VkSwapchainKHR");
}
}
return skipCall;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(
- VkDevice device,
- const VkSwapchainCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSwapchainKHR* pSwapchain)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSwapchainKHR *pSwapchain) {
VkResult result = VK_SUCCESS;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
- VkBool32 skipCall = validateCreateSwapchainKHR(device, pCreateInfo,
- pSwapchain);
+ VkBool32 skipCall = validateCreateSwapchainKHR(device, pCreateInfo, pSwapchain);
if (VK_FALSE == skipCall) {
// Call down the call chain:
loader_platform_thread_unlock_mutex(&globalLock);
- result = my_data->device_dispatch_table->CreateSwapchainKHR(
- device, pCreateInfo, pAllocator, pSwapchain);
+ result = my_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
loader_platform_thread_lock_mutex(&globalLock);
if (result == VK_SUCCESS) {
@@ -1938,28 +1614,20 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(
my_data->swapchainMap[*pSwapchain].swapchain = *pSwapchain;
if (pDevice) {
- pDevice->swapchains[*pSwapchain] =
- &my_data->swapchainMap[*pSwapchain];
+ pDevice->swapchains[*pSwapchain] = &my_data->swapchainMap[*pSwapchain];
}
my_data->swapchainMap[*pSwapchain].pDevice = pDevice;
my_data->swapchainMap[*pSwapchain].imageCount = 0;
- my_data->swapchainMap[*pSwapchain].usedAllocatorToCreate =
- (pAllocator != NULL);
+ my_data->swapchainMap[*pSwapchain].usedAllocatorToCreate = (pAllocator != NULL);
// Store a pointer to the surface
SwpPhysicalDevice *pPhysicalDevice = pDevice->pPhysicalDevice;
- SwpInstance *pInstance =
- (pPhysicalDevice) ? pPhysicalDevice->pInstance : NULL;
+ SwpInstance *pInstance = (pPhysicalDevice) ? pPhysicalDevice->pInstance : NULL;
layer_data *my_instance_data =
- ((pInstance) ?
- get_my_data_ptr(get_dispatch_key(pInstance->instance), layer_data_map) :
- NULL);
- SwpSurface *pSurface =
- ((my_data && pCreateInfo) ?
- &my_instance_data->surfaceMap[pCreateInfo->surface] : NULL);
+ ((pInstance) ? get_my_data_ptr(get_dispatch_key(pInstance->instance), layer_data_map) : NULL);
+ SwpSurface *pSurface = ((my_data && pCreateInfo) ? &my_instance_data->surfaceMap[pCreateInfo->surface] : NULL);
my_data->swapchainMap[*pSwapchain].pSurface = pSurface;
if (pSurface) {
- pSurface->swapchains[*pSwapchain] =
- &my_data->swapchainMap[*pSwapchain];
+ pSurface->swapchains[*pSwapchain] = &my_data->swapchainMap[*pSwapchain];
}
}
loader_platform_thread_unlock_mutex(&globalLock);
@@ -1969,16 +1637,13 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(
- VkDevice device,
- VkSwapchainKHR swapchain,
- const VkAllocationCallbacks* pAllocator)
-{
-// TODOs:
-//
-// - Implement a check for validity language that reads: All uses of
-// presentable images acquired from pname:swapchain and owned by the
-// application must: have completed execution
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
+ // TODOs:
+ //
+ // - Implement a check for validity language that reads: All uses of
+ // presentable images acquired from pname:swapchain and owned by the
+ // application must: have completed execution
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
loader_platform_thread_lock_mutex(&globalLock);
@@ -1986,10 +1651,9 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(
// Validate that the swapchain extension was enabled:
if (pDevice && !pDevice->swapchainExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkDevice.",
- __FUNCTION__, VK_KHR_SWAPCHAIN_EXTENSION_NAME);
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice", SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
+ "%s() called even though the %s extension was not enabled for this VkDevice.", __FUNCTION__,
+ VK_KHR_SWAPCHAIN_EXTENSION_NAME);
}
// Regardless of skipCall value, do some internal cleanup:
@@ -1999,8 +1663,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(
if (pSwapchain->pDevice) {
pSwapchain->pDevice->swapchains.erase(swapchain);
if (device != pSwapchain->pDevice->device) {
- LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_DESTROY_SWAP_DIFF_DEVICE,
+ LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice", SWAPCHAIN_DESTROY_SWAP_DIFF_DEVICE,
"%s() called with a different VkDevice than the "
"VkSwapchainKHR was created with.",
__FUNCTION__);
@@ -2013,8 +1676,7 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(
pSwapchain->images.clear();
}
if ((pAllocator != NULL) != pSwapchain->usedAllocatorToCreate) {
- LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, instance, "VkInstance",
- SWAPCHAIN_INCOMPATIBLE_ALLOCATOR,
+ LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, instance, "VkInstance", SWAPCHAIN_INCOMPATIBLE_ALLOCATOR,
"%s() called with incompatible pAllocator from when "
"the object was created.",
__FUNCTION__);
@@ -2029,12 +1691,8 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR(
}
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(
- VkDevice device,
- VkSwapchainKHR swapchain,
- uint32_t* pSwapchainImageCount,
- VkImage* pSwapchainImages)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount, VkImage *pSwapchainImages) {
VkResult result = VK_SUCCESS;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
@@ -2043,48 +1701,36 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(
// Validate that the swapchain extension was enabled:
if (pDevice && !pDevice->swapchainExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkDevice.",
- __FUNCTION__, VK_KHR_SWAPCHAIN_EXTENSION_NAME);
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice", SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
+ "%s() called even though the %s extension was not enabled for this VkDevice.", __FUNCTION__,
+ VK_KHR_SWAPCHAIN_EXTENSION_NAME);
}
SwpSwapchain *pSwapchain = &my_data->swapchainMap[swapchain];
if (!pSwapchainImageCount) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pSwapchainImageCount");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pSwapchainImageCount");
}
if (VK_FALSE == skipCall) {
// Call down the call chain:
loader_platform_thread_unlock_mutex(&globalLock);
- result = my_data->device_dispatch_table->GetSwapchainImagesKHR(
- device, swapchain, pSwapchainImageCount, pSwapchainImages);
+ result = my_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
loader_platform_thread_lock_mutex(&globalLock);
// Obtain this pointer again after locking:
pSwapchain = &my_data->swapchainMap[swapchain];
- if ((result == VK_SUCCESS) && pSwapchain && !pSwapchainImages &&
- pSwapchainImageCount) {
+ if ((result == VK_SUCCESS) && pSwapchain && !pSwapchainImages && pSwapchainImageCount) {
// Record the result of this preliminary query:
pSwapchain->imageCount = *pSwapchainImageCount;
- }
- else if ((result == VK_SUCCESS) && pSwapchain && pSwapchainImages &&
- pSwapchainImageCount) {
+ } else if ((result == VK_SUCCESS) && pSwapchain && pSwapchainImages && pSwapchainImageCount) {
// Compare the preliminary value of *pSwapchainImageCount with the
// value this time:
if (*pSwapchainImageCount > pSwapchain->imageCount) {
- LOG_ERROR_INVALID_COUNT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pSwapchainImageCount",
- "pSwapchainImages",
- *pSwapchainImageCount,
- pSwapchain->imageCount);
- }
- else if (*pSwapchainImageCount > 0) {
+ LOG_ERROR_INVALID_COUNT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pSwapchainImageCount", "pSwapchainImages",
+ *pSwapchainImageCount, pSwapchain->imageCount);
+ } else if (*pSwapchainImageCount > 0) {
// Record the images and their state:
pSwapchain->imageCount = *pSwapchainImageCount;
- for (uint32_t i = 0 ; i < *pSwapchainImageCount ; i++) {
+ for (uint32_t i = 0; i < *pSwapchainImageCount; i++) {
pSwapchain->images[i].image = pSwapchainImages[i];
pSwapchain->images[i].pSwapchain = pSwapchain;
pSwapchain->images[i].ownedByApp = false;
@@ -2098,26 +1744,20 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR(
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(
- VkDevice device,
- VkSwapchainKHR swapchain,
- uint64_t timeout,
- VkSemaphore semaphore,
- VkFence fence,
- uint32_t* pImageIndex)
-{
-// TODOs:
-//
-// - Address the timeout. Possibilities include looking at the state of the
-// swapchain's images, depending on the timeout value.
-// - Implement a check for validity language that reads: If pname:semaphore is
-// not sname:VK_NULL_HANDLE it must: be unsignalled
-// - Implement a check for validity language that reads: If pname:fence is not
-// sname:VK_NULL_HANDLE it must: be unsignalled and mustnot: be associated
-// with any other queue command that has not yet completed execution on that
-// queue
-// - Record/update the state of the swapchain, in case an error occurs
-// (e.g. VK_ERROR_OUT_OF_DATE_KHR).
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
+ VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
+ // TODOs:
+ //
+ // - Address the timeout. Possibilities include looking at the state of the
+ // swapchain's images, depending on the timeout value.
+ // - Implement a check for validity language that reads: If pname:semaphore is
+ // not sname:VK_NULL_HANDLE it must: be unsignalled
+ // - Implement a check for validity language that reads: If pname:fence is not
+ // sname:VK_NULL_HANDLE it must: be unsignalled and mustnot: be associated
+ // with any other queue command that has not yet completed execution on that
+ // queue
+ // - Record/update the state of the swapchain, in case an error occurs
+ // (e.g. VK_ERROR_OUT_OF_DATE_KHR).
VkResult result = VK_SUCCESS;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
@@ -2126,56 +1766,48 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(
// Validate that the swapchain extension was enabled:
if (pDevice && !pDevice->swapchainExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice",
- SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkDevice.",
- __FUNCTION__, VK_KHR_SWAPCHAIN_EXTENSION_NAME);
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "VkDevice", SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
+ "%s() called even though the %s extension was not enabled for this VkDevice.", __FUNCTION__,
+ VK_KHR_SWAPCHAIN_EXTENSION_NAME);
}
SwpSwapchain *pSwapchain = &my_data->swapchainMap[swapchain];
if (pSwapchain) {
// Look to see if the application is trying to own too many images at
// the same time (i.e. not leave any to display):
uint32_t imagesOwnedByApp = 0;
- for (uint32_t i = 0 ; i < pSwapchain->imageCount ; i++) {
+ for (uint32_t i = 0; i < pSwapchain->imageCount; i++) {
if (pSwapchain->images[i].ownedByApp) {
imagesOwnedByApp++;
}
}
if (imagesOwnedByApp >= (pSwapchain->imageCount - 1)) {
- skipCall |= LOG_PERF_WARNING(VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
- swapchain,
- "VkSwapchainKHR",
- SWAPCHAIN_APP_OWNS_TOO_MANY_IMAGES,
- "%s() called when the application "
- "already owns all presentable images "
- "in this swapchain except for the "
- "image currently being displayed. "
- "This call to %s() cannot succeed "
- "unless another thread calls the "
- "vkQueuePresentKHR() function in "
- "order to release ownership of one of "
- "the presentable images of this "
- "swapchain.",
+ skipCall |= LOG_PERF_WARNING(VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, swapchain, "VkSwapchainKHR",
+ SWAPCHAIN_APP_OWNS_TOO_MANY_IMAGES, "%s() called when the application "
+ "already owns all presentable images "
+ "in this swapchain except for the "
+ "image currently being displayed. "
+ "This call to %s() cannot succeed "
+ "unless another thread calls the "
+ "vkQueuePresentKHR() function in "
+ "order to release ownership of one of "
+ "the presentable images of this "
+ "swapchain.",
__FUNCTION__, __FUNCTION__);
}
}
if (!pImageIndex) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pImageIndex");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pImageIndex");
}
if (VK_FALSE == skipCall) {
// Call down the call chain:
loader_platform_thread_unlock_mutex(&globalLock);
- result = my_data->device_dispatch_table->AcquireNextImageKHR(
- device, swapchain, timeout, semaphore, fence, pImageIndex);
+ result = my_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
loader_platform_thread_lock_mutex(&globalLock);
// Obtain this pointer again after locking:
pSwapchain = &my_data->swapchainMap[swapchain];
- if (((result == VK_SUCCESS) || (result == VK_SUBOPTIMAL_KHR)) &&
- pSwapchain) {
+ if (((result == VK_SUCCESS) || (result == VK_SUBOPTIMAL_KHR)) && pSwapchain) {
// Change the state of the image (now owned by the application):
pSwapchain->images[*pImageIndex].ownedByApp = true;
}
@@ -2186,90 +1818,64 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(
- VkQueue queue,
- const VkPresentInfoKHR* pPresentInfo)
-{
-// TODOs:
-//
-// - Implement a check for validity language that reads: Any given element of
-// sname:VkSemaphore in pname:pWaitSemaphores must: refer to a prior signal
-// of that sname:VkSemaphore that won't be consumed by any other wait on that
-// semaphore
-// - Record/update the state of the swapchain, in case an error occurs
-// (e.g. VK_ERROR_OUT_OF_DATE_KHR).
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
+ // TODOs:
+ //
+ // - Implement a check for validity language that reads: Any given element of
+ // sname:VkSemaphore in pname:pWaitSemaphores must: refer to a prior signal
+ // of that sname:VkSemaphore that won't be consumed by any other wait on that
+ // semaphore
+ // - Record/update the state of the swapchain, in case an error occurs
+ // (e.g. VK_ERROR_OUT_OF_DATE_KHR).
VkResult result = VK_SUCCESS;
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
if (!pPresentInfo) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pPresentInfo");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pPresentInfo");
} else {
if (pPresentInfo->sType != VK_STRUCTURE_TYPE_PRESENT_INFO_KHR) {
- skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pPresentInfo",
+ skipCall |= LOG_ERROR_WRONG_STYPE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pPresentInfo",
"VK_STRUCTURE_TYPE_PRESENT_INFO_KHR");
}
if (pPresentInfo->pNext != NULL) {
- skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pPresentInfo");
+ skipCall |= LOG_INFO_WRONG_NEXT(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pPresentInfo");
}
if (!pPresentInfo->swapchainCount) {
- skipCall |= LOG_ERROR_ZERO_VALUE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pPresentInfo->swapchainCount");
+ skipCall |= LOG_ERROR_ZERO_VALUE(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pPresentInfo->swapchainCount");
}
if (!pPresentInfo->pSwapchains) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pPresentInfo->pSwapchains");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pPresentInfo->pSwapchains");
}
if (!pPresentInfo->pImageIndices) {
- skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- device,
- "pPresentInfo->pImageIndices");
+ skipCall |= LOG_ERROR_NULL_POINTER(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, "pPresentInfo->pImageIndices");
}
// Note: pPresentInfo->pResults is allowed to be NULL
}
loader_platform_thread_lock_mutex(&globalLock);
- for (uint32_t i = 0;
- pPresentInfo && (i < pPresentInfo->swapchainCount);
- i++) {
+ for (uint32_t i = 0; pPresentInfo && (i < pPresentInfo->swapchainCount); i++) {
uint32_t index = pPresentInfo->pImageIndices[i];
- SwpSwapchain *pSwapchain =
- &my_data->swapchainMap[pPresentInfo->pSwapchains[i]];
+ SwpSwapchain *pSwapchain = &my_data->swapchainMap[pPresentInfo->pSwapchains[i]];
if (pSwapchain) {
if (!pSwapchain->pDevice->swapchainExtensionEnabled) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
- pSwapchain->pDevice, "VkDevice",
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, pSwapchain->pDevice, "VkDevice",
SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
- "%s() called even though the %s extension was not enabled for this VkDevice.",
- __FUNCTION__, VK_KHR_SWAPCHAIN_EXTENSION_NAME);
+ "%s() called even though the %s extension was not enabled for this VkDevice.", __FUNCTION__,
+ VK_KHR_SWAPCHAIN_EXTENSION_NAME);
}
if (index >= pSwapchain->imageCount) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
- pPresentInfo->pSwapchains[i],
- "VkSwapchainKHR",
- SWAPCHAIN_INDEX_TOO_LARGE,
- "%s() called for an index that is too "
- "large (i.e. %d). There are only %d "
- "images in this VkSwapchainKHR.\n",
- __FUNCTION__, index,
- pSwapchain->imageCount);
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, pPresentInfo->pSwapchains[i], "VkSwapchainKHR",
+ SWAPCHAIN_INDEX_TOO_LARGE, "%s() called for an index that is too "
+ "large (i.e. %d). There are only %d "
+ "images in this VkSwapchainKHR.\n",
+ __FUNCTION__, index, pSwapchain->imageCount);
} else {
if (!pSwapchain->images[index].ownedByApp) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
- pPresentInfo->pSwapchains[i],
- "VkSwapchainKHR",
- SWAPCHAIN_INDEX_NOT_IN_USE,
- "%s() returned an index (i.e. %d) "
- "for an image that is not owned by "
- "the application.",
+ skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, pPresentInfo->pSwapchains[i],
+ "VkSwapchainKHR", SWAPCHAIN_INDEX_NOT_IN_USE, "%s() returned an index (i.e. %d) "
+ "for an image that is not owned by "
+ "the application.",
__FUNCTION__, index);
}
}
@@ -2281,16 +1887,14 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(
// and the 2nd test is the validation check:
if ((pSurface->numQueueFamilyIndexSupport > queueFamilyIndex) &&
(!pSurface->pQueueFamilyIndexSupport[queueFamilyIndex])) {
- skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
- pPresentInfo->pSwapchains[i],
- "VkSwapchainKHR",
- SWAPCHAIN_SURFACE_NOT_SUPPORTED_WITH_QUEUE,
- "%s() called with a swapchain whose "
- "surface is not supported for "
- "presention on this device with the "
- "queueFamilyIndex (i.e. %d) of the "
- "given queue.",
- __FUNCTION__, queueFamilyIndex);
+ skipCall |=
+ LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, pPresentInfo->pSwapchains[i], "VkSwapchainKHR",
+ SWAPCHAIN_SURFACE_NOT_SUPPORTED_WITH_QUEUE, "%s() called with a swapchain whose "
+ "surface is not supported for "
+ "presention on this device with the "
+ "queueFamilyIndex (i.e. %d) of the "
+ "given queue.",
+ __FUNCTION__, queueFamilyIndex);
}
}
}
@@ -2299,16 +1903,13 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(
if (VK_FALSE == skipCall) {
// Call down the call chain:
loader_platform_thread_unlock_mutex(&globalLock);
- result = my_data->device_dispatch_table->QueuePresentKHR(queue,
- pPresentInfo);
+ result = my_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
loader_platform_thread_lock_mutex(&globalLock);
- if (pPresentInfo &&
- ((result == VK_SUCCESS) || (result == VK_SUBOPTIMAL_KHR))) {
- for (uint32_t i = 0; i < pPresentInfo->swapchainCount ; i++) {
+ if (pPresentInfo && ((result == VK_SUCCESS) || (result == VK_SUBOPTIMAL_KHR))) {
+ for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
int index = pPresentInfo->pImageIndices[i];
- SwpSwapchain *pSwapchain =
- &my_data->swapchainMap[pPresentInfo->pSwapchains[i]];
+ SwpSwapchain *pSwapchain = &my_data->swapchainMap[pPresentInfo->pSwapchains[i]];
if (pSwapchain) {
// Change the state of the image (no longer owned by the
// application):
@@ -2323,19 +1924,14 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(
return VK_ERROR_VALIDATION_FAILED_EXT;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(
- VkDevice device,
- uint32_t queueFamilyIndex,
- uint32_t queueIndex,
- VkQueue* pQueue)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
VkBool32 skipCall = VK_FALSE;
layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
if (VK_FALSE == skipCall) {
// Call down the call chain:
- my_data->device_dispatch_table->GetDeviceQueue(
- device, queueFamilyIndex, queueIndex, pQueue);
+ my_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
// Remember the queue's handle, and link it to the device:
loader_platform_thread_lock_mutex(&globalLock);
@@ -2350,15 +1946,12 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(
}
}
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
- VkInstance instance,
- const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDebugReportCallbackEXT* pMsgCallback)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
- VkResult result = my_data->instance_dispatch_table->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
+ VkResult result =
+ my_data->instance_dispatch_table->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
if (VK_SUCCESS == result) {
loader_platform_thread_lock_mutex(&globalLock);
result = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
@@ -2367,8 +1960,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback, const VkAllocationCallbacks *pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
+ VkDebugReportCallbackEXT msgCallback,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
my_data->instance_dispatch_table->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
loader_platform_thread_lock_mutex(&globalLock);
@@ -2376,26 +1970,19 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkIns
loader_platform_thread_unlock_mutex(&globalLock);
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(
- VkInstance instance,
- VkDebugReportFlagsEXT flags,
- VkDebugReportObjectTypeEXT objType,
- uint64_t object,
- size_t location,
- int32_t msgCode,
- const char* pLayerPrefix,
- const char* pMsg)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
+ size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
- my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
+ my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
+ pMsg);
}
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char* funcName)
-{
+VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char *funcName) {
if (!strcmp("vkGetDeviceProcAddr", funcName))
- return (PFN_vkVoidFunction) vkGetDeviceProcAddr;
+ return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
if (!strcmp(funcName, "vkDestroyDevice"))
- return (PFN_vkVoidFunction) vkDestroyDevice;
+ return (PFN_vkVoidFunction)vkDestroyDevice;
if (device == VK_NULL_HANDLE) {
return NULL;
@@ -2404,10 +1991,8 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkD
layer_data *my_data;
my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkLayerDispatchTable *pDisp = my_data->device_dispatch_table;
- if (my_data->deviceMap.size() != 0 &&
- my_data->deviceMap[device].swapchainExtensionEnabled)
- {
+ VkLayerDispatchTable *pDisp = my_data->device_dispatch_table;
+ if (my_data->deviceMap.size() != 0 && my_data->deviceMap[device].swapchainExtensionEnabled) {
if (!strcmp("vkCreateSwapchainKHR", funcName))
return reinterpret_cast<PFN_vkVoidFunction>(vkCreateSwapchainKHR);
if (!strcmp("vkDestroySwapchainKHR", funcName))
@@ -2427,28 +2012,27 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkD
return pDisp->GetDeviceProcAddr(device, funcName);
}
-VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char* funcName)
-{
+VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
if (!strcmp("vkGetInstanceProcAddr", funcName))
- return (PFN_vkVoidFunction) vkGetInstanceProcAddr;
+ return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
if (!strcmp(funcName, "vkCreateInstance"))
- return (PFN_vkVoidFunction) vkCreateInstance;
+ return (PFN_vkVoidFunction)vkCreateInstance;
if (!strcmp(funcName, "vkDestroyInstance"))
- return (PFN_vkVoidFunction) vkDestroyInstance;
+ return (PFN_vkVoidFunction)vkDestroyInstance;
if (!strcmp(funcName, "vkCreateDevice"))
- return (PFN_vkVoidFunction) vkCreateDevice;
+ return (PFN_vkVoidFunction)vkCreateDevice;
if (!strcmp(funcName, "vkEnumeratePhysicalDevices"))
- return (PFN_vkVoidFunction) vkEnumeratePhysicalDevices;
+ return (PFN_vkVoidFunction)vkEnumeratePhysicalDevices;
if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
- return (PFN_vkVoidFunction) vkEnumerateInstanceLayerProperties;
+ return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
- return (PFN_vkVoidFunction) vkEnumerateInstanceExtensionProperties;
+ return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;
if (!strcmp(funcName, "vkGetPhysicalDeviceQueueFamilyProperties"))
- return (PFN_vkVoidFunction) vkGetPhysicalDeviceQueueFamilyProperties;
+ return (PFN_vkVoidFunction)vkGetPhysicalDeviceQueueFamilyProperties;
if (instance == VK_NULL_HANDLE) {
return NULL;
@@ -2458,24 +2042,20 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(V
layer_data *my_data;
my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
- VkLayerInstanceDispatchTable* pTable = my_data->instance_dispatch_table;
+ VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
addr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
if (addr) {
return addr;
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
- if (my_data->instanceMap.size() != 0 &&
- my_data->instanceMap[instance].androidSurfaceExtensionEnabled)
- {
+ if (my_data->instanceMap.size() != 0 && my_data->instanceMap[instance].androidSurfaceExtensionEnabled) {
if (!strcmp("vkCreateAndroidSurfaceKHR", funcName))
return reinterpret_cast<PFN_vkVoidFunction>(vkCreateAndroidSurfaceKHR);
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
- if (my_data->instanceMap.size() != 0 &&
- my_data->instanceMap[instance].mirSurfaceExtensionEnabled)
- {
+ if (my_data->instanceMap.size() != 0 && my_data->instanceMap[instance].mirSurfaceExtensionEnabled) {
if (!strcmp("vkCreateMirSurfaceKHR", funcName))
return reinterpret_cast<PFN_vkVoidFunction>(vkCreateMirSurfaceKHR);
if (!strcmp("vkGetPhysicalDeviceMirPresentationSupportKHR", funcName))
@@ -2483,9 +2063,7 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(V
}
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
- if (my_data->instanceMap.size() != 0 &&
- my_data->instanceMap[instance].waylandSurfaceExtensionEnabled)
- {
+ if (my_data->instanceMap.size() != 0 && my_data->instanceMap[instance].waylandSurfaceExtensionEnabled) {
if (!strcmp("vkCreateWaylandSurfaceKHR", funcName))
return reinterpret_cast<PFN_vkVoidFunction>(vkCreateWaylandSurfaceKHR);
if (!strcmp("vkGetPhysicalDeviceWaylandPresentationSupportKHR", funcName))
@@ -2493,9 +2071,7 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(V
}
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
- if (my_data->instanceMap.size() != 0 &&
- my_data->instanceMap[instance].win32SurfaceExtensionEnabled)
- {
+ if (my_data->instanceMap.size() != 0 && my_data->instanceMap[instance].win32SurfaceExtensionEnabled) {
if (!strcmp("vkCreateWin32SurfaceKHR", funcName))
return reinterpret_cast<PFN_vkVoidFunction>(vkCreateWin32SurfaceKHR);
if (!strcmp("vkGetPhysicalDeviceWin32PresentationSupportKHR", funcName))
@@ -2503,9 +2079,7 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(V
}
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
- if (my_data->instanceMap.size() != 0 &&
- my_data->instanceMap[instance].xcbSurfaceExtensionEnabled)
- {
+ if (my_data->instanceMap.size() != 0 && my_data->instanceMap[instance].xcbSurfaceExtensionEnabled) {
if (!strcmp("vkCreateXcbSurfaceKHR", funcName))
return reinterpret_cast<PFN_vkVoidFunction>(vkCreateXcbSurfaceKHR);
if (!strcmp("vkGetPhysicalDeviceXcbPresentationSupportKHR", funcName))
@@ -2513,18 +2087,14 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(V
}
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
- if (my_data->instanceMap.size() != 0 &&
- my_data->instanceMap[instance].xlibSurfaceExtensionEnabled)
- {
+ if (my_data->instanceMap.size() != 0 && my_data->instanceMap[instance].xlibSurfaceExtensionEnabled) {
if (!strcmp("vkCreateXlibSurfaceKHR", funcName))
return reinterpret_cast<PFN_vkVoidFunction>(vkCreateXlibSurfaceKHR);
if (!strcmp("vkGetPhysicalDeviceXlibPresentationSupportKHR", funcName))
return reinterpret_cast<PFN_vkVoidFunction>(vkGetPhysicalDeviceXlibPresentationSupportKHR);
}
#endif // VK_USE_PLATFORM_XLIB_KHR
- if (my_data->instanceMap.size() != 0 &&
- my_data->instanceMap[instance].surfaceExtensionEnabled)
- {
+ if (my_data->instanceMap.size() != 0 && my_data->instanceMap[instance].surfaceExtensionEnabled) {
if (!strcmp("vkDestroySurfaceKHR", funcName))
return reinterpret_cast<PFN_vkVoidFunction>(vkDestroySurfaceKHR);
if (!strcmp("vkGetPhysicalDeviceSurfaceSupportKHR", funcName))
@@ -2541,4 +2111,3 @@ VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(V
return NULL;
return pTable->GetInstanceProcAddr(instance, funcName);
}
-
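
The swapchain.cpp hunks above repeatedly touch the layer's handling of Vulkan's two-call enumeration pattern (vkGetPhysicalDeviceSurfaceFormatsKHR(), vkGetPhysicalDeviceSurfacePresentModesKHR(), vkGetSwapchainImagesKHR()): the count returned by a preliminary query is recorded in the per-object maps (SwpPhysicalDevice, SwpSwapchain), and LOG_ERROR_INVALID_COUNT is raised if a later call asks for more elements than were advertised. For reference, a minimal application-side sketch of the pattern being validated is shown below; it assumes a valid VkDevice and VkSwapchainKHR, omits VkResult checking, and is illustrative only rather than code from this change:

    #include <vector>
    #include <vulkan/vulkan.h>

    // Illustrative sketch (not part of this commit): the usual two-call query
    // that the swapchain layer cross-checks. First call with a NULL array to
    // learn the count, second call to fetch that many handles.
    std::vector<VkImage> getSwapchainImages(VkDevice device, VkSwapchainKHR swapchain) {
        uint32_t count = 0;
        vkGetSwapchainImagesKHR(device, swapchain, &count, NULL);          // preliminary query
        std::vector<VkImage> images(count);
        vkGetSwapchainImagesKHR(device, swapchain, &count, images.data()); // fetch handles
        images.resize(count); // the implementation may return fewer than requested
        return images;
    }

As the reformatted hunks show, these checks only gate the call down the dispatch chain when skipCall is set; the recorded counts are later reused by validateCreateSwapchainKHR() to confirm that pCreateInfo values match what the surface queries reported.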
diff --git a/layers/swapchain.h b/layers/swapchain.h
index 756daaeaf..ed86b0fad 100644
--- a/layers/swapchain.h
+++ b/layers/swapchain.h
@@ -37,18 +37,18 @@
using namespace std;
-
// Swapchain ERROR codes
-typedef enum _SWAPCHAIN_ERROR
-{
- SWAPCHAIN_INVALID_HANDLE, // Handle used that isn't currently valid
- SWAPCHAIN_NULL_POINTER, // Pointer set to NULL, instead of being a valid pointer
- SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED, // Did not enable WSI extension, but called WSI function
- SWAPCHAIN_DEL_OBJECT_BEFORE_CHILDREN, // Called vkDestroyDevice() before vkDestroySwapchainKHR()
- SWAPCHAIN_CREATE_UNSUPPORTED_SURFACE, // Called vkCreateSwapchainKHR() with a pCreateInfo->surface that wasn't seen as supported by vkGetPhysicalDeviceSurfaceSupportKHR for the device
- SWAPCHAIN_CREATE_SWAP_WITHOUT_QUERY, // Called vkCreateSwapchainKHR() without calling a query (e.g. vkGetPhysicalDeviceSurfaceCapabilitiesKHR())
- SWAPCHAIN_CREATE_SWAP_BAD_MIN_IMG_COUNT, // Called vkCreateSwapchainKHR() with out-of-bounds minImageCount
- SWAPCHAIN_CREATE_SWAP_OUT_OF_BOUNDS_EXTENTS,// Called vkCreateSwapchainKHR() with out-of-bounds imageExtent
+typedef enum _SWAPCHAIN_ERROR {
+ SWAPCHAIN_INVALID_HANDLE, // Handle used that isn't currently valid
+ SWAPCHAIN_NULL_POINTER, // Pointer set to NULL, instead of being a valid pointer
+ SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED, // Did not enable WSI extension, but called WSI function
+ SWAPCHAIN_DEL_OBJECT_BEFORE_CHILDREN, // Called vkDestroyDevice() before vkDestroySwapchainKHR()
+ SWAPCHAIN_CREATE_UNSUPPORTED_SURFACE, // Called vkCreateSwapchainKHR() with a pCreateInfo->surface that wasn't seen as supported
+ // by vkGetPhysicalDeviceSurfaceSupportKHR for the device
+ SWAPCHAIN_CREATE_SWAP_WITHOUT_QUERY, // Called vkCreateSwapchainKHR() without calling a query (e.g.
+ // vkGetPhysicalDeviceSurfaceCapabilitiesKHR())
+ SWAPCHAIN_CREATE_SWAP_BAD_MIN_IMG_COUNT, // Called vkCreateSwapchainKHR() with out-of-bounds minImageCount
+ SWAPCHAIN_CREATE_SWAP_OUT_OF_BOUNDS_EXTENTS, // Called vkCreateSwapchainKHR() with out-of-bounds imageExtent
SWAPCHAIN_CREATE_SWAP_EXTENTS_NO_MATCH_WIN, // Called vkCreateSwapchainKHR() with imageExtent that doesn't match window's extent
SWAPCHAIN_CREATE_SWAP_BAD_PRE_TRANSFORM, // Called vkCreateSwapchainKHR() with a non-supported preTransform
SWAPCHAIN_CREATE_SWAP_BAD_COMPOSITE_ALPHA, // Called vkCreateSwapchainKHR() with a non-supported compositeAlpha
@@ -59,90 +59,75 @@ typedef enum _SWAPCHAIN_ERROR
SWAPCHAIN_CREATE_SWAP_BAD_IMG_FMT_CLR_SP, // Called vkCreateSwapchainKHR() with a non-supported imageColorSpace
SWAPCHAIN_CREATE_SWAP_BAD_PRESENT_MODE, // Called vkCreateSwapchainKHR() with a non-supported presentMode
SWAPCHAIN_CREATE_SWAP_BAD_SHARING_MODE, // Called vkCreateSwapchainKHR() with a non-supported imageSharingMode
- SWAPCHAIN_CREATE_SWAP_BAD_SHARING_VALUES, // Called vkCreateSwapchainKHR() with bad values when imageSharingMode is VK_SHARING_MODE_CONCURRENT
- SWAPCHAIN_CREATE_SWAP_DIFF_SURFACE, // Called vkCreateSwapchainKHR() with pCreateInfo->oldSwapchain that has a different surface than pCreateInfo->surface
- SWAPCHAIN_DESTROY_SWAP_DIFF_DEVICE, // Called vkDestroySwapchainKHR() with a different VkDevice than vkCreateSwapchainKHR()
- SWAPCHAIN_APP_OWNS_TOO_MANY_IMAGES, // vkAcquireNextImageKHR() asked for more images than are available
- SWAPCHAIN_INDEX_TOO_LARGE, // Index is too large for swapchain
- SWAPCHAIN_INDEX_NOT_IN_USE, // vkQueuePresentKHR() given index that is not owned by app
- SWAPCHAIN_BAD_BOOL, // VkBool32 that doesn't have value of VK_TRUE or VK_FALSE (e.g. is a non-zero form of true)
- SWAPCHAIN_INVALID_COUNT, // Second time a query called, the pCount value didn't match first time
- SWAPCHAIN_WRONG_STYPE, // The sType for a struct has the wrong value
- SWAPCHAIN_WRONG_NEXT, // The pNext for a struct is not NULL
- SWAPCHAIN_ZERO_VALUE, // A value should be non-zero
- SWAPCHAIN_INCOMPATIBLE_ALLOCATOR, // pAllocator must be compatible (i.e. NULL or not) when object is created and destroyed
- SWAPCHAIN_DID_NOT_QUERY_QUEUE_FAMILIES, // A function using a queueFamilyIndex was called before vkGetPhysicalDeviceQueueFamilyProperties() was called
- SWAPCHAIN_QUEUE_FAMILY_INDEX_TOO_LARGE, // A queueFamilyIndex value is not less than pQueueFamilyPropertyCount returned by vkGetPhysicalDeviceQueueFamilyProperties()
- SWAPCHAIN_SURFACE_NOT_SUPPORTED_WITH_QUEUE, // A surface is not supported by a given queueFamilyIndex, as seen by vkGetPhysicalDeviceSurfaceSupportKHR()
+ SWAPCHAIN_CREATE_SWAP_BAD_SHARING_VALUES, // Called vkCreateSwapchainKHR() with bad values when imageSharingMode is
+ // VK_SHARING_MODE_CONCURRENT
+ SWAPCHAIN_CREATE_SWAP_DIFF_SURFACE, // Called vkCreateSwapchainKHR() with pCreateInfo->oldSwapchain that has a different surface
+ // than pCreateInfo->surface
+ SWAPCHAIN_DESTROY_SWAP_DIFF_DEVICE, // Called vkDestroySwapchainKHR() with a different VkDevice than vkCreateSwapchainKHR()
+ SWAPCHAIN_APP_OWNS_TOO_MANY_IMAGES, // vkAcquireNextImageKHR() asked for more images than are available
+ SWAPCHAIN_INDEX_TOO_LARGE, // Index is too large for swapchain
+ SWAPCHAIN_INDEX_NOT_IN_USE, // vkQueuePresentKHR() given index that is not owned by app
+ SWAPCHAIN_BAD_BOOL, // VkBool32 that doesn't have value of VK_TRUE or VK_FALSE (e.g. is a non-zero form of true)
+ SWAPCHAIN_INVALID_COUNT, // Second time a query called, the pCount value didn't match first time
+ SWAPCHAIN_WRONG_STYPE, // The sType for a struct has the wrong value
+ SWAPCHAIN_WRONG_NEXT, // The pNext for a struct is not NULL
+ SWAPCHAIN_ZERO_VALUE, // A value should be non-zero
+ SWAPCHAIN_INCOMPATIBLE_ALLOCATOR, // pAllocator must be compatible (i.e. NULL or not) when object is created and destroyed
+ SWAPCHAIN_DID_NOT_QUERY_QUEUE_FAMILIES, // A function using a queueFamilyIndex was called before
+ // vkGetPhysicalDeviceQueueFamilyProperties() was called
+ SWAPCHAIN_QUEUE_FAMILY_INDEX_TOO_LARGE, // A queueFamilyIndex value is not less than pQueueFamilyPropertyCount returned by
+ // vkGetPhysicalDeviceQueueFamilyProperties()
+ SWAPCHAIN_SURFACE_NOT_SUPPORTED_WITH_QUEUE, // A surface is not supported by a given queueFamilyIndex, as seen by
+ // vkGetPhysicalDeviceSurfaceSupportKHR()
} SWAPCHAIN_ERROR;
-
// The following is for logging error messages:
#define LAYER_NAME (char *) "Swapchain"
-#define LOG_ERROR_NON_VALID_OBJ(objType, type, obj) \
- (my_data) ? \
- log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (objType), \
- (uint64_t) (obj), __LINE__, SWAPCHAIN_INVALID_HANDLE, LAYER_NAME, \
- "%s() called with a non-valid %s.", __FUNCTION__, (obj)) \
- : VK_FALSE
-#define LOG_ERROR_NULL_POINTER(objType, type, obj) \
- (my_data) ? \
- log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (objType), \
- (uint64_t) (obj), 0, SWAPCHAIN_NULL_POINTER, LAYER_NAME, \
- "%s() called with NULL pointer %s.", __FUNCTION__, (obj)) \
- : VK_FALSE
-#define LOG_ERROR_INVALID_COUNT(objType, type, obj, obj2, val, val2) \
- (my_data) ? \
- log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (objType), \
- (uint64_t) (obj), 0, SWAPCHAIN_INVALID_COUNT, LAYER_NAME, \
- "%s() called with non-NULL %s, and with %s set to a " \
- "value (%d) that is greater than the value (%d) that " \
- "was returned when %s was NULL.", \
- __FUNCTION__, (obj2), (obj), (val), (val2), (obj2)) \
- : VK_FALSE
-#define LOG_ERROR_WRONG_STYPE(objType, type, obj, val) \
- (my_data) ? \
- log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (objType), \
- (uint64_t) (obj), 0, SWAPCHAIN_WRONG_STYPE, LAYER_NAME, \
- "%s() called with the wrong value for %s->sType " \
- "(expected %s).", \
- __FUNCTION__, (obj), (val)) \
- : VK_FALSE
-#define LOG_ERROR_ZERO_VALUE(objType, type, obj) \
- (my_data) ? \
- log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (objType), \
- (uint64_t) (obj), 0, SWAPCHAIN_ZERO_VALUE, LAYER_NAME, \
- "%s() called with a zero value for %s.", \
- __FUNCTION__, (obj)) \
- : VK_FALSE
-#define LOG_ERROR(objType, type, obj, enm, fmt, ...) \
- (my_data) ? \
- log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (objType), \
- (uint64_t) (obj), __LINE__, (enm), LAYER_NAME, (fmt), __VA_ARGS__) \
- : VK_FALSE
-#define LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(objType, type, obj, val1, val2) \
- (my_data) ? \
- log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (objType), \
- (uint64_t) (obj), 0, SWAPCHAIN_QUEUE_FAMILY_INDEX_TOO_LARGE, LAYER_NAME, \
- "%s() called with a queueFamilyIndex that is too " \
- "large (i.e. %d). The maximum value (returned " \
- "by vkGetPhysicalDeviceQueueFamilyProperties) is " \
- "only %d.\n", \
- __FUNCTION__, (val1), (val2)) \
- : VK_FALSE
-#define LOG_PERF_WARNING(objType, type, obj, enm, fmt, ...) \
- (my_data) ? \
- log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (objType), \
- (uint64_t) (obj), __LINE__, (enm), LAYER_NAME, (fmt), __VA_ARGS__) \
- : VK_FALSE
-#define LOG_INFO_WRONG_NEXT(objType, type, obj) \
- (my_data) ? \
- log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (objType), \
- (uint64_t) (obj), 0, SWAPCHAIN_WRONG_NEXT, LAYER_NAME, \
- "%s() called with non-NULL value for %s->pNext.", \
- __FUNCTION__, (obj)) \
- : VK_FALSE
-
+#define LOG_ERROR_NON_VALID_OBJ(objType, type, obj) \
+ (my_data) ? log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (objType), (uint64_t)(obj), __LINE__, \
+ SWAPCHAIN_INVALID_HANDLE, LAYER_NAME, "%s() called with a non-valid %s.", __FUNCTION__, (obj)) \
+ : VK_FALSE
+#define LOG_ERROR_NULL_POINTER(objType, type, obj) \
+ (my_data) ? log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (objType), (uint64_t)(obj), 0, \
+ SWAPCHAIN_NULL_POINTER, LAYER_NAME, "%s() called with NULL pointer %s.", __FUNCTION__, (obj)) \
+ : VK_FALSE
+#define LOG_ERROR_INVALID_COUNT(objType, type, obj, obj2, val, val2) \
+ (my_data) ? log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (objType), (uint64_t)(obj), 0, \
+ SWAPCHAIN_INVALID_COUNT, LAYER_NAME, "%s() called with non-NULL %s, and with %s set to a " \
+ "value (%d) that is greater than the value (%d) that " \
+ "was returned when %s was NULL.", \
+ __FUNCTION__, (obj2), (obj), (val), (val2), (obj2)) \
+ : VK_FALSE
+#define LOG_ERROR_WRONG_STYPE(objType, type, obj, val) \
+ (my_data) ? log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (objType), (uint64_t)(obj), 0, SWAPCHAIN_WRONG_STYPE, \
+ LAYER_NAME, "%s() called with the wrong value for %s->sType " \
+ "(expected %s).", \
+ __FUNCTION__, (obj), (val)) \
+ : VK_FALSE
+#define LOG_ERROR_ZERO_VALUE(objType, type, obj) \
+ (my_data) ? log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (objType), (uint64_t)(obj), 0, SWAPCHAIN_ZERO_VALUE, \
+ LAYER_NAME, "%s() called with a zero value for %s.", __FUNCTION__, (obj)) \
+ : VK_FALSE
+#define LOG_ERROR(objType, type, obj, enm, fmt, ...) \
+ (my_data) ? log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (objType), (uint64_t)(obj), __LINE__, (enm), \
+ LAYER_NAME, (fmt), __VA_ARGS__) \
+ : VK_FALSE
+#define LOG_ERROR_QUEUE_FAMILY_INDEX_TOO_LARGE(objType, type, obj, val1, val2) \
+ (my_data) ? log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (objType), (uint64_t)(obj), 0, \
+ SWAPCHAIN_QUEUE_FAMILY_INDEX_TOO_LARGE, LAYER_NAME, "%s() called with a queueFamilyIndex that is too " \
+ "large (i.e. %d). The maximum value (returned " \
+ "by vkGetPhysicalDeviceQueueFamilyProperties) is " \
+ "only %d.\n", \
+ __FUNCTION__, (val1), (val2)) \
+ : VK_FALSE
+#define LOG_PERF_WARNING(objType, type, obj, enm, fmt, ...) \
+ (my_data) ? log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (objType), (uint64_t)(obj), __LINE__, \
+ (enm), LAYER_NAME, (fmt), __VA_ARGS__) \
+ : VK_FALSE
+#define LOG_INFO_WRONG_NEXT(objType, type, obj) \
+ (my_data) ? log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (objType), (uint64_t)(obj), 0, \
+ SWAPCHAIN_WRONG_NEXT, LAYER_NAME, "%s() called with non-NULL value for %s->pNext.", __FUNCTION__, (obj)) \
+ : VK_FALSE
// NOTE: The following struct's/typedef's are for keeping track of
// info that is used for validating the WSI extensions.
@@ -157,7 +142,8 @@ struct _SwpImage;
struct _SwpQueue;
typedef _SwpInstance SwpInstance;
-typedef _SwpSurface SwpSurface;;
+typedef _SwpSurface SwpSurface;
+;
typedef _SwpPhysicalDevice SwpPhysicalDevice;
typedef _SwpDevice SwpDevice;
typedef _SwpSwapchain SwpSwapchain;
@@ -170,16 +156,16 @@ struct _SwpInstance {
VkInstance instance;
// Remember the VkSurfaceKHR's that are created for this VkInstance:
- unordered_map<VkSurfaceKHR, SwpSurface*> surfaces;
+ unordered_map<VkSurfaceKHR, SwpSurface *> surfaces;
// When vkEnumeratePhysicalDevices is called, the VkPhysicalDevice's are
// remembered:
- unordered_map<const void*, SwpPhysicalDevice*> physicalDevices;
+ unordered_map<const void *, SwpPhysicalDevice *> physicalDevices;
// Set to true if VK_KHR_SURFACE_EXTENSION_NAME was enabled for this VkInstance:
bool surfaceExtensionEnabled;
- // TODO: Add additional booleans for platform-specific extensions:
+// TODO: Add additional booleans for platform-specific extensions:
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Set to true if VK_KHR_ANDROID_SURFACE_EXTENSION_NAME was enabled for this VkInstance:
bool androidSurfaceExtensionEnabled;
@@ -205,7 +191,7 @@ struct _SwpInstance {
bool xlibSurfaceExtensionEnabled;
#endif // VK_USE_PLATFORM_XLIB_KHR
};
-
+
// Create one of these for each VkSurfaceKHR:
struct _SwpSurface {
// The actual handle for this VkSurfaceKHR:
@@ -216,7 +202,7 @@ struct _SwpSurface {
// When vkCreateSwapchainKHR is called, the VkSwapchainKHR's are
// remembered:
- unordered_map<VkSwapchainKHR, SwpSwapchain*> swapchains;
+ unordered_map<VkSwapchainKHR, SwpSwapchain *> swapchains;
// 'true' if pAllocator was non-NULL when vkCreate*SurfaceKHR was called:
bool usedAllocatorToCreate;
@@ -250,25 +236,25 @@ struct _SwpPhysicalDevice {
// Record all surfaces that vkGetPhysicalDeviceSurfaceSupportKHR() was
// called for:
- unordered_map<VkSurfaceKHR, SwpSurface*> supportedSurfaces;
+ unordered_map<VkSurfaceKHR, SwpSurface *> supportedSurfaces;
-// TODO: Record/use this info per-surface, not per-device, once a
-// non-dispatchable surface object is added to WSI:
+ // TODO: Record/use this info per-surface, not per-device, once a
+ // non-dispatchable surface object is added to WSI:
// Results of vkGetPhysicalDeviceSurfaceCapabilitiesKHR():
bool gotSurfaceCapabilities;
VkSurfaceCapabilitiesKHR surfaceCapabilities;
-// TODO: Record/use this info per-surface, not per-device, once a
-// non-dispatchable surface object is added to WSI:
+ // TODO: Record/use this info per-surface, not per-device, once a
+ // non-dispatchable surface object is added to WSI:
// Count and VkSurfaceFormatKHR's returned by vkGetPhysicalDeviceSurfaceFormatsKHR():
uint32_t surfaceFormatCount;
- VkSurfaceFormatKHR* pSurfaceFormats;
+ VkSurfaceFormatKHR *pSurfaceFormats;
-// TODO: Record/use this info per-surface, not per-device, once a
-// non-dispatchable surface object is added to WSI:
+ // TODO: Record/use this info per-surface, not per-device, once a
+ // non-dispatchable surface object is added to WSI:
// Count and VkPresentModeKHR's returned by vkGetPhysicalDeviceSurfacePresentModesKHR():
uint32_t presentModeCount;
- VkPresentModeKHR* pPresentModes;
+ VkPresentModeKHR *pPresentModes;
};
// Create one of these for each VkDevice within a VkInstance:
@@ -284,10 +270,10 @@ struct _SwpDevice {
// When vkCreateSwapchainKHR is called, the VkSwapchainKHR's are
// remembered:
- unordered_map<VkSwapchainKHR, SwpSwapchain*> swapchains;
+ unordered_map<VkSwapchainKHR, SwpSwapchain *> swapchains;
// When vkGetDeviceQueue is called, the VkQueue's are remembered:
- unordered_map<VkQueue, SwpQueue*> queues;
+ unordered_map<VkQueue, SwpQueue *> queues;
};
// Create one of these for each VkImage within a VkSwapchainKHR:
@@ -338,22 +324,18 @@ struct _SwpQueue {
struct layer_data {
debug_report_data *report_data;
std::vector<VkDebugReportCallbackEXT> logging_callback;
- VkLayerDispatchTable* device_dispatch_table;
- VkLayerInstanceDispatchTable* instance_dispatch_table;
+ VkLayerDispatchTable *device_dispatch_table;
+ VkLayerInstanceDispatchTable *instance_dispatch_table;
// NOTE: The following are for keeping track of info that is used for
// validating the WSI extensions.
- std::unordered_map<void *, SwpInstance> instanceMap;
- std::unordered_map<VkSurfaceKHR, SwpSurface> surfaceMap;
+ std::unordered_map<void *, SwpInstance> instanceMap;
+ std::unordered_map<VkSurfaceKHR, SwpSurface> surfaceMap;
std::unordered_map<void *, SwpPhysicalDevice> physicalDeviceMap;
- std::unordered_map<void *, SwpDevice> deviceMap;
- std::unordered_map<VkSwapchainKHR, SwpSwapchain> swapchainMap;
- std::unordered_map<void *, SwpQueue> queueMap;
-
- layer_data() :
- report_data(nullptr),
- device_dispatch_table(nullptr),
- instance_dispatch_table(nullptr)
- {};
+ std::unordered_map<void *, SwpDevice> deviceMap;
+ std::unordered_map<VkSwapchainKHR, SwpSwapchain> swapchainMap;
+ std::unordered_map<void *, SwpQueue> queueMap;
+
+ layer_data() : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr){};
};
#endif // SWAPCHAIN_H
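The reformatted logging macros keep their original expansion: each one guards log_msg behind a my_data null check and otherwise evaluates to VK_FALSE, so call sites can OR the result into a skip flag. A rough, hypothetical call site (the device handle and the swapchainExtensionEnabled flag are assumptions, not taken from this header):

    // Sketch: report use of a swapchain function when the extension was never enabled.
    VkBool32 skipCall = VK_FALSE;
    if (!pDevice->swapchainExtensionEnabled) {
        skipCall |= LOG_ERROR(VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device, device, SWAPCHAIN_EXT_NOT_ENABLED_BUT_USED,
                              "%s() called even though the %s extension was not enabled for this VkDevice.", __FUNCTION__,
                              VK_KHR_SWAPCHAIN_EXTENSION_NAME);
    }
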
diff --git a/layers/threading.cpp b/layers/threading.cpp
index 88cc54d50..f7150143e 100644
--- a/layers/threading.cpp
+++ b/layers/threading.cpp
@@ -45,8 +45,7 @@
#include "thread_check.h"
-static void initThreading(layer_data *my_data, const VkAllocationCallbacks *pAllocator)
-{
+static void initThreading(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
uint32_t report_flags = 0;
uint32_t debug_action = 0;
@@ -55,10 +54,9 @@ static void initThreading(layer_data *my_data, const VkAllocationCallbacks *pAll
VkDebugReportCallbackEXT callback;
// initialize threading options
report_flags = getLayerOptionFlags("google_threading.report_flags", 0);
- getLayerOptionEnum("google_threading.debug_action", (uint32_t *) &debug_action);
+ getLayerOptionEnum("google_threading.debug_action", (uint32_t *)&debug_action);
- if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
- {
+ if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG) {
strOpt = getLayerOption("google_threading.log_filename");
log_output = getLayerLogOutput(strOpt, "google_threading");
VkDebugReportCallbackCreateInfoEXT dbgCreateInfo;
@@ -66,7 +64,7 @@ static void initThreading(layer_data *my_data, const VkAllocationCallbacks *pAll
dbgCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
dbgCreateInfo.flags = report_flags;
dbgCreateInfo.pfnCallback = log_callback;
- dbgCreateInfo.pUserData = (void *) log_output;
+ dbgCreateInfo.pUserData = (void *)log_output;
layer_create_msg_callback(my_data->report_data, &dbgCreateInfo, pAllocator, &callback);
my_data->logging_callback.push_back(callback);
}
@@ -82,22 +80,20 @@ static void initThreading(layer_data *my_data, const VkAllocationCallbacks *pAll
my_data->logging_callback.push_back(callback);
}
- if (!threadingLockInitialized)
- {
+ if (!threadingLockInitialized) {
loader_platform_thread_create_mutex(&threadingLock);
loader_platform_thread_init_cond(&threadingCond);
threadingLockInitialized = 1;
}
}
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
- PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance) fpGetInstanceProcAddr(NULL, "vkCreateInstance");
+ PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
if (fpCreateInstance == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -113,18 +109,13 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstance
my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);
- my_data->report_data = debug_report_create_instance(
- my_data->instance_dispatch_table,
- *pInstance,
- pCreateInfo->enabledExtensionCount,
- pCreateInfo->ppEnabledExtensionNames);
+ my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
+ pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
initThreading(my_data, pAllocator);
return result;
}
-
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
dispatch_key key = get_dispatch_key(instance);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
@@ -150,15 +141,14 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance
}
}
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
- PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice) fpGetInstanceProcAddr(NULL, "vkCreateDevice");
+ PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
if (fpCreateDevice == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -182,11 +172,9 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice g
return result;
}
-
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
dispatch_key key = get_dispatch_key(device);
- layer_data* dev_data = get_my_data_ptr(key, layer_data_map);
+ layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
startWriteObject(dev_data, device);
dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
finishWriteObject(dev_data, device);
@@ -194,105 +182,81 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, cons
}
static const VkExtensionProperties threading_extensions[] = {
- {
- VK_EXT_DEBUG_REPORT_EXTENSION_NAME,
- VK_EXT_DEBUG_REPORT_SPEC_VERSION
- }
-};
+ {VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
-VK_LAYER_EXPORT VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties* pProperties)
-{
+VK_LAYER_EXPORT VkResult VKAPI_CALL
+vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
return util_GetExtensionProperties(ARRAY_SIZE(threading_extensions), threading_extensions, pCount, pProperties);
}
-static const VkLayerProperties globalLayerProps[] = {
- {
- "VK_LAYER_GOOGLE_threading",
- VK_API_VERSION, // specVersion
- 1,
- "Google Validation Layer",
- }
-};
-
+static const VkLayerProperties globalLayerProps[] = {{
+ "VK_LAYER_GOOGLE_threading",
+ VK_API_VERSION, // specVersion
+ 1, "Google Validation Layer",
+}};
-VK_LAYER_EXPORT VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties* pProperties)
-{
+VK_LAYER_EXPORT VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
return util_GetLayerProperties(ARRAY_SIZE(globalLayerProps), globalLayerProps, pCount, pProperties);
}
-static const VkLayerProperties deviceLayerProps[] = {
- {
- "VK_LAYER_GOOGLE_threading",
- VK_API_VERSION, // specVersion
- 1,
- "Google Validation Layer",
- }
-};
-
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(
- VkPhysicalDevice physicalDevice,
- const char *pLayerName,
- uint32_t *pCount,
- VkExtensionProperties* pProperties)
-{
+static const VkLayerProperties deviceLayerProps[] = {{
+ "VK_LAYER_GOOGLE_threading",
+ VK_API_VERSION, // specVersion
+ 1, "Google Validation Layer",
+}};
+
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
+ const char *pLayerName, uint32_t *pCount,
+ VkExtensionProperties *pProperties) {
if (pLayerName == NULL) {
dispatch_key key = get_dispatch_key(physicalDevice);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
- return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(
- physicalDevice,
- NULL,
- pCount,
- pProperties);
+ return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
} else {
// Threading layer does not have any device extensions
- return util_GetExtensionProperties(0,
- nullptr,
- pCount, pProperties);
+ return util_GetExtensionProperties(0, nullptr, pCount, pProperties);
}
}
-VK_LAYER_EXPORT VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties* pProperties)
-{
+VK_LAYER_EXPORT VkResult VKAPI_CALL
+vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
return util_GetLayerProperties(ARRAY_SIZE(deviceLayerProps), deviceLayerProps, pCount, pProperties);
}
-static inline PFN_vkVoidFunction layer_intercept_proc(const char *name)
-{
- for (int i=0; i<sizeof(procmap)/sizeof(procmap[0]); i++) {
- if (!strcmp(name, procmap[i].name)) return procmap[i].pFunc;
+static inline PFN_vkVoidFunction layer_intercept_proc(const char *name) {
+ for (int i = 0; i < sizeof(procmap) / sizeof(procmap[0]); i++) {
+ if (!strcmp(name, procmap[i].name))
+ return procmap[i].pFunc;
}
return NULL;
}
-
-static inline PFN_vkVoidFunction layer_intercept_instance_proc(const char *name)
-{
+static inline PFN_vkVoidFunction layer_intercept_instance_proc(const char *name) {
if (!name || name[0] != 'v' || name[1] != 'k')
return NULL;
name += 2;
if (!strcmp(name, "CreateInstance"))
- return (PFN_vkVoidFunction) vkCreateInstance;
+ return (PFN_vkVoidFunction)vkCreateInstance;
if (!strcmp(name, "DestroyInstance"))
- return (PFN_vkVoidFunction) vkDestroyInstance;
+ return (PFN_vkVoidFunction)vkDestroyInstance;
if (!strcmp(name, "EnumerateInstanceExtensionProperties"))
- return (PFN_vkVoidFunction) vkEnumerateInstanceExtensionProperties;
+ return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
if (!strcmp(name, "EnumerateInstanceLayerProperties"))
- return (PFN_vkVoidFunction) vkEnumerateInstanceLayerProperties;
+ return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
if (!strcmp(name, "EnumerateDeviceExtensionProperties"))
- return (PFN_vkVoidFunction) vkEnumerateDeviceExtensionProperties;
+ return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;
if (!strcmp(name, "EnumerateDeviceLayerProperties"))
- return (PFN_vkVoidFunction) vkEnumerateDeviceLayerProperties;
+ return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
if (!strcmp(name, "CreateDevice"))
- return (PFN_vkVoidFunction) vkCreateDevice;
+ return (PFN_vkVoidFunction)vkCreateDevice;
if (!strcmp(name, "GetInstanceProcAddr"))
- return (PFN_vkVoidFunction) vkGetInstanceProcAddr;
+ return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
return NULL;
}
-VK_LAYER_EXPORT PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char* funcName)
-{
+VK_LAYER_EXPORT PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char *funcName) {
PFN_vkVoidFunction addr;
layer_data *dev_data;
if (device == VK_NULL_HANDLE) {
@@ -304,17 +268,16 @@ VK_LAYER_EXPORT PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice devic
return addr;
dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
- VkLayerDispatchTable* pTable = dev_data->device_dispatch_table;
+ VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
if (pTable->GetDeviceProcAddr == NULL)
return NULL;
return pTable->GetDeviceProcAddr(device, funcName);
}
-VK_LAYER_EXPORT PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char* funcName)
-{
+VK_LAYER_EXPORT PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
PFN_vkVoidFunction addr;
- layer_data* my_data;
+ layer_data *my_data;
addr = layer_intercept_instance_proc(funcName);
if (addr) {
@@ -331,22 +294,20 @@ VK_LAYER_EXPORT PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance i
return addr;
}
- VkLayerInstanceDispatchTable* pTable = my_data->instance_dispatch_table;
+ VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
if (pTable->GetInstanceProcAddr == NULL) {
return NULL;
}
return pTable->GetInstanceProcAddr(instance, funcName);
}
-VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
- VkInstance instance,
- const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDebugReportCallbackEXT* pMsgCallback)
-{
+VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
+vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
startReadObject(my_data, instance);
- VkResult result = my_data->instance_dispatch_table->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
+ VkResult result =
+ my_data->instance_dispatch_table->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
if (VK_SUCCESS == result) {
result = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
}
@@ -354,11 +315,8 @@ VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(
return result;
}
-VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
- VkInstance instance,
- VkDebugReportCallbackEXT callback,
- const VkAllocationCallbacks* pAllocator)
-{
+VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
+vkDestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks *pAllocator) {
layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
startReadObject(my_data, instance);
startWriteObject(my_data, callback);
@@ -368,11 +326,8 @@ VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(
finishWriteObject(my_data, callback);
}
-VkResult VKAPI_CALL vkAllocateCommandBuffers(
- VkDevice device,
- const VkCommandBufferAllocateInfo* pAllocateInfo,
- VkCommandBuffer* pCommandBuffers)
-{
+VkResult VKAPI_CALL
+vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo, VkCommandBuffer *pCommandBuffers) {
dispatch_key key = get_dispatch_key(device);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
VkLayerDispatchTable *pTable = my_data->device_dispatch_table;
@@ -386,7 +341,7 @@ VkResult VKAPI_CALL vkAllocateCommandBuffers(
// Record mapping from command buffer to command pool
if (VK_SUCCESS == result) {
- for (int index=0;index<pAllocateInfo->commandBufferCount;index++) {
+ for (int index = 0; index < pAllocateInfo->commandBufferCount; index++) {
loader_platform_thread_lock_mutex(&threadingLock);
command_pool_map[pCommandBuffers[index]] = pAllocateInfo->commandPool;
loader_platform_thread_unlock_mutex(&threadingLock);
@@ -396,30 +351,25 @@ VkResult VKAPI_CALL vkAllocateCommandBuffers(
return result;
}
-void VKAPI_CALL vkFreeCommandBuffers(
- VkDevice device,
- VkCommandPool commandPool,
- uint32_t commandBufferCount,
- const VkCommandBuffer* pCommandBuffers)
-{
+void VKAPI_CALL vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
+ const VkCommandBuffer *pCommandBuffers) {
dispatch_key key = get_dispatch_key(device);
layer_data *my_data = get_my_data_ptr(key, layer_data_map);
VkLayerDispatchTable *pTable = my_data->device_dispatch_table;
const bool lockCommandPool = false; // pool is already directly locked
startReadObject(my_data, device);
startWriteObject(my_data, commandPool);
- for (int index=0;index<commandBufferCount;index++) {
+ for (int index = 0; index < commandBufferCount; index++) {
startWriteObject(my_data, pCommandBuffers[index], lockCommandPool);
}
- pTable->FreeCommandBuffers(device,commandPool,commandBufferCount,pCommandBuffers);
+ pTable->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
finishReadObject(my_data, device);
finishWriteObject(my_data, commandPool);
- for (int index=0;index<commandBufferCount;index++) {
+ for (int index = 0; index < commandBufferCount; index++) {
finishWriteObject(my_data, pCommandBuffers[index], lockCommandPool);
loader_platform_thread_lock_mutex(&threadingLock);
command_pool_map.erase(pCommandBuffers[index]);
loader_platform_thread_unlock_mutex(&threadingLock);
}
}
-
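The threading layer's per-object protection follows one bracketing pattern, visible above in vkDestroyDevice and vkFreeCommandBuffers: start a read on handles that may legitimately be shared, start a write on handles that must be externally synchronized, call down the dispatch table, then release in the same way. The bulk of the per-entry-point intercepts live in the included thread_check.h; as an illustrative sketch of the shape such an intercept takes (not code from this commit):

    // Sketch of an intercept in this style: device is read-locked, fence is write-locked.
    VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
        layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
        startReadObject(my_data, device);
        startWriteObject(my_data, fence);
        my_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
        finishReadObject(my_data, device);
        finishWriteObject(my_data, fence);
    }
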
diff --git a/layers/threading.h b/layers/threading.h
index 1a04a62fa..0e2336392 100644
--- a/layers/threading.h
+++ b/layers/threading.h
@@ -31,18 +31,18 @@
#include "vk_layer_config.h"
#include "vk_layer_logging.h"
-#if defined(__LP64__) || defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+#if defined(__LP64__) || defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || \
+ defined(__aarch64__) || defined(__powerpc64__)
// If pointers are 64-bit, then there can be separate counters for each
// NONDISPATCHABLE_HANDLE type. Otherwise they are all typedef uint64_t.
#define DISTINCT_NONDISPATCHABLE_HANDLES
#endif
// Draw State ERROR codes
-typedef enum _THREADING_CHECKER_ERROR
-{
- THREADING_CHECKER_NONE, // Used for INFO & other non-error messages
- THREADING_CHECKER_MULTIPLE_THREADS, // Object used simultaneously by multiple threads
- THREADING_CHECKER_SINGLE_THREAD_REUSE, // Object used simultaneously by recursion in single thread
+typedef enum _THREADING_CHECKER_ERROR {
+ THREADING_CHECKER_NONE, // Used for INFO & other non-error messages
+ THREADING_CHECKER_MULTIPLE_THREADS, // Object used simultaneously by multiple threads
+ THREADING_CHECKER_SINGLE_THREAD_REUSE, // Object used simultaneously by recursion in single thread
} THREADING_CHECKER_ERROR;
struct object_use_data {
@@ -58,12 +58,11 @@ static loader_platform_thread_mutex threadingLock;
static loader_platform_thread_cond threadingCond;
template <typename T> class counter {
- public:
+ public:
const char *typeName;
VkDebugReportObjectTypeEXT objectType;
std::unordered_map<T, object_use_data> uses;
- void startWrite(debug_report_data *report_data, T object)
- {
+ void startWrite(debug_report_data *report_data, T object) {
VkBool32 skipCall = VK_FALSE;
loader_platform_thread_id tid = loader_platform_get_thread_id();
loader_platform_thread_lock_mutex(&threadingLock);
@@ -79,9 +78,9 @@ template <typename T> class counter {
// There are no readers. Two writers just collided.
if (use_data->thread != tid) {
skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
- /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
- "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
- typeName, use_data->thread, tid);
+ /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
+ "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
+ typeName, use_data->thread, tid);
if (skipCall) {
// Wait for thread-safe access to object instead of skipping call.
while (uses.find(object) != uses.end()) {
@@ -89,12 +88,12 @@ template <typename T> class counter {
}
// There is now no current use of the object. Record writer thread.
struct object_use_data *use_data = &uses[object];
- use_data->thread = tid ;
+ use_data->thread = tid;
use_data->reader_count = 0;
use_data->writer_count = 1;
} else {
// Continue with an unsafe use of the object.
- use_data->thread = tid ;
+ use_data->thread = tid;
use_data->writer_count += 1;
}
} else {
@@ -106,9 +105,9 @@ template <typename T> class counter {
// There are readers. This writer collided with them.
if (use_data->thread != tid) {
skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
- /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
- "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
- typeName, use_data->thread, tid);
+ /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
+ "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
+ typeName, use_data->thread, tid);
if (skipCall) {
// Wait for thread-safe access to object instead of skipping call.
while (uses.find(object) != uses.end()) {
@@ -116,12 +115,12 @@ template <typename T> class counter {
}
// There is now no current use of the object. Record writer thread.
struct object_use_data *use_data = &uses[object];
- use_data->thread = tid ;
+ use_data->thread = tid;
use_data->reader_count = 0;
use_data->writer_count = 1;
} else {
// Continue with an unsafe use of the object.
- use_data->thread = tid ;
+ use_data->thread = tid;
use_data->writer_count += 1;
}
} else {
@@ -134,8 +133,7 @@ template <typename T> class counter {
loader_platform_thread_unlock_mutex(&threadingLock);
}
- void finishWrite(T object)
- {
+ void finishWrite(T object) {
// Object is no longer in use
loader_platform_thread_lock_mutex(&threadingLock);
uses[object].writer_count -= 1;
@@ -160,9 +158,9 @@ template <typename T> class counter {
} else if (uses[object].writer_count > 0 && uses[object].thread != tid) {
// There is a writer of the object.
skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
- /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
- "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
- typeName, uses[object].thread, tid);
+ /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
+ "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld", typeName,
+ uses[object].thread, tid);
if (skipCall) {
// Wait for thread-safe access to object instead of skipping call.
while (uses.find(object) != uses.end()) {
@@ -192,18 +190,17 @@ template <typename T> class counter {
loader_platform_thread_cond_broadcast(&threadingCond);
loader_platform_thread_unlock_mutex(&threadingLock);
}
- counter(const char *name = "",
- VkDebugReportObjectTypeEXT type=VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) {
+ counter(const char *name = "", VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) {
typeName = name;
- objectType=type;
+ objectType = type;
}
};
struct layer_data {
debug_report_data *report_data;
std::vector<VkDebugReportCallbackEXT> logging_callback;
- VkLayerDispatchTable* device_dispatch_table;
- VkLayerInstanceDispatchTable* instance_dispatch_table;
+ VkLayerDispatchTable *device_dispatch_table;
+ VkLayerInstanceDispatchTable *instance_dispatch_table;
counter<VkCommandBuffer> c_VkCommandBuffer;
counter<VkDevice> c_VkDevice;
counter<VkInstance> c_VkInstance;
@@ -230,48 +227,50 @@ struct layer_data {
counter<VkSemaphore> c_VkSemaphore;
counter<VkShaderModule> c_VkShaderModule;
counter<VkDebugReportCallbackEXT> c_VkDebugReportCallbackEXT;
-#else // DISTINCT_NONDISPATCHABLE_HANDLES
+#else // DISTINCT_NONDISPATCHABLE_HANDLES
counter<uint64_t> c_uint64_t;
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
- layer_data():
- report_data(nullptr),
- c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT),
- c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT),
- c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT),
- c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
+ layer_data()
+ : report_data(nullptr), c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT),
+ c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT),
+ c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT),
+ c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
- c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT),
- c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT),
- c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT),
- c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT),
- c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT),
- c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT),
- c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT),
- c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT),
- c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT),
- c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT),
- c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT),
- c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT),
- c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT),
- c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT),
- c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT),
- c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT),
- c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT),
- c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT),
- c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT),
- c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT),
- c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT)
-#else // DISTINCT_NONDISPATCHABLE_HANDLES
- c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT)
+ c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT),
+ c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT),
+ c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT),
+ c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT),
+ c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT),
+ c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT),
+ c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT),
+ c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT), c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT),
+ c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT),
+ c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT),
+ c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT),
+ c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT),
+ c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT),
+ c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT),
+ c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT),
+ c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT),
+ c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT),
+ c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT),
+ c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT),
+ c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT)
+#else // DISTINCT_NONDISPATCHABLE_HANDLES
+ c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
- {};
+ {};
};
-#define WRAPPER(type) \
-static void startWriteObject(struct layer_data *my_data, type object){my_data->c_##type.startWrite(my_data->report_data, object);}\
-static void finishWriteObject(struct layer_data *my_data, type object){my_data->c_##type.finishWrite(object);}\
-static void startReadObject(struct layer_data *my_data, type object){my_data->c_##type.startRead(my_data->report_data, object);}\
-static void finishReadObject(struct layer_data *my_data, type object){my_data->c_##type.finishRead(object);}
+#define WRAPPER(type) \
+ static void startWriteObject(struct layer_data *my_data, type object) { \
+ my_data->c_##type.startWrite(my_data->report_data, object); \
+ } \
+ static void finishWriteObject(struct layer_data *my_data, type object) { my_data->c_##type.finishWrite(object); } \
+ static void startReadObject(struct layer_data *my_data, type object) { \
+ my_data->c_##type.startRead(my_data->report_data, object); \
+ } \
+ static void finishReadObject(struct layer_data *my_data, type object) { my_data->c_##type.finishRead(object); }
WRAPPER(VkDevice)
WRAPPER(VkInstance)
@@ -298,16 +297,15 @@ WRAPPER(VkSampler)
WRAPPER(VkSemaphore)
WRAPPER(VkShaderModule)
WRAPPER(VkDebugReportCallbackEXT)
-#else // DISTINCT_NONDISPATCHABLE_HANDLES
+#else // DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(uint64_t)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
-static std::unordered_map<void*, layer_data *> layer_data_map;
+static std::unordered_map<void *, layer_data *> layer_data_map;
static std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map;
// VkCommandBuffer needs check for implicit use of command pool
-static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool=true)
-{
+static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
if (lockPool) {
loader_platform_thread_lock_mutex(&threadingLock);
VkCommandPool pool = command_pool_map[object];
@@ -316,8 +314,7 @@ static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object,
}
my_data->c_VkCommandBuffer.startWrite(my_data->report_data, object);
}
-static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool=true)
-{
+static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
my_data->c_VkCommandBuffer.finishWrite(object);
if (lockPool) {
loader_platform_thread_lock_mutex(&threadingLock);
@@ -326,16 +323,14 @@ static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object
finishWriteObject(my_data, pool);
}
}
-static void startReadObject(struct layer_data *my_data, VkCommandBuffer object)
-{
+static void startReadObject(struct layer_data *my_data, VkCommandBuffer object) {
loader_platform_thread_lock_mutex(&threadingLock);
VkCommandPool pool = command_pool_map[object];
loader_platform_thread_unlock_mutex(&threadingLock);
startReadObject(my_data, pool);
my_data->c_VkCommandBuffer.startRead(my_data->report_data, object);
}
-static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object)
-{
+static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object) {
my_data->c_VkCommandBuffer.finishRead(object);
loader_platform_thread_lock_mutex(&threadingLock);
VkCommandPool pool = command_pool_map[object];
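The reformatted WRAPPER macro still produces the same four free functions per handle type; for example, WRAPPER(VkDevice) expands to roughly:

    static void startWriteObject(struct layer_data *my_data, VkDevice object) {
        my_data->c_VkDevice.startWrite(my_data->report_data, object);
    }
    static void finishWriteObject(struct layer_data *my_data, VkDevice object) { my_data->c_VkDevice.finishWrite(object); }
    static void startReadObject(struct layer_data *my_data, VkDevice object) {
        my_data->c_VkDevice.startRead(my_data->report_data, object);
    }
    static void finishReadObject(struct layer_data *my_data, VkDevice object) { my_data->c_VkDevice.finishRead(object); }

so intercepts can call startWriteObject / finishWriteObject and the like without knowing which counter<T> backs a given handle type.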
diff --git a/layers/unique_objects.h b/layers/unique_objects.h
index 42c85d0db..42008448d 100644
--- a/layers/unique_objects.h
+++ b/layers/unique_objects.h
@@ -47,9 +47,7 @@
struct layer_data {
bool wsi_enabled;
- layer_data() :
- wsi_enabled(false)
- {};
+ layer_data() : wsi_enabled(false){};
};
struct instExts {
@@ -62,51 +60,58 @@ struct instExts {
bool win32_enabled;
};
-static std::unordered_map<void*, struct instExts> instanceExtMap;
-static std::unordered_map<void*, layer_data *> layer_data_map;
-static device_table_map unique_objects_device_table_map;
-static instance_table_map unique_objects_instance_table_map;
+static std::unordered_map<void *, struct instExts> instanceExtMap;
+static std::unordered_map<void *, layer_data *> layer_data_map;
+static device_table_map unique_objects_device_table_map;
+static instance_table_map unique_objects_instance_table_map;
// Structure to wrap returned non-dispatchable objects to guarantee they have unique handles
// address of struct will be used as the unique handle
-struct VkUniqueObject
-{
+struct VkUniqueObject {
uint64_t actualObject;
};
// Handle CreateInstance
-static void createInstanceRegisterExtensions(const VkInstanceCreateInfo* pCreateInfo, VkInstance instance)
-{
+static void createInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
uint32_t i;
VkLayerInstanceDispatchTable *pDisp = get_dispatch_table(unique_objects_instance_table_map, instance);
PFN_vkGetInstanceProcAddr gpa = pDisp->GetInstanceProcAddr;
pDisp->DestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)gpa(instance, "vkDestroySurfaceKHR");
- pDisp->GetPhysicalDeviceSurfaceSupportKHR = (PFN_vkGetPhysicalDeviceSurfaceSupportKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
- pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR = (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
- pDisp->GetPhysicalDeviceSurfaceFormatsKHR = (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR) gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
- pDisp->GetPhysicalDeviceSurfacePresentModesKHR = (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR) gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");
+ pDisp->GetPhysicalDeviceSurfaceSupportKHR =
+ (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
+ pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR =
+ (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
+ pDisp->GetPhysicalDeviceSurfaceFormatsKHR =
+ (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
+ pDisp->GetPhysicalDeviceSurfacePresentModesKHR =
+ (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");
#ifdef VK_USE_PLATFORM_WIN32_KHR
- pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR) gpa(instance, "vkCreateWin32SurfaceKHR");
- pDisp->GetPhysicalDeviceWin32PresentationSupportKHR = (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
+ pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)gpa(instance, "vkCreateWin32SurfaceKHR");
+ pDisp->GetPhysicalDeviceWin32PresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
- pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR) gpa(instance, "vkCreateXcbSurfaceKHR");
- pDisp->GetPhysicalDeviceXcbPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
+ pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)gpa(instance, "vkCreateXcbSurfaceKHR");
+ pDisp->GetPhysicalDeviceXcbPresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
- pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR) gpa(instance, "vkCreateXlibSurfaceKHR");
- pDisp->GetPhysicalDeviceXlibPresentationSupportKHR = (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
+ pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)gpa(instance, "vkCreateXlibSurfaceKHR");
+ pDisp->GetPhysicalDeviceXlibPresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XLIB_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
- pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR) gpa(instance, "vkCreateMirSurfaceKHR");
- pDisp->GetPhysicalDeviceMirPresentationSupportKHR = (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
+ pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR)gpa(instance, "vkCreateMirSurfaceKHR");
+ pDisp->GetPhysicalDeviceMirPresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
- pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR) gpa(instance, "vkCreateWaylandSurfaceKHR");
- pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR = (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR) gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
+ pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)gpa(instance, "vkCreateWaylandSurfaceKHR");
+ pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR =
+ (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_ANDROID_KHR
- pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR) gpa(instance, "vkCreateAndroidSurfaceKHR");
+ pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR)gpa(instance, "vkCreateAndroidSurfaceKHR");
#endif // VK_USE_PLATFORM_ANDROID_KHR
instanceExtMap[pDisp] = {};
@@ -140,17 +145,13 @@ static void createInstanceRegisterExtensions(const VkInstanceCreateInfo* pCreate
}
}
-VkResult
-explicit_CreateInstance(
- const VkInstanceCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkInstance *pInstance)
-{
+VkResult explicit_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
+ VkInstance *pInstance) {
VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
- PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance) fpGetInstanceProcAddr(NULL, "vkCreateInstance");
+ PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
if (fpCreateInstance == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -171,16 +172,15 @@ explicit_CreateInstance(
}
// Handle CreateDevice
-static void createDeviceRegisterExtensions(const VkDeviceCreateInfo* pCreateInfo, VkDevice device)
-{
+static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
VkLayerDispatchTable *pDisp = get_dispatch_table(unique_objects_device_table_map, device);
PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
- pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR) gpa(device, "vkCreateSwapchainKHR");
- pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR) gpa(device, "vkDestroySwapchainKHR");
- pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR) gpa(device, "vkGetSwapchainImagesKHR");
- pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR) gpa(device, "vkAcquireNextImageKHR");
- pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR) gpa(device, "vkQueuePresentKHR");
+ pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
+ pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
+ pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
+ pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
+ pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
my_device_data->wsi_enabled = false;
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
@@ -188,19 +188,14 @@ static void createDeviceRegisterExtensions(const VkDeviceCreateInfo* pCreateInfo
}
}
-VkResult
-explicit_CreateDevice(
- VkPhysicalDevice gpu,
- const VkDeviceCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkDevice *pDevice)
-{
+VkResult explicit_CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
+ VkDevice *pDevice) {
VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
assert(chain_info->u.pLayerInfo);
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
- PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice) fpGetInstanceProcAddr(NULL, "vkCreateDevice");
+ PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
if (fpCreateDevice == NULL) {
return VK_ERROR_INITIALIZATION_FAILED;
}
@@ -221,47 +216,48 @@ explicit_CreateDevice(
return result;
}
-VkResult explicit_QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence)
-{
-// UNWRAP USES:
-// 0 : fence,VkFence
+VkResult explicit_QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
+ // UNWRAP USES:
+ // 0 : fence,VkFence
if (VK_NULL_HANDLE != fence) {
- fence = (VkFence)((VkUniqueObject*)fence)->actualObject;
+ fence = (VkFence)((VkUniqueObject *)fence)->actualObject;
}
-// waitSemaphoreCount : pSubmits[submitCount]->pWaitSemaphores,VkSemaphore
+ // waitSemaphoreCount : pSubmits[submitCount]->pWaitSemaphores,VkSemaphore
std::vector<VkSemaphore> original_pWaitSemaphores = {};
-// signalSemaphoreCount : pSubmits[submitCount]->pSignalSemaphores,VkSemaphore
+ // signalSemaphoreCount : pSubmits[submitCount]->pSignalSemaphores,VkSemaphore
std::vector<VkSemaphore> original_pSignalSemaphores = {};
if (pSubmits) {
- for (uint32_t index0=0; index0<submitCount; ++index0) {
+ for (uint32_t index0 = 0; index0 < submitCount; ++index0) {
if (pSubmits[index0].pWaitSemaphores) {
- for (uint32_t index1=0; index1<pSubmits[index0].waitSemaphoreCount; ++index1) {
- VkSemaphore** ppSemaphore = (VkSemaphore**)&(pSubmits[index0].pWaitSemaphores);
+ for (uint32_t index1 = 0; index1 < pSubmits[index0].waitSemaphoreCount; ++index1) {
+ VkSemaphore **ppSemaphore = (VkSemaphore **)&(pSubmits[index0].pWaitSemaphores);
original_pWaitSemaphores.push_back(pSubmits[index0].pWaitSemaphores[index1]);
- *(ppSemaphore[index1]) = (VkSemaphore)((VkUniqueObject*)pSubmits[index0].pWaitSemaphores[index1])->actualObject;
+ *(ppSemaphore[index1]) =
+ (VkSemaphore)((VkUniqueObject *)pSubmits[index0].pWaitSemaphores[index1])->actualObject;
}
}
if (pSubmits[index0].pSignalSemaphores) {
- for (uint32_t index1=0; index1<pSubmits[index0].signalSemaphoreCount; ++index1) {
- VkSemaphore** ppSemaphore = (VkSemaphore**)&(pSubmits[index0].pSignalSemaphores);
+ for (uint32_t index1 = 0; index1 < pSubmits[index0].signalSemaphoreCount; ++index1) {
+ VkSemaphore **ppSemaphore = (VkSemaphore **)&(pSubmits[index0].pSignalSemaphores);
original_pSignalSemaphores.push_back(pSubmits[index0].pSignalSemaphores[index1]);
- *(ppSemaphore[index1]) = (VkSemaphore)((VkUniqueObject*)pSubmits[index0].pSignalSemaphores[index1])->actualObject;
+ *(ppSemaphore[index1]) =
+ (VkSemaphore)((VkUniqueObject *)pSubmits[index0].pSignalSemaphores[index1])->actualObject;
}
}
}
}
VkResult result = get_dispatch_table(unique_objects_device_table_map, queue)->QueueSubmit(queue, submitCount, pSubmits, fence);
if (pSubmits) {
- for (uint32_t index0=0; index0<submitCount; ++index0) {
+ for (uint32_t index0 = 0; index0 < submitCount; ++index0) {
if (pSubmits[index0].pWaitSemaphores) {
- for (uint32_t index1=0; index1<pSubmits[index0].waitSemaphoreCount; ++index1) {
- VkSemaphore** ppSemaphore = (VkSemaphore**)&(pSubmits[index0].pWaitSemaphores);
+ for (uint32_t index1 = 0; index1 < pSubmits[index0].waitSemaphoreCount; ++index1) {
+ VkSemaphore **ppSemaphore = (VkSemaphore **)&(pSubmits[index0].pWaitSemaphores);
*(ppSemaphore[index1]) = original_pWaitSemaphores[index1];
}
}
if (pSubmits[index0].pSignalSemaphores) {
- for (uint32_t index1=0; index1<pSubmits[index0].signalSemaphoreCount; ++index1) {
- VkSemaphore** ppSemaphore = (VkSemaphore**)&(pSubmits[index0].pSignalSemaphores);
+ for (uint32_t index1 = 0; index1 < pSubmits[index0].signalSemaphoreCount; ++index1) {
+ VkSemaphore **ppSemaphore = (VkSemaphore **)&(pSubmits[index0].pSignalSemaphores);
*(ppSemaphore[index1]) = original_pSignalSemaphores[index1];
}
}
@@ -270,10 +266,14 @@ VkResult explicit_QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmi
return result;
}
-VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence)
-{
-// UNWRAP USES:
-// 0 : pBindInfo[bindInfoCount]->pBufferBinds[bufferBindCount]->buffer,VkBuffer, pBindInfo[bindInfoCount]->pBufferBinds[bufferBindCount]->pBinds[bindCount]->memory,VkDeviceMemory, pBindInfo[bindInfoCount]->pImageOpaqueBinds[imageOpaqueBindCount]->image,VkImage, pBindInfo[bindInfoCount]->pImageOpaqueBinds[imageOpaqueBindCount]->pBinds[bindCount]->memory,VkDeviceMemory, pBindInfo[bindInfoCount]->pImageBinds[imageBindCount]->image,VkImage, pBindInfo[bindInfoCount]->pImageBinds[imageBindCount]->pBinds[bindCount]->memory,VkDeviceMemory
+VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
+ // UNWRAP USES:
+ // 0 : pBindInfo[bindInfoCount]->pBufferBinds[bufferBindCount]->buffer,VkBuffer,
+ // pBindInfo[bindInfoCount]->pBufferBinds[bufferBindCount]->pBinds[bindCount]->memory,VkDeviceMemory,
+ // pBindInfo[bindInfoCount]->pImageOpaqueBinds[imageOpaqueBindCount]->image,VkImage,
+ // pBindInfo[bindInfoCount]->pImageOpaqueBinds[imageOpaqueBindCount]->pBinds[bindCount]->memory,VkDeviceMemory,
+ // pBindInfo[bindInfoCount]->pImageBinds[imageBindCount]->image,VkImage,
+ // pBindInfo[bindInfoCount]->pImageBinds[imageBindCount]->pBinds[bindCount]->memory,VkDeviceMemory
std::vector<VkBuffer> original_buffer = {};
std::vector<VkDeviceMemory> original_memory1 = {};
std::vector<VkImage> original_image1 = {};
@@ -283,93 +283,107 @@ VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const V
std::vector<VkSemaphore> original_pWaitSemaphores = {};
std::vector<VkSemaphore> original_pSignalSemaphores = {};
if (pBindInfo) {
- for (uint32_t index0=0; index0<bindInfoCount; ++index0) {
+ for (uint32_t index0 = 0; index0 < bindInfoCount; ++index0) {
if (pBindInfo[index0].pBufferBinds) {
- for (uint32_t index1=0; index1<pBindInfo[index0].bufferBindCount; ++index1) {
+ for (uint32_t index1 = 0; index1 < pBindInfo[index0].bufferBindCount; ++index1) {
if (pBindInfo[index0].pBufferBinds[index1].buffer) {
- VkBuffer* pBuffer = (VkBuffer*)&(pBindInfo[index0].pBufferBinds[index1].buffer);
+ VkBuffer *pBuffer = (VkBuffer *)&(pBindInfo[index0].pBufferBinds[index1].buffer);
original_buffer.push_back(pBindInfo[index0].pBufferBinds[index1].buffer);
- *(pBuffer) = (VkBuffer)((VkUniqueObject*)pBindInfo[index0].pBufferBinds[index1].buffer)->actualObject;
+ *(pBuffer) = (VkBuffer)((VkUniqueObject *)pBindInfo[index0].pBufferBinds[index1].buffer)->actualObject;
}
if (pBindInfo[index0].pBufferBinds[index1].pBinds) {
- for (uint32_t index2=0; index2<pBindInfo[index0].pBufferBinds[index1].bindCount; ++index2) {
+ for (uint32_t index2 = 0; index2 < pBindInfo[index0].pBufferBinds[index1].bindCount; ++index2) {
if (pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory) {
- VkDeviceMemory* pDeviceMemory = (VkDeviceMemory*)&(pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory);
+ VkDeviceMemory *pDeviceMemory =
+ (VkDeviceMemory *)&(pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory);
original_memory1.push_back(pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory);
- *(pDeviceMemory) = (VkDeviceMemory)((VkUniqueObject*)pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory)->actualObject;
+ *(pDeviceMemory) =
+ (VkDeviceMemory)((VkUniqueObject *)pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory)
+ ->actualObject;
}
}
}
}
}
if (pBindInfo[index0].pImageOpaqueBinds) {
- for (uint32_t index1=0; index1<pBindInfo[index0].imageOpaqueBindCount; ++index1) {
+ for (uint32_t index1 = 0; index1 < pBindInfo[index0].imageOpaqueBindCount; ++index1) {
if (pBindInfo[index0].pImageOpaqueBinds[index1].image) {
- VkImage* pImage = (VkImage*)&(pBindInfo[index0].pImageOpaqueBinds[index1].image);
+ VkImage *pImage = (VkImage *)&(pBindInfo[index0].pImageOpaqueBinds[index1].image);
original_image1.push_back(pBindInfo[index0].pImageOpaqueBinds[index1].image);
- *(pImage) = (VkImage)((VkUniqueObject*)pBindInfo[index0].pImageOpaqueBinds[index1].image)->actualObject;
+ *(pImage) = (VkImage)((VkUniqueObject *)pBindInfo[index0].pImageOpaqueBinds[index1].image)->actualObject;
}
if (pBindInfo[index0].pImageOpaqueBinds[index1].pBinds) {
- for (uint32_t index2=0; index2<pBindInfo[index0].pImageOpaqueBinds[index1].bindCount; ++index2) {
+ for (uint32_t index2 = 0; index2 < pBindInfo[index0].pImageOpaqueBinds[index1].bindCount; ++index2) {
if (pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory) {
- VkDeviceMemory* pDeviceMemory = (VkDeviceMemory*)&(pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory);
+ VkDeviceMemory *pDeviceMemory =
+ (VkDeviceMemory *)&(pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory);
original_memory2.push_back(pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory);
- *(pDeviceMemory) = (VkDeviceMemory)((VkUniqueObject*)pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory)->actualObject;
+ *(pDeviceMemory) =
+ (VkDeviceMemory)(
+ (VkUniqueObject *)pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory)
+ ->actualObject;
}
}
}
}
}
if (pBindInfo[index0].pImageBinds) {
- for (uint32_t index1=0; index1<pBindInfo[index0].imageBindCount; ++index1) {
+ for (uint32_t index1 = 0; index1 < pBindInfo[index0].imageBindCount; ++index1) {
if (pBindInfo[index0].pImageBinds[index1].image) {
- VkImage* pImage = (VkImage*)&(pBindInfo[index0].pImageBinds[index1].image);
+ VkImage *pImage = (VkImage *)&(pBindInfo[index0].pImageBinds[index1].image);
original_image2.push_back(pBindInfo[index0].pImageBinds[index1].image);
- *(pImage) = (VkImage)((VkUniqueObject*)pBindInfo[index0].pImageBinds[index1].image)->actualObject;
+ *(pImage) = (VkImage)((VkUniqueObject *)pBindInfo[index0].pImageBinds[index1].image)->actualObject;
}
if (pBindInfo[index0].pImageBinds[index1].pBinds) {
- for (uint32_t index2=0; index2<pBindInfo[index0].pImageBinds[index1].bindCount; ++index2) {
+ for (uint32_t index2 = 0; index2 < pBindInfo[index0].pImageBinds[index1].bindCount; ++index2) {
if (pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory) {
- VkDeviceMemory* pDeviceMemory = (VkDeviceMemory*)&(pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory);
+ VkDeviceMemory *pDeviceMemory =
+ (VkDeviceMemory *)&(pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory);
original_memory3.push_back(pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory);
- *(pDeviceMemory) = (VkDeviceMemory)((VkUniqueObject*)pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory)->actualObject;
+ *(pDeviceMemory) =
+ (VkDeviceMemory)((VkUniqueObject *)pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory)
+ ->actualObject;
}
}
}
}
}
if (pBindInfo[index0].pWaitSemaphores) {
- for (uint32_t index1=0; index1<pBindInfo[index0].waitSemaphoreCount; ++index1) {
- VkSemaphore** ppSemaphore = (VkSemaphore**)&(pBindInfo[index0].pWaitSemaphores);
+ for (uint32_t index1 = 0; index1 < pBindInfo[index0].waitSemaphoreCount; ++index1) {
+ VkSemaphore **ppSemaphore = (VkSemaphore **)&(pBindInfo[index0].pWaitSemaphores);
original_pWaitSemaphores.push_back(pBindInfo[index0].pWaitSemaphores[index1]);
- *(ppSemaphore[index1]) = (VkSemaphore)((VkUniqueObject*)pBindInfo[index0].pWaitSemaphores[index1])->actualObject;
+ *(ppSemaphore[index1]) =
+ (VkSemaphore)((VkUniqueObject *)pBindInfo[index0].pWaitSemaphores[index1])->actualObject;
}
}
if (pBindInfo[index0].pSignalSemaphores) {
- for (uint32_t index1=0; index1<pBindInfo[index0].signalSemaphoreCount; ++index1) {
- VkSemaphore** ppSemaphore = (VkSemaphore**)&(pBindInfo[index0].pSignalSemaphores);
+ for (uint32_t index1 = 0; index1 < pBindInfo[index0].signalSemaphoreCount; ++index1) {
+ VkSemaphore **ppSemaphore = (VkSemaphore **)&(pBindInfo[index0].pSignalSemaphores);
original_pSignalSemaphores.push_back(pBindInfo[index0].pSignalSemaphores[index1]);
- *(ppSemaphore[index1]) = (VkSemaphore)((VkUniqueObject*)pBindInfo[index0].pSignalSemaphores[index1])->actualObject;
+ *(ppSemaphore[index1]) =
+ (VkSemaphore)((VkUniqueObject *)pBindInfo[index0].pSignalSemaphores[index1])->actualObject;
}
}
}
}
if (VK_NULL_HANDLE != fence) {
- fence = (VkFence)((VkUniqueObject*)fence)->actualObject;
+ fence = (VkFence)((VkUniqueObject *)fence)->actualObject;
}
- VkResult result = get_dispatch_table(unique_objects_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
+ VkResult result =
+ get_dispatch_table(unique_objects_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
if (pBindInfo) {
- for (uint32_t index0=0; index0<bindInfoCount; ++index0) {
+ for (uint32_t index0 = 0; index0 < bindInfoCount; ++index0) {
if (pBindInfo[index0].pBufferBinds) {
- for (uint32_t index1=0; index1<pBindInfo[index0].bufferBindCount; ++index1) {
+ for (uint32_t index1 = 0; index1 < pBindInfo[index0].bufferBindCount; ++index1) {
if (pBindInfo[index0].pBufferBinds[index1].buffer) {
- VkBuffer* pBuffer = (VkBuffer*)&(pBindInfo[index0].pBufferBinds[index1].buffer);
+ VkBuffer *pBuffer = (VkBuffer *)&(pBindInfo[index0].pBufferBinds[index1].buffer);
*(pBuffer) = original_buffer[index1];
}
if (pBindInfo[index0].pBufferBinds[index1].pBinds) {
- for (uint32_t index2=0; index2<pBindInfo[index0].pBufferBinds[index1].bindCount; ++index2) {
+ for (uint32_t index2 = 0; index2 < pBindInfo[index0].pBufferBinds[index1].bindCount; ++index2) {
if (pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory) {
- VkDeviceMemory* pDeviceMemory = (VkDeviceMemory*)&(pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory);
+ VkDeviceMemory *pDeviceMemory =
+ (VkDeviceMemory *)&(pBindInfo[index0].pBufferBinds[index1].pBinds[index2].memory);
*(pDeviceMemory) = original_memory1[index2];
}
}
@@ -377,15 +391,16 @@ VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const V
}
}
if (pBindInfo[index0].pImageOpaqueBinds) {
- for (uint32_t index1=0; index1<pBindInfo[index0].imageOpaqueBindCount; ++index1) {
+ for (uint32_t index1 = 0; index1 < pBindInfo[index0].imageOpaqueBindCount; ++index1) {
if (pBindInfo[index0].pImageOpaqueBinds[index1].image) {
- VkImage* pImage = (VkImage*)&(pBindInfo[index0].pImageOpaqueBinds[index1].image);
+ VkImage *pImage = (VkImage *)&(pBindInfo[index0].pImageOpaqueBinds[index1].image);
*(pImage) = original_image1[index1];
}
if (pBindInfo[index0].pImageOpaqueBinds[index1].pBinds) {
- for (uint32_t index2=0; index2<pBindInfo[index0].pImageOpaqueBinds[index1].bindCount; ++index2) {
+ for (uint32_t index2 = 0; index2 < pBindInfo[index0].pImageOpaqueBinds[index1].bindCount; ++index2) {
if (pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory) {
- VkDeviceMemory* pDeviceMemory = (VkDeviceMemory*)&(pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory);
+ VkDeviceMemory *pDeviceMemory =
+ (VkDeviceMemory *)&(pBindInfo[index0].pImageOpaqueBinds[index1].pBinds[index2].memory);
*(pDeviceMemory) = original_memory2[index2];
}
}
@@ -393,15 +408,16 @@ VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const V
}
}
if (pBindInfo[index0].pImageBinds) {
- for (uint32_t index1=0; index1<pBindInfo[index0].imageBindCount; ++index1) {
+ for (uint32_t index1 = 0; index1 < pBindInfo[index0].imageBindCount; ++index1) {
if (pBindInfo[index0].pImageBinds[index1].image) {
- VkImage* pImage = (VkImage*)&(pBindInfo[index0].pImageBinds[index1].image);
+ VkImage *pImage = (VkImage *)&(pBindInfo[index0].pImageBinds[index1].image);
*(pImage) = original_image2[index1];
}
if (pBindInfo[index0].pImageBinds[index1].pBinds) {
- for (uint32_t index2=0; index2<pBindInfo[index0].pImageBinds[index1].bindCount; ++index2) {
+ for (uint32_t index2 = 0; index2 < pBindInfo[index0].pImageBinds[index1].bindCount; ++index2) {
if (pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory) {
- VkDeviceMemory* pDeviceMemory = (VkDeviceMemory*)&(pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory);
+ VkDeviceMemory *pDeviceMemory =
+ (VkDeviceMemory *)&(pBindInfo[index0].pImageBinds[index1].pBinds[index2].memory);
*(pDeviceMemory) = original_memory3[index2];
}
}
@@ -409,14 +425,14 @@ VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const V
}
}
if (pBindInfo[index0].pWaitSemaphores) {
- for (uint32_t index1=0; index1<pBindInfo[index0].waitSemaphoreCount; ++index1) {
- VkSemaphore** ppSemaphore = (VkSemaphore**)&(pBindInfo[index0].pWaitSemaphores);
+ for (uint32_t index1 = 0; index1 < pBindInfo[index0].waitSemaphoreCount; ++index1) {
+ VkSemaphore **ppSemaphore = (VkSemaphore **)&(pBindInfo[index0].pWaitSemaphores);
*(ppSemaphore[index1]) = original_pWaitSemaphores[index1];
}
}
if (pBindInfo[index0].pSignalSemaphores) {
- for (uint32_t index1=0; index1<pBindInfo[index0].signalSemaphoreCount; ++index1) {
- VkSemaphore** ppSemaphore = (VkSemaphore**)&(pBindInfo[index0].pSignalSemaphores);
+ for (uint32_t index1 = 0; index1 < pBindInfo[index0].signalSemaphoreCount; ++index1) {
+ VkSemaphore **ppSemaphore = (VkSemaphore **)&(pBindInfo[index0].pSignalSemaphores);
*(ppSemaphore[index1]) = original_pSignalSemaphores[index1];
}
}
@@ -425,35 +441,41 @@ VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const V
return result;
}
-VkResult explicit_CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
-{
-// STRUCT USES:{'pipelineCache': 'VkPipelineCache', 'pCreateInfos[createInfoCount]': {'stage': {'module': 'VkShaderModule'}, 'layout': 'VkPipelineLayout', 'basePipelineHandle': 'VkPipeline'}}
-//LOCAL DECLS:{'pCreateInfos': 'VkComputePipelineCreateInfo*'}
- safe_VkComputePipelineCreateInfo* local_pCreateInfos = NULL;
+VkResult explicit_CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
+ const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipelines) {
+ // STRUCT USES:{'pipelineCache': 'VkPipelineCache', 'pCreateInfos[createInfoCount]': {'stage': {'module': 'VkShaderModule'},
+ // 'layout': 'VkPipelineLayout', 'basePipelineHandle': 'VkPipeline'}}
+ // LOCAL DECLS:{'pCreateInfos': 'VkComputePipelineCreateInfo*'}
+ safe_VkComputePipelineCreateInfo *local_pCreateInfos = NULL;
if (pCreateInfos) {
local_pCreateInfos = new safe_VkComputePipelineCreateInfo[createInfoCount];
- for (uint32_t idx0=0; idx0<createInfoCount; ++idx0) {
+ for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]);
if (pCreateInfos[idx0].basePipelineHandle) {
- local_pCreateInfos[idx0].basePipelineHandle = (VkPipeline)((VkUniqueObject*)pCreateInfos[idx0].basePipelineHandle)->actualObject;
+ local_pCreateInfos[idx0].basePipelineHandle =
+ (VkPipeline)((VkUniqueObject *)pCreateInfos[idx0].basePipelineHandle)->actualObject;
}
if (pCreateInfos[idx0].layout) {
- local_pCreateInfos[idx0].layout = (VkPipelineLayout)((VkUniqueObject*)pCreateInfos[idx0].layout)->actualObject;
+ local_pCreateInfos[idx0].layout = (VkPipelineLayout)((VkUniqueObject *)pCreateInfos[idx0].layout)->actualObject;
}
if (pCreateInfos[idx0].stage.module) {
- local_pCreateInfos[idx0].stage.module = (VkShaderModule)((VkUniqueObject*)pCreateInfos[idx0].stage.module)->actualObject;
+ local_pCreateInfos[idx0].stage.module =
+ (VkShaderModule)((VkUniqueObject *)pCreateInfos[idx0].stage.module)->actualObject;
}
}
}
if (pipelineCache) {
- pipelineCache = (VkPipelineCache)((VkUniqueObject*)pipelineCache)->actualObject;
+ pipelineCache = (VkPipelineCache)((VkUniqueObject *)pipelineCache)->actualObject;
}
-// CODEGEN : file /usr/local/google/home/tobine/vulkan_work/LoaderAndTools/vk-layer-generate.py line #1671
- VkResult result = get_dispatch_table(unique_objects_device_table_map, device)->CreateComputePipelines(device, pipelineCache, createInfoCount, (const VkComputePipelineCreateInfo*)local_pCreateInfos, pAllocator, pPipelines);
+ // CODEGEN : file /usr/local/google/home/tobine/vulkan_work/LoaderAndTools/vk-layer-generate.py line #1671
+ VkResult result = get_dispatch_table(unique_objects_device_table_map, device)
+ ->CreateComputePipelines(device, pipelineCache, createInfoCount,
+ (const VkComputePipelineCreateInfo *)local_pCreateInfos, pAllocator, pPipelines);
delete[] local_pCreateInfos;
if (VK_SUCCESS == result) {
- VkUniqueObject* pUO = NULL;
- for (uint32_t i=0; i<createInfoCount; ++i) {
+ VkUniqueObject *pUO = NULL;
+ for (uint32_t i = 0; i < createInfoCount; ++i) {
pUO = new VkUniqueObject();
pUO->actualObject = (uint64_t)pPipelines[i];
pPipelines[i] = (VkPipeline)pUO;
@@ -462,42 +484,49 @@ VkResult explicit_CreateComputePipelines(VkDevice device, VkPipelineCache pipeli
return result;
}
-VkResult explicit_CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines)
-{
-// STRUCT USES:{'pipelineCache': 'VkPipelineCache', 'pCreateInfos[createInfoCount]': {'layout': 'VkPipelineLayout', 'pStages[stageCount]': {'module': 'VkShaderModule'}, 'renderPass': 'VkRenderPass', 'basePipelineHandle': 'VkPipeline'}}
-//LOCAL DECLS:{'pCreateInfos': 'VkGraphicsPipelineCreateInfo*'}
- safe_VkGraphicsPipelineCreateInfo* local_pCreateInfos = NULL;
+VkResult explicit_CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
+ const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipelines) {
+ // STRUCT USES:{'pipelineCache': 'VkPipelineCache', 'pCreateInfos[createInfoCount]': {'layout': 'VkPipelineLayout',
+ // 'pStages[stageCount]': {'module': 'VkShaderModule'}, 'renderPass': 'VkRenderPass', 'basePipelineHandle': 'VkPipeline'}}
+ // LOCAL DECLS:{'pCreateInfos': 'VkGraphicsPipelineCreateInfo*'}
+ safe_VkGraphicsPipelineCreateInfo *local_pCreateInfos = NULL;
if (pCreateInfos) {
local_pCreateInfos = new safe_VkGraphicsPipelineCreateInfo[createInfoCount];
- for (uint32_t idx0=0; idx0<createInfoCount; ++idx0) {
+ for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]);
if (pCreateInfos[idx0].basePipelineHandle) {
- local_pCreateInfos[idx0].basePipelineHandle = (VkPipeline)((VkUniqueObject*)pCreateInfos[idx0].basePipelineHandle)->actualObject;
+ local_pCreateInfos[idx0].basePipelineHandle =
+ (VkPipeline)((VkUniqueObject *)pCreateInfos[idx0].basePipelineHandle)->actualObject;
}
if (pCreateInfos[idx0].layout) {
- local_pCreateInfos[idx0].layout = (VkPipelineLayout)((VkUniqueObject*)pCreateInfos[idx0].layout)->actualObject;
+ local_pCreateInfos[idx0].layout = (VkPipelineLayout)((VkUniqueObject *)pCreateInfos[idx0].layout)->actualObject;
}
if (pCreateInfos[idx0].pStages) {
- for (uint32_t idx1=0; idx1<pCreateInfos[idx0].stageCount; ++idx1) {
+ for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
if (pCreateInfos[idx0].pStages[idx1].module) {
- local_pCreateInfos[idx0].pStages[idx1].module = (VkShaderModule)((VkUniqueObject*)pCreateInfos[idx0].pStages[idx1].module)->actualObject;
+ local_pCreateInfos[idx0].pStages[idx1].module =
+ (VkShaderModule)((VkUniqueObject *)pCreateInfos[idx0].pStages[idx1].module)->actualObject;
}
}
}
if (pCreateInfos[idx0].renderPass) {
- local_pCreateInfos[idx0].renderPass = (VkRenderPass)((VkUniqueObject*)pCreateInfos[idx0].renderPass)->actualObject;
+ local_pCreateInfos[idx0].renderPass = (VkRenderPass)((VkUniqueObject *)pCreateInfos[idx0].renderPass)->actualObject;
}
}
}
if (pipelineCache) {
- pipelineCache = (VkPipelineCache)((VkUniqueObject*)pipelineCache)->actualObject;
+ pipelineCache = (VkPipelineCache)((VkUniqueObject *)pipelineCache)->actualObject;
}
-// CODEGEN : file /usr/local/google/home/tobine/vulkan_work/LoaderAndTools/vk-layer-generate.py line #1671
- VkResult result = get_dispatch_table(unique_objects_device_table_map, device)->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, (const VkGraphicsPipelineCreateInfo*)local_pCreateInfos, pAllocator, pPipelines);
+ // CODEGEN : file /usr/local/google/home/tobine/vulkan_work/LoaderAndTools/vk-layer-generate.py line #1671
+ VkResult result =
+ get_dispatch_table(unique_objects_device_table_map, device)
+ ->CreateGraphicsPipelines(device, pipelineCache, createInfoCount,
+ (const VkGraphicsPipelineCreateInfo *)local_pCreateInfos, pAllocator, pPipelines);
delete[] local_pCreateInfos;
if (VK_SUCCESS == result) {
- VkUniqueObject* pUO = NULL;
- for (uint32_t i=0; i<createInfoCount; ++i) {
+ VkUniqueObject *pUO = NULL;
+ for (uint32_t i = 0; i < createInfoCount; ++i) {
pUO = new VkUniqueObject();
pUO->actualObject = (uint64_t)pPipelines[i];
pPipelines[i] = (VkPipeline)pUO;
@@ -506,19 +535,20 @@ VkResult explicit_CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipel
return result;
}
-VkResult explicit_GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages)
-{
-// UNWRAP USES:
-// 0 : swapchain,VkSwapchainKHR, pSwapchainImages,VkImage
+VkResult explicit_GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
+ VkImage *pSwapchainImages) {
+ // UNWRAP USES:
+ // 0 : swapchain,VkSwapchainKHR, pSwapchainImages,VkImage
if (VK_NULL_HANDLE != swapchain) {
- swapchain = (VkSwapchainKHR)((VkUniqueObject*)swapchain)->actualObject;
+ swapchain = (VkSwapchainKHR)((VkUniqueObject *)swapchain)->actualObject;
}
- VkResult result = get_dispatch_table(unique_objects_device_table_map, device)->GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
+ VkResult result = get_dispatch_table(unique_objects_device_table_map, device)
+ ->GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
// TODO : Need to add corresponding code to delete these images
if (VK_SUCCESS == result) {
if ((*pSwapchainImageCount > 0) && pSwapchainImages) {
- std::vector<VkUniqueObject*> uniqueImages = {};
- for (uint32_t i=0; i<*pSwapchainImageCount; ++i) {
+ std::vector<VkUniqueObject *> uniqueImages = {};
+ for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
uniqueImages.push_back(new VkUniqueObject());
uniqueImages[i]->actualObject = (uint64_t)pSwapchainImages[i];
pSwapchainImages[i] = (VkImage)uniqueImages[i];
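All of the unique_objects entrypoints above share one pattern: the handle returned to the application is really a pointer to a VkUniqueObject whose actualObject field holds the driver's handle, so each call swaps the real handles back in before dispatching down the chain and restores or creates the wrappers afterwards. A minimal standalone sketch of that wrap/unwrap idea, using stand-in types rather than the layer's headers:

// Minimal sketch of the unique_objects wrap/unwrap pattern (stand-in types,
// not the real layer headers). A wrapper object stores the driver handle in
// actualObject; the application only ever sees a pointer to the wrapper.
#include <cstdint>
#include <cstdio>

struct UniqueObject {            // mirrors VkUniqueObject
    uint64_t actualObject;
};

using Handle = uint64_t;         // stand-in for a Vulkan handle type

Handle wrap(Handle real) {       // done after a successful Create*/Get* call
    UniqueObject *uo = new UniqueObject{real};
    return (Handle)(uintptr_t)uo;
}

Handle unwrap(Handle wrapped) {  // done before dispatching down the chain
    return ((UniqueObject *)(uintptr_t)wrapped)->actualObject;
}

int main() {
    Handle driver_handle = 0x1234;
    Handle app_handle = wrap(driver_handle);                 // what the app holds
    std::printf("driver sees %llx\n",
                (unsigned long long)unwrap(app_handle));     // what gets dispatched
    delete (UniqueObject *)(uintptr_t)app_handle;
    return 0;
}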
diff --git a/layers/vk_layer_config.cpp b/layers/vk_layer_config.cpp
index ba66cbd60..e916cfe6d 100644
--- a/layers/vk_layer_config.cpp
+++ b/layers/vk_layer_config.cpp
@@ -37,16 +37,15 @@
#define MAX_CHARS_PER_LINE 4096
-class ConfigFile
-{
-public:
+class ConfigFile {
+ public:
ConfigFile();
~ConfigFile();
const char *getOption(const std::string &_option);
void setOption(const std::string &_option, const std::string &_val);
-private:
+ private:
bool m_fileIsParsed;
std::map<std::string, std::string> m_valueMap;
@@ -55,8 +54,7 @@ private:
static ConfigFile g_configFileObj;
-static VkLayerDbgAction stringToDbgAction(const char *_enum)
-{
+static VkLayerDbgAction stringToDbgAction(const char *_enum) {
// only handles single enum values
if (!strcmp(_enum, "VK_DBG_LAYER_ACTION_IGNORE"))
return VK_DBG_LAYER_ACTION_IGNORE;
@@ -68,11 +66,10 @@ static VkLayerDbgAction stringToDbgAction(const char *_enum)
#endif
else if (!strcmp(_enum, "VK_DBG_LAYER_ACTION_BREAK"))
return VK_DBG_LAYER_ACTION_BREAK;
- return (VkLayerDbgAction) 0;
+ return (VkLayerDbgAction)0;
}
-static VkFlags stringToDbgReportFlags(const char *_enum)
-{
+static VkFlags stringToDbgReportFlags(const char *_enum) {
// only handles single enum values
if (!strcmp(_enum, "VK_DEBUG_REPORT_INFO"))
return VK_DEBUG_REPORT_INFORMATION_BIT_EXT;
@@ -84,11 +81,10 @@ static VkFlags stringToDbgReportFlags(const char *_enum)
return VK_DEBUG_REPORT_ERROR_BIT_EXT;
else if (!strcmp(_enum, "VK_DEBUG_REPORT_DEBUG"))
return VK_DEBUG_REPORT_DEBUG_BIT_EXT;
- return (VkFlags) 0;
+ return (VkFlags)0;
}
-static unsigned int convertStringEnumVal(const char *_enum)
-{
+static unsigned int convertStringEnumVal(const char *_enum) {
unsigned int ret;
ret = stringToDbgAction(_enum);
@@ -98,31 +94,29 @@ static unsigned int convertStringEnumVal(const char *_enum)
return stringToDbgReportFlags(_enum);
}
-const char *getLayerOption(const char *_option)
-{
- return g_configFileObj.getOption(_option);
-}
+const char *getLayerOption(const char *_option) { return g_configFileObj.getOption(_option); }
// If option is NULL or stdout, return stdout, otherwise try to open option
// as a filename. If successful, return file handle, otherwise stdout
-FILE* getLayerLogOutput(const char *_option, const char *layerName)
-{
- FILE* log_output = NULL;
+FILE *getLayerLogOutput(const char *_option, const char *layerName) {
+ FILE *log_output = NULL;
if (!_option || !strcmp("stdout", _option))
log_output = stdout;
else {
log_output = fopen(_option, "w");
if (log_output == NULL) {
if (_option)
- std::cout << std::endl << layerName << " ERROR: Bad output filename specified: " << _option << ". Writing to STDOUT instead" << std::endl << std::endl;
+ std::cout << std::endl
+ << layerName << " ERROR: Bad output filename specified: " << _option << ". Writing to STDOUT instead"
+ << std::endl
+ << std::endl;
log_output = stdout;
}
}
return log_output;
}
-VkDebugReportFlagsEXT getLayerOptionFlags(const char *_option, uint32_t optionDefault)
-{
+VkDebugReportFlagsEXT getLayerOptionFlags(const char *_option, uint32_t optionDefault) {
VkDebugReportFlagsEXT flags = optionDefault;
const char *option = (g_configFileObj.getOption(_option));
@@ -158,8 +152,7 @@ VkDebugReportFlagsEXT getLayerOptionFlags(const char *_option, uint32_t optionDe
return flags;
}
-bool getLayerOptionEnum(const char *_option, uint32_t *optionDefault)
-{
+bool getLayerOptionEnum(const char *_option, uint32_t *optionDefault) {
bool res;
const char *option = (g_configFileObj.getOption(_option));
if (option != NULL) {
@@ -171,32 +164,22 @@ bool getLayerOptionEnum(const char *_option, uint32_t *optionDefault)
return res;
}
-void setLayerOptionEnum(const char *_option, const char *_valEnum)
-{
+void setLayerOptionEnum(const char *_option, const char *_valEnum) {
unsigned int val = convertStringEnumVal(_valEnum);
char strVal[24];
snprintf(strVal, 24, "%u", val);
g_configFileObj.setOption(_option, strVal);
}
-void setLayerOption(const char *_option, const char *_val)
-{
- g_configFileObj.setOption(_option, _val);
-}
+void setLayerOption(const char *_option, const char *_val) { g_configFileObj.setOption(_option, _val); }
-ConfigFile::ConfigFile() : m_fileIsParsed(false)
-{
-}
+ConfigFile::ConfigFile() : m_fileIsParsed(false) {}
-ConfigFile::~ConfigFile()
-{
-}
+ConfigFile::~ConfigFile() {}
-const char *ConfigFile::getOption(const std::string &_option)
-{
+const char *ConfigFile::getOption(const std::string &_option) {
std::map<std::string, std::string>::const_iterator it;
- if (!m_fileIsParsed)
- {
+ if (!m_fileIsParsed) {
parseFile("vk_layer_settings.txt");
}
@@ -206,18 +189,15 @@ const char *ConfigFile::getOption(const std::string &_option)
return it->second.c_str();
}
-void ConfigFile::setOption(const std::string &_option, const std::string &_val)
-{
- if (!m_fileIsParsed)
- {
+void ConfigFile::setOption(const std::string &_option, const std::string &_val) {
+ if (!m_fileIsParsed) {
parseFile("vk_layer_settings.txt");
}
m_valueMap[_option] = _val;
}
-void ConfigFile::parseFile(const char *filename)
-{
+void ConfigFile::parseFile(const char *filename) {
std::ifstream file;
char buf[MAX_CHARS_PER_LINE];
@@ -230,20 +210,18 @@ void ConfigFile::parseFile(const char *filename)
// read tokens from the file and form option, value pairs
file.getline(buf, MAX_CHARS_PER_LINE);
- while (!file.eof())
- {
+ while (!file.eof()) {
char option[512];
char value[512];
char *pComment;
- //discard any comments delimited by '#' in the line
+ // discard any comments delimited by '#' in the line
pComment = strchr(buf, '#');
if (pComment)
*pComment = '\0';
- if (sscanf(buf, " %511[^\n\t =] = %511[^\n \t]", option, value) == 2)
- {
+ if (sscanf(buf, " %511[^\n\t =] = %511[^\n \t]", option, value) == 2) {
std::string optStr(option);
std::string valStr(value);
m_valueMap[optStr] = valStr;
@@ -252,8 +230,7 @@ void ConfigFile::parseFile(const char *filename)
}
}
-void print_msg_flags(VkFlags msgFlags, char *msg_flags)
-{
+void print_msg_flags(VkFlags msgFlags, char *msg_flags) {
bool separator = false;
msg_flags[0] = 0;
@@ -262,23 +239,26 @@ void print_msg_flags(VkFlags msgFlags, char *msg_flags)
separator = true;
}
if (msgFlags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT) {
- if (separator) strcat(msg_flags, ",");
+ if (separator)
+ strcat(msg_flags, ",");
strcat(msg_flags, "INFO");
separator = true;
}
if (msgFlags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
- if (separator) strcat(msg_flags, ",");
+ if (separator)
+ strcat(msg_flags, ",");
strcat(msg_flags, "WARN");
separator = true;
}
if (msgFlags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
- if (separator) strcat(msg_flags, ",");
+ if (separator)
+ strcat(msg_flags, ",");
strcat(msg_flags, "PERF");
separator = true;
}
if (msgFlags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
- if (separator) strcat(msg_flags, ",");
+ if (separator)
+ strcat(msg_flags, ",");
strcat(msg_flags, "ERROR");
}
}
-
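The reformatted ConfigFile code keeps the same settings syntax: each line of vk_layer_settings.txt is an "option = value" pair, and anything after '#' is discarded before the sscanf. A small self-contained sketch of that parse step, reusing the format string above on a made-up input line:

// Stand-alone sketch of the "option = value" parse used by ConfigFile::parseFile;
// the input line is made up, the sscanf format string matches the one above.
#include <cstdio>
#include <cstring>
#include <map>
#include <string>

int main() {
    char buf[4096] = "lunarg_core_validation.report_flags = error,warn  # comment";
    char option[512];
    char value[512];

    // discard any comment delimited by '#' in the line, as parseFile does
    if (char *pComment = std::strchr(buf, '#'))
        *pComment = '\0';

    std::map<std::string, std::string> valueMap;
    if (std::sscanf(buf, " %511[^\n\t =] = %511[^\n \t]", option, value) == 2)
        valueMap[option] = value;

    std::printf("%s -> %s\n", valueMap.begin()->first.c_str(), valueMap.begin()->second.c_str());
    return 0;
}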
diff --git a/layers/vk_layer_config.h b/layers/vk_layer_config.h
index 7d62041ce..0111e0522 100644
--- a/layers/vk_layer_config.h
+++ b/layers/vk_layer_config.h
@@ -31,7 +31,7 @@ extern "C" {
#endif
const char *getLayerOption(const char *_option);
-FILE* getLayerLogOutput(const char *_option, const char *layerName);
+FILE *getLayerLogOutput(const char *_option, const char *layerName);
VkDebugReportFlagsEXT getLayerOptionFlags(const char *_option, uint32_t optionDefault);
bool getLayerOptionEnum(const char *_option, uint32_t *optionDefault);
diff --git a/layers/vk_layer_data.h b/layers/vk_layer_data.h
index b51736c7a..36eb1b500 100644
--- a/layers/vk_layer_data.h
+++ b/layers/vk_layer_data.h
@@ -30,19 +30,16 @@
#include <unordered_map>
#include "vk_layer_table.h"
-template<typename DATA_T>
-DATA_T *get_my_data_ptr(void *data_key,
- std::unordered_map<void *, DATA_T*> &layer_data_map)
-{
+template <typename DATA_T> DATA_T *get_my_data_ptr(void *data_key, std::unordered_map<void *, DATA_T *> &layer_data_map) {
DATA_T *debug_data;
typename std::unordered_map<void *, DATA_T *>::const_iterator got;
/* TODO: We probably should lock here, or have caller lock */
got = layer_data_map.find(data_key);
- if ( got == layer_data_map.end() ) {
+ if (got == layer_data_map.end()) {
debug_data = new DATA_T;
- layer_data_map[(void *) data_key] = debug_data;
+ layer_data_map[(void *)data_key] = debug_data;
} else {
debug_data = got->second;
}
@@ -51,4 +48,3 @@ DATA_T *get_my_data_ptr(void *data_key,
}
#endif // LAYER_DATA_H
-
diff --git a/layers/vk_layer_extension_utils.cpp b/layers/vk_layer_extension_utils.cpp
index 06ca6c008..e566f50e8 100644
--- a/layers/vk_layer_extension_utils.cpp
+++ b/layers/vk_layer_extension_utils.cpp
@@ -34,12 +34,8 @@
* This file contains utility functions for layers
*/
-VkResult util_GetExtensionProperties(
- const uint32_t count,
- const VkExtensionProperties *layer_extensions,
- uint32_t* pCount,
- VkExtensionProperties* pProperties)
-{
+VkResult util_GetExtensionProperties(const uint32_t count, const VkExtensionProperties *layer_extensions, uint32_t *pCount,
+ VkExtensionProperties *pProperties) {
uint32_t copy_size;
if (pProperties == NULL || layer_extensions == NULL) {
@@ -57,12 +53,8 @@ VkResult util_GetExtensionProperties(
return VK_SUCCESS;
}
-VkResult util_GetLayerProperties(
- const uint32_t count,
- const VkLayerProperties *layer_properties,
- uint32_t* pCount,
- VkLayerProperties* pProperties)
-{
+VkResult util_GetLayerProperties(const uint32_t count, const VkLayerProperties *layer_properties, uint32_t *pCount,
+ VkLayerProperties *pProperties) {
uint32_t copy_size;
if (pProperties == NULL || layer_properties == NULL) {
diff --git a/layers/vk_layer_extension_utils.h b/layers/vk_layer_extension_utils.h
index 05e15f88f..0a07a78e9 100644
--- a/layers/vk_layer_extension_utils.h
+++ b/layers/vk_layer_extension_utils.h
@@ -37,18 +37,11 @@
*/
extern "C" {
-VkResult util_GetExtensionProperties(
- const uint32_t count,
- const VkExtensionProperties *layer_extensions,
- uint32_t* pCount,
- VkExtensionProperties* pProperties);
+VkResult util_GetExtensionProperties(const uint32_t count, const VkExtensionProperties *layer_extensions, uint32_t *pCount,
+ VkExtensionProperties *pProperties);
-VkResult util_GetLayerProperties(
- const uint32_t count,
- const VkLayerProperties *layer_properties,
- uint32_t* pCount,
- VkLayerProperties* pProperties);
+VkResult util_GetLayerProperties(const uint32_t count, const VkLayerProperties *layer_properties, uint32_t *pCount,
+ VkLayerProperties *pProperties);
} // extern "C"
#endif // LAYER_EXTENSION_UTILS_H
-
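util_GetExtensionProperties and util_GetLayerProperties follow the usual Vulkan two-call enumerate idiom: a null output pointer means "just report the count", otherwise at most *pCount entries are copied. The bodies are mostly outside these hunks, so the sketch below only shows the general clamp-and-copy shape, with stand-in types and result codes of its own:

// Compact sketch of the Vulkan two-call enumerate idiom used by the
// util_Get*Properties helpers: the first call reports the count, the second
// copies up to *pCount entries. Types and result codes here are stand-ins.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstring>

struct Props { char name[64]; };
enum Result { SUCCESS = 0, INCOMPLETE = 1 };

Result get_properties(uint32_t count, const Props *source, uint32_t *pCount, Props *pProperties) {
    if (pProperties == nullptr || source == nullptr) {
        *pCount = count;                                      // query mode: just report how many exist
        return SUCCESS;
    }
    uint32_t copy_size = std::min(*pCount, count);
    std::memcpy(pProperties, source, copy_size * sizeof(Props));
    *pCount = copy_size;
    return (copy_size < count) ? INCOMPLETE : SUCCESS;        // tell the caller it was truncated
}

int main() {
    Props layer_props[2] = {{"VK_LAYER_one"}, {"VK_LAYER_two"}};
    uint32_t n = 0;
    get_properties(2, layer_props, &n, nullptr);              // first call: n becomes 2
    Props out[2];
    n = 1;                                                    // pretend the caller under-allocated
    Result r = get_properties(2, layer_props, &n, out);
    std::printf("copied %u, truncated: %d\n", n, r == INCOMPLETE);
    return 0;
}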
diff --git a/layers/vk_layer_logging.h b/layers/vk_layer_logging.h
index 102c278b4..096259328 100644
--- a/layers/vk_layer_logging.h
+++ b/layers/vk_layer_logging.h
@@ -45,32 +45,18 @@ typedef struct _debug_report_data {
bool g_DEBUG_REPORT;
} debug_report_data;
-template debug_report_data *get_my_data_ptr<debug_report_data>(
- void *data_key,
- std::unordered_map<void *, debug_report_data *> &data_map);
+template debug_report_data *get_my_data_ptr<debug_report_data>(void *data_key,
+ std::unordered_map<void *, debug_report_data *> &data_map);
// Utility function to handle reporting
-static inline VkBool32 debug_report_log_msg(
- debug_report_data *debug_data,
- VkFlags msgFlags,
- VkDebugReportObjectTypeEXT objectType,
- uint64_t srcObject,
- size_t location,
- int32_t msgCode,
- const char* pLayerPrefix,
- const char* pMsg)
-{
+static inline VkBool32 debug_report_log_msg(debug_report_data *debug_data, VkFlags msgFlags, VkDebugReportObjectTypeEXT objectType,
+ uint64_t srcObject, size_t location, int32_t msgCode, const char *pLayerPrefix,
+ const char *pMsg) {
VkBool32 bail = false;
VkLayerDbgFunctionNode *pTrav = debug_data->g_pDbgFunctionHead;
while (pTrav) {
if (pTrav->msgFlags & msgFlags) {
- if (pTrav->pfnMsgCallback(msgFlags,
- objectType, srcObject,
- location,
- msgCode,
- pLayerPrefix,
- pMsg,
- pTrav->pUserData)) {
+ if (pTrav->pfnMsgCallback(msgFlags, objectType, srcObject, location, msgCode, pLayerPrefix, pMsg, pTrav->pUserData)) {
bail = true;
}
}
@@ -80,21 +66,20 @@ static inline VkBool32 debug_report_log_msg(
return bail;
}
-static inline debug_report_data *debug_report_create_instance(
- VkLayerInstanceDispatchTable *table,
- VkInstance inst,
- uint32_t extension_count,
- const char*const* ppEnabledExtensions) // layer or extension name to be enabled
+static inline debug_report_data *
+debug_report_create_instance(VkLayerInstanceDispatchTable *table, VkInstance inst, uint32_t extension_count,
+ const char *const *ppEnabledExtensions) // layer or extension name to be enabled
{
- debug_report_data *debug_data;
+ debug_report_data *debug_data;
PFN_vkGetInstanceProcAddr gpa = table->GetInstanceProcAddr;
- table->CreateDebugReportCallbackEXT = (PFN_vkCreateDebugReportCallbackEXT) gpa(inst, "vkCreateDebugReportCallbackEXT");
- table->DestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT) gpa(inst, "vkDestroyDebugReportCallbackEXT");
- table->DebugReportMessageEXT = (PFN_vkDebugReportMessageEXT) gpa(inst, "vkDebugReportMessageEXT");
+ table->CreateDebugReportCallbackEXT = (PFN_vkCreateDebugReportCallbackEXT)gpa(inst, "vkCreateDebugReportCallbackEXT");
+ table->DestroyDebugReportCallbackEXT = (PFN_vkDestroyDebugReportCallbackEXT)gpa(inst, "vkDestroyDebugReportCallbackEXT");
+ table->DebugReportMessageEXT = (PFN_vkDebugReportMessageEXT)gpa(inst, "vkDebugReportMessageEXT");
- debug_data = (debug_report_data *) malloc(sizeof(debug_report_data));
- if (!debug_data) return NULL;
+ debug_data = (debug_report_data *)malloc(sizeof(debug_report_data));
+ if (!debug_data)
+ return NULL;
memset(debug_data, 0, sizeof(debug_report_data));
for (uint32_t i = 0; i < extension_count; i++) {
@@ -106,8 +91,7 @@ static inline debug_report_data *debug_report_create_instance(
return debug_data;
}
-static inline void layer_debug_report_destroy_instance(debug_report_data *debug_data)
-{
+static inline void layer_debug_report_destroy_instance(debug_report_data *debug_data) {
VkLayerDbgFunctionNode *pTrav;
VkLayerDbgFunctionNode *pTravNext;
@@ -120,12 +104,9 @@ static inline void layer_debug_report_destroy_instance(debug_report_data *debug_
while (pTrav) {
pTravNext = pTrav->pNext;
- debug_report_log_msg(
- debug_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT, (uint64_t) pTrav->msgCallback,
- 0, VK_DEBUG_REPORT_ERROR_CALLBACK_REF_EXT,
- "DebugReport",
- "Debug Report callbacks not removed before DestroyInstance");
+ debug_report_log_msg(debug_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT,
+ (uint64_t)pTrav->msgCallback, 0, VK_DEBUG_REPORT_ERROR_CALLBACK_REF_EXT, "DebugReport",
+ "Debug Report callbacks not removed before DestroyInstance");
free(pTrav);
pTrav = pTravNext;
@@ -135,34 +116,25 @@ static inline void layer_debug_report_destroy_instance(debug_report_data *debug_
free(debug_data);
}
-static inline debug_report_data *layer_debug_report_create_device(
- debug_report_data *instance_debug_data,
- VkDevice device)
-{
+static inline debug_report_data *layer_debug_report_create_device(debug_report_data *instance_debug_data, VkDevice device) {
/* DEBUG_REPORT shares data between Instance and Device,
* so just return instance's data pointer */
return instance_debug_data;
}
-static inline void layer_debug_report_destroy_device(VkDevice device)
-{
- /* Nothing to do since we're using instance data record */
-}
+static inline void layer_debug_report_destroy_device(VkDevice device) { /* Nothing to do since we're using instance data record */ }
-static inline VkResult layer_create_msg_callback(
- debug_report_data *debug_data,
- const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkDebugReportCallbackEXT *pCallback)
-{
+static inline VkResult layer_create_msg_callback(debug_report_data *debug_data,
+ const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pCallback) {
/* TODO: Use app allocator */
- VkLayerDbgFunctionNode *pNewDbgFuncNode = (VkLayerDbgFunctionNode*)malloc(sizeof(VkLayerDbgFunctionNode));
+ VkLayerDbgFunctionNode *pNewDbgFuncNode = (VkLayerDbgFunctionNode *)malloc(sizeof(VkLayerDbgFunctionNode));
if (!pNewDbgFuncNode)
return VK_ERROR_OUT_OF_HOST_MEMORY;
// Handle of 0 is logging_callback so use allocated Node address as unique handle
if (!(*pCallback))
- *pCallback = (VkDebugReportCallbackEXT) pNewDbgFuncNode;
+ *pCallback = (VkDebugReportCallbackEXT)pNewDbgFuncNode;
pNewDbgFuncNode->msgCallback = *pCallback;
pNewDbgFuncNode->pfnMsgCallback = pCreateInfo->pfnCallback;
pNewDbgFuncNode->msgFlags = pCreateInfo->flags;
@@ -172,20 +144,13 @@ static inline VkResult layer_create_msg_callback(
debug_data->g_pDbgFunctionHead = pNewDbgFuncNode;
debug_data->active_flags |= pCreateInfo->flags;
- debug_report_log_msg(
- debug_data, VK_DEBUG_REPORT_DEBUG_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT, (uint64_t) *pCallback,
- 0, VK_DEBUG_REPORT_ERROR_CALLBACK_REF_EXT,
- "DebugReport",
- "Added callback");
+ debug_report_log_msg(debug_data, VK_DEBUG_REPORT_DEBUG_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT,
+ (uint64_t)*pCallback, 0, VK_DEBUG_REPORT_ERROR_CALLBACK_REF_EXT, "DebugReport", "Added callback");
return VK_SUCCESS;
}
-static inline void layer_destroy_msg_callback(
- debug_report_data *debug_data,
- VkDebugReportCallbackEXT callback,
- const VkAllocationCallbacks *pAllocator)
-{
+static inline void layer_destroy_msg_callback(debug_report_data *debug_data, VkDebugReportCallbackEXT callback,
+ const VkAllocationCallbacks *pAllocator) {
VkLayerDbgFunctionNode *pTrav = debug_data->g_pDbgFunctionHead;
VkLayerDbgFunctionNode *pPrev = pTrav;
bool matched;
@@ -198,12 +163,9 @@ static inline void layer_destroy_msg_callback(
if (debug_data->g_pDbgFunctionHead == pTrav) {
debug_data->g_pDbgFunctionHead = pTrav->pNext;
}
- debug_report_log_msg(
- debug_data, VK_DEBUG_REPORT_DEBUG_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT, (uint64_t) pTrav->msgCallback,
- 0, VK_DEBUG_REPORT_ERROR_CALLBACK_REF_EXT,
- "DebugReport",
- "Destroyed callback");
+ debug_report_log_msg(debug_data, VK_DEBUG_REPORT_DEBUG_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT,
+ (uint64_t)pTrav->msgCallback, 0, VK_DEBUG_REPORT_ERROR_CALLBACK_REF_EXT, "DebugReport",
+ "Destroyed callback");
} else {
matched = false;
debug_data->active_flags |= pTrav->msgFlags;
@@ -217,23 +179,20 @@ static inline void layer_destroy_msg_callback(
}
}
-static inline PFN_vkVoidFunction debug_report_get_instance_proc_addr(
- debug_report_data *debug_data,
- const char *funcName)
-{
+static inline PFN_vkVoidFunction debug_report_get_instance_proc_addr(debug_report_data *debug_data, const char *funcName) {
if (!debug_data || !debug_data->g_DEBUG_REPORT) {
return NULL;
}
if (!strcmp(funcName, "vkCreateDebugReportCallbackEXT")) {
- return (PFN_vkVoidFunction) vkCreateDebugReportCallbackEXT;
+ return (PFN_vkVoidFunction)vkCreateDebugReportCallbackEXT;
}
if (!strcmp(funcName, "vkDestroyDebugReportCallbackEXT")) {
- return (PFN_vkVoidFunction) vkDestroyDebugReportCallbackEXT;
+ return (PFN_vkVoidFunction)vkDestroyDebugReportCallbackEXT;
}
if (!strcmp(funcName, "vkDebugReportMessageEXT")) {
- return (PFN_vkVoidFunction) vkDebugReportMessageEXT;
+ return (PFN_vkVoidFunction)vkDebugReportMessageEXT;
}
return NULL;
@@ -244,10 +203,7 @@ static inline PFN_vkVoidFunction debug_report_get_instance_proc_addr(
 * Allows layer to defer collecting & formatting data if the
* message will be discarded.
*/
-static inline VkBool32 will_log_msg(
- debug_report_data *debug_data,
- VkFlags msgFlags)
-{
+static inline VkBool32 will_log_msg(debug_report_data *debug_data, VkFlags msgFlags) {
if (!debug_data || !(debug_data->active_flags & msgFlags)) {
/* message is not wanted */
return false;
@@ -262,28 +218,13 @@ static inline VkBool32 will_log_msg(
* is only computed if a message needs to be logged
*/
#ifndef WIN32
-static inline VkBool32 log_msg(
- debug_report_data *debug_data,
- VkFlags msgFlags,
- VkDebugReportObjectTypeEXT objectType,
- uint64_t srcObject,
- size_t location,
- int32_t msgCode,
- const char* pLayerPrefix,
- const char* format,
- ...) __attribute__ ((format (printf, 8, 9)));
+static inline VkBool32 log_msg(debug_report_data *debug_data, VkFlags msgFlags, VkDebugReportObjectTypeEXT objectType,
+ uint64_t srcObject, size_t location, int32_t msgCode, const char *pLayerPrefix, const char *format,
+ ...) __attribute__((format(printf, 8, 9)));
#endif
-static inline VkBool32 log_msg(
- debug_report_data *debug_data,
- VkFlags msgFlags,
- VkDebugReportObjectTypeEXT objectType,
- uint64_t srcObject,
- size_t location,
- int32_t msgCode,
- const char* pLayerPrefix,
- const char* format,
- ...)
-{
+static inline VkBool32 log_msg(debug_report_data *debug_data, VkFlags msgFlags, VkDebugReportObjectTypeEXT objectType,
+ uint64_t srcObject, size_t location, int32_t msgCode, const char *pLayerPrefix, const char *format,
+ ...) {
if (!debug_data || !(debug_data->active_flags & msgFlags)) {
/* message is not wanted */
return false;
@@ -294,50 +235,34 @@ static inline VkBool32 log_msg(
va_start(argptr, format);
vsnprintf(str, 1024, format, argptr);
va_end(argptr);
- return debug_report_log_msg(
- debug_data, msgFlags, objectType,
- srcObject, location, msgCode,
- pLayerPrefix, str);
+ return debug_report_log_msg(debug_data, msgFlags, objectType, srcObject, location, msgCode, pLayerPrefix, str);
}
-static inline VKAPI_ATTR VkBool32 VKAPI_CALL log_callback(
- VkFlags msgFlags,
- VkDebugReportObjectTypeEXT objType,
- uint64_t srcObject,
- size_t location,
- int32_t msgCode,
- const char* pLayerPrefix,
- const char* pMsg,
- void* pUserData)
-{
+static inline VKAPI_ATTR VkBool32 VKAPI_CALL log_callback(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType, uint64_t srcObject,
+ size_t location, int32_t msgCode, const char *pLayerPrefix,
+ const char *pMsg, void *pUserData) {
char msg_flags[30];
print_msg_flags(msgFlags, msg_flags);
- fprintf((FILE *) pUserData, "%s(%s): object: %#" PRIx64 " type: %d location: %lu msgCode: %d: %s\n",
- pLayerPrefix, msg_flags, srcObject, objType, (unsigned long)location, msgCode, pMsg);
- fflush((FILE *) pUserData);
+ fprintf((FILE *)pUserData, "%s(%s): object: %#" PRIx64 " type: %d location: %lu msgCode: %d: %s\n", pLayerPrefix, msg_flags,
+ srcObject, objType, (unsigned long)location, msgCode, pMsg);
+ fflush((FILE *)pUserData);
return false;
}
-static inline VKAPI_ATTR VkBool32 VKAPI_CALL win32_debug_output_msg(
- VkFlags msgFlags,
- VkDebugReportObjectTypeEXT objType,
- uint64_t srcObject,
- size_t location,
- int32_t msgCode,
- const char* pLayerPrefix,
- const char* pMsg,
- void* pUserData)
-{
+static inline VKAPI_ATTR VkBool32 VKAPI_CALL win32_debug_output_msg(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType,
+ uint64_t srcObject, size_t location, int32_t msgCode,
+ const char *pLayerPrefix, const char *pMsg, void *pUserData) {
#ifdef WIN32
char msg_flags[30];
char buf[2048];
print_msg_flags(msgFlags, msg_flags);
- _snprintf(buf, sizeof(buf) - 1, "%s (%s): object: 0x%" PRIxPTR " type: %d location: " PRINTF_SIZE_T_SPECIFIER " msgCode: %d: %s\n",
- pLayerPrefix, msg_flags, (size_t)srcObject, objType, location, msgCode, pMsg);
+ _snprintf(buf, sizeof(buf) - 1,
+ "%s (%s): object: 0x%" PRIxPTR " type: %d location: " PRINTF_SIZE_T_SPECIFIER " msgCode: %d: %s\n", pLayerPrefix,
+ msg_flags, (size_t)srcObject, objType, location, msgCode, pMsg);
OutputDebugString(buf);
#endif
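log_msg above only formats its message (vsnprintf into a fixed 1024-byte buffer) after checking the layer's active_flags, so message classes nobody subscribed to cost almost nothing; debug_report_log_msg then walks the registered callback list. A reduced sketch of that check-then-format flow, with made-up flag values and callback storage:

// Reduced sketch of the log_msg flow: return early if no callback wants this
// message class, otherwise format once and hand the string to each callback.
// Flag values and the callback list here are stand-ins, not the layer's types.
#include <cstdarg>
#include <cstdio>
#include <functional>
#include <utility>
#include <vector>

using MsgFlags = unsigned;
const MsgFlags kError = 0x1, kWarn = 0x2;

struct Logger {
    MsgFlags active_flags = 0;
    std::vector<std::pair<MsgFlags, std::function<void(const char *)>>> callbacks;

    bool log(MsgFlags flags, const char *format, ...) {
        if (!(active_flags & flags))
            return false;                       // message is not wanted, skip formatting entirely
        char str[1024];
        va_list argptr;
        va_start(argptr, format);
        vsnprintf(str, sizeof(str), format, argptr);
        va_end(argptr);
        for (auto &cb : callbacks)              // deliver to every interested callback
            if (cb.first & flags)
                cb.second(str);
        return true;
    }
};

int main() {
    Logger log;
    log.callbacks.push_back({kError, [](const char *m) { std::puts(m); }});
    log.active_flags |= kError;
    log.log(kError, "object %p not bound", (void *)nullptr);
    log.log(kWarn, "this one is skipped");      // no callback registered for warnings
    return 0;
}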
diff --git a/layers/vk_layer_table.cpp b/layers/vk_layer_table.cpp
index 5e364e5fa..153f565c3 100644
--- a/layers/vk_layer_table.cpp
+++ b/layers/vk_layer_table.cpp
@@ -35,21 +35,20 @@ static instance_table_map tableInstanceMap;
#define DISPATCH_MAP_DEBUG 0
// Map lookup must be thread safe
-VkLayerDispatchTable *device_dispatch_table(void* object)
-{
+VkLayerDispatchTable *device_dispatch_table(void *object) {
dispatch_key key = get_dispatch_key(object);
- device_table_map::const_iterator it = tableMap.find((void *) key);
+ device_table_map::const_iterator it = tableMap.find((void *)key);
assert(it != tableMap.end() && "Not able to find device dispatch entry");
return it->second;
}
-VkLayerInstanceDispatchTable *instance_dispatch_table(void* object)
-{
+VkLayerInstanceDispatchTable *instance_dispatch_table(void *object) {
dispatch_key key = get_dispatch_key(object);
- instance_table_map::const_iterator it = tableInstanceMap.find((void *) key);
+ instance_table_map::const_iterator it = tableInstanceMap.find((void *)key);
#if DISPATCH_MAP_DEBUG
if (it != tableInstanceMap.end()) {
- fprintf(stderr, "instance_dispatch_table: map: %p, object: %p, key: %p, table: %p\n", &tableInstanceMap, object, key, it->second);
+ fprintf(stderr, "instance_dispatch_table: map: %p, object: %p, key: %p, table: %p\n", &tableInstanceMap, object, key,
+ it->second);
} else {
fprintf(stderr, "instance_dispatch_table: map: %p, object: %p, key: %p, table: UNKNOWN\n", &tableInstanceMap, object, key);
}
@@ -58,8 +57,7 @@ VkLayerInstanceDispatchTable *instance_dispatch_table(void* object)
return it->second;
}
-void destroy_dispatch_table(device_table_map &map, dispatch_key key)
-{
+void destroy_dispatch_table(device_table_map &map, dispatch_key key) {
#if DISPATCH_MAP_DEBUG
device_table_map::const_iterator it = map.find((void *)key);
if (it != map.end()) {
@@ -72,8 +70,7 @@ void destroy_dispatch_table(device_table_map &map, dispatch_key key)
map.erase(key);
}
-void destroy_dispatch_table(instance_table_map &map, dispatch_key key)
-{
+void destroy_dispatch_table(instance_table_map &map, dispatch_key key) {
#if DISPATCH_MAP_DEBUG
instance_table_map::const_iterator it = map.find((void *)key);
if (it != map.end()) {
@@ -86,23 +83,17 @@ void destroy_dispatch_table(instance_table_map &map, dispatch_key key)
map.erase(key);
}
-void destroy_device_dispatch_table(dispatch_key key)
-{
- destroy_dispatch_table(tableMap, key);
-}
+void destroy_device_dispatch_table(dispatch_key key) { destroy_dispatch_table(tableMap, key); }
-void destroy_instance_dispatch_table(dispatch_key key)
-{
- destroy_dispatch_table(tableInstanceMap, key);
-}
+void destroy_instance_dispatch_table(dispatch_key key) { destroy_dispatch_table(tableInstanceMap, key); }
-VkLayerDispatchTable *get_dispatch_table(device_table_map &map, void* object)
-{
+VkLayerDispatchTable *get_dispatch_table(device_table_map &map, void *object) {
dispatch_key key = get_dispatch_key(object);
- device_table_map::const_iterator it = map.find((void *) key);
+ device_table_map::const_iterator it = map.find((void *)key);
#if DISPATCH_MAP_DEBUG
if (it != map.end()) {
- fprintf(stderr, "device_dispatch_table: map: %p, object: %p, key: %p, table: %p\n", &tableInstanceMap, object, key, it->second);
+ fprintf(stderr, "device_dispatch_table: map: %p, object: %p, key: %p, table: %p\n", &tableInstanceMap, object, key,
+ it->second);
} else {
fprintf(stderr, "device_dispatch_table: map: %p, object: %p, key: %p, table: UNKNOWN\n", &tableInstanceMap, object, key);
}
@@ -111,14 +102,14 @@ VkLayerDispatchTable *get_dispatch_table(device_table_map &map, void* object)
return it->second;
}
-VkLayerInstanceDispatchTable *get_dispatch_table(instance_table_map &map, void* object)
-{
-// VkLayerInstanceDispatchTable *pDisp = *(VkLayerInstanceDispatchTable **) object;
+VkLayerInstanceDispatchTable *get_dispatch_table(instance_table_map &map, void *object) {
+ // VkLayerInstanceDispatchTable *pDisp = *(VkLayerInstanceDispatchTable **) object;
dispatch_key key = get_dispatch_key(object);
- instance_table_map::const_iterator it = map.find((void *) key);
+ instance_table_map::const_iterator it = map.find((void *)key);
#if DISPATCH_MAP_DEBUG
if (it != map.end()) {
- fprintf(stderr, "instance_dispatch_table: map: %p, object: %p, key: %p, table: %p\n", &tableInstanceMap, object, key, it->second);
+ fprintf(stderr, "instance_dispatch_table: map: %p, object: %p, key: %p, table: %p\n", &tableInstanceMap, object, key,
+ it->second);
} else {
fprintf(stderr, "instance_dispatch_table: map: %p, object: %p, key: %p, table: UNKNOWN\n", &tableInstanceMap, object, key);
}
@@ -127,23 +118,19 @@ VkLayerInstanceDispatchTable *get_dispatch_table(instance_table_map &map, void*
return it->second;
}
-VkLayerInstanceCreateInfo *get_chain_info(const VkInstanceCreateInfo *pCreateInfo, VkLayerFunction func)
-{
- VkLayerInstanceCreateInfo *chain_info = (VkLayerInstanceCreateInfo *) pCreateInfo->pNext;
- while (chain_info && !(chain_info->sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO
- && chain_info->function == func)) {
- chain_info = (VkLayerInstanceCreateInfo *) chain_info->pNext;
+VkLayerInstanceCreateInfo *get_chain_info(const VkInstanceCreateInfo *pCreateInfo, VkLayerFunction func) {
+ VkLayerInstanceCreateInfo *chain_info = (VkLayerInstanceCreateInfo *)pCreateInfo->pNext;
+ while (chain_info && !(chain_info->sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO && chain_info->function == func)) {
+ chain_info = (VkLayerInstanceCreateInfo *)chain_info->pNext;
}
assert(chain_info != NULL);
return chain_info;
}
-VkLayerDeviceCreateInfo *get_chain_info(const VkDeviceCreateInfo *pCreateInfo, VkLayerFunction func)
-{
- VkLayerDeviceCreateInfo *chain_info = (VkLayerDeviceCreateInfo *) pCreateInfo->pNext;
- while (chain_info && !(chain_info->sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO
- && chain_info->function == func)) {
- chain_info = (VkLayerDeviceCreateInfo *) chain_info->pNext;
+VkLayerDeviceCreateInfo *get_chain_info(const VkDeviceCreateInfo *pCreateInfo, VkLayerFunction func) {
+ VkLayerDeviceCreateInfo *chain_info = (VkLayerDeviceCreateInfo *)pCreateInfo->pNext;
+ while (chain_info && !(chain_info->sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO && chain_info->function == func)) {
+ chain_info = (VkLayerDeviceCreateInfo *)chain_info->pNext;
}
assert(chain_info != NULL);
return chain_info;
@@ -156,21 +143,18 @@ VkLayerDeviceCreateInfo *get_chain_info(const VkDeviceCreateInfo *pCreateInfo, V
* Device -> CommandBuffer or Queue
 * If the objects themselves were used as keys to the map, it would imply that the Create entrypoints have to be intercepted
 * and a new key inserted into the map */
-VkLayerInstanceDispatchTable * initInstanceTable(VkInstance instance, const PFN_vkGetInstanceProcAddr gpa, instance_table_map &map)
-{
+VkLayerInstanceDispatchTable *initInstanceTable(VkInstance instance, const PFN_vkGetInstanceProcAddr gpa, instance_table_map &map) {
VkLayerInstanceDispatchTable *pTable;
dispatch_key key = get_dispatch_key(instance);
- instance_table_map::const_iterator it = map.find((void *) key);
+ instance_table_map::const_iterator it = map.find((void *)key);
- if (it == map.end())
- {
- pTable = new VkLayerInstanceDispatchTable;
- map[(void *) key] = pTable;
+ if (it == map.end()) {
+ pTable = new VkLayerInstanceDispatchTable;
+ map[(void *)key] = pTable;
#if DISPATCH_MAP_DEBUG
fprintf(stderr, "New, Instance: map: %p, key: %p, table: %p\n", &map, key, pTable);
#endif
- } else
- {
+ } else {
#if DISPATCH_MAP_DEBUG
fprintf(stderr, "Instance: map: %p, key: %p, table: %p\n", &map, key, it->second);
#endif
@@ -182,26 +166,22 @@ VkLayerInstanceDispatchTable * initInstanceTable(VkInstance instance, const PFN_
return pTable;
}
-VkLayerInstanceDispatchTable * initInstanceTable(VkInstance instance, const PFN_vkGetInstanceProcAddr gpa)
-{
+VkLayerInstanceDispatchTable *initInstanceTable(VkInstance instance, const PFN_vkGetInstanceProcAddr gpa) {
return initInstanceTable(instance, gpa, tableInstanceMap);
}
-VkLayerDispatchTable * initDeviceTable(VkDevice device, const PFN_vkGetDeviceProcAddr gpa, device_table_map &map)
-{
+VkLayerDispatchTable *initDeviceTable(VkDevice device, const PFN_vkGetDeviceProcAddr gpa, device_table_map &map) {
VkLayerDispatchTable *pTable;
dispatch_key key = get_dispatch_key(device);
- device_table_map::const_iterator it = map.find((void *) key);
+ device_table_map::const_iterator it = map.find((void *)key);
- if (it == map.end())
- {
- pTable = new VkLayerDispatchTable;
- map[(void *) key] = pTable;
+ if (it == map.end()) {
+ pTable = new VkLayerDispatchTable;
+ map[(void *)key] = pTable;
#if DISPATCH_MAP_DEBUG
fprintf(stderr, "New, Device: map: %p, key: %p, table: %p\n", &map, key, pTable);
#endif
- } else
- {
+ } else {
#if DISPATCH_MAP_DEBUG
fprintf(stderr, "Device: map: %p, key: %p, table: %p\n", &map, key, it->second);
#endif
@@ -213,7 +193,6 @@ VkLayerDispatchTable * initDeviceTable(VkDevice device, const PFN_vkGetDevicePro
return pTable;
}
-VkLayerDispatchTable * initDeviceTable(VkDevice device, const PFN_vkGetDeviceProcAddr gpa)
-{
+VkLayerDispatchTable *initDeviceTable(VkDevice device, const PFN_vkGetDeviceProcAddr gpa) {
return initDeviceTable(device, gpa, tableMap);
}
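The maps above are keyed by dispatch key rather than by the handles themselves, so child objects resolve to their parent's table without extra bookkeeping: a queue or command buffer carries the same dispatch key as the device that created it. A minimal sketch of a layer entry point that relies on this (illustrative only, not part of this commit; the hook name is hypothetical):

    #include "vulkan/vulkan.h"
    #include "vk_layer_table.h"

    // Hypothetical intercept: the queue handle resolves to its parent device's
    // dispatch table because both share the same dispatch key.
    VKAPI_ATTR VkResult VKAPI_CALL example_QueueSubmit(VkQueue queue, uint32_t submitCount,
                                                       const VkSubmitInfo *pSubmits, VkFence fence) {
        VkLayerDispatchTable *pTable = device_dispatch_table((void *)queue);
        // ... layer-specific checks would run here ...
        return pTable->QueueSubmit(queue, submitCount, pSubmits, fence);
    }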
diff --git a/layers/vk_layer_table.h b/layers/vk_layer_table.h
index 33a4cf728..eb7efd37a 100644
--- a/layers/vk_layer_table.h
+++ b/layers/vk_layer_table.h
@@ -31,26 +31,22 @@
typedef std::unordered_map<void *, VkLayerDispatchTable *> device_table_map;
typedef std::unordered_map<void *, VkLayerInstanceDispatchTable *> instance_table_map;
-VkLayerDispatchTable * initDeviceTable(VkDevice device, const PFN_vkGetDeviceProcAddr gpa, device_table_map &map);
-VkLayerDispatchTable * initDeviceTable(VkDevice device, const PFN_vkGetDeviceProcAddr gpa);
-VkLayerInstanceDispatchTable * initInstanceTable(VkInstance instance, const PFN_vkGetInstanceProcAddr gpa, instance_table_map &map);
-VkLayerInstanceDispatchTable * initInstanceTable(VkInstance instance, const PFN_vkGetInstanceProcAddr gpa);
-
+VkLayerDispatchTable *initDeviceTable(VkDevice device, const PFN_vkGetDeviceProcAddr gpa, device_table_map &map);
+VkLayerDispatchTable *initDeviceTable(VkDevice device, const PFN_vkGetDeviceProcAddr gpa);
+VkLayerInstanceDispatchTable *initInstanceTable(VkInstance instance, const PFN_vkGetInstanceProcAddr gpa, instance_table_map &map);
+VkLayerInstanceDispatchTable *initInstanceTable(VkInstance instance, const PFN_vkGetInstanceProcAddr gpa);
typedef void *dispatch_key;
-static inline dispatch_key get_dispatch_key(const void* object)
-{
- return (dispatch_key) *(VkLayerDispatchTable **) object;
-}
+static inline dispatch_key get_dispatch_key(const void *object) { return (dispatch_key) * (VkLayerDispatchTable **)object; }
-VkLayerDispatchTable *device_dispatch_table(void* object);
+VkLayerDispatchTable *device_dispatch_table(void *object);
-VkLayerInstanceDispatchTable *instance_dispatch_table(void* object);
+VkLayerInstanceDispatchTable *instance_dispatch_table(void *object);
-VkLayerDispatchTable *get_dispatch_table(device_table_map &map, void* object);
+VkLayerDispatchTable *get_dispatch_table(device_table_map &map, void *object);
-VkLayerInstanceDispatchTable *get_dispatch_table(instance_table_map &map, void* object);
+VkLayerInstanceDispatchTable *get_dispatch_table(instance_table_map &map, void *object);
VkLayerInstanceCreateInfo *get_chain_info(const VkInstanceCreateInfo *pCreateInfo, VkLayerFunction func);
VkLayerDeviceCreateInfo *get_chain_info(const VkDeviceCreateInfo *pCreateInfo, VkLayerFunction func);
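get_dispatch_key() reads back the dispatch-table pointer the loader writes at the start of every dispatchable handle and uses it as the map key; get_chain_info() walks pCreateInfo->pNext looking for the loader's layer-link structure. A sketch of the usual vkCreateInstance boilerplate built on these helpers (assumed from the common layer pattern, error handling trimmed; the function name is hypothetical):

    #include "vulkan/vulkan.h"
    #include "vk_layer_table.h"

    VKAPI_ATTR VkResult VKAPI_CALL example_CreateInstance(const VkInstanceCreateInfo *pCreateInfo,
                                                          const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
        VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
        PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
        PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
        if (fpCreateInstance == NULL)
            return VK_ERROR_INITIALIZATION_FAILED;
        // Advance the link so the next layer down the chain finds its own info.
        chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
        VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
        if (result == VK_SUCCESS) {
            // Key the new instance's table by the dispatch key the loader just installed.
            initInstanceTable(*pInstance, fpGetInstanceProcAddr);
        }
        return result;
    }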
diff --git a/layers/vk_layer_utils.cpp b/layers/vk_layer_utils.cpp
index 22fb52fef..9fea38998 100644
--- a/layers/vk_layer_utils.cpp
+++ b/layers/vk_layer_utils.cpp
@@ -30,215 +30,209 @@
#include "vulkan/vulkan.h"
#include "vk_layer_utils.h"
-
typedef struct _VULKAN_FORMAT_INFO {
- size_t size;
- uint32_t channel_count;
- VkFormatCompatibilityClass format_class;
+ size_t size;
+ uint32_t channel_count;
+ VkFormatCompatibilityClass format_class;
} VULKAN_FORMAT_INFO;
-
// Set up data structure with number of bytes and number of channels
// for each Vulkan format.
static const VULKAN_FORMAT_INFO vk_format_table[VK_FORMAT_RANGE_SIZE] = {
- { 0, 0, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT }, // [VK_FORMAT_UNDEFINED]
- { 1, 2, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT }, // [VK_FORMAT_R4G4_UNORM_PACK8]
- { 2, 4, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R4G4B4A4_UNORM_PACK16]
- { 2, 4, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_B4G4R4A4_UNORM_PACK16]
- { 2, 3, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R5G6B5_UNORM_PACK16]
- { 2, 3, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_B5G6R5_UNORM_PACK16]
- { 2, 4, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R5G5B5A1_UNORM_PACK16]
- { 2, 4, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_B5G5R5A1_UNORM_PACK16]
- { 2, 4, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_A1R5G5B5_UNORM_PACK16]
- { 1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT }, // [VK_FORMAT_R8_UNORM]
- { 1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT }, // [VK_FORMAT_R8_SNORM]
- { 1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT }, // [VK_FORMAT_R8_USCALED]
- { 1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT }, // [VK_FORMAT_R8_SSCALED]
- { 1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT }, // [VK_FORMAT_R8_UINT]
- { 1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT }, // [VK_FORMAT_R8_SINT]
- { 1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT }, // [VK_FORMAT_R8_SRGB]
- { 2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R8G8_UNORM]
- { 2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R8G8_SNORM]
- { 2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R8G8_USCALED]
- { 2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R8G8_SSCALED]
- { 2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R8G8_UINT]
- { 2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R8G8_SINT]
- { 2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R8G8_SRGB]
- { 3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT }, // [VK_FORMAT_R8G8B8_UNORM]
- { 3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT }, // [VK_FORMAT_R8G8B8_SNORM]
- { 3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT }, // [VK_FORMAT_R8G8B8_USCALED]
- { 3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT }, // [VK_FORMAT_R8G8B8_SSCALED]
- { 3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT }, // [VK_FORMAT_R8G8B8_UINT]
- { 3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT }, // [VK_FORMAT_R8G8B8_SINT]
- { 3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT }, // [VK_FORMAT_R8G8B8_SRGB]
- { 3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT }, // [VK_FORMAT_B8G8R8_UNORM]
- { 3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT }, // [VK_FORMAT_B8G8R8_SNORM]
- { 3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT }, // [VK_FORMAT_B8G8R8_USCALED]
- { 3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT }, // [VK_FORMAT_B8G8R8_SSCALED]
- { 3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT }, // [VK_FORMAT_B8G8R8_UINT]
- { 3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT }, // [VK_FORMAT_B8G8R8_SINT]
- { 3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT }, // [VK_FORMAT_B8G8R8_SRGB]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R8G8B8A8_UNORM]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R8G8B8A8_SNORM]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R8G8B8A8_USCALED]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R8G8B8A8_SSCALED]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R8G8B8A8_UINT]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R8G8B8A8_SINT]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R8G8B8A8_SRGB]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_B8G8R8A8_UNORM]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_B8G8R8A8_SNORM]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_B8G8R8A8_USCALED]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_B8G8R8A8_SSCALED]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_B8G8R8A8_UINT]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_B8G8R8A8_SINT]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_B8G8R8A8_SRGB]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A8B8G8R8_UNORM_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A8B8G8R8_SNORM_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A8B8G8R8_USCALED_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A8B8G8R8_SSCALED_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A8B8G8R8_UINT_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A8B8G8R8_SINT_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_B8G8R8A8_SRGB_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A2R10G10B10_UNORM_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A2R10G10B10_SNORM_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A2R10G10B10_USCALED_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A2R10G10B10_SSCALED_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A2R10G10B10_UINT_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A2R10G10B10_SINT_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A2B10G10R10_UNORM_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A2B10G10R10_SNORM_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A2B10G10R10_USCALED_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A2B10G10R10_SSCALED_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A2B10G10R10_UINT_PACK32]
- { 4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_A2B10G10R10_SINT_PACK32]
- { 2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R16_UNORM]
- { 2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R16_SNORM]
- { 2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R16_USCALED]
- { 2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R16_SSCALED]
- { 2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R16_UINT]
- { 2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R16_SINT]
- { 2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT }, // [VK_FORMAT_R16_SFLOAT]
- { 4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R16G16_UNORM]
- { 4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R16G16_SNORM]
- { 4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R16G16_USCALED]
- { 4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R16G16_SSCALED]
- { 4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R16G16_UINT]
- { 4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R16G16_SINT]
- { 4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R16G16_SFLOAT]
- { 6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT }, // [VK_FORMAT_R16G16B16_UNORM]
- { 6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT }, // [VK_FORMAT_R16G16B16_SNORM]
- { 6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT }, // [VK_FORMAT_R16G16B16_USCALED]
- { 6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT }, // [VK_FORMAT_R16G16B16_SSCALED]
- { 6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT }, // [VK_FORMAT_R16G16B16_UINT]
- { 6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT }, // [VK_FORMAT_R16G16B16_SINT]
- { 6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT }, // [VK_FORMAT_R16G16B16_SFLOAT]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT }, // [VK_FORMAT_R16G16B16A16_UNORM]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT }, // [VK_FORMAT_R16G16B16A16_SNORM]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT }, // [VK_FORMAT_R16G16B16A16_USCALED]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT }, // [VK_FORMAT_R16G16B16A16_SSCALED]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT }, // [VK_FORMAT_R16G16B16A16_UINT]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT }, // [VK_FORMAT_R16G16B16A16_SINT]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT }, // [VK_FORMAT_R16G16B16A16_SFLOAT]
- { 4, 1, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R32_UINT]
- { 4, 1, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R32_SINT]
- { 4, 1, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_R32_SFLOAT]
- { 8, 2, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT }, // [VK_FORMAT_R32G32_UINT]
- { 8, 2, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT }, // [VK_FORMAT_R32G32_SINT]
- { 8, 2, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT }, // [VK_FORMAT_R32G32_SFLOAT]
- { 12, 3, VK_FORMAT_COMPATIBILITY_CLASS_96_BIT }, // [VK_FORMAT_R32G32B32_UINT]
- { 12, 3, VK_FORMAT_COMPATIBILITY_CLASS_96_BIT }, // [VK_FORMAT_R32G32B32_SINT]
- { 12, 3, VK_FORMAT_COMPATIBILITY_CLASS_96_BIT }, // [VK_FORMAT_R32G32B32_SFLOAT]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT }, // [VK_FORMAT_R32G32B32A32_UINT]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT }, // [VK_FORMAT_R32G32B32A32_SINT]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT }, // [VK_FORMAT_R32G32B32A32_SFLOAT]
- { 8, 1, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT }, // [VK_FORMAT_R64_UINT]
- { 8, 1, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT }, // [VK_FORMAT_R64_SINT]
- { 8, 1, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT }, // [VK_FORMAT_R64_SFLOAT]
- { 16, 2, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT }, // [VK_FORMAT_R64G64_UINT]
- { 16, 2, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT }, // [VK_FORMAT_R64G64_SINT]
- { 16, 2, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT }, // [VK_FORMAT_R64G64_SFLOAT]
- { 24, 3, VK_FORMAT_COMPATIBILITY_CLASS_192_BIT }, // [VK_FORMAT_R64G64B64_UINT]
- { 24, 3, VK_FORMAT_COMPATIBILITY_CLASS_192_BIT }, // [VK_FORMAT_R64G64B64_SINT]
- { 24, 3, VK_FORMAT_COMPATIBILITY_CLASS_192_BIT }, // [VK_FORMAT_R64G64B64_SFLOAT]
- { 32, 4, VK_FORMAT_COMPATIBILITY_CLASS_256_BIT }, // [VK_FORMAT_R64G64B64A64_UINT]
- { 32, 4, VK_FORMAT_COMPATIBILITY_CLASS_256_BIT }, // [VK_FORMAT_R64G64B64A64_SINT]
- { 32, 4, VK_FORMAT_COMPATIBILITY_CLASS_256_BIT }, // [VK_FORMAT_R64G64B64A64_SFLOAT]
- { 4, 3, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_B10G11R11_UFLOAT_PACK32]
- { 4, 3, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT }, // [VK_FORMAT_E5B9G9R9_UFLOAT_PACK32]
- { 2, 1, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT }, // [VK_FORMAT_D16_UNORM]
- { 3, 1, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT }, // [VK_FORMAT_X8_D24_UNORM_PACK32]
- { 4, 1, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT }, // [VK_FORMAT_D32_SFLOAT]
- { 1, 1, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT }, // [VK_FORMAT_S8_UINT]
- { 3, 2, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT }, // [VK_FORMAT_D16_UNORM_S8_UINT]
- { 4, 2, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT }, // [VK_FORMAT_D24_UNORM_S8_UINT]
- { 4, 2, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT }, // [VK_FORMAT_D32_SFLOAT_S8_UINT]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGB_BIT }, // [VK_FORMAT_BC1_RGB_UNORM_BLOCK]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGB_BIT }, // [VK_FORMAT_BC1_RGB_SRGB_BLOCK]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGBA_BIT }, // [VK_FORMAT_BC1_RGBA_UNORM_BLOCK]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGBA_BIT }, // [VK_FORMAT_BC1_RGBA_SRGB_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC2_BIT }, // [VK_FORMAT_BC2_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC2_BIT }, // [VK_FORMAT_BC2_SRGB_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC3_BIT }, // [VK_FORMAT_BC3_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC3_BIT }, // [VK_FORMAT_BC3_SRGB_BLOCK]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC4_BIT }, // [VK_FORMAT_BC4_UNORM_BLOCK]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC4_BIT }, // [VK_FORMAT_BC4_SNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC5_BIT }, // [VK_FORMAT_BC5_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC5_BIT }, // [VK_FORMAT_BC5_SNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC6H_BIT }, // [VK_FORMAT_BC6H_UFLOAT_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC6H_BIT }, // [VK_FORMAT_BC6H_SFLOAT_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC7_BIT }, // [VK_FORMAT_BC7_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC7_BIT }, // [VK_FORMAT_BC7_SRGB_BLOCK]
- { 8, 3, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGB_BIT }, // [VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK]
- { 8, 3, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGB_BIT }, // [VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGBA_BIT }, // [VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGBA_BIT }, // [VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_EAC_RGBA_BIT }, // [VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK]
- { 8, 4, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_EAC_RGBA_BIT }, // [VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK]
- { 8, 1, VK_FORMAT_COMPATIBILITY_CLASS_EAC_R_BIT }, // [VK_FORMAT_EAC_R11_UNORM_BLOCK]
- { 8, 1, VK_FORMAT_COMPATIBILITY_CLASS_EAC_R_BIT }, // [VK_FORMAT_EAC_R11_SNORM_BLOCK]
- { 16, 2, VK_FORMAT_COMPATIBILITY_CLASS_EAC_RG_BIT }, // [VK_FORMAT_EAC_R11G11_UNORM_BLOCK]
- { 16, 2, VK_FORMAT_COMPATIBILITY_CLASS_EAC_RG_BIT }, // [VK_FORMAT_EAC_R11G11_SNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_4X4_BIT }, // [VK_FORMAT_ASTC_4x4_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_4X4_BIT }, // [VK_FORMAT_ASTC_4x4_SRGB_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X4_BIT }, // [VK_FORMAT_ASTC_5x4_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X4_BIT }, // [VK_FORMAT_ASTC_5x4_SRGB_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X5_BIT }, // [VK_FORMAT_ASTC_5x5_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X5_BIT }, // [VK_FORMAT_ASTC_5x5_SRGB_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X5_BIT }, // [VK_FORMAT_ASTC_6x5_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X5_BIT }, // [VK_FORMAT_ASTC_6x5_SRGB_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X6_BIT }, // [VK_FORMAT_ASTC_6x6_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X6_BIT }, // [VK_FORMAT_ASTC_6x6_SRGB_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X5_BIT }, // [VK_FORMAT_ASTC_8x5_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X5_BIT }, // [VK_FORMAT_ASTC_8x5_SRGB_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X6_BIT }, // [VK_FORMAT_ASTC_8x6_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X6_BIT }, // [VK_FORMAT_ASTC_8x6_SRGB_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X8_BIT }, // [VK_FORMAT_ASTC_8x8_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X8_BIT }, // [VK_FORMAT_ASTC_8x8_SRGB_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X5_BIT }, // [VK_FORMAT_ASTC_10x5_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X5_BIT }, // [VK_FORMAT_ASTC_10x5_SRGB_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X6_BIT }, // [VK_FORMAT_ASTC_10x6_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X6_BIT }, // [VK_FORMAT_ASTC_10x6_SRGB_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X8_BIT }, // [VK_FORMAT_ASTC_10x8_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X8_BIT }, // [VK_FORMAT_ASTC_10x8_SRGB_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X10_BIT }, // [VK_FORMAT_ASTC_10x10_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X10_BIT }, // [VK_FORMAT_ASTC_10x10_SRGB_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X10_BIT }, // [VK_FORMAT_ASTC_12x10_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X10_BIT }, // [VK_FORMAT_ASTC_12x10_SRGB_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X12_BIT }, // [VK_FORMAT_ASTC_12x12_UNORM_BLOCK]
- { 16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X12_BIT }, // [VK_FORMAT_ASTC_12x12_SRGB_BLOCK]
+ {0, 0, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT}, // [VK_FORMAT_UNDEFINED]
+ {1, 2, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}, // [VK_FORMAT_R4G4_UNORM_PACK8]
+ {2, 4, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R4G4B4A4_UNORM_PACK16]
+ {2, 4, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_B4G4R4A4_UNORM_PACK16]
+ {2, 3, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R5G6B5_UNORM_PACK16]
+ {2, 3, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_B5G6R5_UNORM_PACK16]
+ {2, 4, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R5G5B5A1_UNORM_PACK16]
+ {2, 4, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_B5G5R5A1_UNORM_PACK16]
+ {2, 4, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_A1R5G5B5_UNORM_PACK16]
+ {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}, // [VK_FORMAT_R8_UNORM]
+ {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}, // [VK_FORMAT_R8_SNORM]
+ {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}, // [VK_FORMAT_R8_USCALED]
+ {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}, // [VK_FORMAT_R8_SSCALED]
+ {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}, // [VK_FORMAT_R8_UINT]
+ {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}, // [VK_FORMAT_R8_SINT]
+ {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_8_BIT}, // [VK_FORMAT_R8_SRGB]
+ {2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R8G8_UNORM]
+ {2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R8G8_SNORM]
+ {2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R8G8_USCALED]
+ {2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R8G8_SSCALED]
+ {2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R8G8_UINT]
+ {2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R8G8_SINT]
+ {2, 2, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R8G8_SRGB]
+ {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}, // [VK_FORMAT_R8G8B8_UNORM]
+ {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}, // [VK_FORMAT_R8G8B8_SNORM]
+ {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}, // [VK_FORMAT_R8G8B8_USCALED]
+ {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}, // [VK_FORMAT_R8G8B8_SSCALED]
+ {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}, // [VK_FORMAT_R8G8B8_UINT]
+ {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}, // [VK_FORMAT_R8G8B8_SINT]
+ {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}, // [VK_FORMAT_R8G8B8_SRGB]
+ {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}, // [VK_FORMAT_B8G8R8_UNORM]
+ {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}, // [VK_FORMAT_B8G8R8_SNORM]
+ {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}, // [VK_FORMAT_B8G8R8_USCALED]
+ {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}, // [VK_FORMAT_B8G8R8_SSCALED]
+ {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}, // [VK_FORMAT_B8G8R8_UINT]
+ {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}, // [VK_FORMAT_B8G8R8_SINT]
+ {3, 3, VK_FORMAT_COMPATIBILITY_CLASS_24_BIT}, // [VK_FORMAT_B8G8R8_SRGB]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R8G8B8A8_UNORM]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R8G8B8A8_SNORM]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R8G8B8A8_USCALED]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R8G8B8A8_SSCALED]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R8G8B8A8_UINT]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R8G8B8A8_SINT]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R8G8B8A8_SRGB]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_B8G8R8A8_UNORM]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_B8G8R8A8_SNORM]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_B8G8R8A8_USCALED]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_B8G8R8A8_SSCALED]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_B8G8R8A8_UINT]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_B8G8R8A8_SINT]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_B8G8R8A8_SRGB]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A8B8G8R8_UNORM_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A8B8G8R8_SNORM_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A8B8G8R8_USCALED_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A8B8G8R8_SSCALED_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A8B8G8R8_UINT_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A8B8G8R8_SINT_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_B8G8R8A8_SRGB_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A2R10G10B10_UNORM_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A2R10G10B10_SNORM_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A2R10G10B10_USCALED_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A2R10G10B10_SSCALED_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A2R10G10B10_UINT_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A2R10G10B10_SINT_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A2B10G10R10_UNORM_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A2B10G10R10_SNORM_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A2B10G10R10_USCALED_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A2B10G10R10_SSCALED_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A2B10G10R10_UINT_PACK32]
+ {4, 4, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_A2B10G10R10_SINT_PACK32]
+ {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R16_UNORM]
+ {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R16_SNORM]
+ {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R16_USCALED]
+ {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R16_SSCALED]
+ {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R16_UINT]
+ {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R16_SINT]
+ {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_16_BIT}, // [VK_FORMAT_R16_SFLOAT]
+ {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R16G16_UNORM]
+ {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R16G16_SNORM]
+ {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R16G16_USCALED]
+ {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R16G16_SSCALED]
+ {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R16G16_UINT]
+ {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R16G16_SINT]
+ {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R16G16_SFLOAT]
+ {6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT}, // [VK_FORMAT_R16G16B16_UNORM]
+ {6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT}, // [VK_FORMAT_R16G16B16_SNORM]
+ {6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT}, // [VK_FORMAT_R16G16B16_USCALED]
+ {6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT}, // [VK_FORMAT_R16G16B16_SSCALED]
+ {6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT}, // [VK_FORMAT_R16G16B16_UINT]
+ {6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT}, // [VK_FORMAT_R16G16B16_SINT]
+ {6, 3, VK_FORMAT_COMPATIBILITY_CLASS_48_BIT}, // [VK_FORMAT_R16G16B16_SFLOAT]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}, // [VK_FORMAT_R16G16B16A16_UNORM]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}, // [VK_FORMAT_R16G16B16A16_SNORM]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}, // [VK_FORMAT_R16G16B16A16_USCALED]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}, // [VK_FORMAT_R16G16B16A16_SSCALED]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}, // [VK_FORMAT_R16G16B16A16_UINT]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}, // [VK_FORMAT_R16G16B16A16_SINT]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}, // [VK_FORMAT_R16G16B16A16_SFLOAT]
+ {4, 1, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R32_UINT]
+ {4, 1, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R32_SINT]
+ {4, 1, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_R32_SFLOAT]
+ {8, 2, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}, // [VK_FORMAT_R32G32_UINT]
+ {8, 2, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}, // [VK_FORMAT_R32G32_SINT]
+ {8, 2, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}, // [VK_FORMAT_R32G32_SFLOAT]
+ {12, 3, VK_FORMAT_COMPATIBILITY_CLASS_96_BIT}, // [VK_FORMAT_R32G32B32_UINT]
+ {12, 3, VK_FORMAT_COMPATIBILITY_CLASS_96_BIT}, // [VK_FORMAT_R32G32B32_SINT]
+ {12, 3, VK_FORMAT_COMPATIBILITY_CLASS_96_BIT}, // [VK_FORMAT_R32G32B32_SFLOAT]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT}, // [VK_FORMAT_R32G32B32A32_UINT]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT}, // [VK_FORMAT_R32G32B32A32_SINT]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT}, // [VK_FORMAT_R32G32B32A32_SFLOAT]
+ {8, 1, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}, // [VK_FORMAT_R64_UINT]
+ {8, 1, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}, // [VK_FORMAT_R64_SINT]
+ {8, 1, VK_FORMAT_COMPATIBILITY_CLASS_64_BIT}, // [VK_FORMAT_R64_SFLOAT]
+ {16, 2, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT}, // [VK_FORMAT_R64G64_UINT]
+ {16, 2, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT}, // [VK_FORMAT_R64G64_SINT]
+ {16, 2, VK_FORMAT_COMPATIBILITY_CLASS_128_BIT}, // [VK_FORMAT_R64G64_SFLOAT]
+ {24, 3, VK_FORMAT_COMPATIBILITY_CLASS_192_BIT}, // [VK_FORMAT_R64G64B64_UINT]
+ {24, 3, VK_FORMAT_COMPATIBILITY_CLASS_192_BIT}, // [VK_FORMAT_R64G64B64_SINT]
+ {24, 3, VK_FORMAT_COMPATIBILITY_CLASS_192_BIT}, // [VK_FORMAT_R64G64B64_SFLOAT]
+ {32, 4, VK_FORMAT_COMPATIBILITY_CLASS_256_BIT}, // [VK_FORMAT_R64G64B64A64_UINT]
+ {32, 4, VK_FORMAT_COMPATIBILITY_CLASS_256_BIT}, // [VK_FORMAT_R64G64B64A64_SINT]
+ {32, 4, VK_FORMAT_COMPATIBILITY_CLASS_256_BIT}, // [VK_FORMAT_R64G64B64A64_SFLOAT]
+ {4, 3, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_B10G11R11_UFLOAT_PACK32]
+ {4, 3, VK_FORMAT_COMPATIBILITY_CLASS_32_BIT}, // [VK_FORMAT_E5B9G9R9_UFLOAT_PACK32]
+ {2, 1, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT}, // [VK_FORMAT_D16_UNORM]
+ {3, 1, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT}, // [VK_FORMAT_X8_D24_UNORM_PACK32]
+ {4, 1, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT}, // [VK_FORMAT_D32_SFLOAT]
+ {1, 1, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT}, // [VK_FORMAT_S8_UINT]
+ {3, 2, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT}, // [VK_FORMAT_D16_UNORM_S8_UINT]
+ {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT}, // [VK_FORMAT_D24_UNORM_S8_UINT]
+ {4, 2, VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT}, // [VK_FORMAT_D32_SFLOAT_S8_UINT]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGB_BIT}, // [VK_FORMAT_BC1_RGB_UNORM_BLOCK]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGB_BIT}, // [VK_FORMAT_BC1_RGB_SRGB_BLOCK]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGBA_BIT}, // [VK_FORMAT_BC1_RGBA_UNORM_BLOCK]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGBA_BIT}, // [VK_FORMAT_BC1_RGBA_SRGB_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC2_BIT}, // [VK_FORMAT_BC2_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC2_BIT}, // [VK_FORMAT_BC2_SRGB_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC3_BIT}, // [VK_FORMAT_BC3_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC3_BIT}, // [VK_FORMAT_BC3_SRGB_BLOCK]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC4_BIT}, // [VK_FORMAT_BC4_UNORM_BLOCK]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC4_BIT}, // [VK_FORMAT_BC4_SNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC5_BIT}, // [VK_FORMAT_BC5_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC5_BIT}, // [VK_FORMAT_BC5_SNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC6H_BIT}, // [VK_FORMAT_BC6H_UFLOAT_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC6H_BIT}, // [VK_FORMAT_BC6H_SFLOAT_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC7_BIT}, // [VK_FORMAT_BC7_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_BC7_BIT}, // [VK_FORMAT_BC7_SRGB_BLOCK]
+ {8, 3, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGB_BIT}, // [VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK]
+ {8, 3, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGB_BIT}, // [VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGBA_BIT}, // [VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGBA_BIT}, // [VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_EAC_RGBA_BIT}, // [VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK]
+ {8, 4, VK_FORMAT_COMPATIBILITY_CLASS_ETC2_EAC_RGBA_BIT}, // [VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK]
+ {8, 1, VK_FORMAT_COMPATIBILITY_CLASS_EAC_R_BIT}, // [VK_FORMAT_EAC_R11_UNORM_BLOCK]
+ {8, 1, VK_FORMAT_COMPATIBILITY_CLASS_EAC_R_BIT}, // [VK_FORMAT_EAC_R11_SNORM_BLOCK]
+ {16, 2, VK_FORMAT_COMPATIBILITY_CLASS_EAC_RG_BIT}, // [VK_FORMAT_EAC_R11G11_UNORM_BLOCK]
+ {16, 2, VK_FORMAT_COMPATIBILITY_CLASS_EAC_RG_BIT}, // [VK_FORMAT_EAC_R11G11_SNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_4X4_BIT}, // [VK_FORMAT_ASTC_4x4_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_4X4_BIT}, // [VK_FORMAT_ASTC_4x4_SRGB_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X4_BIT}, // [VK_FORMAT_ASTC_5x4_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X4_BIT}, // [VK_FORMAT_ASTC_5x4_SRGB_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X5_BIT}, // [VK_FORMAT_ASTC_5x5_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X5_BIT}, // [VK_FORMAT_ASTC_5x5_SRGB_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X5_BIT}, // [VK_FORMAT_ASTC_6x5_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X5_BIT}, // [VK_FORMAT_ASTC_6x5_SRGB_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X6_BIT}, // [VK_FORMAT_ASTC_6x6_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X6_BIT}, // [VK_FORMAT_ASTC_6x6_SRGB_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X5_BIT}, // [VK_FORMAT_ASTC_8x5_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X5_BIT}, // [VK_FORMAT_ASTC_8x5_SRGB_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X6_BIT}, // [VK_FORMAT_ASTC_8x6_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X6_BIT}, // [VK_FORMAT_ASTC_8x6_SRGB_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X8_BIT}, // [VK_FORMAT_ASTC_8x8_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X8_BIT}, // [VK_FORMAT_ASTC_8x8_SRGB_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X5_BIT}, // [VK_FORMAT_ASTC_10x5_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X5_BIT}, // [VK_FORMAT_ASTC_10x5_SRGB_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X6_BIT}, // [VK_FORMAT_ASTC_10x6_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X6_BIT}, // [VK_FORMAT_ASTC_10x6_SRGB_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X8_BIT}, // [VK_FORMAT_ASTC_10x8_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X8_BIT}, // [VK_FORMAT_ASTC_10x8_SRGB_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X10_BIT}, // [VK_FORMAT_ASTC_10x10_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X10_BIT}, // [VK_FORMAT_ASTC_10x10_SRGB_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X10_BIT}, // [VK_FORMAT_ASTC_12x10_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X10_BIT}, // [VK_FORMAT_ASTC_12x10_SRGB_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X12_BIT}, // [VK_FORMAT_ASTC_12x12_UNORM_BLOCK]
+ {16, 4, VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X12_BIT}, // [VK_FORMAT_ASTC_12x12_SRGB_BLOCK]
};
// Return true if format is a depth or stencil format
-bool vk_format_is_depth_or_stencil(VkFormat format)
-{
- return (vk_format_is_depth_and_stencil(format) ||
- vk_format_is_depth_only(format) ||
- vk_format_is_stencil_only(format));
+bool vk_format_is_depth_or_stencil(VkFormat format) {
+ return (vk_format_is_depth_and_stencil(format) || vk_format_is_depth_only(format) || vk_format_is_stencil_only(format));
}
// Return true if format contains depth and stencil information
-bool vk_format_is_depth_and_stencil(VkFormat format)
-{
+bool vk_format_is_depth_and_stencil(VkFormat format) {
bool is_ds = false;
switch (format) {
@@ -254,14 +248,10 @@ bool vk_format_is_depth_and_stencil(VkFormat format)
}
// Return true if format is a stencil-only format
-bool vk_format_is_stencil_only(VkFormat format)
-{
- return (format == VK_FORMAT_S8_UINT);
-}
+bool vk_format_is_stencil_only(VkFormat format) { return (format == VK_FORMAT_S8_UINT); }
// Return true if format is a depth-only format
-bool vk_format_is_depth_only(VkFormat format)
-{
+bool vk_format_is_depth_only(VkFormat format) {
bool is_depth = false;
switch (format) {
@@ -278,8 +268,7 @@ bool vk_format_is_depth_only(VkFormat format)
}
// Return true if format is of type UNORM
-bool vk_format_is_norm(VkFormat format)
-{
+bool vk_format_is_norm(VkFormat format) {
bool is_norm = false;
switch (format) {
@@ -353,16 +342,11 @@ bool vk_format_is_norm(VkFormat format)
return is_norm;
};
-
// Return true if format is an integer format
-bool vk_format_is_int(VkFormat format)
-{
- return (vk_format_is_sint(format) || vk_format_is_uint(format));
-}
+bool vk_format_is_int(VkFormat format) { return (vk_format_is_sint(format) || vk_format_is_uint(format)); }
// Return true if format is an unsigned integer format
-bool vk_format_is_uint(VkFormat format)
-{
+bool vk_format_is_uint(VkFormat format) {
bool is_uint = false;
switch (format) {
@@ -397,8 +381,7 @@ bool vk_format_is_uint(VkFormat format)
}
// Return true if format is a signed integer format
-bool vk_format_is_sint(VkFormat format)
-{
+bool vk_format_is_sint(VkFormat format) {
bool is_sint = false;
switch (format) {
@@ -433,8 +416,7 @@ bool vk_format_is_sint(VkFormat format)
}
// Return true if format is a floating-point format
-bool vk_format_is_float(VkFormat format)
-{
+bool vk_format_is_float(VkFormat format) {
bool is_float = false;
switch (format) {
@@ -464,8 +446,7 @@ bool vk_format_is_float(VkFormat format)
}
// Return true if format is in the SRGB colorspace
-bool vk_format_is_srgb(VkFormat format)
-{
+bool vk_format_is_srgb(VkFormat format) {
bool is_srgb = false;
switch (format) {
@@ -507,8 +488,7 @@ bool vk_format_is_srgb(VkFormat format)
}
// Return true if format is compressed
-bool vk_format_is_compressed(VkFormat format)
-{
+bool vk_format_is_compressed(VkFormat format) {
switch (format) {
case VK_FORMAT_BC1_RGB_UNORM_BLOCK:
case VK_FORMAT_BC1_RGB_SRGB_BLOCK:
@@ -569,26 +549,16 @@ bool vk_format_is_compressed(VkFormat format)
}
// Return format class of the specified format
-VkFormatCompatibilityClass vk_format_get_compatibility_class(VkFormat format)
-{
- return vk_format_table[format].format_class;
-}
+VkFormatCompatibilityClass vk_format_get_compatibility_class(VkFormat format) { return vk_format_table[format].format_class; }
// Return size, in bytes, of a pixel of the specified format
-size_t vk_format_get_size(VkFormat format)
-{
- return vk_format_table[format].size;
-}
+size_t vk_format_get_size(VkFormat format) { return vk_format_table[format].size; }
// Return the number of channels for a given format
-unsigned int vk_format_get_channel_count(VkFormat format)
-{
- return vk_format_table[format].channel_count;
-}
+unsigned int vk_format_get_channel_count(VkFormat format) { return vk_format_table[format].channel_count; }
// Perform a zero-tolerant modulo operation
-VkDeviceSize vk_safe_modulo(VkDeviceSize dividend, VkDeviceSize divisor)
-{
+VkDeviceSize vk_safe_modulo(VkDeviceSize dividend, VkDeviceSize divisor) {
VkDeviceSize result = 0;
if (divisor != 0) {
result = dividend % divisor;
@@ -596,31 +566,28 @@ VkDeviceSize vk_safe_modulo(VkDeviceSize dividend, VkDeviceSize divisor)
return result;
}
-
-static const char UTF8_ONE_BYTE_CODE = 0xC0;
-static const char UTF8_ONE_BYTE_MASK = 0xE0;
-static const char UTF8_TWO_BYTE_CODE = 0xE0;
-static const char UTF8_TWO_BYTE_MASK = 0xF0;
+static const char UTF8_ONE_BYTE_CODE = 0xC0;
+static const char UTF8_ONE_BYTE_MASK = 0xE0;
+static const char UTF8_TWO_BYTE_CODE = 0xE0;
+static const char UTF8_TWO_BYTE_MASK = 0xF0;
static const char UTF8_THREE_BYTE_CODE = 0xF0;
static const char UTF8_THREE_BYTE_MASK = 0xF8;
-static const char UTF8_DATA_BYTE_CODE = 0x80;
-static const char UTF8_DATA_BYTE_MASK = 0xC0;
+static const char UTF8_DATA_BYTE_CODE = 0x80;
+static const char UTF8_DATA_BYTE_MASK = 0xC0;
-VkStringErrorFlags vk_string_validate(const int max_length, const char *utf8)
-{
+VkStringErrorFlags vk_string_validate(const int max_length, const char *utf8) {
VkStringErrorFlags result = VK_STRING_ERROR_NONE;
- int num_char_bytes;
- int i,j;
+ int num_char_bytes;
+ int i, j;
- for (i = 0; i < max_length; i++)
- {
+ for (i = 0; i < max_length; i++) {
if (utf8[i] == 0) {
break;
} else if ((utf8[i] >= 0x20) && (utf8[i] < 0x7f)) {
num_char_bytes = 0;
- } else if ((utf8[i] & UTF8_ONE_BYTE_MASK) == UTF8_ONE_BYTE_CODE) {
+ } else if ((utf8[i] & UTF8_ONE_BYTE_MASK) == UTF8_ONE_BYTE_CODE) {
num_char_bytes = 1;
- } else if ((utf8[i] & UTF8_TWO_BYTE_MASK) == UTF8_TWO_BYTE_CODE) {
+ } else if ((utf8[i] & UTF8_TWO_BYTE_MASK) == UTF8_TWO_BYTE_CODE) {
num_char_bytes = 2;
} else if ((utf8[i] & UTF8_THREE_BYTE_MASK) == UTF8_THREE_BYTE_CODE) {
num_char_bytes = 3;
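For uncompressed formats the first column of vk_format_table is bytes per texel and the second is the channel count; for block-compressed formats the byte count is per block. A few spot checks against the entries shown above (a sketch, not part of this change):

    #include <assert.h>
    #include "vk_layer_utils.h"

    static void format_table_spot_checks(void) {
        assert(vk_format_get_size(VK_FORMAT_R8G8B8A8_UNORM) == 4);               // {4, 4, ..._32_BIT}
        assert(vk_format_get_channel_count(VK_FORMAT_R5G6B5_UNORM_PACK16) == 3); // {2, 3, ..._16_BIT}
        assert(vk_format_get_compatibility_class(VK_FORMAT_R32G32_SFLOAT) == VK_FORMAT_COMPATIBILITY_CLASS_64_BIT);
        assert(vk_safe_modulo(10, 0) == 0); // a zero divisor yields 0 instead of faulting
    }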
diff --git a/layers/vk_layer_utils.h b/layers/vk_layer_utils.h
index e94798348..1b69e9c43 100644
--- a/layers/vk_layer_utils.h
+++ b/layers/vk_layer_utils.h
@@ -38,91 +38,86 @@ extern "C" {
#endif
typedef enum VkFormatCompatibilityClass {
- VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT = 0,
- VK_FORMAT_COMPATIBILITY_CLASS_8_BIT = 1,
- VK_FORMAT_COMPATIBILITY_CLASS_16_BIT = 2,
- VK_FORMAT_COMPATIBILITY_CLASS_24_BIT = 3,
- VK_FORMAT_COMPATIBILITY_CLASS_32_BIT = 4,
- VK_FORMAT_COMPATIBILITY_CLASS_48_BIT = 5,
- VK_FORMAT_COMPATIBILITY_CLASS_64_BIT = 6,
- VK_FORMAT_COMPATIBILITY_CLASS_96_BIT = 7,
- VK_FORMAT_COMPATIBILITY_CLASS_128_BIT = 8,
- VK_FORMAT_COMPATIBILITY_CLASS_192_BIT = 9,
- VK_FORMAT_COMPATIBILITY_CLASS_256_BIT = 10,
- VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGB_BIT = 11,
- VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGBA_BIT = 12,
- VK_FORMAT_COMPATIBILITY_CLASS_BC2_BIT = 13,
- VK_FORMAT_COMPATIBILITY_CLASS_BC3_BIT = 14,
- VK_FORMAT_COMPATIBILITY_CLASS_BC4_BIT = 15,
- VK_FORMAT_COMPATIBILITY_CLASS_BC5_BIT = 16,
- VK_FORMAT_COMPATIBILITY_CLASS_BC6H_BIT = 17,
- VK_FORMAT_COMPATIBILITY_CLASS_BC7_BIT = 18,
- VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGB_BIT = 19,
- VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGBA_BIT = 20,
+ VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT = 0,
+ VK_FORMAT_COMPATIBILITY_CLASS_8_BIT = 1,
+ VK_FORMAT_COMPATIBILITY_CLASS_16_BIT = 2,
+ VK_FORMAT_COMPATIBILITY_CLASS_24_BIT = 3,
+ VK_FORMAT_COMPATIBILITY_CLASS_32_BIT = 4,
+ VK_FORMAT_COMPATIBILITY_CLASS_48_BIT = 5,
+ VK_FORMAT_COMPATIBILITY_CLASS_64_BIT = 6,
+ VK_FORMAT_COMPATIBILITY_CLASS_96_BIT = 7,
+ VK_FORMAT_COMPATIBILITY_CLASS_128_BIT = 8,
+ VK_FORMAT_COMPATIBILITY_CLASS_192_BIT = 9,
+ VK_FORMAT_COMPATIBILITY_CLASS_256_BIT = 10,
+ VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGB_BIT = 11,
+ VK_FORMAT_COMPATIBILITY_CLASS_BC1_RGBA_BIT = 12,
+ VK_FORMAT_COMPATIBILITY_CLASS_BC2_BIT = 13,
+ VK_FORMAT_COMPATIBILITY_CLASS_BC3_BIT = 14,
+ VK_FORMAT_COMPATIBILITY_CLASS_BC4_BIT = 15,
+ VK_FORMAT_COMPATIBILITY_CLASS_BC5_BIT = 16,
+ VK_FORMAT_COMPATIBILITY_CLASS_BC6H_BIT = 17,
+ VK_FORMAT_COMPATIBILITY_CLASS_BC7_BIT = 18,
+ VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGB_BIT = 19,
+ VK_FORMAT_COMPATIBILITY_CLASS_ETC2_RGBA_BIT = 20,
VK_FORMAT_COMPATIBILITY_CLASS_ETC2_EAC_RGBA_BIT = 21,
- VK_FORMAT_COMPATIBILITY_CLASS_EAC_R_BIT = 22,
- VK_FORMAT_COMPATIBILITY_CLASS_EAC_RG_BIT = 23,
- VK_FORMAT_COMPATIBILITY_CLASS_ASTC_4X4_BIT = 24,
- VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X4_BIT = 25,
- VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X5_BIT = 26,
- VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X5_BIT = 27,
- VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X6_BIT = 28,
- VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X5_BIT = 29,
- VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X6_BIT = 20,
- VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X8_BIT = 31,
- VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X5_BIT = 32,
- VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X6_BIT = 33,
- VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X8_BIT = 34,
- VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X10_BIT = 35,
- VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X10_BIT = 36,
- VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X12_BIT = 37,
- VK_FORMAT_COMPATIBILITY_CLASS_D16_BIT = 38,
- VK_FORMAT_COMPATIBILITY_CLASS_D24_BIT = 39,
- VK_FORMAT_COMPATIBILITY_CLASS_D32_BIT = 30,
- VK_FORMAT_COMPATIBILITY_CLASS_S8_BIT = 41,
- VK_FORMAT_COMPATIBILITY_CLASS_D16S8_BIT = 42,
- VK_FORMAT_COMPATIBILITY_CLASS_D24S8_BIT = 43,
- VK_FORMAT_COMPATIBILITY_CLASS_D32S8_BIT = 44,
- VK_FORMAT_COMPATIBILITY_CLASS_MAX_ENUM = 45
+ VK_FORMAT_COMPATIBILITY_CLASS_EAC_R_BIT = 22,
+ VK_FORMAT_COMPATIBILITY_CLASS_EAC_RG_BIT = 23,
+ VK_FORMAT_COMPATIBILITY_CLASS_ASTC_4X4_BIT = 24,
+ VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X4_BIT = 25,
+ VK_FORMAT_COMPATIBILITY_CLASS_ASTC_5X5_BIT = 26,
+ VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X5_BIT = 27,
+ VK_FORMAT_COMPATIBILITY_CLASS_ASTC_6X6_BIT = 28,
+ VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X5_BIT = 29,
+ VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X6_BIT = 20,
+ VK_FORMAT_COMPATIBILITY_CLASS_ASTC_8X8_BIT = 31,
+ VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X5_BIT = 32,
+ VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X6_BIT = 33,
+ VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X8_BIT = 34,
+ VK_FORMAT_COMPATIBILITY_CLASS_ASTC_10X10_BIT = 35,
+ VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X10_BIT = 36,
+ VK_FORMAT_COMPATIBILITY_CLASS_ASTC_12X12_BIT = 37,
+ VK_FORMAT_COMPATIBILITY_CLASS_D16_BIT = 38,
+ VK_FORMAT_COMPATIBILITY_CLASS_D24_BIT = 39,
+ VK_FORMAT_COMPATIBILITY_CLASS_D32_BIT = 30,
+ VK_FORMAT_COMPATIBILITY_CLASS_S8_BIT = 41,
+ VK_FORMAT_COMPATIBILITY_CLASS_D16S8_BIT = 42,
+ VK_FORMAT_COMPATIBILITY_CLASS_D24S8_BIT = 43,
+ VK_FORMAT_COMPATIBILITY_CLASS_D32S8_BIT = 44,
+ VK_FORMAT_COMPATIBILITY_CLASS_MAX_ENUM = 45
} VkFormatCompatibilityClass;
typedef enum VkStringErrorFlagBits {
- VK_STRING_ERROR_NONE = 0x00000000,
- VK_STRING_ERROR_LENGTH = 0x00000001,
- VK_STRING_ERROR_BAD_DATA = 0x00000002,
+ VK_STRING_ERROR_NONE = 0x00000000,
+ VK_STRING_ERROR_LENGTH = 0x00000001,
+ VK_STRING_ERROR_BAD_DATA = 0x00000002,
} VkStringErrorFlagBits;
typedef VkFlags VkStringErrorFlags;
-static inline bool vk_format_is_undef(VkFormat format)
-{
- return (format == VK_FORMAT_UNDEFINED);
-}
+static inline bool vk_format_is_undef(VkFormat format) { return (format == VK_FORMAT_UNDEFINED); }
bool vk_format_is_depth_or_stencil(VkFormat format);
bool vk_format_is_depth_and_stencil(VkFormat format);
bool vk_format_is_depth_only(VkFormat format);
bool vk_format_is_stencil_only(VkFormat format);
-static inline bool vk_format_is_color(VkFormat format)
-{
+static inline bool vk_format_is_color(VkFormat format) {
return !(vk_format_is_undef(format) || vk_format_is_depth_or_stencil(format));
}
-bool vk_format_is_norm(VkFormat format);
-bool vk_format_is_int(VkFormat format);
-bool vk_format_is_sint(VkFormat format);
-bool vk_format_is_uint(VkFormat format);
-bool vk_format_is_float(VkFormat format);
-bool vk_format_is_srgb(VkFormat format);
-bool vk_format_is_compressed(VkFormat format);
-size_t vk_format_get_size(VkFormat format);
-unsigned int vk_format_get_channel_count(VkFormat format);
+bool vk_format_is_norm(VkFormat format);
+bool vk_format_is_int(VkFormat format);
+bool vk_format_is_sint(VkFormat format);
+bool vk_format_is_uint(VkFormat format);
+bool vk_format_is_float(VkFormat format);
+bool vk_format_is_srgb(VkFormat format);
+bool vk_format_is_compressed(VkFormat format);
+size_t vk_format_get_size(VkFormat format);
+unsigned int vk_format_get_channel_count(VkFormat format);
VkFormatCompatibilityClass vk_format_get_compatibility_class(VkFormat format);
-VkDeviceSize vk_safe_modulo(VkDeviceSize dividend, VkDeviceSize divisor);
-VkStringErrorFlags vk_string_validate(const int max_length, const char *char_array);
+VkDeviceSize vk_safe_modulo(VkDeviceSize dividend, VkDeviceSize divisor);
+VkStringErrorFlags vk_string_validate(const int max_length, const char *char_array);
-static inline int u_ffs(int val)
-{
+static inline int u_ffs(int val) {
#ifdef WIN32
unsigned long bit_pos = 0;
if (_BitScanForward(&bit_pos, val) != 0) {
@@ -137,5 +132,3 @@ static inline int u_ffs(int val)
#ifdef __cplusplus
}
#endif
-
-
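The format predicates declared in this header are meant to be composed by the layers; a small sketch of the kind of check they enable (illustrative only; the helper name is hypothetical):

    #include "vk_layer_utils.h"

    // Hypothetical helper: accept only formats of the requested aspect class.
    static bool format_matches_aspect_example(VkFormat fmt, bool want_color) {
        if (vk_format_is_undef(fmt))
            return false;
        return want_color ? vk_format_is_color(fmt) : vk_format_is_depth_or_stencil(fmt);
    }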