From a26154680ee443216d7346ea2f44a1205cc251f9 Mon Sep 17 00:00:00 2001
From: Karl Schultz
Date: Fri, 20 Jan 2017 13:19:20 -0700
Subject: [PATCH] build: Enable declaration hiding warning on Windows

Fixes #1388

Turn on the Windows compiler option (4456) to report hidden
declarations. Fix all places where this was occurring.

Change-Id: I3346d87da8b70d6299c206fcac68520a091ed1a6
---
 CMakeLists.txt             |  9 ++++++---
 demos/cube.c               | 35 ++++++++++++++++-------------------
 demos/cube.cpp             | 34 +++++++++++++++++-----------------
 demos/vulkaninfo.c         | 30 +++++++++++++++---------------
 layers/core_validation.cpp | 48 ++++++++++++++++++++++++------------------------
 layers/object_tracker.cpp  |  4 ++--
 layers/threading.h         | 16 ++++++++--------
 layers/unique_objects.cpp  |  1 -
 loader/loader.c            | 26 +++++++++++---------------
 9 files changed, 99 insertions(+), 104 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8b09cbc..77e365a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -57,9 +57,12 @@ if (CMAKE_COMPILER_IS_GNUCC OR CMAKE_C_COMPILER_ID MATCHES "Clang")
 endif()
 
 if(WIN32)
-    # Disable RTTI, Treat warnings as errors
-    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /WX")
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR- /WX")
+    # Treat warnings as errors
+    add_compile_options("$<$:/WX>")
+    # Disable RTTI
+    add_compile_options("$<$:/GR->")
+    # Warn about nested declarations
+    add_compile_options("$<$:/w34456>")
 endif()
 
 if(NOT WIN32)
diff --git a/demos/cube.c b/demos/cube.c
index 49fd9e7..a55c603 100644
--- a/demos/cube.c
+++ b/demos/cube.c
@@ -1420,7 +1420,6 @@ void demo_prepare_cube_data_buffers(struct demo *demo) {
     VkMemoryRequirements mem_reqs;
     VkMemoryAllocateInfo mem_alloc;
     uint8_t *pData;
-    int i;
     mat4x4 MVP, VP;
     VkResult U_ASSERT_ONLY err;
     bool U_ASSERT_ONLY pass;
@@ -1431,7 +1430,7 @@ void demo_prepare_cube_data_buffers(struct demo *demo) {
     memcpy(data.mvp, MVP, sizeof(MVP));
     // dumpMatrix("MVP", MVP);
 
-    for (i = 0; i < 12 * 3; i++) {
+    for (unsigned int i = 0; i < 12 * 3; i++) {
         data.position[i][0] = g_vertex_buffer_data[i * 3];
         data.position[i][1] = g_vertex_buffer_data[i * 3 + 1];
         data.position[i][2] = g_vertex_buffer_data[i * 3 + 2];
@@ -1846,7 +1845,6 @@ static void demo_prepare_descriptor_set(struct demo *demo) {
     VkDescriptorImageInfo tex_descs[DEMO_TEXTURE_COUNT];
     VkWriteDescriptorSet writes[2];
     VkResult U_ASSERT_ONLY err;
-    uint32_t i;
 
     VkDescriptorSetAllocateInfo alloc_info = {
         .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
@@ -1860,7 +1858,7 @@ static void demo_prepare_descriptor_set(struct demo *demo) {
     buffer_info.range = sizeof(struct vktexcube_vs_uniform);
 
     memset(&tex_descs, 0, sizeof(tex_descs));
-    for (i = 0; i < DEMO_TEXTURE_COUNT; i++) {
+    for (unsigned int i = 0; i < DEMO_TEXTURE_COUNT; i++) {
         tex_descs[i].sampler = demo->textures[i].sampler;
         tex_descs[i].imageView = demo->textures[i].view;
         tex_descs[i].imageLayout = VK_IMAGE_LAYOUT_GENERAL;
@@ -1961,16 +1959,16 @@ static void demo_prepare(struct demo *demo) {
     }
 
     if (demo->separate_present_queue) {
-        const VkCommandPoolCreateInfo cmd_pool_info = {
+        const VkCommandPoolCreateInfo present_cmd_pool_info = {
             .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
             .pNext = NULL,
             .queueFamilyIndex = demo->present_queue_family_index,
             .flags = 0,
         };
-        err = vkCreateCommandPool(demo->device, &cmd_pool_info, NULL,
+        err = vkCreateCommandPool(demo->device, &present_cmd_pool_info, NULL,
                                   &demo->present_cmd_pool);
         assert(!err);
-        const VkCommandBufferAllocateInfo cmd = {
+        const VkCommandBufferAllocateInfo present_cmd_info = {
             .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
             .pNext = NULL,
             .commandPool = demo->present_cmd_pool,
@@ -1979,7 +1977,7 @@ static void demo_prepare(struct demo *demo) {
         };
         for (uint32_t i = 0; i < demo->swapchainImageCount; i++) {
             err = vkAllocateCommandBuffers(
-                demo->device, &cmd, &demo->swapchain_image_resources[i].graphics_to_present_cmd);
+                demo->device, &present_cmd_info, &demo->swapchain_image_resources[i].graphics_to_present_cmd);
             assert(!err);
             demo_build_image_ownership_cmd(demo, i);
         }
@@ -2741,15 +2739,15 @@ static void demo_init_vk(struct demo *demo) {
      * After the instance is created, we use the instance-based
      * function to register the final callback.
      */
-    VkDebugReportCallbackCreateInfoEXT dbgCreateInfo;
+    VkDebugReportCallbackCreateInfoEXT dbgCreateInfoTemp;
     if (demo->validate) {
-        dbgCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
-        dbgCreateInfo.pNext = NULL;
-        dbgCreateInfo.pfnCallback = demo->use_break ? BreakCallback : dbgFunc;
-        dbgCreateInfo.pUserData = demo;
-        dbgCreateInfo.flags =
+        dbgCreateInfoTemp.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
+        dbgCreateInfoTemp.pNext = NULL;
+        dbgCreateInfoTemp.pfnCallback = demo->use_break ? BreakCallback : dbgFunc;
+        dbgCreateInfoTemp.pUserData = demo;
+        dbgCreateInfoTemp.flags =
             VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
-        inst_info.pNext = &dbgCreateInfo;
+        inst_info.pNext = &dbgCreateInfoTemp;
     }
 
     uint32_t gpu_count;
@@ -2942,7 +2940,6 @@ static void demo_create_device(struct demo *demo) {
 
 static void demo_init_vk_swapchain(struct demo *demo) {
     VkResult U_ASSERT_ONLY err;
-    uint32_t i;
 
     // Create a WSI surface for the window:
 #if defined(VK_USE_PLATFORM_WIN32_KHR)
@@ -2999,7 +2996,7 @@ static void demo_init_vk_swapchain(struct demo *demo) {
     // Iterate over each queue to learn whether it supports presenting:
     VkBool32 *supportsPresent =
         (VkBool32 *)malloc(demo->queue_family_count * sizeof(VkBool32));
-    for (i = 0; i < demo->queue_family_count; i++) {
+    for (uint32_t i = 0; i < demo->queue_family_count; i++) {
         demo->fpGetPhysicalDeviceSurfaceSupportKHR(demo->gpu, i, demo->surface,
                                                    &supportsPresent[i]);
     }
@@ -3008,7 +3005,7 @@
     // Search for a graphics and a present queue in the array of queue
     // families, try to find one that supports both
     uint32_t graphicsQueueFamilyIndex = UINT32_MAX;
     uint32_t presentQueueFamilyIndex = UINT32_MAX;
-    for (i = 0; i < demo->queue_family_count; i++) {
+    for (uint32_t i = 0; i < demo->queue_family_count; i++) {
         if ((demo->queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0) {
             if (graphicsQueueFamilyIndex == UINT32_MAX) {
                 graphicsQueueFamilyIndex = i;
@@ -3025,7 +3022,7 @@
     if (presentQueueFamilyIndex == UINT32_MAX) {
         // If didn't find a queue that supports both graphics and present, then
         // find a separate present queue.
-        for (i = 0; i < demo->queue_family_count; ++i) {
+        for (uint32_t i = 0; i < demo->queue_family_count; ++i) {
             if (supportsPresent[i] == VK_TRUE) {
                 presentQueueFamilyIndex = i;
                 break;
diff --git a/demos/cube.cpp b/demos/cube.cpp
index f607bc0..2042543 100644
--- a/demos/cube.cpp
+++ b/demos/cube.cpp
@@ -479,16 +479,16 @@ struct Demo {
             // present queue before presenting, waiting for the draw complete
             // semaphore and signalling the ownership released semaphore when
            // finished
-            auto const submit_info = vk::SubmitInfo()
-                                         .setPWaitDstStageMask(&pipe_stage_flags)
-                                         .setWaitSemaphoreCount(1)
-                                         .setPWaitSemaphores(&draw_complete_semaphores[frame_index])
-                                         .setCommandBufferCount(1)
-                                         .setPCommandBuffers(&buffers[current_buffer].graphics_to_present_cmd)
-                                         .setSignalSemaphoreCount(1)
-                                         .setPSignalSemaphores(&image_ownership_semaphores[frame_index]);
-
-            result = present_queue.submit(1, &submit_info, vk::Fence());
+            auto const present_submit_info = vk::SubmitInfo()
+                                                 .setPWaitDstStageMask(&pipe_stage_flags)
+                                                 .setWaitSemaphoreCount(1)
+                                                 .setPWaitSemaphores(&draw_complete_semaphores[frame_index])
+                                                 .setCommandBufferCount(1)
+                                                 .setPCommandBuffers(&buffers[current_buffer].graphics_to_present_cmd)
+                                                 .setSignalSemaphoreCount(1)
+                                                 .setPSignalSemaphores(&image_ownership_semaphores[frame_index]);
+
+            result = present_queue.submit(1, &present_submit_info, vk::Fence());
             VERIFY(result == vk::Result::eSuccess);
         }
 
@@ -1153,18 +1153,18 @@ struct Demo {
         }
 
         if (separate_present_queue) {
-            auto const cmd_pool_info = vk::CommandPoolCreateInfo().setQueueFamilyIndex(present_queue_family_index);
+            auto const present_cmd_pool_info = vk::CommandPoolCreateInfo().setQueueFamilyIndex(present_queue_family_index);
 
-            result = device.createCommandPool(&cmd_pool_info, nullptr, &present_cmd_pool);
+            result = device.createCommandPool(&present_cmd_pool_info, nullptr, &present_cmd_pool);
             VERIFY(result == vk::Result::eSuccess);
 
-            auto const cmd = vk::CommandBufferAllocateInfo()
-                                 .setCommandPool(present_cmd_pool)
-                                 .setLevel(vk::CommandBufferLevel::ePrimary)
-                                 .setCommandBufferCount(1);
+            auto const present_cmd = vk::CommandBufferAllocateInfo()
+                                         .setCommandPool(present_cmd_pool)
+                                         .setLevel(vk::CommandBufferLevel::ePrimary)
+                                         .setCommandBufferCount(1);
 
             for (uint32_t i = 0; i < swapchainImageCount; i++) {
-                result = device.allocateCommandBuffers(&cmd, &buffers[i].graphics_to_present_cmd);
+                result = device.allocateCommandBuffers(&present_cmd, &buffers[i].graphics_to_present_cmd);
                 VERIFY(result == vk::Result::eSuccess);
 
                 build_image_ownership_cmd(i);
diff --git a/demos/vulkaninfo.c b/demos/vulkaninfo.c
index f103eaa..be81c6f 100644
--- a/demos/vulkaninfo.c
+++ b/demos/vulkaninfo.c
@@ -1395,10 +1395,10 @@ static void ConsoleEnlarge() {
 #endif
 
 int main(int argc, char **argv) {
-    unsigned int major, minor, patch;
+    uint32_t vulkan_major, vulkan_minor, vulkan_patch;
     struct AppGpu *gpus;
     VkPhysicalDevice *objs;
-    uint32_t gpu_count, i;
+    uint32_t gpu_count;
     VkResult err;
     struct AppInstance inst;
 
@@ -1406,14 +1406,14 @@ int main(int argc, char **argv) {
     if (ConsoleIsExclusive()) ConsoleEnlarge();
 #endif
 
-    major = VK_VERSION_MAJOR(VK_API_VERSION_1_0);
-    minor = VK_VERSION_MINOR(VK_API_VERSION_1_0);
-    patch = VK_VERSION_PATCH(VK_HEADER_VERSION);
+    vulkan_major = VK_VERSION_MAJOR(VK_API_VERSION_1_0);
+    vulkan_minor = VK_VERSION_MINOR(VK_API_VERSION_1_0);
+    vulkan_patch = VK_VERSION_PATCH(VK_HEADER_VERSION);
 
     printf("===========\n");
     printf("VULKAN INFO\n");
     printf("===========\n\n");
 
-    printf("Vulkan API Version: %d.%d.%d\n\n", major, minor, patch);
+    printf("Vulkan API Version: %d.%d.%d\n\n", vulkan_major, vulkan_minor, vulkan_patch);
 
     AppCreateInstance(&inst);
@@ -1430,7 +1430,7 @@ int main(int argc, char **argv) {
     gpus = malloc(sizeof(gpus[0]) * gpu_count);
     if (!gpus) ERR_EXIT(VK_ERROR_OUT_OF_HOST_MEMORY);
 
-    for (i = 0; i < gpu_count; i++) {
+    for (uint32_t i = 0; i < gpu_count; i++) {
         AppGpuInit(&gpus[i], i, objs[i]);
         printf("\n\n");
     }
@@ -1439,12 +1439,12 @@ int main(int argc, char **argv) {
     printf("Layers: count = %d\n", inst.global_layer_count);
     printf("=======\n");
     for (uint32_t i = 0; i < inst.global_layer_count; i++) {
-        uint32_t major, minor, patch;
+        uint32_t layer_major, layer_minor, layer_patch;
         char spec_version[64], layer_version[64];
         VkLayerProperties const *layer_prop = &inst.global_layers[i].layer_properties;
 
-        ExtractVersion(layer_prop->specVersion, &major, &minor, &patch);
-        snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", major, minor, patch);
+        ExtractVersion(layer_prop->specVersion, &layer_major, &layer_minor, &layer_patch);
+        snprintf(spec_version, sizeof(spec_version), "%d.%d.%d", layer_major, layer_minor, layer_patch);
         snprintf(layer_version, sizeof(layer_version), "%d", layer_prop->implementationVersion);
         printf("%s (%s) Vulkan version %s, layer version %s\n", layer_prop->layerName, (char *)layer_prop->description,
                spec_version, layer_version);
@@ -1484,7 +1484,7 @@ int main(int argc, char **argv) {
 #ifdef VK_USE_PLATFORM_WIN32_KHR
     if (HasExtension(VK_KHR_WIN32_SURFACE_EXTENSION_NAME, inst.global_extension_count, inst.global_extensions)) {
         AppCreateWin32Window(&inst);
-        for (i = 0; i < gpu_count; i++) {
+        for (uint32_t i = 0; i < gpu_count; i++) {
             AppCreateWin32Surface(&inst);
             printf("GPU id : %u (%s)\n", i, gpus[i].props.deviceName);
             printf("Surface type : %s\n", VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
@@ -1498,7 +1498,7 @@ int main(int argc, char **argv) {
 #elif VK_USE_PLATFORM_XCB_KHR
     if (HasExtension(VK_KHR_XCB_SURFACE_EXTENSION_NAME, inst.global_extension_count, inst.global_extensions)) {
         AppCreateXcbWindow(&inst);
-        for (i = 0; i < gpu_count; i++) {
+        for (uint32_t i = 0; i < gpu_count; i++) {
             AppCreateXcbSurface(&inst);
             printf("GPU id : %u (%s)\n", i, gpus[i].props.deviceName);
             printf("Surface type : %s\n", VK_KHR_XCB_SURFACE_EXTENSION_NAME);
@@ -1512,7 +1512,7 @@ int main(int argc, char **argv) {
 #elif VK_USE_PLATFORM_XLIB_KHR
     if (HasExtension(VK_KHR_XLIB_SURFACE_EXTENSION_NAME, inst.global_extension_count, inst.global_extensions)) {
         AppCreateXlibWindow(&inst);
-        for (i = 0; i < gpu_count; i++) {
+        for (uint32_t i = 0; i < gpu_count; i++) {
             AppCreateXlibSurface(&inst);
             printf("GPU id : %u (%s)\n", i, gpus[i].props.deviceName);
             printf("Surface type : %s\n", VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
@@ -1527,12 +1527,12 @@ int main(int argc, char **argv) {
     if (!format_count && !present_mode_count) printf("None found\n");
     //---------
 
-    for (i = 0; i < gpu_count; i++) {
+    for (uint32_t i = 0; i < gpu_count; i++) {
         AppGpuDump(&gpus[i]);
         printf("\n\n");
     }
 
-    for (i = 0; i < gpu_count; i++) AppGpuDestroy(&gpus[i]);
+    for (uint32_t i = 0; i < gpu_count; i++) AppGpuDestroy(&gpus[i]);
     free(gpus);
     free(objs);
diff --git a/layers/core_validation.cpp b/layers/core_validation.cpp
index 4ed0803..d342afa 100644
--- a/layers/core_validation.cpp
+++ b/layers/core_validation.cpp
@@ -7621,49 +7621,50 @@ VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
         }
         auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
         auto pipeline_layout = getPipelineLayout(dev_data, layout);
-        for (uint32_t i = 0; i < setCount; i++) {
-            cvdescriptorset::DescriptorSet *descriptor_set = getSetNode(dev_data, pDescriptorSets[i]);
+        for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
+            cvdescriptorset::DescriptorSet *descriptor_set = getSetNode(dev_data, pDescriptorSets[set_idx]);
             if (descriptor_set) {
                 pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
-                pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = descriptor_set;
+                pCB->lastBound[pipelineBindPoint].boundDescriptorSets[set_idx + firstSet] = descriptor_set;
                 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
-                                     VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
-                                     DRAWSTATE_NONE, "DS", "Descriptor Set 0x%" PRIxLEAST64 " bound on pipeline %s",
-                                     (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
+                                     VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
+                                     __LINE__, DRAWSTATE_NONE, "DS", "Descriptor Set 0x%" PRIxLEAST64 " bound on pipeline %s",
+                                     (uint64_t)pDescriptorSets[set_idx], string_VkPipelineBindPoint(pipelineBindPoint));
                 if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
-                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
-                                         DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
+                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
+                                         __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                          "Descriptor Set 0x%" PRIxLEAST64
                                          " bound but it was never updated. You may want to either update it or not bind it.",
-                                         (uint64_t)pDescriptorSets[i]);
+                                         (uint64_t)pDescriptorSets[set_idx]);
                 }
                 // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
-                if (!verify_set_layout_compatibility(dev_data, descriptor_set, pipeline_layout, i + firstSet, errorString)) {
+                if (!verify_set_layout_compatibility(dev_data, descriptor_set, pipeline_layout, set_idx + firstSet,
+                                                     errorString)) {
                     skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
-                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
-                                         VALIDATION_ERROR_00974, "DS",
+                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
+                                         __LINE__, VALIDATION_ERROR_00974, "DS",
                                          "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
                                          "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s. %s",
-                                         i, i + firstSet, reinterpret_cast(layout), errorString.c_str(),
+                                         set_idx, set_idx + firstSet, reinterpret_cast(layout), errorString.c_str(),
                                          validation_error_map[VALIDATION_ERROR_00974]);
                 }
                 auto setDynamicDescriptorCount = descriptor_set->GetDynamicDescriptorCount();
-                pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
+                pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx].clear();
                 if (setDynamicDescriptorCount) {
                     // First make sure we won't overstep bounds of pDynamicOffsets array
                     if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
                         skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
-                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
-                                             DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
+                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
+                                             __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                                              "descriptorSet #%u (0x%" PRIxLEAST64
                                              ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
                                              "array. There must be one dynamic offset for each dynamic descriptor being bound.",
-                                             i, (uint64_t)pDescriptorSets[i], descriptor_set->GetDynamicDescriptorCount(),
+                                             set_idx, (uint64_t)pDescriptorSets[set_idx], descriptor_set->GetDynamicDescriptorCount(),
                                              (dynamicOffsetCount - totalDynamicDescriptors));
                     } else { // Validate and store dynamic offsets with the set
                         // Validate Dynamic Offset Minimums
@@ -7702,7 +7703,7 @@ VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                             }
                         }
 
-                        pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
+                        pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + set_idx] =
                             std::vector(pDynamicOffsets + totalDynamicDescriptors,
                                         pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
                         // Keep running total of dynamic descriptor count to verify at the end
@@ -7710,10 +7711,11 @@ VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer,
                     }
                 }
             } else {
-                skip_call |= log_msg(
-                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
-                    (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_INVALID_SET, "DS",
-                    "Attempt to bind descriptor set 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)pDescriptorSets[i]);
+                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
+                                     VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[set_idx],
+                                     __LINE__, DRAWSTATE_INVALID_SET, "DS",
+                                     "Attempt to bind descriptor set 0x%" PRIxLEAST64 " that doesn't exist!",
+                                     (uint64_t)pDescriptorSets[set_idx]);
             }
             skip_call |= ValidateCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
             UpdateCmdBufferLastCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS);
@@ -9277,7 +9279,6 @@ static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, ui
                                  "PREINITIALIZED.",
                                  funcName);
         }
-        auto image_data = getImageState(dev_data, mem_barrier->image);
         VkFormat format = VK_FORMAT_UNDEFINED;
         uint32_t arrayLayers = 0, mipLevels = 0;
         bool imageFound = false;
@@ -10139,7 +10140,6 @@ static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateI
     }
     // If the attachment was written to by a previous node than this node needs to preserve it.
     if (result && depth > 0) {
-        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
         bool has_preserved = false;
         for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
             if (subpass.pPreserveAttachments[j] == attachment) {
diff --git a/layers/object_tracker.cpp b/layers/object_tracker.cpp
index 6b854be..f4acbe3 100644
--- a/layers/object_tracker.cpp
+++ b/layers/object_tracker.cpp
@@ -4338,7 +4338,7 @@ VKAPI_ATTR VkResult VKAPI_CALL RegisterDeviceEventEXT(VkDevice device, const VkD
     if (dev_data->dispatch_table.RegisterDeviceEventEXT) {
         result = dev_data->dispatch_table.RegisterDeviceEventEXT(device, pDeviceEventInfo, pAllocator, pFence);
         if (result == VK_SUCCESS && pFence != NULL) {
-            std::lock_guard lock(global_lock);
+            std::lock_guard create_lock(global_lock);
             CreateObject(device, *pFence, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, pAllocator);
         }
     }
@@ -4360,7 +4360,7 @@ VKAPI_ATTR VkResult VKAPI_CALL RegisterDisplayEventEXT(VkDevice device, VkDispla
     if (dev_data->dispatch_table.RegisterDisplayEventEXT) {
         result = dev_data->dispatch_table.RegisterDisplayEventEXT(device, display, pDisplayEventInfo, pAllocator, pFence);
         if (result == VK_SUCCESS && pFence != NULL) {
-            std::lock_guard lock(global_lock);
+            std::lock_guard create_lock(global_lock);
             CreateObject(device, *pFence, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, pAllocator);
         }
     }
diff --git a/layers/threading.h b/layers/threading.h
index 481acd4..67bb0d6 100644
--- a/layers/threading.h
+++ b/layers/threading.h
@@ -101,10 +101,10 @@ class counter {
                 counter_condition.wait(lock);
             }
             // There is now no current use of the object. Record writer thread.
-            struct object_use_data *use_data = &uses[object];
-            use_data->thread = tid;
-            use_data->reader_count = 0;
-            use_data->writer_count = 1;
+            struct object_use_data *new_use_data = &uses[object];
+            new_use_data->thread = tid;
+            new_use_data->reader_count = 0;
+            new_use_data->writer_count = 1;
         } else {
             // Continue with an unsafe use of the object.
             use_data->thread = tid;
@@ -128,10 +128,10 @@ class counter {
                 counter_condition.wait(lock);
             }
             // There is now no current use of the object. Record writer thread.
-            struct object_use_data *use_data = &uses[object];
-            use_data->thread = tid;
-            use_data->reader_count = 0;
-            use_data->writer_count = 1;
+            struct object_use_data *new_use_data = &uses[object];
+            new_use_data->thread = tid;
+            new_use_data->reader_count = 0;
+            new_use_data->writer_count = 1;
         } else {
             // Continue with an unsafe use of the object.
             use_data->thread = tid;
diff --git a/layers/unique_objects.cpp b/layers/unique_objects.cpp
index f973ec5..dfd35f2 100644
--- a/layers/unique_objects.cpp
+++ b/layers/unique_objects.cpp
@@ -103,7 +103,6 @@ static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateI
 #endif
 
         // Check for recognized instance extensions
-        layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
         if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kUniqueObjectsSupportedInstanceExtensions)) {
             log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                     __LINE__, VALIDATION_ERROR_UNDEFINED, "UniqueObjects",
diff --git a/loader/loader.c b/loader/loader.c
index 9c43525..2875af1 100644
--- a/loader/loader.c
+++ b/loader/loader.c
@@ -4858,7 +4858,6 @@ VkResult setupLoaderTermPhysDevs(struct loader_instance *inst) {
    struct loader_icd_term *icd_term;
    struct loader_phys_dev_per_icd *icd_phys_dev_array = NULL;
    struct loader_physical_device_term **new_phys_devs = NULL;
-    uint32_t i = 0;
 
    inst->total_gpu_count = 0;
 
@@ -4879,38 +4878,35 @@ VkResult setupLoaderTermPhysDevs(struct loader_instance *inst) {
 
     // For each ICD, query the number of physical devices, and then get an
     // internal value for those physical devices.
-    while (NULL != icd_term) {
-        res = icd_term->EnumeratePhysicalDevices(icd_term->instance, &icd_phys_dev_array[i].count, NULL);
+    for (uint32_t icd_idx = 0; NULL != icd_term; icd_term = icd_term->next, icd_idx++) {
+        res = icd_term->EnumeratePhysicalDevices(icd_term->instance, &icd_phys_dev_array[icd_idx].count, NULL);
         if (VK_SUCCESS != res) {
             loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                        "setupLoaderTermPhysDevs: Call to "
                        "ICD %d's \'vkEnumeratePhysicalDevices\' failed with"
                        " error 0x%08x",
-                       i, res);
+                       icd_idx, res);
             goto out;
         }
 
-        icd_phys_dev_array[i].phys_devs =
-            (VkPhysicalDevice *)loader_stack_alloc(icd_phys_dev_array[i].count * sizeof(VkPhysicalDevice));
-        if (NULL == icd_phys_dev_array[i].phys_devs) {
+        icd_phys_dev_array[icd_idx].phys_devs =
+            (VkPhysicalDevice *)loader_stack_alloc(icd_phys_dev_array[icd_idx].count * sizeof(VkPhysicalDevice));
+        if (NULL == icd_phys_dev_array[icd_idx].phys_devs) {
             loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
                        "setupLoaderTermPhysDevs: Failed to allocate temporary "
                        "ICD Physical device array for ICD %d of size %d",
-                       i, inst->total_gpu_count);
+                       icd_idx, inst->total_gpu_count);
             res = VK_ERROR_OUT_OF_HOST_MEMORY;
             goto out;
         }
 
-        res =
-            icd_term->EnumeratePhysicalDevices(icd_term->instance, &(icd_phys_dev_array[i].count), icd_phys_dev_array[i].phys_devs);
+        res = icd_term->EnumeratePhysicalDevices(icd_term->instance, &(icd_phys_dev_array[icd_idx].count),
+                                                 icd_phys_dev_array[icd_idx].phys_devs);
         if (VK_SUCCESS != res) {
             goto out;
         }
 
-        inst->total_gpu_count += icd_phys_dev_array[i].count;
-        icd_phys_dev_array[i].this_icd_term = icd_term;
-
-        icd_term = icd_term->next;
-        i++;
+        inst->total_gpu_count += icd_phys_dev_array[icd_idx].count;
+        icd_phys_dev_array[icd_idx].this_icd_term = icd_term;
     }
 
     if (0 == inst->total_gpu_count) {
--
2.7.4
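
Reviewer note (illustration only, not part of the patch): MSVC warning C4456, "declaration of 'identifier' hides previous local declaration", normally sits above the default /W3 warning level, so it is silent in a stock build. The new /w34456 option lowers it to level 3, and the existing /WX option then turns any occurrence into a build error, which is why every shadowed local in the tree is renamed or has its scope narrowed in this change. The sketch below assumes a compile line of "cl /W3 /w34456 /WX shadow_example.c" and shows the pattern the warning reports together with the rename fix applied throughout this patch.

    /* shadow_example.c - illustrative only; mirrors the rename pattern used in this patch. */
    #include <stdio.h>

    static int sum_rows(const int rows[2][3]) {
        int total = 0;
        for (int i = 0; i < 2; i++) {
            int row_total = 0;
            /* A nested redeclaration such as
             *     for (int i = 0; i < 3; i++) { ... }
             * would hide the outer 'i' and trigger C4456 (an error under /WX).
             * The fix, as in cube.c, core_validation.cpp, and loader.c, is to give
             * the inner variable its own name (set_idx, icd_idx, ...) or to drop
             * the now-redundant outer declaration. */
            for (int col = 0; col < 3; col++) {
                row_total += rows[i][col];
            }
            total += row_total;
        }
        return total;
    }

    int main(void) {
        const int rows[2][3] = {{1, 2, 3}, {4, 5, 6}};
        printf("%d\n", sum_rows(rows)); /* prints 21 */
        return 0;
    }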