2 * Copyright (c) 2015-2020 The Khronos Group Inc.
3 * Copyright (c) 2015-2020 Valve Corporation
4 * Copyright (c) 2015-2020 LunarG, Inc.
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
18 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
19 * Author: David Pinedo <david@lunarg.com>
20 * Author: Mark Lobodzinski <mark@lunarg.com>
21 * Author: Rene Lindsay <rene@lunarg.com>
22 * Author: Jeremy Kniager <jeremyk@lunarg.com>
23 * Author: Shannon McPherson <shannon@lunarg.com>
24 * Author: Bob Ellison <bob@lunarg.com>
25 * Author: Charles Giessen <charles@lunarg.com>
29 #include "vulkaninfo.hpp"
32 // Initialize User32 pointers
// Static storage definitions for the user32.dll entry points declared in
// User32Handles. All start as nullptr and are resolved at runtime (see the
// LoadUser32Dll call in main) so the tool does not hard-link against user32.
33 PFN_AdjustWindowRect User32Handles::pfnAdjustWindowRect = nullptr;
34 PFN_CreateWindowExA User32Handles::pfnCreateWindowExA = nullptr;
35 PFN_DefWindowProcA User32Handles::pfnDefWindowProcA = nullptr;
36 PFN_DestroyWindow User32Handles::pfnDestroyWindow = nullptr;
37 PFN_LoadIconA User32Handles::pfnLoadIconA = nullptr;
38 PFN_RegisterClassExA User32Handles::pfnRegisterClassExA = nullptr;
// Module handle for the dynamically loaded user32.dll; nullptr until loaded.
40 HMODULE User32Handles::user32DllHandle = nullptr;
44 // =========== Dump Functions ========= //
// Prints "<layer_name> Extensions" followed by every extension's name and
// spec version, sorted alphabetically and column-aligned. `extensions` is
// taken by value so the sort below does not reorder the caller's vector.
46 void DumpExtensions(Printer &p, std::string layer_name, std::vector<VkExtensionProperties> extensions) {
// Sort by extension name for deterministic, readable output.
47 std::sort(extensions.begin(), extensions.end(), [](VkExtensionProperties &a, VkExtensionProperties &b) -> int {
48 return std::string(a.extensionName) < std::string(b.extensionName);
// Find the longest extension name so PrintExtension can align the versions.
52 if (extensions.size() > 0) {
53 max_length = static_cast<int>(strlen(extensions.at(0).extensionName));
54 for (auto &ext : extensions) {
55 int len = static_cast<int>(strlen(ext.extensionName));
56 if (len > max_length) max_length = len;
60 ObjectWrapper obj(p, layer_name + " Extensions", extensions.size());
61 for (auto &ext : extensions) {
62 p.PrintExtension(ext.extensionName, ext.specVersion, max_length);
// Prints every instance layer, its own extensions, and the device-level
// extensions it exposes per GPU. The layout depends on the printer's output
// type: text/html nest devices under each layer, json emits a flat
// ArrayOfVkLayerProperties, and vkconfig_output emits one keyed object per
// layer. Layers are sorted by name first; `layers` is taken by value so the
// caller's vector is untouched.
66 void DumpLayers(Printer &p, std::vector<LayerExtensionList> layers, const std::vector<std::unique_ptr<AppGpu>> &gpus) {
// Sort layers alphabetically by layerName (null-safe: non-null sorts first).
67 std::sort(layers.begin(), layers.end(), [](LayerExtensionList &left, LayerExtensionList &right) -> int {
68 const char *a = left.layer_properties.layerName;
69 const char *b = right.layer_properties.layerName;
70 return a && (!b || std::strcmp(a, b) < 0);
// Human-readable outputs: one decorated block per layer.
73 case OutputType::text:
74 case OutputType::html: {
76 ArrayWrapper arr(p, "Layers", layers.size());
78 for (auto &layer : layers) {
79 auto v_str = VkVersionString(layer.layer_properties.specVersion);
80 auto props = layer.layer_properties;
82 std::string header = p.DecorateAsType(props.layerName) + " (" + props.description + ") Vulkan version " +
83 p.DecorateAsValue(v_str) + ", layer version " +
84 p.DecorateAsValue(std::to_string(props.implementationVersion));
85 ObjectWrapper obj(p, header);
86 DumpExtensions(p, "Layer", layer.extension_properties);
// For each GPU, list the device extensions this layer implements.
88 ArrayWrapper arr(p, "Devices", gpus.size());
89 for (auto &gpu : gpus) {
90 p.PrintKeyValue("GPU id", gpu->id, 0, gpu->props.deviceName);
91 auto exts = gpu->AppGetPhysicalDeviceLayerExtensions(props.layerName);
92 DumpExtensions(p, "Layer-Device", exts);
// Machine-readable json output: flat array of VkLayerProperties.
100 case OutputType::json: {
101 ArrayWrapper arr(p, "ArrayOfVkLayerProperties", layers.size());
103 for (auto &layer : layers) {
104 p.SetElementIndex(i++);
105 DumpVkLayerProperties(p, "layerProperty", layer.layer_properties);
// Output consumed by the Vulkan Configurator tool.
109 case OutputType::vkconfig_output: {
110 ObjectWrapper obj(p, "Layer Properties");
111 for (auto &layer : layers) {
112 ObjectWrapper obj_name(p, layer.layer_properties.layerName);
113 p.PrintKeyString("layerName", layer.layer_properties.layerName, 21);
114 p.PrintKeyString("version", VkVersionString(layer.layer_properties.specVersion), 21);
115 p.PrintKeyValue("implementation version", layer.layer_properties.implementationVersion, 21);
116 p.PrintKeyString("description", layer.layer_properties.description, 21);
117 DumpExtensions(p, "Layer", layer.extension_properties);
118 ObjectWrapper obj_devices(p, "Devices");
119 for (auto &gpu : gpus) {
120 ObjectWrapper obj(p, gpu->props.deviceName);
121 p.PrintKeyValue("GPU id", gpu->id, 0, gpu->props.deviceName);
122 auto exts = gpu->AppGetPhysicalDeviceLayerExtensions(layer.layer_properties.layerName);
123 DumpExtensions(p, "Layer-Device", exts);
// Prints all VkSurfaceFormatKHR entries supported for the given surface.
// Prefers the data queried through VK_KHR_get_surface_capabilities2
// (surf_formats2) when that instance extension was enabled, otherwise falls
// back to the core vkGetPhysicalDeviceSurfaceFormatsKHR results.
131 void DumpSurfaceFormats(Printer &p, AppInstance &inst, AppSurface &surface) {
132 std::vector<VkSurfaceFormatKHR> formats;
133 if (inst.CheckExtensionEnabled(VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME)) {
// Unwrap VkSurfaceFormat2KHR down to the plain VkSurfaceFormatKHR it carries.
134 for (auto &format : surface.surf_formats2) {
135 formats.push_back(format.surfaceFormat);
138 for (auto &format : surface.surf_formats) {
139 formats.push_back(format);
142 ObjectWrapper obj(p, "Formats", formats.size());
144 for (auto &format : formats) {
145 p.SetElementIndex(i++);
146 DumpVkSurfaceFormatKHR(p, "SurfaceFormat", format);
// Prints the list of present modes (VkPresentModeKHR) the surface supports,
// one stringified entry per mode.
150 void DumpPresentModes(Printer &p, AppSurface &surface) {
151 ArrayWrapper arr(p, "Present Modes", surface.surf_present_modes.size());
152 for (auto &mode : surface.surf_present_modes) {
153 p.SetAsType().PrintString(VkPresentModeKHRString(mode));
// Prints the surface's capabilities: the core VkSurfaceCapabilitiesKHR,
// the EXT surface-counter info when VK_EXT_display_surface_counter is
// enabled, and the VkSurfaceCapabilities2KHR pNext chain when
// VK_KHR_get_surface_capabilities2 is enabled.
157 void DumpSurfaceCapabilities(Printer &p, AppInstance &inst, AppGpu &gpu, AppSurface &surface) {
158 auto &surf_cap = surface.surface_capabilities;
160 DumpVkSurfaceCapabilitiesKHR(p, "VkSurfaceCapabilitiesKHR", surf_cap);
162 if (inst.CheckExtensionEnabled(VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME)) {
164 ObjectWrapper obj(p, "VkSurfaceCapabilities2EXT");
166 ArrayWrapper arr(p, "supportedSurfaceCounters");
// VBLANK is the only counter bit defined by the extension; print "None"
// when the mask is empty.
167 if (surface.surface_capabilities2_ext.supportedSurfaceCounters == 0) p.PrintString("None");
168 if (surface.surface_capabilities2_ext.supportedSurfaceCounters & VK_SURFACE_COUNTER_VBLANK_EXT) {
169 p.SetAsType().PrintString("VK_SURFACE_COUNTER_VBLANK_EXT");
// Walk whatever extension structs were chained onto surface_capabilities2_khr.
173 if (inst.CheckExtensionEnabled(VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME)) {
174 chain_iterator_surface_capabilities2(p, inst, gpu, surface.surface_capabilities2_khr.pNext, inst.vk_version);
// Prints one surface's full report for a single GPU: the surface type(s)
// that produced identical results, then formats, present modes, and
// capabilities. `surface_types` holds the names of all windowing-system
// extensions whose surfaces were deduplicated into this one entry.
178 void DumpSurface(Printer &p, AppInstance &inst, AppGpu &gpu, AppSurface &surface, std::set<std::string> surface_types) {
179 ObjectWrapper obj(p, std::string("GPU id : ") + p.DecorateAsValue(std::to_string(gpu.id)) + " (" + gpu.props.deviceName + ")");
181 if (surface_types.size() == 0) {
182 p.SetAsType().PrintKeyString("Surface type", "No type found");
183 } else if (surface_types.size() == 1) {
184 p.SetAsType().PrintKeyString("Surface type", surface.surface_extension.name);
// Multiple windowing extensions yielded identical data: list them all.
186 ArrayWrapper arr(p, "Surface types", surface_types.size());
187 for (auto &name : surface_types) {
192 DumpSurfaceFormats(p, inst, surface);
193 DumpPresentModes(p, surface);
194 DumpSurfaceCapabilities(p, inst, gpu, surface);
// Groups a representative surface with the set of windowing-extension names
// whose query results were identical, so duplicate reports are printed once.
199 struct SurfaceTypeGroup {
202 std::set<std::string> surface_types;
// Two AppSurfaces are considered equal when every queried property matches.
// Used by DumpPresentableSurfaces to detect surfaces that differ only by the
// windowing-system extension that created them.
205 bool operator==(AppSurface const &a, AppSurface const &b) {
206 return a.phys_device == b.phys_device && a.surf_present_modes == b.surf_present_modes && a.surf_formats == b.surf_formats &&
207 a.surf_formats2 == b.surf_formats2 && a.surface_capabilities == b.surface_capabilities &&
208 a.surface_capabilities2_khr == b.surface_capabilities2_khr && a.surface_capabilities2_ext == b.surface_capabilities2_ext;
// Deduplicates the per-extension surfaces (surfaces whose query results are
// identical get merged into one SurfaceTypeGroup), then dumps each group.
211 void DumpPresentableSurfaces(Printer &p, AppInstance &inst, const std::vector<std::unique_ptr<AppGpu>> &gpus,
212 const std::vector<std::unique_ptr<AppSurface>> &surfaces) {
214 ObjectWrapper obj(p, "Presentable Surfaces");
217 std::vector<SurfaceTypeGroup> surface_list;
219 for (auto &surface : surfaces) {
220 auto exists = surface_list.end();
221 for (auto it = surface_list.begin(); it != surface_list.end(); it++) {
222 // check for duplicate surfaces that differ only by the surface extension
223 if (*(it->surface) == *(surface.get())) {
228 if (exists != surface_list.end()) {
// Already have an identical surface: just record this extension's name.
229 exists->surface_types.insert(surface.get()->surface_extension.name);
231 // find surface.phys_device's corresponding AppGpu
232 AppGpu *corresponding_gpu = nullptr;
233 for (auto &gpu : gpus) {
234 if (gpu->phys_device == surface->phys_device) corresponding_gpu = gpu.get();
236 if (corresponding_gpu != nullptr)
237 surface_list.push_back({surface.get(), corresponding_gpu, {surface.get()->surface_extension.name}});
240 for (auto &group : surface_list) {
241 DumpSurface(p, inst, *group.gpu, *group.surface, group.surface_types);
// Prints physical-device groups (VK_KHR_device_group_creation): each group's
// member devices, its subsetAllocation flag, and — when the group supports
// VK_KHR_device_group — the cross-device present mask and present modes.
247 void DumpGroups(Printer &p, AppInstance &inst) {
248 if (inst.CheckExtensionEnabled(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME)) {
249 auto groups = GetGroups(inst);
250 if (groups.size() == 0) {
252 ObjectWrapper obj(p, "Groups");
253 p.PrintString("No Device Groups Found");
259 ObjectWrapper obj(p, "Device Groups");
262 for (auto &group : groups) {
263 ObjectWrapper obj(p, "Group " + std::to_string(group_id));
264 auto group_props = GetGroupProps(group);
266 ObjectWrapper obj(p, "Properties");
268 ArrayWrapper arr(p, "physicalDevices", group.physicalDeviceCount);
270 for (auto &prop : group_props) {
271 p.PrintString(std::string(prop.deviceName) + " (ID: " + p.DecorateAsValue(std::to_string(id++)) + ")");
274 p.PrintKeyValue("subsetAllocation", group.subsetAllocation);
// Present capabilities require the device-level VK_KHR_device_group ext.
278 auto group_capabilities = GetGroupCapabilities(inst, group);
279 if (group_capabilities.first == false) {
280 p.PrintKeyString("Present Capabilities",
281 "Group does not support VK_KHR_device_group, skipping printing present capabilities");
283 ObjectWrapper obj(p, "Present Capabilities");
284 for (uint32_t i = 0; i < group.physicalDeviceCount; i++) {
286 p, std::string(group_props[i].deviceName) + " (ID: " + p.DecorateAsValue(std::to_string(i)) + ")");
287 ArrayWrapper arr(p, "Can present images from the following devices", group.physicalDeviceCount);
// presentMask[i] is a bitmask: bit j set means device i can present
// images that device j rendered.
289 for (uint32_t j = 0; j < group.physicalDeviceCount; j++) {
290 uint32_t mask = 1 << j;
291 if (group_capabilities.second.presentMask[i] & mask) {
292 p.PrintString(std::string(group_props[j].deviceName) + " (ID: " + p.DecorateAsValue(std::to_string(j)) +
297 DumpVkDeviceGroupPresentModeFlagsKHR(p, "Present modes", group_capabilities.second.modes);
// Prints VkPhysicalDeviceProperties for text/html/vkconfig output, including
// limits, sparse properties, and — when the instance enabled
// VK_KHR_get_physical_device_properties2 — the props2 pNext chain.
307 void GpuDumpProps(Printer &p, AppGpu &gpu) {
308 auto props = gpu.GetDeviceProperties();
311 ObjectWrapper obj(p, "VkPhysicalDeviceProperties");
312 p.PrintKeyValue("apiVersion", props.apiVersion, 14, VkVersionString(props.apiVersion));
313 p.PrintKeyValue("driverVersion", props.driverVersion, 14, to_hex_str(props.driverVersion));
314 p.PrintKeyString("vendorID", to_hex_str(props.vendorID), 14);
315 p.PrintKeyString("deviceID", to_hex_str(props.deviceID), 14);
316 p.PrintKeyString("deviceType", VkPhysicalDeviceTypeString(props.deviceType), 14);
317 p.PrintKeyString("deviceName", props.deviceName, 14);
// Only the vkconfig consumer wants the raw pipeline cache UUID bytes.
318 if (p.Type() == OutputType::vkconfig_output) {
319 ArrayWrapper arr(p, "pipelineCacheUUID", VK_UUID_SIZE);
320 for (uint32_t i = 0; i < VK_UUID_SIZE; ++i) {
321 p.PrintElement(static_cast<uint32_t>(props.pipelineCacheUUID[i]));
// Prefer the properties2 variants of limits/sparse props when available.
326 DumpVkPhysicalDeviceLimits(p, "VkPhysicalDeviceLimits",
327 gpu.inst.CheckExtensionEnabled(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)
328 ? gpu.props2.properties.limits
331 DumpVkPhysicalDeviceSparseProperties(p, "VkPhysicalDeviceSparseProperties",
332 gpu.inst.CheckExtensionEnabled(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)
333 ? gpu.props2.properties.sparseProperties
334 : gpu.props.sparseProperties);
336 if (gpu.inst.CheckExtensionEnabled(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
337 void *place = gpu.props2.pNext;
338 chain_iterator_phys_device_props2(p, gpu.inst, gpu, place, gpu.api_version);
// JSON (DevSim-schema) variant of GpuDumpProps: raw numeric values instead of
// decorated strings, and the pipeline cache UUID is always emitted.
342 void GpuDumpPropsJson(Printer &p, AppGpu &gpu) {
343 auto props = gpu.GetDeviceProperties();
344 ObjectWrapper obj(p, "VkPhysicalDeviceProperties");
345 p.PrintKeyValue("apiVersion", props.apiVersion, 14, VkVersionString(props.apiVersion));
346 p.PrintKeyValue("driverVersion", props.driverVersion, 14, to_hex_str(props.driverVersion));
347 p.PrintKeyValue("vendorID", props.vendorID, 14);
348 p.PrintKeyValue("deviceID", props.deviceID, 14);
349 p.PrintKeyValue("deviceType", props.deviceType, 14);
350 p.PrintKeyString("deviceName", props.deviceName, 14);
352 ArrayWrapper arr(p, "pipelineCacheUUID", VK_UUID_SIZE);
353 for (uint32_t i = 0; i < VK_UUID_SIZE; ++i) {
354 p.PrintElement(static_cast<uint32_t>(props.pipelineCacheUUID[i]));
// Prefer the properties2 variants of limits/sparse props when available.
358 DumpVkPhysicalDeviceLimits(p, "VkPhysicalDeviceLimits",
359 gpu.inst.CheckExtensionEnabled(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)
360 ? gpu.props2.properties.limits
362 DumpVkPhysicalDeviceSparseProperties(p, "VkPhysicalDeviceSparseProperties",
363 gpu.inst.CheckExtensionEnabled(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)
364 ? gpu.props2.properties.sparseProperties
365 : gpu.props.sparseProperties);
// Prints one queue family's properties plus its presentation support —
// either a single platform-agnostic true/false, or one entry per surface
// extension when support differs by platform.
368 void GpuDumpQueueProps(Printer &p, std::vector<SurfaceExtension> &surfaces, AppQueueFamilyProperties &queue) {
369 p.SetSubHeader().SetElementIndex(static_cast<int>(queue.queue_index));
370 ObjectWrapper obj(p, "queueProperties");
// vkconfig wants the extent as a structured object; other outputs inline it.
371 if (p.Type() == OutputType::vkconfig_output) {
372 DumpVkExtent3D(p, "minImageTransferGranularity", queue.props.minImageTransferGranularity);
374 p.PrintKeyValue("minImageTransferGranularity", queue.props.minImageTransferGranularity, 27);
376 p.PrintKeyValue("queueCount", queue.props.queueCount, 27);
377 p.PrintKeyString("queueFlags", VkQueueFlagsString(queue.props.queueFlags), 27);
378 p.PrintKeyValue("timestampValidBits", queue.props.timestampValidBits, 27);
380 if (queue.is_present_platform_agnostic) {
381 p.PrintKeyString("present support", queue.platforms_support_present ? "true" : "false");
// Compute column width from the longest surface-extension name.
384 for (auto &surface : surfaces) {
385 if (surface.name.size() > width) width = surface.name.size();
387 ObjectWrapper obj(p, "present support");
388 for (auto &surface : surfaces) {
389 p.PrintKeyString(surface.name, surface.supports_present ? "true" : "false", width);
// JSON variant of GpuDumpQueueProps: raw VkQueueFamilyProperties fields only,
// no presentation-support information.
396 void GpuDumpQueuePropsJson(Printer &p, std::vector<SurfaceExtension> &surfaces, AppQueueFamilyProperties &queue) {
397 ObjectWrapper obj(p, "");
398 DumpVkExtent3D(p, "minImageTransferGranularity", queue.props.minImageTransferGranularity);
399 p.PrintKeyValue("queueCount", queue.props.queueCount, 27);
400 p.PrintKeyValue("queueFlags", queue.props.queueFlags, 27);
401 p.PrintKeyValue("timestampValidBits", queue.props.timestampValidBits, 27);
404 // This prints a number of bytes in a human-readable format according to prefixes of the International System of Quantities (ISQ),
405 // defined in ISO/IEC 80000. The prefixes used here are not SI prefixes, but rather the binary prefixes based on powers of 1024
406 // (kibi-, mebi-, gibi- etc.).
407 #define kBufferSize 32
// Formats a byte count as e.g. "1.50 GB": divides by 1024 until the value
// drops below 1024 (at most 7 times, matching the 8 prefixes in `prefixes`).
409 std::string NumToNiceStr(const size_t sz) {
410 const char prefixes[] = "KMGTPEZY";
411 char buf[kBufferSize];
413 double result = (double)sz;
414 while (result > 1024 && which < 7) {
421 unit[0] = prefixes[which];
// _snprintf_s is the MSVC secure-CRT spelling; snprintf the standard one
// (platform selection preprocessor not visible in this excerpt — confirm).
424 _snprintf_s(buf, kBufferSize * sizeof(char), kBufferSize, "%.2f %sB", result, unit);
426 snprintf(buf, kBufferSize, "%.2f %sB", result, unit);
428 return std::string(buf);
// Formats a VkDeviceSize as "<decimal> (<hex>) (<human-readable size>)".
// (Name keeps the existing "readible" spelling — callers depend on it.)
431 std::string append_human_readible(VkDeviceSize memory) {
432 return std::to_string(memory) + " (" + to_hex_str(memory) + ") (" + NumToNiceStr(static_cast<size_t>(memory)) + ")";
// Prints VkPhysicalDeviceMemoryProperties for human-readable output: each
// heap (size, budget, usage, flags) and each memory type (heap index,
// property flags, and a summary of which image tilings/formats can use it).
435 void GpuDumpMemoryProps(Printer &p, AppGpu &gpu) {
437 ObjectWrapper obj(p, "VkPhysicalDeviceMemoryProperties");
440 ObjectWrapper obj(p, "memoryHeaps", gpu.memory_props.memoryHeapCount);
442 for (uint32_t i = 0; i < gpu.memory_props.memoryHeapCount; ++i) {
443 p.SetElementIndex(static_cast<int>(i));
444 ObjectWrapper obj(p, "memoryHeaps");
// budget/usage come from the memory-budget query stored on AppGpu.
446 p.PrintKeyString("size", append_human_readible(gpu.memory_props.memoryHeaps[i].size), 6);
447 p.PrintKeyString("budget", append_human_readible(gpu.heapBudget[i]), 6);
448 p.PrintKeyString("usage", append_human_readible(gpu.heapUsage[i]), 6);
449 DumpVkMemoryHeapFlags(p, "flags", gpu.memory_props.memoryHeaps[i].flags, 6);
453 ObjectWrapper obj(p, "memoryTypes", gpu.memory_props.memoryTypeCount);
455 for (uint32_t i = 0; i < gpu.memory_props.memoryTypeCount; ++i) {
456 p.SetElementIndex(static_cast<int>(i));
457 ObjectWrapper obj(p, "memoryTypes");
458 p.PrintKeyValue("heapIndex", gpu.memory_props.memoryTypes[i].heapIndex, 13);
460 auto flags = gpu.memory_props.memoryTypes[i].propertyFlags;
461 DumpVkMemoryPropertyFlags(p, "propertyFlags = " + to_hex_str(flags), flags);
463 ArrayWrapper arr(p, "usable for", -1);
// Bit for this memory type within the memoryTypeBits masks queried earlier.
464 const uint32_t memtype_bit = 1U << i;
466 // only linear and optimal tiling considered
467 for (uint32_t tiling = VK_IMAGE_TILING_OPTIMAL; tiling < gpu.mem_type_res_support.image.size(); ++tiling) {
469 usable += std::string(VkImageTilingString(VkImageTiling(tiling))) + ": ";
470 size_t orig_usable_str_size = usable.size();
472 for (size_t fmt_i = 0; fmt_i < gpu.mem_type_res_support.image[tiling].size(); ++fmt_i) {
473 const MemImageSupport *image_support = &gpu.mem_type_res_support.image[tiling][fmt_i];
// A usage is "compatible" when the image could be created with that
// usage AND this memory type appears in its memoryTypeBits.
474 const bool regular_compatible =
475 image_support->regular_supported && (image_support->regular_memtypes & memtype_bit);
476 const bool sparse_compatible =
477 image_support->sparse_supported && (image_support->sparse_memtypes & memtype_bit);
478 const bool transient_compatible =
479 image_support->transient_supported && (image_support->transient_memtypes & memtype_bit);
481 if (regular_compatible || sparse_compatible || transient_compatible) {
482 if (!first) usable += ", ";
486 usable += "color images";
488 usable += VkFormatString(gpu.mem_type_res_support.image[tiling][fmt_i].format);
// Annotate partial compatibility: only mention a restriction when
// the device supports that usage but this memory type does not.
491 if (regular_compatible && !sparse_compatible && !transient_compatible && image_support->sparse_supported &&
492 image_support->transient_supported) {
493 usable += "(non-sparse, non-transient)";
494 } else if (regular_compatible && !sparse_compatible && image_support->sparse_supported) {
495 if (image_support->sparse_supported) usable += "(non-sparse)";
496 } else if (regular_compatible && !transient_compatible && image_support->transient_supported) {
497 if (image_support->transient_supported) usable += "(non-transient)";
498 } else if (!regular_compatible && sparse_compatible && !transient_compatible &&
499 image_support->sparse_supported) {
500 if (image_support->sparse_supported) usable += "(sparse only)";
501 } else if (!regular_compatible && !sparse_compatible && transient_compatible &&
502 image_support->transient_supported) {
503 if (image_support->transient_supported) usable += "(transient only)";
504 } else if (!regular_compatible && sparse_compatible && transient_compatible &&
505 image_support->sparse_supported && image_support->transient_supported) {
506 usable += "(sparse and transient only)";
510 if (usable.size() == orig_usable_str_size) // not usable for anything
514 p.PrintString(usable);
// JSON variant of GpuDumpMemoryProps: raw heap and type fields only, without
// budget/usage or the "usable for" image-compatibility summary.
523 void GpuDumpMemoryPropsJson(Printer &p, AppGpu &gpu) {
524 ObjectWrapper obj(p, "VkPhysicalDeviceMemoryProperties");
526 ArrayWrapper arr(p, "memoryHeaps", gpu.memory_props.memoryHeapCount);
527 for (uint32_t i = 0; i < gpu.memory_props.memoryHeapCount; ++i) {
528 ObjectWrapper obj(p, "");
529 p.PrintKeyValue("flags", gpu.memory_props.memoryHeaps[i].flags);
530 p.PrintKeyValue("size", gpu.memory_props.memoryHeaps[i].size);
534 ArrayWrapper arr(p, "memoryTypes", gpu.memory_props.memoryTypeCount);
535 for (uint32_t i = 0; i < gpu.memory_props.memoryTypeCount; ++i) {
536 ObjectWrapper obj(p, "");
537 p.PrintKeyValue("heapIndex", gpu.memory_props.memoryTypes[i].heapIndex, 13);
538 p.PrintKeyValue("propertyFlags", gpu.memory_props.memoryTypes[i].propertyFlags, 13);
// Prints VkPhysicalDeviceFeatures, plus the features2 pNext chain when the
// instance enabled VK_KHR_get_physical_device_properties2.
543 void GpuDumpFeatures(Printer &p, AppGpu &gpu) {
545 DumpVkPhysicalDeviceFeatures(p, "VkPhysicalDeviceFeatures", gpu.features);
547 if (gpu.inst.CheckExtensionEnabled(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
548 void *place = gpu.features2.pNext;
549 chain_iterator_phys_device_features2(p, gpu, place, gpu.api_version);
// Prints one format's VkFormatProperties (linear/optimal tiling and buffer
// feature flags), with per-output-type framing: text uses a generic
// "Properties" object, html/vkconfig key by the format name, and json emits
// the raw formatID plus numeric feature masks.
553 void GpuDumpFormatProperty(Printer &p, VkFormat fmt, VkFormatProperties prop) {
555 case OutputType::text: {
556 ObjectWrapper obj(p, "Properties");
557 DumpVkFormatFeatureFlags(p, "linearTiling", prop.linearTilingFeatures);
558 DumpVkFormatFeatureFlags(p, "optimalTiling", prop.optimalTilingFeatures);
559 DumpVkFormatFeatureFlags(p, "bufferFeatures", prop.bufferFeatures);
562 case OutputType::html: {
564 ObjectWrapper obj(p, VkFormatString(fmt));
566 DumpVkFormatFeatureFlags(p, "linearTiling", prop.linearTilingFeatures);
568 DumpVkFormatFeatureFlags(p, "optimalTiling", prop.optimalTilingFeatures);
570 DumpVkFormatFeatureFlags(p, "bufferFeatures", prop.bufferFeatures);
573 case OutputType::json: {
574 ObjectWrapper obj(p, "");
575 p.PrintKeyValue("formatID", fmt);
576 p.PrintKeyValue("linearTilingFeatures", prop.linearTilingFeatures);
577 p.PrintKeyValue("optimalTilingFeatures", prop.optimalTilingFeatures);
578 p.PrintKeyValue("bufferFeatures", prop.bufferFeatures);
581 case OutputType::vkconfig_output: {
582 ObjectWrapper obj(p, VkFormatString(fmt));
583 DumpVkFormatFeatureFlags(p, "linearTiling", prop.linearTilingFeatures);
584 DumpVkFormatFeatureFlags(p, "optimalTiling", prop.optimalTilingFeatures);
585 DumpVkFormatFeatureFlags(p, "bufferFeatures", prop.bufferFeatures);
// Prints VK_EXT_tooling_info data (attached debuggers, profilers, etc.);
// emits nothing when no tools are reported.
591 void GpuDumpToolingInfo(Printer &p, AppGpu &gpu) {
592 auto tools = GetToolingInfo(gpu);
593 if (tools.size() > 0) {
595 ObjectWrapper obj(p, "Tooling Info");
596 for (auto tool : tools) {
597 DumpVkPhysicalDeviceToolPropertiesEXT(p, tool.name, tool);
// Prints format properties for every supported format. Text output groups
// formats that share identical property sets ("Common Format Group") and
// lists wholly-unsupported formats separately; other outputs query and print
// each format in the device's supported format ranges individually.
603 void GpuDevDump(Printer &p, AppGpu &gpu) {
605 ObjectWrapper obj(p, "Format Properties");
608 if (p.Type() == OutputType::text) {
// Map of identical property-triples -> list of formats sharing them.
609 auto fmtPropMap = FormatPropMap(gpu);
612 std::vector<VkFormat> unsupported_formats;
613 for (auto &prop : fmtPropMap) {
614 VkFormatProperties props;
615 props.linearTilingFeatures = prop.first.linear;
616 props.optimalTilingFeatures = prop.first.optimal;
617 props.bufferFeatures = prop.first.buffer;
// All-zero features means the format group is unsupported entirely.
618 if (props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0 && props.bufferFeatures == 0) {
619 unsupported_formats = prop.second;
623 p.SetElementIndex(counter++);
624 ObjectWrapper obj(p, "Common Format Group");
628 ArrayWrapper arr(p, "Formats", prop.second.size());
629 for (auto &fmt : prop.second) {
630 p.SetAsType().PrintString(VkFormatString(fmt));
// VK_FORMAT_UNDEFINED here is just a placeholder: the shared properties
// were printed for the whole group, not one specific format.
633 GpuDumpFormatProperty(p, VK_FORMAT_UNDEFINED, props);
639 ArrayWrapper arr(p, "Unsupported Formats", unsupported_formats.size());
640 for (auto &fmt : unsupported_formats) {
641 p.SetAsType().PrintString(VkFormatString(fmt));
644 for (auto &format : gpu.supported_format_ranges) {
645 if (gpu.FormatRangeSupported(format)) {
646 for (int32_t fmt_counter = format.first_format; fmt_counter <= format.last_format; ++fmt_counter) {
647 VkFormat fmt = static_cast<VkFormat>(fmt_counter);
649 VkFormatProperties props;
650 vkGetPhysicalDeviceFormatProperties(gpu.phys_device, fmt, &props);
652 GpuDumpFormatProperty(p, fmt, props);
// JSON variant of GpuDevDump: emits an ArrayOfVkFormatProperties covering
// every supported format range, skipping formats with no feature bits set.
662 void GpuDevDumpJson(Printer &p, AppGpu &gpu) {
663 ArrayWrapper arr(p, "ArrayOfVkFormatProperties");
664 for (auto &format : gpu.supported_format_ranges) {
665 if (gpu.FormatRangeSupported(format)) {
666 for (int32_t fmt_counter = format.first_format; fmt_counter <= format.last_format; ++fmt_counter) {
667 VkFormat fmt = static_cast<VkFormat>(fmt_counter);
669 VkFormatProperties props;
670 vkGetPhysicalDeviceFormatProperties(gpu.phys_device, fmt, &props);
672 // don't print format properties that are unsupported
673 if ((props.linearTilingFeatures || props.optimalTilingFeatures || props.bufferFeatures) == 0) continue;
675 GpuDumpFormatProperty(p, fmt, props);
680 // Print gpu info for text, html, & vkconfig_output
681 // Uses a separate function than schema-json for clarity
// Full per-GPU report: properties, device extensions, queue families,
// memory, features, tooling info, and (for text only when --show-formats was
// given; always otherwise) format properties.
682 void DumpGpu(Printer &p, AppGpu &gpu, bool show_formats) {
683 ObjectWrapper obj(p, "GPU" + std::to_string(gpu.id));
686 GpuDumpProps(p, gpu);
687 DumpExtensions(p, "Device", gpu.device_extensions);
691 ObjectWrapper obj(p, "VkQueueFamilyProperties");
692 for (uint32_t i = 0; i < gpu.queue_count; i++) {
693 AppQueueFamilyProperties queue_props = AppQueueFamilyProperties(gpu, i);
694 GpuDumpQueueProps(p, gpu.inst.surface_extensions, queue_props);
697 GpuDumpMemoryProps(p, gpu);
698 GpuDumpFeatures(p, gpu);
699 GpuDumpToolingInfo(p, gpu);
// Text output only prints formats on request; html/vkconfig always do.
701 if (p.Type() != OutputType::text || show_formats) {
709 // Print gpu info for json
// DevSim-schema report for a single GPU: properties, queue families, memory,
// core features, and per-format properties.
710 void DumpGpuJson(Printer &p, AppGpu &gpu) {
711 GpuDumpPropsJson(p, gpu);
713 ArrayWrapper arr(p, "ArrayOfVkQueueFamilyProperties");
714 for (uint32_t i = 0; i < gpu.queue_count; i++) {
715 AppQueueFamilyProperties queue_props = AppQueueFamilyProperties(gpu, i);
716 GpuDumpQueuePropsJson(p, gpu.inst.surface_extensions, queue_props);
719 GpuDumpMemoryPropsJson(p, gpu);
720 DumpVkPhysicalDeviceFeatures(p, "VkPhysicalDeviceFeatures", gpu.features);
721 GpuDevDumpJson(p, gpu);
724 // ============ Printing Logic ============= //
727 // Enlarges the console window to have a large scrollback size.
// Windows-only helper, called from main when vulkaninfo owns its own console
// (launched from Explorer rather than an existing terminal).
728 static void ConsoleEnlarge() {
729 const HANDLE console_handle = GetStdHandle(STD_OUTPUT_HANDLE);
731 // make the console window bigger
732 CONSOLE_SCREEN_BUFFER_INFO csbi;
734 if (GetConsoleScreenBufferInfo(console_handle, &csbi)) {
// Widen by 30 columns and give a 20000-line scrollback buffer.
735 buffer_size.X = csbi.dwSize.X + 30;
736 buffer_size.Y = 20000;
737 SetConsoleScreenBufferSize(console_handle, buffer_size);
742 r.Right = csbi.dwSize.X - 1 + 30;
744 SetConsoleWindowInfo(console_handle, true, &r);
746 // change the console window title
747 SetConsoleTitle(TEXT(app_short_name));
// Prints the command-line help text for vulkaninfo to stdout.
// `argv0` is the program name as invoked, echoed into the USAGE line.
751 void print_usage(const char *argv0) {
752 std::cout << "\nvulkaninfo - Summarize Vulkan information in relation to the current environment.\n\n";
753 std::cout << "USAGE: " << argv0 << " [options]\n\n";
754 std::cout << "OPTIONS:\n";
755 std::cout << "-h, --help Print this help.\n";
756 std::cout << "--html Produce an html version of vulkaninfo output, saved as\n";
757 std::cout << " \"vulkaninfo.html\" in the directory in which the command is\n";
758 std::cout << " run.\n";
759 std::cout << "-j, --json Produce a json version of vulkaninfo to standard output of the\n";
760 std::cout << " first gpu in the system conforming to the DevSim schema.\n";
761 std::cout << "--json=<gpu-number> For a multi-gpu system, a single gpu can be targetted by\n";
762 std::cout << " specifying the gpu-number associated with the gpu of \n";
763 std::cout << " interest. This number can be determined by running\n";
764 std::cout << " vulkaninfo without any options specified.\n";
765 std::cout << "--show-formats Display the format properties of each physical device.\n";
766 std::cout << " Note: This option does not affect html or json output;\n";
767 std::cout << " they will always print format properties.\n\n";
// Entry point: parses arguments, creates the Vulkan instance and per-GPU /
// per-surface query objects, constructs one Printer per requested output
// format, and runs the dump pipeline for each printer.
770 int main(int argc, char **argv) {
// Windows-only setup: enlarge an exclusively-owned console and resolve
// user32.dll entry points needed for surface/window creation.
772 if (ConsoleIsExclusive()) ConsoleEnlarge();
773 if (!LoadUser32Dll()) {
774 fprintf(stderr, "Failed to load user32.dll library!\n");
775 WAIT_FOR_CONSOLE_DESTROY;
780 uint32_t selected_gpu = 0;
781 bool show_formats = false;
782 char *output_path = nullptr;
784 // Combinations of output: html only, html AND json, json only, human readable only
785 for (int i = 1; i < argc; ++i) {
786 // A internal-use-only format for communication with the Vulkan Configurator tool
787 // Usage "--vkconfig_output <path>"
788 if (0 == strcmp("--vkconfig_output", argv[i]) && argc > (i + 1)) {
789 human_readable_output = false;
790 vkconfig_output = true;
791 output_path = argv[i + 1];
793 } else if (strncmp("--json", argv[i], 6) == 0 || strcmp(argv[i], "-j") == 0) {
// "--json=<n>" selects a specific GPU; bare --json/-j defaults to GPU 0.
794 if (strlen(argv[i]) > 7 && strncmp("--json=", argv[i], 7) == 0) {
795 selected_gpu = static_cast<uint32_t>(strtol(argv[i] + 7, nullptr, 10));
797 human_readable_output = false;
799 } else if (strcmp(argv[i], "--html") == 0) {
800 human_readable_output = false;
802 } else if (strcmp(argv[i], "--show-formats") == 0) {
804 } else if (strcmp(argv[i], "--help") == 0 || strcmp(argv[i], "-h") == 0) {
805 print_usage(argv[0]);
// Unrecognized option: show usage (and exit path not visible here).
808 print_usage(argv[0]);
// Create the instance and discover window-system integration extensions.
813 AppInstance instance = {};
814 SetupWindowExtensions(instance);
816 auto pNext_chains = get_chain_infos();
818 auto phys_devices = instance.FindPhysicalDevices();
// For each available WSI platform, create a window + surface and query it
// against every physical device.
820 std::vector<std::unique_ptr<AppSurface>> surfaces;
821 #if defined(VK_USE_PLATFORM_XCB_KHR) || defined(VK_USE_PLATFORM_XLIB_KHR) || defined(VK_USE_PLATFORM_WIN32_KHR) || \
822 defined(VK_USE_PLATFORM_MACOS_MVK) || defined(VK_USE_PLATFORM_METAL_EXT) || defined(VK_USE_PLATFORM_WAYLAND_KHR)
823 for (auto &surface_extension : instance.surface_extensions) {
824 surface_extension.create_window(instance);
825 surface_extension.surface = surface_extension.create_surface(instance);
826 for (auto &phys_device : phys_devices) {
827 surfaces.push_back(std::unique_ptr<AppSurface>(
828 new AppSurface(instance, phys_device, surface_extension, pNext_chains.surface_capabilities2)));
// Wrap every physical device in an AppGpu that caches its queried state.
833 std::vector<std::unique_ptr<AppGpu>> gpus;
835 uint32_t gpu_counter = 0;
836 for (auto &phys_device : phys_devices) {
837 gpus.push_back(std::unique_ptr<AppGpu>(new AppGpu(instance, gpu_counter++, phys_device, pNext_chains)));
840 if (selected_gpu >= gpus.size()) {
841 std::cout << "The selected gpu (" << selected_gpu << ") is not in the valid range of 0 to " << gpus.size() - 1 << ".\n";
// Build one Printer per requested output format. The ofstreams must
// outlive the printers, hence they are declared at this scope.
845 std::vector<std::unique_ptr<Printer>> printers;
848 buf = std::cout.rdbuf();
849 std::ostream out(buf);
850 std::ofstream html_out;
851 std::ofstream vkconfig_out;
853 if (human_readable_output) {
854 printers.push_back(std::unique_ptr<Printer>(new Printer(OutputType::text, out, selected_gpu, instance.vk_version)));
857 html_out = std::ofstream("vulkaninfo.html");
858 printers.push_back(std::unique_ptr<Printer>(new Printer(OutputType::html, html_out, selected_gpu, instance.vk_version)));
861 printers.push_back(std::unique_ptr<Printer>(new Printer(OutputType::json, out, selected_gpu, instance.vk_version)));
863 if (vkconfig_output) {
// Path separator differs by platform (guard lines not visible here).
865 vkconfig_out = std::ofstream(std::string(output_path) + "\\vulkaninfo.json");
867 vkconfig_out = std::ofstream(std::string(output_path) + "/vulkaninfo.json");
870 std::unique_ptr<Printer>(new Printer(OutputType::vkconfig_output, vkconfig_out, selected_gpu, instance.vk_version)));
// Run the dump: json gets the schema layout; every other printer gets the
// full instance/layer/surface/group/per-GPU report.
873 for (auto &p : printers) {
874 if (p->Type() == OutputType::json) {
875 DumpLayers(*p.get(), instance.global_layers, gpus);
876 DumpGpuJson(*p.get(), *gpus.at(selected_gpu).get());
880 DumpExtensions(*p.get(), "Instance", instance.global_extensions);
883 DumpLayers(*p.get(), instance.global_layers, gpus);
885 #if defined(VK_USE_PLATFORM_XCB_KHR) || defined(VK_USE_PLATFORM_XLIB_KHR) || defined(VK_USE_PLATFORM_WIN32_KHR) || \
886 defined(VK_USE_PLATFORM_MACOS_MVK) || defined(VK_USE_PLATFORM_METAL_EXT) || defined(VK_USE_PLATFORM_WAYLAND_KHR)
887 DumpPresentableSurfaces(*p.get(), instance, gpus, surfaces);
889 DumpGroups(*p.get(), instance);
892 ObjectWrapper obj(*p, "Device Properties and Extensions");
895 for (auto &gpu : gpus) {
896 DumpGpu(*p.get(), *gpu.get(), show_formats);
// Tear down the surfaces and windows created above.
904 #if defined(VK_USE_PLATFORM_XCB_KHR) || defined(VK_USE_PLATFORM_XLIB_KHR) || defined(VK_USE_PLATFORM_WIN32_KHR) || \
905 defined(VK_USE_PLATFORM_MACOS_MVK) || defined(VK_USE_PLATFORM_METAL_EXT) || defined(VK_USE_PLATFORM_WAYLAND_KHR)
907 for (auto &surface_extension : instance.surface_extensions) {
908 AppDestroySurface(instance, surface_extension.surface);
909 surface_extension.destroy_window(instance);
913 WAIT_FOR_CONSOLE_DESTROY;