1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Intel Corporation
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
22 * \brief Vulkan Occlusion Query Tests
23 *//*--------------------------------------------------------------------*/
25 #include "vktQueryPoolOcclusionTests.hpp"
27 #include "vktTestCase.hpp"
29 #include "vktDrawImageObjectUtil.hpp"
30 #include "vktDrawBufferObjectUtil.hpp"
31 #include "vktDrawCreateInfoUtil.hpp"
32 #include "vkBuilderUtil.hpp"
33 #include "vkRefUtil.hpp"
34 #include "vkPrograms.hpp"
35 #include "vkTypeUtil.hpp"
36 #include "vkCmdUtil.hpp"
38 #include "tcuTestLog.hpp"
39 #include "tcuResource.hpp"
40 #include "tcuImageCompare.hpp"
41 #include "tcuCommandLine.hpp"
// Builds all static rendering state shared by the occlusion-query tests:
// attachments, render pass, framebuffer, pipeline and the vertex buffer.
56 StateObjects (const vk::DeviceInterface&vk, vkt::Context &context, const int numVertices, vk::VkPrimitiveTopology primitive);
// Uploads 'vertices' into m_vertexBuffer and flushes the host-visible allocation.
57 void setVertices (const vk::DeviceInterface&vk, std::vector<tcu::Vec4> vertices);
65 vkt::Context &m_context;
67 vk::Move<vk::VkPipeline> m_pipeline;
68 vk::Move<vk::VkPipelineLayout> m_pipelineLayout;
// Color target (also TRANSFER_SRC for readback) and D16 depth target.
70 de::SharedPtr<Image> m_colorAttachmentImage, m_DepthImage;
71 vk::Move<vk::VkImageView> m_attachmentView;
// NOTE(review): identifier is misspelled ("m_depthiew" — presumably meant m_depthView);
// used consistently throughout, so renaming must be done file-wide.
72 vk::Move<vk::VkImageView> m_depthiew;
74 vk::Move<vk::VkRenderPass> m_renderPass;
75 vk::Move<vk::VkFramebuffer> m_framebuffer;
// Host-visible vertex buffer, sized for 'numVertices' Vec4 positions.
77 de::SharedPtr<Buffer> m_vertexBuffer;
79 vk::VkFormat m_colorAttachmentFormat;
// Creates the color/depth attachments and views, a single-subpass render pass,
// the framebuffer, a fixed-function graphics pipeline ("vert"/"frag" shaders)
// and a host-visible vertex buffer large enough for 'numVertices' positions.
82 StateObjects::StateObjects (const vk::DeviceInterface&vk, vkt::Context &context, const int numVertices, vk::VkPrimitiveTopology primitive)
84 , m_colorAttachmentFormat(vk::VK_FORMAT_R8G8B8A8_UNORM)
87 vk::VkFormat depthFormat = vk::VK_FORMAT_D16_UNORM;
88 const vk::VkDevice device = m_context.getDevice();
90 //attachment images and views
92 vk::VkExtent3D imageExtent =
// Color image doubles as a transfer source so results can be read back.
99 const ImageCreateInfo colorImageCreateInfo(vk::VK_IMAGE_TYPE_2D, m_colorAttachmentFormat, imageExtent, 1, 1, vk::VK_SAMPLE_COUNT_1_BIT, vk::VK_IMAGE_TILING_OPTIMAL,
100 vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
102 m_colorAttachmentImage = Image::createAndAlloc(vk, device, colorImageCreateInfo, m_context.getDefaultAllocator(), m_context.getUniversalQueueFamilyIndex());
104 const ImageViewCreateInfo attachmentViewInfo(m_colorAttachmentImage->object(), vk::VK_IMAGE_VIEW_TYPE_2D, m_colorAttachmentFormat);
105 m_attachmentView = vk::createImageView(vk, device, &attachmentViewInfo);
107 ImageCreateInfo depthImageCreateInfo(vk::VK_IMAGE_TYPE_2D, depthFormat, imageExtent, 1, 1, vk::VK_SAMPLE_COUNT_1_BIT, vk::VK_IMAGE_TILING_OPTIMAL,
108 vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
110 m_DepthImage = Image::createAndAlloc(vk, device, depthImageCreateInfo, m_context.getDefaultAllocator(), m_context.getUniversalQueueFamilyIndex());
112 // Construct a depth view from depth image
113 const ImageViewCreateInfo depthViewInfo(m_DepthImage->object(), vk::VK_IMAGE_VIEW_TYPE_2D, depthFormat);
114 m_depthiew = vk::createImageView(vk, device, &depthViewInfo);
118 // Renderpass and Framebuffer
120 RenderPassCreateInfo renderPassCreateInfo;
121 renderPassCreateInfo.addAttachment(AttachmentDescription(m_colorAttachmentFormat, // format
122 vk::VK_SAMPLE_COUNT_1_BIT, // samples
123 vk::VK_ATTACHMENT_LOAD_OP_CLEAR, // loadOp
124 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE, // storeOp
125 vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE, // stencilLoadOp
126 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE, // stencilStoreOp
127 vk::VK_IMAGE_LAYOUT_GENERAL, // initialLayout
128 vk::VK_IMAGE_LAYOUT_GENERAL)); // finalLayout
130 renderPassCreateInfo.addAttachment(AttachmentDescription(depthFormat, // format
131 vk::VK_SAMPLE_COUNT_1_BIT, // samples
132 vk::VK_ATTACHMENT_LOAD_OP_CLEAR, // loadOp
133 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE, // storeOp
134 vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE, // stencilLoadOp
135 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE, // stencilStoreOp
136 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // initialLayout
137 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)); // finalLayout
139 const vk::VkAttachmentReference colorAttachmentReference =
142 vk::VK_IMAGE_LAYOUT_GENERAL // layout
145 const vk::VkAttachmentReference depthAttachmentReference =
148 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL // layout
151 renderPassCreateInfo.addSubpass(SubpassDescription(vk::VK_PIPELINE_BIND_POINT_GRAPHICS, // pipelineBindPoint
154 DE_NULL, // pInputAttachments
156 &colorAttachmentReference, // pColorAttachments
157 DE_NULL, // pResolveAttachments
158 depthAttachmentReference, // depthStencilAttachment
160 DE_NULL)); // preserveAttachments
162 m_renderPass = vk::createRenderPass(vk, device, &renderPassCreateInfo);
// Framebuffer binds color view at attachment 0, depth view at attachment 1.
164 std::vector<vk::VkImageView> attachments(2);
165 attachments[0] = *m_attachmentView;
166 attachments[1] = *m_depthiew;
168 FramebufferCreateInfo framebufferCreateInfo(*m_renderPass, attachments, WIDTH, HEIGHT, 1);
169 m_framebuffer = vk::createFramebuffer(vk, device, &framebufferCreateInfo);
// Pipeline: "vert"/"frag" shader modules, empty pipeline layout, one Vec4 attribute.
175 vk::Unique<vk::VkShaderModule> vs(vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0));
176 vk::Unique<vk::VkShaderModule> fs(vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("frag"), 0));
178 const PipelineCreateInfo::ColorBlendState::Attachment attachmentState;
180 const PipelineLayoutCreateInfo pipelineLayoutCreateInfo;
181 m_pipelineLayout = vk::createPipelineLayout(vk, device, &pipelineLayoutCreateInfo);
183 const vk::VkVertexInputBindingDescription vf_binding_desc =
186 4 * (deUint32)sizeof(float), // stride;
187 vk::VK_VERTEX_INPUT_RATE_VERTEX // inputRate
190 const vk::VkVertexInputAttributeDescription vf_attribute_desc =
194 vk::VK_FORMAT_R32G32B32A32_SFLOAT, // format;
198 const vk::VkPipelineVertexInputStateCreateInfo vf_info =
200 vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // sType;
202 0u, // flags;
203 1, // vertexBindingDescriptionCount;
204 &vf_binding_desc, // pVertexBindingDescriptions;
205 1, // vertexAttributeDescriptionCount;
209 PipelineCreateInfo pipelineCreateInfo(*m_pipelineLayout, *m_renderPass, 0, 0);
210 pipelineCreateInfo.addShader(PipelineCreateInfo::PipelineShaderStage(*vs, "main", vk::VK_SHADER_STAGE_VERTEX_BIT));
211 pipelineCreateInfo.addShader(PipelineCreateInfo::PipelineShaderStage(*fs, "main", vk::VK_SHADER_STAGE_FRAGMENT_BIT));
212 pipelineCreateInfo.addState(PipelineCreateInfo::InputAssemblerState(primitive));
213 pipelineCreateInfo.addState(PipelineCreateInfo::ColorBlendState(1, &attachmentState));
214 const vk::VkViewport viewport = vk::makeViewport(WIDTH, HEIGHT);
215 const vk::VkRect2D scissor = vk::makeRect2D(WIDTH, HEIGHT);
216 pipelineCreateInfo.addState(PipelineCreateInfo::ViewportState(1, std::vector<vk::VkViewport>(1, viewport), std::vector<vk::VkRect2D>(1, scissor)));
// Depth test/write enabled with GREATER_OR_EQUAL — larger depth values pass.
217 pipelineCreateInfo.addState(PipelineCreateInfo::DepthStencilState(true, true, vk::VK_COMPARE_OP_GREATER_OR_EQUAL));
218 pipelineCreateInfo.addState(PipelineCreateInfo::RasterizerState());
219 pipelineCreateInfo.addState(PipelineCreateInfo::MultiSampleState());
220 pipelineCreateInfo.addState(vf_info);
221 m_pipeline = vk::createGraphicsPipeline(vk, device, DE_NULL, &pipelineCreateInfo);
// Host-visible vertex buffer; contents are written later via setVertices().
226 const size_t kBufferSize = numVertices * sizeof(tcu::Vec4);
227 m_vertexBuffer = Buffer::createAndAlloc(vk, device, BufferCreateInfo(kBufferSize, vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT), m_context.getDefaultAllocator(), vk::MemoryRequirement::HostVisible);
// Copies 'vertices' into the host-visible vertex buffer and flushes the
// allocation so the writes become visible to the device.
231 void StateObjects::setVertices (const vk::DeviceInterface&vk, std::vector<tcu::Vec4> vertices)
233 const vk::VkDevice device = m_context.getDevice();
235 tcu::Vec4 *ptr = reinterpret_cast<tcu::Vec4*>(m_vertexBuffer->getBoundMemory().getHostPtr());
236 std::copy(vertices.begin(), vertices.end(), ptr);
// Flush is required for non-coherent host-visible memory.
238 vk::flushAlloc(vk, device, m_vertexBuffer->getBoundMemory());
// Requested result width: 32-bit vs 64-bit query results.
241 enum OcclusionQueryResultSize
// How the test waits for results: on the queue, via the query wait flag, or not at all.
247 enum OcclusionQueryWait
// Retrieval path: host vkGetQueryPoolResults vs device vkCmdCopyQueryPoolResults.
254 enum OcclusionQueryResultsMode
// One combination of parameters defining a single occlusion-query test case.
260 struct OcclusionQueryTestVector
262 vk::VkQueryControlFlags queryControlFlags;
263 OcclusionQueryResultSize queryResultSize;
264 OcclusionQueryWait queryWait;
265 OcclusionQueryResultsMode queryResultsMode;
// Byte stride between per-query results in the destination buffer.
266 vk::VkDeviceSize queryResultsStride;
// When true, results are fetched WITH_AVAILABILITY_BIT (result + availability word).
267 bool queryResultsAvailability;
268 vk::VkPrimitiveTopology primitiveTopology;
// Smoke test: two occlusion queries (one empty, one around a single draw call),
// read back on the host after a queue wait with 64-bit GET-mode results.
272 class BasicOcclusionQueryTestInstance : public vkt::TestInstance
275 BasicOcclusionQueryTestInstance (vkt::Context &context, const OcclusionQueryTestVector& testVector);
276 ~BasicOcclusionQueryTestInstance (void);
278 tcu::TestStatus iterate (void);
// Query slot assignments within the pool.
282 NUM_QUERIES_IN_POOL = 2,
283 QUERY_INDEX_CAPTURE_EMPTY = 0,
284 QUERY_INDEX_CAPTURE_DRAWCALL = 1,
285 NUM_VERTICES_IN_DRAWCALL = 3
288 OcclusionQueryTestVector m_testVector;
// Owned; deleted in the destructor.
289 StateObjects* m_stateObjects;
// Raw handle (not a Move<>), destroyed explicitly in the destructor.
290 vk::VkQueryPool m_queryPool;
// Validates the (fixed) test-vector configuration this basic test supports,
// creates rendering state, the occlusion query pool and the vertex data.
293 BasicOcclusionQueryTestInstance::BasicOcclusionQueryTestInstance (vkt::Context &context, const OcclusionQueryTestVector& testVector)
294 : TestInstance (context)
295 , m_testVector (testVector)
// The basic test only supports this one configuration; other combinations are
// exercised by OcclusionQueryTestInstance.
297 DE_ASSERT(testVector.queryResultSize == RESULT_SIZE_64_BIT
298 && testVector.queryWait == WAIT_QUEUE
299 && testVector.queryResultsMode == RESULTS_MODE_GET
300 && testVector.queryResultsStride == sizeof(deUint64)
301 && testVector.queryResultsAvailability == false
302 && testVector.primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST);
// PRECISE queries require the occlusionQueryPrecise device feature.
304 if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) && !m_context.getDeviceFeatures().occlusionQueryPrecise)
305 throw tcu::NotSupportedError("Precise occlusion queries are not supported");
307 m_stateObjects = new StateObjects(m_context.getDeviceInterface(), m_context, NUM_VERTICES_IN_DRAWCALL, m_testVector.primitiveTopology);
309 const vk::VkDevice device = m_context.getDevice();
310 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
312 const vk::VkQueryPoolCreateInfo queryPoolCreateInfo =
314 vk::VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
317 vk::VK_QUERY_TYPE_OCCLUSION,
321 VK_CHECK(vk.createQueryPool(device, &queryPoolCreateInfo, /*pAllocator*/ DE_NULL, &m_queryPool));
// Three points in the upper-right quadrant at z=0 — all should pass the depth test.
323 std::vector<tcu::Vec4> vertices(NUM_VERTICES_IN_DRAWCALL);
324 vertices[0] = tcu::Vec4(0.5, 0.5, 0.0, 1.0);
325 vertices[1] = tcu::Vec4(0.5, 0.0, 0.0, 1.0);
326 vertices[2] = tcu::Vec4(0.0, 0.5, 0.0, 1.0);
327 m_stateObjects->setVertices(vk, vertices);
// Releases the owned state objects and destroys the raw query pool handle.
330 BasicOcclusionQueryTestInstance::~BasicOcclusionQueryTestInstance (void)
333 delete m_stateObjects;
// m_queryPool is a raw handle, so it must be destroyed manually.
335 if (m_queryPool != DE_NULL)
337 const vk::VkDevice device = m_context.getDevice();
338 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
340 vk.destroyQueryPool(device, m_queryPool, /*pAllocator*/ DE_NULL);
// Records one command buffer that resets the pool, runs an empty query and a
// query around one draw call, submits it, waits, then verifies the results:
// the empty query must be 0; the draw-call query must equal the vertex count
// for PRECISE queries or be non-zero otherwise.
344 tcu::TestStatus BasicOcclusionQueryTestInstance::iterate (void)
346 tcu::TestLog &log = m_context.getTestContext().getLog();
347 const vk::VkDevice device = m_context.getDevice();
348 const vk::VkQueue queue = m_context.getUniversalQueue();
349 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
351 const CmdPoolCreateInfo cmdPoolCreateInfo (m_context.getUniversalQueueFamilyIndex());
352 vk::Move<vk::VkCommandPool> cmdPool = vk::createCommandPool(vk, device, &cmdPoolCreateInfo);
354 vk::Unique<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
356 beginCommandBuffer(vk, *cmdBuffer);
// Move attachments out of UNDEFINED into their working layouts.
358 initialTransitionColor2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_LAYOUT_GENERAL,
359 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
360 initialTransitionDepth2DImage(vk, *cmdBuffer, m_stateObjects->m_DepthImage->object(), vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
361 vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);
363 std::vector<vk::VkClearValue> renderPassClearValues(2);
364 deMemset(&renderPassClearValues[0], 0, static_cast<int>(renderPassClearValues.size()) * sizeof(vk::VkClearValue));
// Queries must be reset before first use.
366 vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
368 beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer, vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT), (deUint32)renderPassClearValues.size(), &renderPassClearValues[0]);
370 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);
372 vk::VkBuffer vertexBuffer = m_stateObjects->m_vertexBuffer->object();
373 const vk::VkDeviceSize vertexBufferOffset = 0;
374 vk.cmdBindVertexBuffers(*cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);
// Query 0: no draws inside — expected to report exactly zero samples.
376 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_EMPTY, m_testVector.queryControlFlags);
377 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_EMPTY);
// Query 1: wraps the only draw call.
379 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_DRAWCALL, m_testVector.queryControlFlags);
380 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, 0, 0);
381 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_DRAWCALL);
383 endRenderPass(vk, *cmdBuffer);
385 transition2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_ASPECT_COLOR_BIT,
386 vk::VK_IMAGE_LAYOUT_GENERAL, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
387 vk::VK_ACCESS_TRANSFER_READ_BIT, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT);
389 endCommandBuffer(vk, *cmdBuffer);
// Submit + wait: after this, results must be available (WAIT_QUEUE semantics).
391 submitCommandsAndWait(vk, device, queue, cmdBuffer.get());
393 deUint64 queryResults[NUM_QUERIES_IN_POOL] = { 0 };
394 size_t queryResultsSize = sizeof(queryResults);
396 vk::VkResult queryResult = vk.getQueryPoolResults(device, m_queryPool, 0, NUM_QUERIES_IN_POOL, queryResultsSize, queryResults, sizeof(queryResults[0]), vk::VK_QUERY_RESULT_64_BIT);
398 if (queryResult == vk::VK_NOT_READY)
400 TCU_FAIL("Query result not avaliable, but vkWaitIdle() was called.");
403 VK_CHECK(queryResult);
405 log << tcu::TestLog::Section("OcclusionQueryResults",
406 "Occlusion query results");
407 for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(queryResults); ++ndx)
409 log << tcu::TestLog::Message << "query[ slot == " << ndx
410 << "] result == " << queryResults[ndx] << tcu::TestLog::EndMessage;
415 for (int queryNdx = 0; queryNdx < DE_LENGTH_OF_ARRAY(queryResults); ++queryNdx)
418 deUint64 expectedValue;
422 case QUERY_INDEX_CAPTURE_EMPTY:
425 case QUERY_INDEX_CAPTURE_DRAWCALL:
426 expectedValue = NUM_VERTICES_IN_DRAWCALL;
// PRECISE flag (or an expected count of zero) requires the exact sample count.
430 if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) || expectedValue == 0)
432 // require precise value
433 if (queryResults[queryNdx] != expectedValue)
435 log << tcu::TestLog::Message << "vkGetQueryPoolResults returned "
436 "wrong value of query for index "
// NOTE(review): logs queryResults[0] rather than queryResults[queryNdx] —
// looks like a copy/paste slip; verify before changing the message.
437 << queryNdx << ", expected " << expectedValue << ", got "
438 << queryResults[0] << "." << tcu::TestLog::EndMessage;
444 // require imprecise value > 0
445 if (queryResults[queryNdx] == 0)
447 log << tcu::TestLog::Message << "vkGetQueryPoolResults returned "
448 "wrong value of query for index "
// NOTE(review): same queryResults[0]-vs-[queryNdx] concern as above.
449 << queryNdx << ", expected any non-zero value, got "
450 << queryResults[0] << "." << tcu::TestLog::EndMessage;
455 log << tcu::TestLog::EndSection;
459 return tcu::TestStatus(QP_TEST_RESULT_PASS, "Query result verification passed");
461 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "Query result verification failed");
// Full occlusion-query test: draws an unoccluded, a partially occluded and a
// fully occluded triangle, each wrapped in its own query, and checks results
// under every wait/retrieval mode described by the test vector.
464 class OcclusionQueryTestInstance : public vkt::TestInstance
467 OcclusionQueryTestInstance (vkt::Context &context, const OcclusionQueryTestVector& testVector);
468 ~OcclusionQueryTestInstance (void);
470 tcu::TestStatus iterate (void);
// Whether the pool reset / the result copy need their own command buffers
// (to avoid racing host access to the query results).
472 bool hasSeparateResetCmdBuf (void) const;
473 bool hasSeparateCopyCmdBuf (void) const;
475 vk::Move<vk::VkCommandBuffer> recordQueryPoolReset (vk::VkCommandPool commandPool);
476 vk::Move<vk::VkCommandBuffer> recordRender (vk::VkCommandPool commandPool);
477 vk::Move<vk::VkCommandBuffer> recordCopyResults (vk::VkCommandPool commandPool);
// Reads back results (and availability words when requested) into the out-arrays.
479 void captureResults (deUint64* retResults, deUint64* retAvailability, bool allowNotReady);
480 void logResults (const deUint64* results, const deUint64* availability);
481 bool validateResults (const deUint64* results, const deUint64* availability, bool allowUnavailable, vk::VkPrimitiveTopology primitiveTopology);
// One query slot per occlusion scenario.
485 NUM_QUERIES_IN_POOL = 3,
486 QUERY_INDEX_CAPTURE_ALL = 0,
487 QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED = 1,
488 QUERY_INDEX_CAPTURE_OCCLUDED = 2
492 NUM_VERTICES_IN_DRAWCALL = 3,
493 NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL = 3,
494 NUM_VERTICES_IN_OCCLUDER_DRAWCALL = 3,
495 NUM_VERTICES = NUM_VERTICES_IN_DRAWCALL + NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL + NUM_VERTICES_IN_OCCLUDER_DRAWCALL
// First-vertex offsets of each draw within the shared vertex buffer.
500 START_VERTEX_PARTIALLY_OCCLUDED = START_VERTEX + NUM_VERTICES_IN_DRAWCALL,
501 START_VERTEX_OCCLUDER = START_VERTEX_PARTIALLY_OCCLUDED + NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL
504 OcclusionQueryTestVector m_testVector;
// Derived from the test vector at construction time (wait / 64-bit / availability flags).
506 const vk::VkQueryResultFlags m_queryResultFlags;
508 StateObjects* m_stateObjects;
// Raw handle, destroyed explicitly in the destructor.
509 vk::VkQueryPool m_queryPool;
// Destination for vkCmdCopyQueryPoolResults (RESULTS_MODE_COPY only).
510 de::SharedPtr<Buffer> m_queryPoolResultsBuffer;
512 vk::Move<vk::VkCommandPool> m_commandPool;
513 vk::Move<vk::VkCommandBuffer> m_queryPoolResetCommandBuffer;
514 vk::Move<vk::VkCommandBuffer> m_renderCommandBuffer;
515 vk::Move<vk::VkCommandBuffer> m_copyResultsCommandBuffer;
// Derives the query-result flags from the test vector, creates rendering state,
// the 3-slot query pool, the optional copy-destination buffer, and pre-records
// the command buffers needed for this configuration.
518 OcclusionQueryTestInstance::OcclusionQueryTestInstance (vkt::Context &context, const OcclusionQueryTestVector& testVector)
519 : vkt::TestInstance (context)
520 , m_testVector (testVector)
521 , m_queryResultFlags ((m_testVector.queryWait == WAIT_QUERY ? vk::VK_QUERY_RESULT_WAIT_BIT : 0)
522 | (m_testVector.queryResultSize == RESULT_SIZE_64_BIT ? vk::VK_QUERY_RESULT_64_BIT : 0)
523 | (m_testVector.queryResultsAvailability ? vk::VK_QUERY_RESULT_WITH_AVAILABILITY_BIT : 0))
525 const vk::VkDevice device = m_context.getDevice();
526 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
// PRECISE queries require the occlusionQueryPrecise device feature.
528 if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) && !m_context.getDeviceFeatures().occlusionQueryPrecise)
529 throw tcu::NotSupportedError("Precise occlusion queries are not supported");
531 m_stateObjects = new StateObjects(m_context.getDeviceInterface(), m_context, NUM_VERTICES_IN_DRAWCALL + NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL + NUM_VERTICES_IN_OCCLUDER_DRAWCALL, m_testVector.primitiveTopology);
533 const vk::VkQueryPoolCreateInfo queryPoolCreateInfo =
535 vk::VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
538 vk::VK_QUERY_TYPE_OCCLUSION,
543 VK_CHECK(vk.createQueryPool(device, &queryPoolCreateInfo, /*pAllocator*/ DE_NULL, &m_queryPool));
// COPY mode needs a host-visible destination buffer sized stride * query count.
545 if (m_testVector.queryResultsMode == RESULTS_MODE_COPY)
547 const vk::VkDeviceSize resultsBufferSize = m_testVector.queryResultsStride * NUM_QUERIES_IN_POOL;
548 m_queryPoolResultsBuffer = Buffer::createAndAlloc(vk, device, BufferCreateInfo(resultsBufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT), m_context.getDefaultAllocator(), vk::MemoryRequirement::HostVisible);
551 const CmdPoolCreateInfo cmdPoolCreateInfo (m_context.getUniversalQueueFamilyIndex());
552 m_commandPool = vk::createCommandPool(vk, device, &cmdPoolCreateInfo);
553 m_renderCommandBuffer = recordRender(*m_commandPool);
// Reset and/or copy get separate command buffers only when the wait mode demands it.
555 if (hasSeparateResetCmdBuf())
557 m_queryPoolResetCommandBuffer = recordQueryPoolReset(*m_commandPool);
560 if (hasSeparateCopyCmdBuf())
562 m_copyResultsCommandBuffer = recordCopyResults(*m_commandPool);
// Releases the owned state objects and destroys the raw query pool handle.
566 OcclusionQueryTestInstance::~OcclusionQueryTestInstance (void)
568 const vk::VkDevice device = m_context.getDevice();
571 delete m_stateObjects;
// m_queryPool is a raw handle, so it must be destroyed manually.
573 if (m_queryPool != DE_NULL)
575 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
576 vk.destroyQueryPool(device, m_queryPool, /*pAllocator*/ DE_NULL);
// Uploads the triangle geometry, submits the pre-recorded reset/render/copy
// command buffers with the waits dictated by the test vector, then captures,
// logs and validates the three query results.
580 tcu::TestStatus OcclusionQueryTestInstance::iterate (void)
582 const vk::VkQueue queue = m_context.getUniversalQueue();
583 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
584 tcu::TestLog& log = m_context.getTestContext().getLog();
585 std::vector<tcu::Vec4> vertices (NUM_VERTICES);
// 1st triangle - unoccluded, at z = 0.5
588 vertices[START_VERTEX + 0] = tcu::Vec4( 0.5, 0.5, 0.5, 1.0);
589 vertices[START_VERTEX + 1] = tcu::Vec4( 0.5, -0.5, 0.5, 1.0);
590 vertices[START_VERTEX + 2] = tcu::Vec4(-0.5, 0.5, 0.5, 1.0);
591 // 2nd triangle - partially occluding the scene
592 vertices[START_VERTEX_PARTIALLY_OCCLUDED + 0] = tcu::Vec4(-0.5, -0.5, 1.0, 1.0);
593 vertices[START_VERTEX_PARTIALLY_OCCLUDED + 1] = tcu::Vec4( 0.5, -0.5, 1.0, 1.0);
594 vertices[START_VERTEX_PARTIALLY_OCCLUDED + 2] = tcu::Vec4(-0.5, 0.5, 1.0, 1.0);
595 // 3rd triangle - fully occluding the scene
596 vertices[START_VERTEX_OCCLUDER + 0] = tcu::Vec4( 0.5, 0.5, 1.0, 1.0);
597 vertices[START_VERTEX_OCCLUDER + 1] = tcu::Vec4( 0.5, -0.5, 1.0, 1.0);
598 vertices[START_VERTEX_OCCLUDER + 2] = tcu::Vec4(-0.5, 0.5, 1.0, 1.0);
600 m_stateObjects->setVertices(vk, vertices);
// Submit the standalone reset first (when required) and wait for it, so the
// pool is in reset state before any host access.
602 if (hasSeparateResetCmdBuf())
604 const vk::VkSubmitInfo submitInfoReset =
606 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
607 DE_NULL, // const void* pNext;
608 0u, // deUint32 waitSemaphoreCount;
609 DE_NULL, // const VkSemaphore* pWaitSemaphores;
610 (const vk::VkPipelineStageFlags*)DE_NULL,
611 1u, // deUint32 commandBufferCount;
612 &m_queryPoolResetCommandBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
613 0u, // deUint32 signalSemaphoreCount;
614 DE_NULL // const VkSemaphore* pSignalSemaphores;
617 vk.queueSubmit(queue, 1, &submitInfoReset, DE_NULL);
619 // Trivially wait for reset to complete. This is to ensure the query pool is in reset state before
620 // host accesses, so as to not insert any synchronization before capturing the results needed for WAIT_NONE
622 VK_CHECK(vk.queueWaitIdle(queue));
626 const vk::VkSubmitInfo submitInfoRender =
628 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
629 DE_NULL, // const void* pNext;
630 0, // deUint32 waitSemaphoreCount;
631 DE_NULL, // const VkSemaphore* pWaitSemaphores;
632 (const vk::VkPipelineStageFlags*)DE_NULL,
633 1, // deUint32 commandBufferCount;
634 &m_renderCommandBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
635 0, // deUint32 signalSemaphoreCount;
636 DE_NULL // const VkSemaphore* pSignalSemaphores;
638 vk.queueSubmit(queue, 1, &submitInfoRender, DE_NULL);
// WAIT_QUEUE: wait for rendering before touching results at all.
641 if (m_testVector.queryWait == WAIT_QUEUE)
643 VK_CHECK(vk.queueWaitIdle(queue));
646 if (hasSeparateCopyCmdBuf())
648 // In case of WAIT_QUEUE test variant, the previously submitted m_renderCommandBuffer did not
649 // contain vkCmdCopyQueryResults, so additional cmd buffer is needed.
651 // In the case of WAIT_NONE or WAIT_QUERY, vkCmdCopyQueryResults is stored in m_renderCommandBuffer.
653 const vk::VkSubmitInfo submitInfo =
655 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
656 DE_NULL, // const void* pNext;
657 0, // deUint32 waitSemaphoreCount;
658 DE_NULL, // const VkSemaphore* pWaitSemaphores;
659 (const vk::VkPipelineStageFlags*)DE_NULL,
660 1, // deUint32 commandBufferCount;
661 &m_copyResultsCommandBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
662 0, // deUint32 signalSemaphoreCount;
663 DE_NULL // const VkSemaphore* pSignalSemaphores;
665 vk.queueSubmit(queue, 1, &submitInfo, DE_NULL);
668 if (m_testVector.queryResultsMode == RESULTS_MODE_COPY)
670 // In case of vkCmdCopyQueryResults is used, test must always wait for it
671 // to complete before we can read the result buffer.
673 VK_CHECK(vk.queueWaitIdle(queue));
676 deUint64 queryResults [NUM_QUERIES_IN_POOL];
677 deUint64 queryAvailability [NUM_QUERIES_IN_POOL];
679 // Allow not ready results only if nobody waited before getting the query results
680 const bool allowNotReady = (m_testVector.queryWait == WAIT_NONE);
682 captureResults(queryResults, queryAvailability, allowNotReady);
684 log << tcu::TestLog::Section("OcclusionQueryResults", "Occlusion query results");
686 logResults(queryResults, queryAvailability);
687 bool passed = validateResults(queryResults, queryAvailability, allowNotReady, m_testVector.primitiveTopology);
689 log << tcu::TestLog::EndSection;
// GET mode may have skipped the queue wait; idle here before teardown.
691 if (m_testVector.queryResultsMode != RESULTS_MODE_COPY)
693 VK_CHECK(vk.queueWaitIdle(queue));
698 return tcu::TestStatus(QP_TEST_RESULT_PASS, "Query result verification passed");
700 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "Query result verification failed");
// Returns true when the query-pool reset must live in its own command buffer,
// i.e. when the host will read results (GET mode) without a queue wait.
703 bool OcclusionQueryTestInstance::hasSeparateResetCmdBuf (void) const
705 // Determine if resetting query pool should be performed in separate command buffer
706 // to avoid race condition between host query access and device query reset.
708 if (m_testVector.queryResultsMode == RESULTS_MODE_COPY)
710 // We copy query results on device, so there is no race condition between
714 if (m_testVector.queryWait == WAIT_QUEUE)
716 // We wait for queue to be complete before accessing query results
720 // Separate command buffer with reset must be submitted & completed before
721 // host accesses the query results
725 bool OcclusionQueryTestInstance::hasSeparateCopyCmdBuf (void) const
727 // Copy query results must go into separate command buffer, if we want to wait on queue before that
728 return (m_testVector.queryResultsMode == RESULTS_MODE_COPY && m_testVector.queryWait == WAIT_QUEUE);
// Records a one-command buffer that resets all query slots in the pool.
// Only used in configurations where hasSeparateResetCmdBuf() is true.
731 vk::Move<vk::VkCommandBuffer> OcclusionQueryTestInstance::recordQueryPoolReset (vk::VkCommandPool cmdPool)
733 const vk::VkDevice device = m_context.getDevice();
734 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
736 DE_ASSERT(hasSeparateResetCmdBuf());
738 vk::Move<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
740 beginCommandBuffer(vk, *cmdBuffer);
741 vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
742 endCommandBuffer(vk, *cmdBuffer);
// Records the main rendering command buffer: image layout transitions, an
// optional in-line pool reset, three occlusion queries (unoccluded, partially
// occluded, fully occluded geometry), and an optional in-line result copy.
747 vk::Move<vk::VkCommandBuffer> OcclusionQueryTestInstance::recordRender (vk::VkCommandPool cmdPool)
749 const vk::VkDevice device = m_context.getDevice();
750 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
752 vk::Move<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
754 beginCommandBuffer(vk, *cmdBuffer);
// Move attachments out of UNDEFINED into their working layouts.
756 initialTransitionColor2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_LAYOUT_GENERAL,
757 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
758 initialTransitionDepth2DImage(vk, *cmdBuffer, m_stateObjects->m_DepthImage->object(), vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
759 vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);
761 std::vector<vk::VkClearValue> renderPassClearValues(2);
762 deMemset(&renderPassClearValues[0], 0, static_cast<int>(renderPassClearValues.size()) * sizeof(vk::VkClearValue));
// Reset in-line unless a separate reset command buffer handles it.
764 if (!hasSeparateResetCmdBuf())
766 vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
769 beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer, vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT), (deUint32)renderPassClearValues.size(), &renderPassClearValues[0]);
771 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);
773 vk::VkBuffer vertexBuffer = m_stateObjects->m_vertexBuffer->object();
774 const vk::VkDeviceSize vertexBufferOffset = 0;
775 vk.cmdBindVertexBuffers(*cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);
777 // Draw un-occluded geometry
778 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_ALL, m_testVector.queryControlFlags);
779 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
780 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_ALL);
782 // Partially occlude geometry
783 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL, 1, START_VERTEX_PARTIALLY_OCCLUDED, 0);
785 // Draw partially-occluded geometry
786 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED, m_testVector.queryControlFlags);
787 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
788 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED);
// Draw the full-screen-covering occluder, then re-draw the test geometry.
791 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_OCCLUDER_DRAWCALL, 1, START_VERTEX_OCCLUDER, 0);
793 // Draw occluded geometry
794 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_OCCLUDED, m_testVector.queryControlFlags);
795 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
796 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_OCCLUDED);
798 endRenderPass(vk, *cmdBuffer);
// Copy results in-line only when no separate copy command buffer exists.
800 if (m_testVector.queryResultsMode == RESULTS_MODE_COPY && !hasSeparateCopyCmdBuf())
802 vk.cmdCopyQueryPoolResults(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL, m_queryPoolResultsBuffer->object(), /*dstOffset*/ 0, m_testVector.queryResultsStride, m_queryResultFlags);
805 transition2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_ASPECT_COLOR_BIT, vk::VK_IMAGE_LAYOUT_GENERAL,
806 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT,
807 vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT);
809 endCommandBuffer(vk, *cmdBuffer);
// Records a command buffer that copies all query results into the host-visible
// results buffer. Used only when hasSeparateCopyCmdBuf() is true.
814 vk::Move<vk::VkCommandBuffer> OcclusionQueryTestInstance::recordCopyResults (vk::VkCommandPool cmdPool)
816 const vk::VkDevice device = m_context.getDevice();
817 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
819 vk::Move<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
821 beginCommandBuffer(vk, *cmdBuffer);
822 vk.cmdCopyQueryPoolResults(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL, m_queryPoolResultsBuffer->object(), /*dstOffset*/ 0, m_testVector.queryResultsStride, m_queryResultFlags);
823 endCommandBuffer(vk, *cmdBuffer);
// Reads raw query results into a scratch buffer (via GET or the COPY-mode
// destination buffer) and unpacks each slot — 32- or 64-bit, with an optional
// trailing availability word — into the caller's deUint64 arrays.
// NOTE(review): parameter is spelled "retAvailAbility" here but "retAvailability"
// in the class declaration — harmless to the compiler, but worth unifying.
828 void OcclusionQueryTestInstance::captureResults (deUint64* retResults, deUint64* retAvailAbility, bool allowNotReady)
831 const vk::VkDevice device = m_context.getDevice();
832 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
833 std::vector<deUint8> resultsBuffer (static_cast<size_t>(m_testVector.queryResultsStride) * NUM_QUERIES_IN_POOL);
835 if (m_testVector.queryResultsMode == RESULTS_MODE_GET)
837 const vk::VkResult queryResult = vk.getQueryPoolResults(device, m_queryPool, 0, NUM_QUERIES_IN_POOL, resultsBuffer.size(), &resultsBuffer[0], m_testVector.queryResultsStride, m_queryResultFlags);
838 if (queryResult == vk::VK_NOT_READY && !allowNotReady)
840 TCU_FAIL("getQueryPoolResults returned VK_NOT_READY, but results should be already available.");
844 VK_CHECK(queryResult);
847 else if (m_testVector.queryResultsMode == RESULTS_MODE_COPY)
849 const vk::Allocation& allocation = m_queryPoolResultsBuffer->getBoundMemory();
850 const void* allocationData = allocation.getHostPtr();
// Invalidate before reading device-written, possibly non-coherent memory.
852 vk::invalidateAlloc(vk, device, allocation);
854 deMemcpy(&resultsBuffer[0], allocationData, resultsBuffer.size());
// Unpack each slot at its stride offset.
857 for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; queryNdx++)
859 const void* srcPtr = &resultsBuffer[queryNdx * static_cast<size_t>(m_testVector.queryResultsStride)];
860 if (m_testVector.queryResultSize == RESULT_SIZE_32_BIT)
862 const deUint32* srcPtrTyped = static_cast<const deUint32*>(srcPtr);
863 retResults[queryNdx] = *srcPtrTyped;
// Availability word immediately follows the result value.
864 if (m_testVector.queryResultsAvailability)
866 retAvailAbility[queryNdx] = *(srcPtrTyped + 1);
869 else if (m_testVector.queryResultSize == RESULT_SIZE_64_BIT)
871 const deUint64* srcPtrTyped = static_cast<const deUint64*>(srcPtr);
872 retResults[queryNdx] = *srcPtrTyped;
// NOTE(review): the availability condition is checked twice in a row here —
// the nested 'if' below is redundant and one level could be removed.
874 if (m_testVector.queryResultsAvailability)
876 if (m_testVector.queryResultsAvailability)
878 retAvailAbility[queryNdx] = *(srcPtrTyped + 1);
884 TCU_FAIL("Wrong m_testVector.queryResultSize");
889 void OcclusionQueryTestInstance::logResults (const deUint64* results, const deUint64* availability)
891 tcu::TestLog& log = m_context.getTestContext().getLog();
893 for (int ndx = 0; ndx < NUM_QUERIES_IN_POOL; ++ndx)
895 if (!m_testVector.queryResultsAvailability)
897 log << tcu::TestLog::Message << "query[ slot == " << ndx << "] result == " << results[ndx] << tcu::TestLog::EndMessage;
901 log << tcu::TestLog::Message << "query[ slot == " << ndx << "] result == " << results[ndx] << ", availability == " << availability[ndx] << tcu::TestLog::EndMessage;
// Validates every captured query value against the range expected for its
// slot (occluded / partially occluded / fully visible draw call) and logs a
// message for each mismatch; returns the overall pass/fail verdict.
// NOTE(review): brace-only and short lines (break;/return/else bodies) appear
// elided from this excerpt — confirm control flow against the full file.
906 bool OcclusionQueryTestInstance::validateResults (const deUint64* results , const deUint64* availability, bool allowUnavailable, vk::VkPrimitiveTopology primitiveTopology)
909 tcu::TestLog& log = m_context.getTestContext().getLog();
911 for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; ++queryNdx)
// Expected inclusive range of passing samples for this slot; filled in below.
913 deUint64 expectedValueMin = 0;
914 deUint64 expectedValueMax = 0;
916 if (m_testVector.queryResultsAvailability && availability[queryNdx] == 0)
918 // query result was not available
919 if (!allowUnavailable)
921 log << tcu::TestLog::Message << "query results availability was 0 for index "
922 << queryNdx << ", expected any value greater than 0." << tcu::TestLog::EndMessage;
929 // query is available, so expect proper result values
// Point list: one-pixel points, so exact sample counts can be demanded:
// 0 for the fully occluded slot, 1 for the partially occluded slot, and
// every vertex for the unoccluded slot.
930 if (primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST)
934 case QUERY_INDEX_CAPTURE_OCCLUDED:
935 expectedValueMin = 0;
936 expectedValueMax = 0;
938 case QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED:
939 expectedValueMin = 1;
940 expectedValueMax = 1;
942 case QUERY_INDEX_CAPTURE_ALL:
943 expectedValueMin = NUM_VERTICES_IN_DRAWCALL;
944 expectedValueMax = NUM_VERTICES_IN_DRAWCALL;
// Triangle list: rasterization coverage varies slightly between devices,
// so only a tolerance band around the ideal covered area is required.
948 else if (primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
952 case QUERY_INDEX_CAPTURE_OCCLUDED:
953 expectedValueMin = 0;
954 expectedValueMax = 0;
956 case QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED:
957 case QUERY_INDEX_CAPTURE_ALL:
// A triangle covering half of a (WIDTH/2) x (HEIGHT/2) quadrant.
959 const int primWidth = StateObjects::WIDTH / 2;
960 const int primHeight = StateObjects::HEIGHT / 2;
961 const int primArea = primWidth * primHeight / 2;
// discardHalf drops every other pixel in the fragment shader, halving the
// expected sample count; the tolerance band widens from 3% to 5%.
963 if (m_testVector.discardHalf)
965 expectedValueMin = (int)(0.95f * primArea * 0.5f);
966 expectedValueMax = (int)(1.05f * primArea * 0.5f);
970 expectedValueMin = (int)(0.97f * primArea);
971 expectedValueMax = (int)(1.03f * primArea);
978 TCU_FAIL("Unsupported primitive topology");
// With VK_QUERY_CONTROL_PRECISE_BIT (or when zero is expected) the value
// must fall inside the computed range; otherwise a conservative query only
// has to report some non-zero count.
982 if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) || (expectedValueMin == 0 && expectedValueMax == 0))
984 // require precise value
985 if (results[queryNdx] < expectedValueMin || results[queryNdx] > expectedValueMax)
987 log << tcu::TestLog::Message << "wrong value of query for index "
988 << queryNdx << ", expected the value minimum of " << expectedValueMin << ", maximum of " << expectedValueMax << " got "
989 << results[queryNdx] << "." << tcu::TestLog::EndMessage;
995 // require imprecise value greater than 0
996 if (results[queryNdx] == 0)
998 log << tcu::TestLog::Message << "wrong value of query for index "
999 << queryNdx << ", expected any non-zero value, got "
1000 << results[queryNdx] << "." << tcu::TestLog::EndMessage;
// Generic occlusion-query test case: parameterized on the instance class that
// executes the test (e.g. BasicOcclusionQueryTestInstance or
// OcclusionQueryTestInstance) and on an OcclusionQueryTestVector describing
// the variant (result size, stride, wait mode, availability, discard, ...).
1008 template<class Instance>
1009 class QueryPoolOcclusionTest : public vkt::TestCase
1012 QueryPoolOcclusionTest (tcu::TestContext &context, const char *name, const char *description, const OcclusionQueryTestVector& testVector)
1013 : TestCase (context, name, description)
1014 , m_testVector (testVector)
// Factory: hands the stored test vector to the concrete instance type.
1018 vkt::TestInstance* createInstance (vkt::Context& context) const
1020 return new Instance(context, m_testVector);
// Builds the GLSL shaders. The fragment shader optionally discards every
// other pixel in a checkerboard pattern when the test vector requests it.
// NOTE(review): some shader-source string lines appear elided from this
// excerpt — do not edit the literals without the full file.
1023 void initPrograms(vk::SourceCollections& programCollection) const
1025 const char* const discard =
1026 " if ((int(gl_FragCoord.x) % 2) == (int(gl_FragCoord.y) % 2))\n"
1029 const std::string fragSrc = std::string(
1031 "layout(location = 0) out vec4 out_FragColor;\n"
1034 " out_FragColor = vec4(0.07, 0.48, 0.75, 1.0);\n")
// The discard snippet is appended only for the *_discard test variants.
1035 + std::string(m_testVector.discardHalf ? discard : "")
1038 programCollection.glslSources.add("frag") << glu::FragmentSource(fragSrc.c_str());
// Pass-through vertex shader; gl_PointSize = 1.0 keeps points one pixel,
// which the point-list validation in validateResults relies on.
1040 programCollection.glslSources.add("vert") << glu::VertexSource("#version 430\n"
1041 "layout(location = 0) in vec4 in_Position;\n"
1042 "out gl_PerVertex { vec4 gl_Position; float gl_PointSize; };\n"
1044 " gl_Position = in_Position;\n"
1045 " gl_PointSize = 1.0;\n"
// Variant parameters captured at construction time.
1049 OcclusionQueryTestVector m_testVector;
1054 QueryPoolOcclusionTests::QueryPoolOcclusionTests (tcu::TestContext &testCtx)
1055 : TestCaseGroup(testCtx, "occlusion_query", "Tests for occlusion queries")
1057 /* Left blank on purpose */
1060 QueryPoolOcclusionTests::~QueryPoolOcclusionTests (void)
1062 /* Left blank on purpose */
1065 void QueryPoolOcclusionTests::init (void)
1067 OcclusionQueryTestVector baseTestVector;
1068 baseTestVector.queryControlFlags = 0;
1069 baseTestVector.queryResultSize = RESULT_SIZE_64_BIT;
1070 baseTestVector.queryWait = WAIT_QUEUE;
1071 baseTestVector.queryResultsMode = RESULTS_MODE_GET;
1072 baseTestVector.queryResultsStride = sizeof(deUint64);
1073 baseTestVector.queryResultsAvailability = false;
1074 baseTestVector.primitiveTopology = vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
1075 baseTestVector.discardHalf = false;
1079 OcclusionQueryTestVector testVector = baseTestVector;
1080 testVector.queryControlFlags = 0;
1081 addChild(new QueryPoolOcclusionTest<BasicOcclusionQueryTestInstance>(m_testCtx, "basic_conservative", "draw with conservative occlusion query", testVector));
1082 testVector.queryControlFlags = vk::VK_QUERY_CONTROL_PRECISE_BIT;
1083 addChild(new QueryPoolOcclusionTest<BasicOcclusionQueryTestInstance>(m_testCtx, "basic_precise", "draw with precise occlusion query", testVector));
1088 const vk::VkQueryControlFlags controlFlags[] = { 0, vk::VK_QUERY_CONTROL_PRECISE_BIT };
1089 const char* const controlFlagsStr[] = { "conservative", "precise" };
1091 for (int controlFlagIdx = 0; controlFlagIdx < DE_LENGTH_OF_ARRAY(controlFlags); ++controlFlagIdx)
1094 const vk::VkPrimitiveTopology primitiveTopology[] = { vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST };
1095 const char* const primitiveTopologyStr[] = { "points", "triangles" };
1096 for (int primitiveTopologyIdx = 0; primitiveTopologyIdx < DE_LENGTH_OF_ARRAY(primitiveTopology); ++primitiveTopologyIdx)
1099 const OcclusionQueryResultSize resultSize[] = { RESULT_SIZE_32_BIT, RESULT_SIZE_64_BIT };
1100 const char* const resultSizeStr[] = { "32", "64" };
1102 for (int resultSizeIdx = 0; resultSizeIdx < DE_LENGTH_OF_ARRAY(resultSize); ++resultSizeIdx)
1105 const OcclusionQueryWait wait[] = { WAIT_QUEUE, WAIT_QUERY };
1106 const char* const waitStr[] = { "queue", "query" };
1108 for (int waitIdx = 0; waitIdx < DE_LENGTH_OF_ARRAY(wait); ++waitIdx)
1110 const OcclusionQueryResultsMode resultsMode[] = { RESULTS_MODE_GET, RESULTS_MODE_COPY };
1111 const char* const resultsModeStr[] = { "get", "copy" };
1113 for (int resultsModeIdx = 0; resultsModeIdx < DE_LENGTH_OF_ARRAY(resultsMode); ++resultsModeIdx)
1116 const bool testAvailability[] = { false, true };
1117 const char* const testAvailabilityStr[] = { "without", "with"};
1119 for (int testAvailabilityIdx = 0; testAvailabilityIdx < DE_LENGTH_OF_ARRAY(testAvailability); ++testAvailabilityIdx)
1121 const bool discardHalf[] = { false, true };
1122 const char* const discardHalfStr[] = { "", "_discard" };
1124 for (int discardHalfIdx = 0; discardHalfIdx < DE_LENGTH_OF_ARRAY(discardHalf); ++discardHalfIdx)
1126 OcclusionQueryTestVector testVector = baseTestVector;
1127 testVector.queryControlFlags = controlFlags[controlFlagIdx];
1128 testVector.queryResultSize = resultSize[resultSizeIdx];
1129 testVector.queryWait = wait[waitIdx];
1130 testVector.queryResultsMode = resultsMode[resultsModeIdx];
1131 testVector.queryResultsStride = (testVector.queryResultSize == RESULT_SIZE_32_BIT ? sizeof(deUint32) : sizeof(deUint64));
1132 testVector.queryResultsAvailability = testAvailability[testAvailabilityIdx];
1133 testVector.primitiveTopology = primitiveTopology[primitiveTopologyIdx];
1134 testVector.discardHalf = discardHalf[discardHalfIdx];
1136 if (testVector.discardHalf && testVector.primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST)
1137 continue; // Discarding half of the pixels in fragment shader doesn't make sense with one-pixel-sized points.
1139 if (testVector.queryResultsAvailability)
1141 testVector.queryResultsStride *= 2;
1144 std::ostringstream testName;
1145 std::ostringstream testDescr;
1147 testName << resultsModeStr[resultsModeIdx] << "_results"
1148 << "_" << controlFlagsStr[controlFlagIdx]
1149 << "_size_" << resultSizeStr[resultSizeIdx]
1150 << "_wait_" << waitStr[waitIdx]
1151 << "_" << testAvailabilityStr[testAvailabilityIdx] << "_availability"
1152 << "_draw_" << primitiveTopologyStr[primitiveTopologyIdx]
1153 << discardHalfStr[discardHalfIdx];
1155 testDescr << "draw occluded " << primitiveTopologyStr[primitiveTopologyIdx]
1156 << "with " << controlFlagsStr[controlFlagIdx] << ", "
1157 << resultsModeStr[resultsModeIdx] << " results "
1158 << testAvailabilityStr[testAvailabilityIdx] << " availability bit as "
1159 << resultSizeStr[resultSizeIdx] << "bit variables,"
1160 << (testVector.discardHalf ? " discarding half of the fragments," : "")
1161 << "wait for results on" << waitStr[waitIdx];
1163 addChild(new QueryPoolOcclusionTest<OcclusionQueryTestInstance>(m_testCtx, testName.str().c_str(), testDescr.str().c_str(), testVector));
1172 // Test different strides
1174 const OcclusionQueryResultsMode resultsMode[] = { RESULTS_MODE_GET, RESULTS_MODE_COPY };
1175 const char* const resultsModeStr[] = { "get", "copy" };
1177 for (int resultsModeIdx = 0; resultsModeIdx < DE_LENGTH_OF_ARRAY(resultsMode); ++resultsModeIdx)
1179 const OcclusionQueryResultSize resultSizes[] = { RESULT_SIZE_32_BIT, RESULT_SIZE_64_BIT };
1180 const char* const resultSizeStr[] = { "32", "64" };
1182 const bool testAvailability[] = { false, true };
1183 const char* const testAvailabilityStr[] = { "without", "with" };
1185 for (int testAvailabilityIdx = 0; testAvailabilityIdx < DE_LENGTH_OF_ARRAY(testAvailability); ++testAvailabilityIdx)
1187 for (int resultSizeIdx = 0; resultSizeIdx < DE_LENGTH_OF_ARRAY(resultSizes); ++resultSizeIdx)
1189 const vk::VkDeviceSize resultSize = (resultSizes[resultSizeIdx] == RESULT_SIZE_32_BIT ? sizeof(deUint32) : sizeof(deUint64));
1191 // \todo [2015-12-18 scygan] Ensure only stride values aligned to resultSize are allowed. Otherwise test should be extended.
1192 const vk::VkDeviceSize strides[] =
1203 for (int strideIdx = 0; strideIdx < DE_LENGTH_OF_ARRAY(strides); strideIdx++)
1205 OcclusionQueryTestVector testVector = baseTestVector;
1206 testVector.queryResultsMode = resultsMode[resultsModeIdx];
1207 testVector.queryResultSize = resultSizes[resultSizeIdx];
1208 testVector.queryResultsAvailability = testAvailability[testAvailabilityIdx];
1209 testVector.queryResultsStride = strides[strideIdx];
1211 const vk::VkDeviceSize elementSize = (testVector.queryResultsAvailability ? resultSize * 2 : resultSize);
1213 if (elementSize > testVector.queryResultsStride)
1218 std::ostringstream testName;
1219 std::ostringstream testDescr;
1221 testName << resultsModeStr[resultsModeIdx]
1222 << "_results_size_" << resultSizeStr[resultSizeIdx]
1223 << "_stride_" << strides[strideIdx]
1224 << "_" << testAvailabilityStr[testAvailabilityIdx] << "_availability";
1226 testDescr << resultsModeStr[resultsModeIdx] << " results "
1227 << testAvailabilityStr[testAvailabilityIdx] << " availability bit as "
1228 << resultSizeStr[resultSizeIdx] << "bit variables, with stride" << strides[strideIdx];
1230 addChild(new QueryPoolOcclusionTest<OcclusionQueryTestInstance>(m_testCtx, testName.str().c_str(), testDescr.str().c_str(), testVector));