1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Intel Corporation
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
22 * \brief Vulkan Occlusion Query Tests
23 *//*--------------------------------------------------------------------*/
25 #include "vktQueryPoolOcclusionTests.hpp"
27 #include "vktTestCase.hpp"
29 #include "vktDrawImageObjectUtil.hpp"
30 #include "vktDrawBufferObjectUtil.hpp"
31 #include "vktDrawCreateInfoUtil.hpp"
32 #include "vkBuilderUtil.hpp"
33 #include "vkRefUtil.hpp"
34 #include "vkPrograms.hpp"
35 #include "vkTypeUtil.hpp"
36 #include "vkCmdUtil.hpp"
38 #include "tcuTestLog.hpp"
39 #include "tcuResource.hpp"
40 #include "tcuImageCompare.hpp"
41 #include "tcuCommandLine.hpp"
// Bundles all static Vulkan state shared by the occlusion query tests:
// color/depth attachments, render pass, framebuffer, pipeline and the
// host-visible vertex buffer. (Class header/WIDTH/HEIGHT constants are
// outside this view.)
56 StateObjects (const vk::DeviceInterface&vk, vkt::Context &context, const int numVertices, vk::VkPrimitiveTopology primitive);
// Uploads vertex data into m_vertexBuffer and flushes the allocation.
57 void setVertices (const vk::DeviceInterface&vk, std::vector<tcu::Vec4> vertices);
// Test context providing device, allocator and queue family index.
65 vkt::Context &m_context;
67 vk::Move<vk::VkPipeline> m_pipeline;
68 vk::Move<vk::VkPipelineLayout> m_pipelineLayout;
// Color render target and depth buffer (note: m_DepthImage breaks the
// lower-camel member convention used elsewhere in this class).
70 de::SharedPtr<Image> m_colorAttachmentImage, m_DepthImage;
71 vk::Move<vk::VkImageView> m_attachmentView;
// NOTE(review): "m_depthiew" looks like a typo for m_depthView.
72 vk::Move<vk::VkImageView> m_depthiew;
74 vk::Move<vk::VkRenderPass> m_renderPass;
75 vk::Move<vk::VkFramebuffer> m_framebuffer;
// Host-visible vertex buffer written by setVertices().
77 de::SharedPtr<Buffer> m_vertexBuffer;
79 vk::VkFormat m_colorAttachmentFormat;
// Creates every piece of render state the tests need: color + depth images
// and views, a two-attachment render pass, framebuffer, graphics pipeline
// (with depth test enabled) and a host-visible vertex buffer sized for
// numVertices. 'primitive' selects the pipeline's input topology.
82 StateObjects::StateObjects (const vk::DeviceInterface&vk, vkt::Context &context, const int numVertices, vk::VkPrimitiveTopology primitive)
84 , m_colorAttachmentFormat(vk::VK_FORMAT_R8G8B8A8_UNORM)
87 vk::VkFormat depthFormat = vk::VK_FORMAT_D16_UNORM;
88 const vk::VkDevice device = m_context.getDevice();
90 //attachment images and views
92 vk::VkExtent3D imageExtent =
// Color target: also TRANSFER_SRC so the rendered image can be read back.
99 const ImageCreateInfo colorImageCreateInfo(vk::VK_IMAGE_TYPE_2D, m_colorAttachmentFormat, imageExtent, 1, 1, vk::VK_SAMPLE_COUNT_1_BIT, vk::VK_IMAGE_TILING_OPTIMAL,
100 vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
102 m_colorAttachmentImage = Image::createAndAlloc(vk, device, colorImageCreateInfo, m_context.getDefaultAllocator(), m_context.getUniversalQueueFamilyIndex());
104 const ImageViewCreateInfo attachmentViewInfo(m_colorAttachmentImage->object(), vk::VK_IMAGE_VIEW_TYPE_2D, m_colorAttachmentFormat);
105 m_attachmentView = vk::createImageView(vk, device, &attachmentViewInfo);
// Depth buffer used to occlude fragments and produce zero/non-zero query
// results.
107 ImageCreateInfo depthImageCreateInfo(vk::VK_IMAGE_TYPE_2D, depthFormat, imageExtent, 1, 1, vk::VK_SAMPLE_COUNT_1_BIT, vk::VK_IMAGE_TILING_OPTIMAL,
108 vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
110 m_DepthImage = Image::createAndAlloc(vk, device, depthImageCreateInfo, m_context.getDefaultAllocator(), m_context.getUniversalQueueFamilyIndex());
112 // Construct a depth view from depth image
113 const ImageViewCreateInfo depthViewInfo(m_DepthImage->object(), vk::VK_IMAGE_VIEW_TYPE_2D, depthFormat);
114 m_depthiew = vk::createImageView(vk, device, &depthViewInfo);
118 // Renderpass and Framebuffer
120 RenderPassCreateInfo renderPassCreateInfo;
121 renderPassCreateInfo.addAttachment(AttachmentDescription(m_colorAttachmentFormat, // format
122 vk::VK_SAMPLE_COUNT_1_BIT, // samples
123 vk::VK_ATTACHMENT_LOAD_OP_CLEAR, // loadOp
124 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE, // storeOp
125 vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE, // stencilLoadOp
126 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE, // stencilLoadOp
127 vk::VK_IMAGE_LAYOUT_GENERAL, // initialLauout
128 vk::VK_IMAGE_LAYOUT_GENERAL)); // finalLayout
130 renderPassCreateInfo.addAttachment(AttachmentDescription(depthFormat, // format
131 vk::VK_SAMPLE_COUNT_1_BIT, // samples
132 vk::VK_ATTACHMENT_LOAD_OP_CLEAR, // loadOp
133 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE, // storeOp
134 vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE, // stencilLoadOp
135 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE, // stencilLoadOp
136 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, // initialLauout
137 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)); // finalLayout
// Single subpass referencing attachment 0 (color) and 1 (depth/stencil).
139 const vk::VkAttachmentReference colorAttachmentReference =
142 vk::VK_IMAGE_LAYOUT_GENERAL // layout
145 const vk::VkAttachmentReference depthAttachmentReference =
148 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL // layout
151 renderPassCreateInfo.addSubpass(SubpassDescription(vk::VK_PIPELINE_BIND_POINT_GRAPHICS, // pipelineBindPoint
154 DE_NULL, // pInputAttachments
156 &colorAttachmentReference, // pColorAttachments
157 DE_NULL, // pResolveAttachments
158 depthAttachmentReference, // depthStencilAttachment
160 DE_NULL)); // preserveAttachments
162 m_renderPass = vk::createRenderPass(vk, device, &renderPassCreateInfo);
164 std::vector<vk::VkImageView> attachments(2);
165 attachments[0] = *m_attachmentView;
166 attachments[1] = *m_depthiew;
168 FramebufferCreateInfo framebufferCreateInfo(*m_renderPass, attachments, WIDTH, HEIGHT, 1);
169 m_framebuffer = vk::createFramebuffer(vk, device, &framebufferCreateInfo);
// Graphics pipeline: "vert"/"frag" shaders from the binary collection,
// one vec4 position attribute, depth test GREATER_OR_EQUAL with writes on.
175 vk::Unique<vk::VkShaderModule> vs(vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0));
176 vk::Unique<vk::VkShaderModule> fs(vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("frag"), 0));
178 const PipelineCreateInfo::ColorBlendState::Attachment attachmentState;
180 const PipelineLayoutCreateInfo pipelineLayoutCreateInfo;
181 m_pipelineLayout = vk::createPipelineLayout(vk, device, &pipelineLayoutCreateInfo);
183 const vk::VkVertexInputBindingDescription vf_binding_desc =
186 4 * (deUint32)sizeof(float), // stride;
187 vk::VK_VERTEX_INPUT_RATE_VERTEX // inputRate
190 const vk::VkVertexInputAttributeDescription vf_attribute_desc =
194 vk::VK_FORMAT_R32G32B32A32_SFLOAT, // format;
// NOTE(review): the trailing comments on the vertex-input fields below do
// not line up with the values (e.g. a count commented as a pointer) -
// several initializer lines appear to be missing from this copy; verify
// against the upstream file before editing.
198 const vk::VkPipelineVertexInputStateCreateInfo vf_info =
200 vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // pNext;
202 0u, // vertexBindingDescriptionCount;
203 1, // pVertexBindingDescriptions;
204 &vf_binding_desc, // vertexAttributeDescriptionCount;
205 1, // pVertexAttributeDescriptions;
209 PipelineCreateInfo pipelineCreateInfo(*m_pipelineLayout, *m_renderPass, 0, 0);
210 pipelineCreateInfo.addShader(PipelineCreateInfo::PipelineShaderStage(*vs, "main", vk::VK_SHADER_STAGE_VERTEX_BIT));
211 pipelineCreateInfo.addShader(PipelineCreateInfo::PipelineShaderStage(*fs, "main", vk::VK_SHADER_STAGE_FRAGMENT_BIT));
212 pipelineCreateInfo.addState(PipelineCreateInfo::InputAssemblerState(primitive));
213 pipelineCreateInfo.addState(PipelineCreateInfo::ColorBlendState(1, &attachmentState));
214 const vk::VkViewport viewport = vk::makeViewport(WIDTH, HEIGHT);
215 const vk::VkRect2D scissor = vk::makeRect2D(WIDTH, HEIGHT);
216 pipelineCreateInfo.addState(PipelineCreateInfo::ViewportState(1, std::vector<vk::VkViewport>(1, viewport), std::vector<vk::VkRect2D>(1, scissor)));
217 pipelineCreateInfo.addState(PipelineCreateInfo::DepthStencilState(true, true, vk::VK_COMPARE_OP_GREATER_OR_EQUAL));
218 pipelineCreateInfo.addState(PipelineCreateInfo::RasterizerState());
219 pipelineCreateInfo.addState(PipelineCreateInfo::MultiSampleState());
220 pipelineCreateInfo.addState(vf_info);
221 m_pipeline = vk::createGraphicsPipeline(vk, device, DE_NULL, &pipelineCreateInfo);
// Host-visible vertex buffer; contents are filled later via setVertices().
226 const size_t kBufferSize = numVertices * sizeof(tcu::Vec4);
227 m_vertexBuffer = Buffer::createAndAlloc(vk, device, BufferCreateInfo(kBufferSize, vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT), m_context.getDefaultAllocator(), vk::MemoryRequirement::HostVisible);
231 void StateObjects::setVertices (const vk::DeviceInterface&vk, std::vector<tcu::Vec4> vertices)
233 const vk::VkDevice device = m_context.getDevice();
235 tcu::Vec4 *ptr = reinterpret_cast<tcu::Vec4*>(m_vertexBuffer->getBoundMemory().getHostPtr());
236 std::copy(vertices.begin(), vertices.end(), ptr);
238 vk::flushAlloc(vk, device, m_vertexBuffer->getBoundMemory());
// Whether query results are read back as 32-bit or 64-bit values
// (enumerator lines are missing from this copy of the file).
241 enum OcclusionQueryResultSize
// How the test synchronizes before reading results (queue wait, query
// wait flag, or no wait).
247 enum OcclusionQueryWait
// Whether results are fetched with vkGetQueryPoolResults or copied on the
// device with vkCmdCopyQueryPoolResults.
254 enum OcclusionQueryResultsMode
// One parameter combination for an occlusion query test case.
260 struct OcclusionQueryTestVector
// Flags passed to vkCmdBeginQuery (e.g. VK_QUERY_CONTROL_PRECISE_BIT).
262 vk::VkQueryControlFlags queryControlFlags;
263 OcclusionQueryResultSize queryResultSize;
264 OcclusionQueryWait queryWait;
265 OcclusionQueryResultsMode queryResultsMode;
// Byte stride between per-query result entries in the results buffer.
266 vk::VkDeviceSize queryResultsStride;
// If true, an availability word follows each result value.
267 bool queryResultsAvailability;
// Topology used for the draw calls (affects expected sample counts).
268 vk::VkPrimitiveTopology primitiveTopology;
// Smoke test: two occlusion queries in one pool - one with no draws in
// scope (expects 0) and one around a single draw call.
272 class BasicOcclusionQueryTestInstance : public vkt::TestInstance
275 BasicOcclusionQueryTestInstance (vkt::Context &context, const OcclusionQueryTestVector& testVector);
276 ~BasicOcclusionQueryTestInstance (void);
278 tcu::TestStatus iterate (void);
// Query slot assignments within the pool.
282 NUM_QUERIES_IN_POOL = 2,
283 QUERY_INDEX_CAPTURE_EMPTY = 0,
284 QUERY_INDEX_CAPTURE_DRAWCALL = 1,
285 NUM_VERTICES_IN_DRAWCALL = 3
288 OcclusionQueryTestVector m_testVector;
// Owned raw pointer; deleted in the destructor.
289 StateObjects* m_stateObjects;
// Raw handle (not a Move<>); destroyed explicitly in the destructor.
290 vk::VkQueryPool m_queryPool;
// Builds render state, creates the 2-slot occlusion query pool and uploads
// a single triangle's worth of vertices (drawn as a point list).
293 BasicOcclusionQueryTestInstance::BasicOcclusionQueryTestInstance (vkt::Context &context, const OcclusionQueryTestVector& testVector)
294 : TestInstance (context)
295 , m_testVector (testVector)
// The basic test only supports this one fixed configuration; other
// combinations are covered by OcclusionQueryTestInstance.
297 DE_ASSERT(testVector.queryResultSize == RESULT_SIZE_64_BIT
298 && testVector.queryWait == WAIT_QUEUE
299 && testVector.queryResultsMode == RESULTS_MODE_GET
300 && testVector.queryResultsStride == sizeof(deUint64)
301 && testVector.queryResultsAvailability == false
302 && testVector.primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST);
// Precise queries are an optional device feature.
304 if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) && !m_context.getDeviceFeatures().occlusionQueryPrecise)
305 throw tcu::NotSupportedError("Precise occlusion queries are not supported");
307 m_stateObjects = new StateObjects(m_context.getDeviceInterface(), m_context, NUM_VERTICES_IN_DRAWCALL, m_testVector.primitiveTopology);
309 const vk::VkDevice device = m_context.getDevice();
310 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
312 const vk::VkQueryPoolCreateInfo queryPoolCreateInfo =
314 vk::VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
317 vk::VK_QUERY_TYPE_OCCLUSION,
321 VK_CHECK(vk.createQueryPool(device, &queryPoolCreateInfo, /*pAllocator*/ DE_NULL, &m_queryPool));
// Three points; with point-list topology each contributes one sample.
323 std::vector<tcu::Vec4> vertices(NUM_VERTICES_IN_DRAWCALL);
324 vertices[0] = tcu::Vec4(0.5, 0.5, 0.0, 1.0);
325 vertices[1] = tcu::Vec4(0.5, 0.0, 0.0, 1.0);
326 vertices[2] = tcu::Vec4(0.0, 0.5, 0.0, 1.0);
327 m_stateObjects->setVertices(vk, vertices);
// Releases the owned state objects and destroys the raw query pool handle
// (it is not wrapped in a Move<>, so manual destruction is required).
330 BasicOcclusionQueryTestInstance::~BasicOcclusionQueryTestInstance (void)
333 delete m_stateObjects;
335 if (m_queryPool != DE_NULL)
337 const vk::VkDevice device = m_context.getDevice();
338 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
340 vk.destroyQueryPool(device, m_queryPool, /*pAllocator*/ DE_NULL);
344 tcu::TestStatus BasicOcclusionQueryTestInstance::iterate (void)
346 tcu::TestLog &log = m_context.getTestContext().getLog();
347 const vk::VkDevice device = m_context.getDevice();
348 const vk::VkQueue queue = m_context.getUniversalQueue();
349 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
351 const CmdPoolCreateInfo cmdPoolCreateInfo (m_context.getUniversalQueueFamilyIndex());
352 vk::Move<vk::VkCommandPool> cmdPool = vk::createCommandPool(vk, device, &cmdPoolCreateInfo);
354 vk::Unique<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
356 beginCommandBuffer(vk, *cmdBuffer);
358 initialTransitionColor2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_LAYOUT_GENERAL,
359 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
360 initialTransitionDepth2DImage(vk, *cmdBuffer, m_stateObjects->m_DepthImage->object(), vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
361 vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);
363 std::vector<vk::VkClearValue> renderPassClearValues(2);
364 deMemset(&renderPassClearValues[0], 0, static_cast<int>(renderPassClearValues.size()) * sizeof(vk::VkClearValue));
366 vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
368 beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer, vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT), (deUint32)renderPassClearValues.size(), &renderPassClearValues[0]);
370 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);
372 vk::VkBuffer vertexBuffer = m_stateObjects->m_vertexBuffer->object();
373 const vk::VkDeviceSize vertexBufferOffset = 0;
374 vk.cmdBindVertexBuffers(*cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);
376 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_EMPTY, m_testVector.queryControlFlags);
377 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_EMPTY);
379 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_DRAWCALL, m_testVector.queryControlFlags);
380 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, 0, 0);
381 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_DRAWCALL);
383 endRenderPass(vk, *cmdBuffer);
385 transition2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_ASPECT_COLOR_BIT,
386 vk::VK_IMAGE_LAYOUT_GENERAL, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
387 vk::VK_ACCESS_TRANSFER_READ_BIT, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT);
389 endCommandBuffer(vk, *cmdBuffer);
391 submitCommandsAndWait(vk, device, queue, cmdBuffer.get());
393 deUint64 queryResults[NUM_QUERIES_IN_POOL] = { 0 };
394 size_t queryResultsSize = sizeof(queryResults);
396 vk::VkResult queryResult = vk.getQueryPoolResults(device, m_queryPool, 0, NUM_QUERIES_IN_POOL, queryResultsSize, queryResults, sizeof(queryResults[0]), vk::VK_QUERY_RESULT_64_BIT);
398 if (queryResult == vk::VK_NOT_READY)
400 TCU_FAIL("Query result not avaliable, but vkWaitIdle() was called.");
403 VK_CHECK(queryResult);
405 log << tcu::TestLog::Section("OcclusionQueryResults",
406 "Occlusion query results");
407 for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(queryResults); ++ndx)
409 log << tcu::TestLog::Message << "query[ slot == " << ndx
410 << "] result == " << queryResults[ndx] << tcu::TestLog::EndMessage;
415 for (int queryNdx = 0; queryNdx < DE_LENGTH_OF_ARRAY(queryResults); ++queryNdx)
418 deUint64 expectedValue;
422 case QUERY_INDEX_CAPTURE_EMPTY:
425 case QUERY_INDEX_CAPTURE_DRAWCALL:
426 expectedValue = NUM_VERTICES_IN_DRAWCALL;
430 if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) || expectedValue == 0)
432 // require precise value
433 if (queryResults[queryNdx] != expectedValue)
435 log << tcu::TestLog::Message << "vkGetQueryPoolResults returned "
436 "wrong value of query for index "
437 << queryNdx << ", expected " << expectedValue << ", got "
438 << queryResults[0] << "." << tcu::TestLog::EndMessage;
444 // require imprecize value > 0
445 if (queryResults[queryNdx] == 0)
447 log << tcu::TestLog::Message << "vkGetQueryPoolResults returned "
448 "wrong value of query for index "
449 << queryNdx << ", expected any non-zero value, got "
450 << queryResults[0] << "." << tcu::TestLog::EndMessage;
455 log << tcu::TestLog::EndSection;
459 return tcu::TestStatus(QP_TEST_RESULT_PASS, "Query result verification passed");
461 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "Query result verification failed");
// Full-matrix occlusion query test: three queries (unoccluded, partially
// occluded, fully occluded geometry) exercised across result size, wait
// mode, get-vs-copy results mode, stride and availability options.
464 class OcclusionQueryTestInstance : public vkt::TestInstance
467 OcclusionQueryTestInstance (vkt::Context &context, const OcclusionQueryTestVector& testVector);
468 ~OcclusionQueryTestInstance (void);
470 tcu::TestStatus iterate (void);
// True when the pool reset / result copy must live in its own command
// buffer to sequence correctly with host result access.
472 bool hasSeparateResetCmdBuf (void) const;
473 bool hasSeparateCopyCmdBuf (void) const;
475 vk::Move<vk::VkCommandBuffer> recordQueryPoolReset (vk::VkCommandPool commandPool);
476 vk::Move<vk::VkCommandBuffer> recordRender (vk::VkCommandPool commandPool);
477 vk::Move<vk::VkCommandBuffer> recordCopyResults (vk::VkCommandPool commandPool);
// Reads back results (and optional availability words) for all queries.
479 void captureResults (deUint64* retResults, deUint64* retAvailability, bool allowNotReady);
480 void logResults (const deUint64* results, const deUint64* availability);
481 bool validateResults (const deUint64* results, const deUint64* availability, bool allowUnavailable, vk::VkPrimitiveTopology primitiveTopology);
// Query slot assignments.
485 NUM_QUERIES_IN_POOL = 3,
486 QUERY_INDEX_CAPTURE_ALL = 0,
487 QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED = 1,
488 QUERY_INDEX_CAPTURE_OCCLUDED = 2
// Vertex budget: one triangle per role.
492 NUM_VERTICES_IN_DRAWCALL = 3,
493 NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL = 3,
494 NUM_VERTICES_IN_OCCLUDER_DRAWCALL = 3,
495 NUM_VERTICES = NUM_VERTICES_IN_DRAWCALL + NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL + NUM_VERTICES_IN_OCCLUDER_DRAWCALL
// First-vertex offsets into the shared vertex buffer.
500 START_VERTEX_PARTIALLY_OCCLUDED = START_VERTEX + NUM_VERTICES_IN_DRAWCALL,
501 START_VERTEX_OCCLUDER = START_VERTEX_PARTIALLY_OCCLUDED + NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL
504 OcclusionQueryTestVector m_testVector;
// Flags for vkGetQueryPoolResults / vkCmdCopyQueryPoolResults, derived
// once from the test vector in the constructor.
506 const vk::VkQueryResultFlags m_queryResultFlags;
// Owned raw pointer; deleted in the destructor.
508 StateObjects* m_stateObjects;
// Raw handle; destroyed explicitly in the destructor.
509 vk::VkQueryPool m_queryPool;
// Host-visible destination for RESULTS_MODE_COPY.
510 de::SharedPtr<Buffer> m_queryPoolResultsBuffer;
512 vk::Move<vk::VkCommandPool> m_commandPool;
513 vk::Move<vk::VkCommandBuffer> m_queryPoolResetCommandBuffer;
514 vk::Move<vk::VkCommandBuffer> m_renderCommandBuffer;
515 vk::Move<vk::VkCommandBuffer> m_copyResultsCommandBuffer;
// Creates state objects, the 3-slot query pool, the copy-destination
// buffer (copy mode only) and pre-records the command buffers the test
// vector requires.
518 OcclusionQueryTestInstance::OcclusionQueryTestInstance (vkt::Context &context, const OcclusionQueryTestVector& testVector)
519 : vkt::TestInstance (context)
520 , m_testVector (testVector)
// Translate the test vector into vkGetQueryPoolResults/copy flags once.
521 , m_queryResultFlags ((m_testVector.queryWait == WAIT_QUERY ? vk::VK_QUERY_RESULT_WAIT_BIT : 0)
522 | (m_testVector.queryResultSize == RESULT_SIZE_64_BIT ? vk::VK_QUERY_RESULT_64_BIT : 0)
523 | (m_testVector.queryResultsAvailability ? vk::VK_QUERY_RESULT_WITH_AVAILABILITY_BIT : 0))
525 const vk::VkDevice device = m_context.getDevice();
526 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
// Precise queries are an optional device feature.
528 if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) && !m_context.getDeviceFeatures().occlusionQueryPrecise)
529 throw tcu::NotSupportedError("Precise occlusion queries are not supported");
531 m_stateObjects = new StateObjects(m_context.getDeviceInterface(), m_context, NUM_VERTICES_IN_DRAWCALL + NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL + NUM_VERTICES_IN_OCCLUDER_DRAWCALL, m_testVector.primitiveTopology);
533 const vk::VkQueryPoolCreateInfo queryPoolCreateInfo =
535 vk::VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
538 vk::VK_QUERY_TYPE_OCCLUSION,
543 VK_CHECK(vk.createQueryPool(device, &queryPoolCreateInfo, /*pAllocator*/ DE_NULL, &m_queryPool));
// Copy mode needs a host-visible buffer sized stride * query count.
545 if (m_testVector.queryResultsMode == RESULTS_MODE_COPY)
547 const vk::VkDeviceSize resultsBufferSize = m_testVector.queryResultsStride * NUM_QUERIES_IN_POOL;
548 m_queryPoolResultsBuffer = Buffer::createAndAlloc(vk, device, BufferCreateInfo(resultsBufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT), m_context.getDefaultAllocator(), vk::MemoryRequirement::HostVisible);
// Pre-record the render pass work; reset/copy get their own command
// buffers only when required by the wait/results mode (see the
// hasSeparate*CmdBuf predicates).
551 const CmdPoolCreateInfo cmdPoolCreateInfo (m_context.getUniversalQueueFamilyIndex());
552 m_commandPool = vk::createCommandPool(vk, device, &cmdPoolCreateInfo);
553 m_renderCommandBuffer = recordRender(*m_commandPool);
555 if (hasSeparateResetCmdBuf())
557 m_queryPoolResetCommandBuffer = recordQueryPoolReset(*m_commandPool);
560 if (hasSeparateCopyCmdBuf())
562 m_copyResultsCommandBuffer = recordCopyResults(*m_commandPool);
// Releases owned state objects and destroys the raw query pool handle
// (Move<> wrappers clean up the command pool/buffers automatically).
566 OcclusionQueryTestInstance::~OcclusionQueryTestInstance (void)
568 const vk::VkDevice device = m_context.getDevice();
571 delete m_stateObjects;
573 if (m_queryPool != DE_NULL)
575 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
576 vk.destroyQueryPool(device, m_queryPool, /*pAllocator*/ DE_NULL);
// Uploads the three test triangles, submits the pre-recorded command
// buffers (optional reset, render, optional copy) with the wait behavior
// dictated by the test vector, then captures, logs and validates the
// three query results.
580 tcu::TestStatus OcclusionQueryTestInstance::iterate (void)
582 const vk::VkQueue queue = m_context.getUniversalQueue();
583 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
584 tcu::TestLog& log = m_context.getTestContext().getLog();
585 std::vector<tcu::Vec4> vertices (NUM_VERTICES);
// 1st triangle - no occluders in front of it (z = 0.5).
588 vertices[START_VERTEX + 0] = tcu::Vec4( 0.5, 0.5, 0.5, 1.0);
589 vertices[START_VERTEX + 1] = tcu::Vec4( 0.5, -0.5, 0.5, 1.0);
590 vertices[START_VERTEX + 2] = tcu::Vec4(-0.5, 0.5, 0.5, 1.0);
591 // 2nd triangle - partially occluding the scene
592 vertices[START_VERTEX_PARTIALLY_OCCLUDED + 0] = tcu::Vec4(-0.5, -0.5, 1.0, 1.0);
593 vertices[START_VERTEX_PARTIALLY_OCCLUDED + 1] = tcu::Vec4( 0.5, -0.5, 1.0, 1.0);
594 vertices[START_VERTEX_PARTIALLY_OCCLUDED + 2] = tcu::Vec4(-0.5, 0.5, 1.0, 1.0);
595 // 3nd triangle - fully occluding the scene
596 vertices[START_VERTEX_OCCLUDER + 0] = tcu::Vec4( 0.5, 0.5, 1.0, 1.0);
597 vertices[START_VERTEX_OCCLUDER + 1] = tcu::Vec4( 0.5, -0.5, 1.0, 1.0);
598 vertices[START_VERTEX_OCCLUDER + 2] = tcu::Vec4(-0.5, 0.5, 1.0, 1.0);
600 m_stateObjects->setVertices(vk, vertices);
// Submit the standalone reset first when one is needed.
602 if (hasSeparateResetCmdBuf())
604 const vk::VkSubmitInfo submitInfoReset =
606 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
607 DE_NULL, // const void* pNext;
608 0u, // deUint32 waitSemaphoreCount;
609 DE_NULL, // const VkSemaphore* pWaitSemaphores;
610 (const vk::VkPipelineStageFlags*)DE_NULL,
611 1u, // deUint32 commandBufferCount;
612 &m_queryPoolResetCommandBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
613 0u, // deUint32 signalSemaphoreCount;
614 DE_NULL // const VkSemaphore* pSignalSemaphores;
// NOTE(review): this and the two later queueSubmit calls ignore the
// VkResult, unlike the VK_CHECK'd calls elsewhere - consider VK_CHECK.
617 vk.queueSubmit(queue, 1, &submitInfoReset, DE_NULL);
619 // Trivially wait for reset to complete. This is to ensure the query pool is in reset state before
620 // host accesses, so as to not insert any synchronization before capturing the results needed for WAIT_NONE
622 VK_CHECK(vk.queueWaitIdle(queue));
626 const vk::VkSubmitInfo submitInfoRender =
628 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
629 DE_NULL, // const void* pNext;
630 0, // deUint32 waitSemaphoreCount;
631 DE_NULL, // const VkSemaphore* pWaitSemaphores;
632 (const vk::VkPipelineStageFlags*)DE_NULL,
633 1, // deUint32 commandBufferCount;
634 &m_renderCommandBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
635 0, // deUint32 signalSemaphoreCount;
636 DE_NULL // const VkSemaphore* pSignalSemaphores;
638 vk.queueSubmit(queue, 1, &submitInfoRender, DE_NULL);
// WAIT_QUEUE variant: synchronize on the queue before touching results.
641 if (m_testVector.queryWait == WAIT_QUEUE)
643 VK_CHECK(vk.queueWaitIdle(queue));
646 if (hasSeparateCopyCmdBuf())
648 // In case of WAIT_QUEUE test variant, the previously submitted m_renderCommandBuffer did not
649 // contain vkCmdCopyQueryResults, so additional cmd buffer is needed.
651 // In the case of WAIT_NONE or WAIT_QUERY, vkCmdCopyQueryResults is stored in m_renderCommandBuffer.
653 const vk::VkSubmitInfo submitInfo =
655 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
656 DE_NULL, // const void* pNext;
657 0, // deUint32 waitSemaphoreCount;
658 DE_NULL, // const VkSemaphore* pWaitSemaphores;
659 (const vk::VkPipelineStageFlags*)DE_NULL,
660 1, // deUint32 commandBufferCount;
661 &m_copyResultsCommandBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
662 0, // deUint32 signalSemaphoreCount;
663 DE_NULL // const VkSemaphore* pSignalSemaphores;
665 vk.queueSubmit(queue, 1, &submitInfo, DE_NULL);
668 if (m_testVector.queryResultsMode == RESULTS_MODE_COPY)
670 // In case of vkCmdCopyQueryResults is used, test must always wait for it
671 // to complete before we can read the result buffer.
673 VK_CHECK(vk.queueWaitIdle(queue));
676 deUint64 queryResults [NUM_QUERIES_IN_POOL];
677 deUint64 queryAvailability [NUM_QUERIES_IN_POOL];
679 // Allow not ready results only if nobody waited before getting the query results
680 const bool allowNotReady = (m_testVector.queryWait == WAIT_NONE);
682 captureResults(queryResults, queryAvailability, allowNotReady);
684 log << tcu::TestLog::Section("OcclusionQueryResults", "Occlusion query results");
686 logResults(queryResults, queryAvailability);
687 bool passed = validateResults(queryResults, queryAvailability, allowNotReady, m_testVector.primitiveTopology);
689 log << tcu::TestLog::EndSection;
// GET mode may have returned before the GPU finished; drain the queue so
// teardown does not destroy resources still in use.
691 if (m_testVector.queryResultsMode != RESULTS_MODE_COPY)
693 VK_CHECK(vk.queueWaitIdle(queue));
698 return tcu::TestStatus(QP_TEST_RESULT_PASS, "Query result verification passed");
700 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "Query result verification failed");
// Decides whether the query pool reset needs its own command buffer.
// Copy mode and queue-wait mode are safe without one; otherwise the reset
// must be submitted and completed before the host reads results.
// (The individual return statements are missing from this copy of the file.)
703 bool OcclusionQueryTestInstance::hasSeparateResetCmdBuf (void) const
705 // Determine if resetting query pool should be performed in separate command buffer
706 // to avoid race condition between host query access and device query reset.
708 if (m_testVector.queryResultsMode == RESULTS_MODE_COPY)
710 // We copy query results on device, so there is no race condition between
714 if (m_testVector.queryWait == WAIT_QUEUE)
716 // We wait for queue to be complete before accessing query results
720 // Separate command buffer with reset must be submitted & completed before
721 // host accesses the query results
725 bool OcclusionQueryTestInstance::hasSeparateCopyCmdBuf (void) const
727 // Copy query results must go into separate command buffer, if we want to wait on queue before that
728 return (m_testVector.queryResultsMode == RESULTS_MODE_COPY && m_testVector.queryWait == WAIT_QUEUE);
// Records a primary command buffer that only resets all query slots in
// the pool. Only used when hasSeparateResetCmdBuf() is true.
// (The trailing "return cmdBuffer;" line is missing from this copy.)
731 vk::Move<vk::VkCommandBuffer> OcclusionQueryTestInstance::recordQueryPoolReset (vk::VkCommandPool cmdPool)
733 const vk::VkDevice device = m_context.getDevice();
734 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
736 DE_ASSERT(hasSeparateResetCmdBuf());
738 vk::Move<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
740 beginCommandBuffer(vk, *cmdBuffer);
741 vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
742 endCommandBuffer(vk, *cmdBuffer);
// Records the main render command buffer: three render passes, each
// clearing the attachments and drawing increasing amounts of occluding
// geometry before the measured draw call, so the three query slots see
// unoccluded, partially occluded and fully occluded fragments.
747 vk::Move<vk::VkCommandBuffer> OcclusionQueryTestInstance::recordRender (vk::VkCommandPool cmdPool)
749 const vk::VkDevice device = m_context.getDevice();
750 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
752 vk::Move<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
754 beginCommandBuffer(vk, *cmdBuffer);
// Move the attachments into the layouts the render pass expects.
756 initialTransitionColor2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_LAYOUT_GENERAL,
757 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
758 initialTransitionDepth2DImage(vk, *cmdBuffer, m_stateObjects->m_DepthImage->object(), vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
759 vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);
761 std::vector<vk::VkClearValue> renderPassClearValues(2);
762 deMemset(&renderPassClearValues[0], 0, static_cast<int>(renderPassClearValues.size()) * sizeof(vk::VkClearValue));
// Reset in-line unless a dedicated reset command buffer is used.
764 if (!hasSeparateResetCmdBuf())
766 vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
// Pass 1: nothing occludes the measured draw (slot CAPTURE_ALL).
769 beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer, vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT), (deUint32)renderPassClearValues.size(), &renderPassClearValues[0]);
771 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);
773 vk::VkBuffer vertexBuffer = m_stateObjects->m_vertexBuffer->object();
774 const vk::VkDeviceSize vertexBufferOffset = 0;
775 vk.cmdBindVertexBuffers(*cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);
777 // Draw un-occluded geometry
778 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_ALL, m_testVector.queryControlFlags);
779 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
780 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_ALL);
782 endRenderPass(vk, *cmdBuffer);
// Pass 2: draw a partial occluder first, then measure the same triangle
// (slot CAPTURE_PARTIALLY_OCCLUDED).
784 beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer, vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT), (deUint32)renderPassClearValues.size(), &renderPassClearValues[0]);
786 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);
788 // Draw un-occluded geometry
789 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
791 // Partially occlude geometry
792 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL, 1, START_VERTEX_PARTIALLY_OCCLUDED, 0);
794 // Draw partially-occluded geometry
795 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED, m_testVector.queryControlFlags);
796 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
797 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED);
799 endRenderPass(vk, *cmdBuffer);
// Pass 3: draw partial + full occluders, then measure (slot
// CAPTURE_OCCLUDED; expected zero passing samples).
801 beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer, vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT), (deUint32)renderPassClearValues.size(), &renderPassClearValues[0]);
803 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);
805 // Draw un-occluded geometry
806 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
808 // Partially occlude geometry
809 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL, 1, START_VERTEX_PARTIALLY_OCCLUDED, 0);
812 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_OCCLUDER_DRAWCALL, 1, START_VERTEX_OCCLUDER, 0);
814 // Draw occluded geometry
815 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_OCCLUDED, m_testVector.queryControlFlags);
816 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
817 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_OCCLUDED);
819 endRenderPass(vk, *cmdBuffer);
// Record the device-side copy here unless it needs its own buffer.
821 if (m_testVector.queryResultsMode == RESULTS_MODE_COPY && !hasSeparateCopyCmdBuf())
823 vk.cmdCopyQueryPoolResults(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL, m_queryPoolResultsBuffer->object(), /*dstOffset*/ 0, m_testVector.queryResultsStride, m_queryResultFlags);
826 transition2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_ASPECT_COLOR_BIT, vk::VK_IMAGE_LAYOUT_GENERAL,
827 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT,
828 vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT);
830 endCommandBuffer(vk, *cmdBuffer);
// Records a primary command buffer containing only the device-side copy
// of all query results into m_queryPoolResultsBuffer. Used when the copy
// must be submitted after a queue wait (hasSeparateCopyCmdBuf()).
// (The trailing "return cmdBuffer;" line is missing from this copy.)
835 vk::Move<vk::VkCommandBuffer> OcclusionQueryTestInstance::recordCopyResults (vk::VkCommandPool cmdPool)
837 const vk::VkDevice device = m_context.getDevice();
838 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
840 vk::Move<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
842 beginCommandBuffer(vk, *cmdBuffer);
843 vk.cmdCopyQueryPoolResults(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL, m_queryPoolResultsBuffer->object(), /*dstOffset*/ 0, m_testVector.queryResultsStride, m_queryResultFlags);
844 endCommandBuffer(vk, *cmdBuffer);
849 void OcclusionQueryTestInstance::captureResults (deUint64* retResults, deUint64* retAvailAbility, bool allowNotReady)
852 const vk::VkDevice device = m_context.getDevice();
853 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
854 std::vector<deUint8> resultsBuffer (static_cast<size_t>(m_testVector.queryResultsStride) * NUM_QUERIES_IN_POOL);
856 if (m_testVector.queryResultsMode == RESULTS_MODE_GET)
858 const vk::VkResult queryResult = vk.getQueryPoolResults(device, m_queryPool, 0, NUM_QUERIES_IN_POOL, resultsBuffer.size(), &resultsBuffer[0], m_testVector.queryResultsStride, m_queryResultFlags);
859 if (queryResult == vk::VK_NOT_READY && !allowNotReady)
861 TCU_FAIL("getQueryPoolResults returned VK_NOT_READY, but results should be already available.");
865 VK_CHECK(queryResult);
868 else if (m_testVector.queryResultsMode == RESULTS_MODE_COPY)
870 const vk::Allocation& allocation = m_queryPoolResultsBuffer->getBoundMemory();
871 const void* allocationData = allocation.getHostPtr();
873 vk::invalidateAlloc(vk, device, allocation);
875 deMemcpy(&resultsBuffer[0], allocationData, resultsBuffer.size());
878 for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; queryNdx++)
880 const void* srcPtr = &resultsBuffer[queryNdx * static_cast<size_t>(m_testVector.queryResultsStride)];
881 if (m_testVector.queryResultSize == RESULT_SIZE_32_BIT)
883 const deUint32* srcPtrTyped = static_cast<const deUint32*>(srcPtr);
884 retResults[queryNdx] = *srcPtrTyped;
885 if (m_testVector.queryResultsAvailability)
887 retAvailAbility[queryNdx] = *(srcPtrTyped + 1);
890 else if (m_testVector.queryResultSize == RESULT_SIZE_64_BIT)
892 const deUint64* srcPtrTyped = static_cast<const deUint64*>(srcPtr);
893 retResults[queryNdx] = *srcPtrTyped;
895 if (m_testVector.queryResultsAvailability)
897 if (m_testVector.queryResultsAvailability)
899 retAvailAbility[queryNdx] = *(srcPtrTyped + 1);
905 TCU_FAIL("Wrong m_testVector.queryResultSize");
910 void OcclusionQueryTestInstance::logResults (const deUint64* results, const deUint64* availability)
912 tcu::TestLog& log = m_context.getTestContext().getLog();
914 for (int ndx = 0; ndx < NUM_QUERIES_IN_POOL; ++ndx)
916 if (!m_testVector.queryResultsAvailability)
918 log << tcu::TestLog::Message << "query[ slot == " << ndx << "] result == " << results[ndx] << tcu::TestLog::EndMessage;
922 log << tcu::TestLog::Message << "query[ slot == " << ndx << "] result == " << results[ndx] << ", availability == " << availability[ndx] << tcu::TestLog::EndMessage;
927 bool OcclusionQueryTestInstance::validateResults (const deUint64* results , const deUint64* availability, bool allowUnavailable, vk::VkPrimitiveTopology primitiveTopology)
930 tcu::TestLog& log = m_context.getTestContext().getLog();
932 for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; ++queryNdx)
934 deUint64 expectedValueMin = 0;
935 deUint64 expectedValueMax = 0;
937 if (m_testVector.queryResultsAvailability && availability[queryNdx] == 0)
939 // query result was not available
940 if (!allowUnavailable)
942 log << tcu::TestLog::Message << "query results availability was 0 for index "
943 << queryNdx << ", expected any value greater than 0." << tcu::TestLog::EndMessage;
950 // query is available, so expect proper result values
951 if (primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST)
955 case QUERY_INDEX_CAPTURE_OCCLUDED:
956 expectedValueMin = 0;
957 expectedValueMax = 0;
959 case QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED:
960 expectedValueMin = 1;
961 expectedValueMax = 1;
963 case QUERY_INDEX_CAPTURE_ALL:
964 expectedValueMin = NUM_VERTICES_IN_DRAWCALL;
965 expectedValueMax = NUM_VERTICES_IN_DRAWCALL;
969 else if (primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
973 case QUERY_INDEX_CAPTURE_OCCLUDED:
974 expectedValueMin = 0;
975 expectedValueMax = 0;
977 case QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED:
978 case QUERY_INDEX_CAPTURE_ALL:
980 const int primWidth = StateObjects::WIDTH / 2;
981 const int primHeight = StateObjects::HEIGHT / 2;
982 const int primArea = primWidth * primHeight / 2;
984 if (m_testVector.discardHalf)
986 expectedValueMin = (int)(0.95f * primArea * 0.5f);
987 expectedValueMax = (int)(1.05f * primArea * 0.5f);
991 expectedValueMin = (int)(0.97f * primArea);
992 expectedValueMax = (int)(1.03f * primArea);
999 TCU_FAIL("Unsupported primitive topology");
1003 if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) || (expectedValueMin == 0 && expectedValueMax == 0))
1005 // require precise value
1006 if (results[queryNdx] < expectedValueMin || results[queryNdx] > expectedValueMax)
1008 log << tcu::TestLog::Message << "wrong value of query for index "
1009 << queryNdx << ", expected the value minimum of " << expectedValueMin << ", maximum of " << expectedValueMax << " got "
1010 << results[queryNdx] << "." << tcu::TestLog::EndMessage;
1016 // require imprecise value greater than 0
1017 if (results[queryNdx] == 0)
1019 log << tcu::TestLog::Message << "wrong value of query for index "
1020 << queryNdx << ", expected any non-zero value, got "
1021 << results[queryNdx] << "." << tcu::TestLog::EndMessage;
1029 template<class Instance>
1030 class QueryPoolOcclusionTest : public vkt::TestCase
1033 QueryPoolOcclusionTest (tcu::TestContext &context, const char *name, const char *description, const OcclusionQueryTestVector& testVector)
1034 : TestCase (context, name, description)
1035 , m_testVector (testVector)
1039 vkt::TestInstance* createInstance (vkt::Context& context) const
1041 return new Instance(context, m_testVector);
1044 void initPrograms(vk::SourceCollections& programCollection) const
1046 const char* const discard =
1047 " if ((int(gl_FragCoord.x) % 2) == (int(gl_FragCoord.y) % 2))\n"
1050 const std::string fragSrc = std::string(
1052 "layout(location = 0) out vec4 out_FragColor;\n"
1055 " out_FragColor = vec4(0.07, 0.48, 0.75, 1.0);\n")
1056 + std::string(m_testVector.discardHalf ? discard : "")
1059 programCollection.glslSources.add("frag") << glu::FragmentSource(fragSrc.c_str());
1061 programCollection.glslSources.add("vert") << glu::VertexSource("#version 430\n"
1062 "layout(location = 0) in vec4 in_Position;\n"
1063 "out gl_PerVertex { vec4 gl_Position; float gl_PointSize; };\n"
1065 " gl_Position = in_Position;\n"
1066 " gl_PointSize = 1.0;\n"
1070 OcclusionQueryTestVector m_testVector;
1075 QueryPoolOcclusionTests::QueryPoolOcclusionTests (tcu::TestContext &testCtx)
1076 : TestCaseGroup(testCtx, "occlusion_query", "Tests for occlusion queries")
1078 /* Left blank on purpose */
1081 QueryPoolOcclusionTests::~QueryPoolOcclusionTests (void)
1083 /* Left blank on purpose */
1086 void QueryPoolOcclusionTests::init (void)
1088 OcclusionQueryTestVector baseTestVector;
1089 baseTestVector.queryControlFlags = 0;
1090 baseTestVector.queryResultSize = RESULT_SIZE_64_BIT;
1091 baseTestVector.queryWait = WAIT_QUEUE;
1092 baseTestVector.queryResultsMode = RESULTS_MODE_GET;
1093 baseTestVector.queryResultsStride = sizeof(deUint64);
1094 baseTestVector.queryResultsAvailability = false;
1095 baseTestVector.primitiveTopology = vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
1096 baseTestVector.discardHalf = false;
1100 OcclusionQueryTestVector testVector = baseTestVector;
1101 testVector.queryControlFlags = 0;
1102 addChild(new QueryPoolOcclusionTest<BasicOcclusionQueryTestInstance>(m_testCtx, "basic_conservative", "draw with conservative occlusion query", testVector));
1103 testVector.queryControlFlags = vk::VK_QUERY_CONTROL_PRECISE_BIT;
1104 addChild(new QueryPoolOcclusionTest<BasicOcclusionQueryTestInstance>(m_testCtx, "basic_precise", "draw with precise occlusion query", testVector));
1109 const vk::VkQueryControlFlags controlFlags[] = { 0, vk::VK_QUERY_CONTROL_PRECISE_BIT };
1110 const char* const controlFlagsStr[] = { "conservative", "precise" };
1112 for (int controlFlagIdx = 0; controlFlagIdx < DE_LENGTH_OF_ARRAY(controlFlags); ++controlFlagIdx)
1115 const vk::VkPrimitiveTopology primitiveTopology[] = { vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST };
1116 const char* const primitiveTopologyStr[] = { "points", "triangles" };
1117 for (int primitiveTopologyIdx = 0; primitiveTopologyIdx < DE_LENGTH_OF_ARRAY(primitiveTopology); ++primitiveTopologyIdx)
1120 const OcclusionQueryResultSize resultSize[] = { RESULT_SIZE_32_BIT, RESULT_SIZE_64_BIT };
1121 const char* const resultSizeStr[] = { "32", "64" };
1123 for (int resultSizeIdx = 0; resultSizeIdx < DE_LENGTH_OF_ARRAY(resultSize); ++resultSizeIdx)
1126 const OcclusionQueryWait wait[] = { WAIT_QUEUE, WAIT_QUERY };
1127 const char* const waitStr[] = { "queue", "query" };
1129 for (int waitIdx = 0; waitIdx < DE_LENGTH_OF_ARRAY(wait); ++waitIdx)
1131 const OcclusionQueryResultsMode resultsMode[] = { RESULTS_MODE_GET, RESULTS_MODE_COPY };
1132 const char* const resultsModeStr[] = { "get", "copy" };
1134 for (int resultsModeIdx = 0; resultsModeIdx < DE_LENGTH_OF_ARRAY(resultsMode); ++resultsModeIdx)
1137 const bool testAvailability[] = { false, true };
1138 const char* const testAvailabilityStr[] = { "without", "with"};
1140 for (int testAvailabilityIdx = 0; testAvailabilityIdx < DE_LENGTH_OF_ARRAY(testAvailability); ++testAvailabilityIdx)
1142 const bool discardHalf[] = { false, true };
1143 const char* const discardHalfStr[] = { "", "_discard" };
1145 for (int discardHalfIdx = 0; discardHalfIdx < DE_LENGTH_OF_ARRAY(discardHalf); ++discardHalfIdx)
1147 OcclusionQueryTestVector testVector = baseTestVector;
1148 testVector.queryControlFlags = controlFlags[controlFlagIdx];
1149 testVector.queryResultSize = resultSize[resultSizeIdx];
1150 testVector.queryWait = wait[waitIdx];
1151 testVector.queryResultsMode = resultsMode[resultsModeIdx];
1152 testVector.queryResultsStride = (testVector.queryResultSize == RESULT_SIZE_32_BIT ? sizeof(deUint32) : sizeof(deUint64));
1153 testVector.queryResultsAvailability = testAvailability[testAvailabilityIdx];
1154 testVector.primitiveTopology = primitiveTopology[primitiveTopologyIdx];
1155 testVector.discardHalf = discardHalf[discardHalfIdx];
1157 if (testVector.discardHalf && testVector.primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST)
1158 continue; // Discarding half of the pixels in fragment shader doesn't make sense with one-pixel-sized points.
1160 if (testVector.queryResultsAvailability)
1162 testVector.queryResultsStride *= 2;
1165 std::ostringstream testName;
1166 std::ostringstream testDescr;
1168 testName << resultsModeStr[resultsModeIdx] << "_results"
1169 << "_" << controlFlagsStr[controlFlagIdx]
1170 << "_size_" << resultSizeStr[resultSizeIdx]
1171 << "_wait_" << waitStr[waitIdx]
1172 << "_" << testAvailabilityStr[testAvailabilityIdx] << "_availability"
1173 << "_draw_" << primitiveTopologyStr[primitiveTopologyIdx]
1174 << discardHalfStr[discardHalfIdx];
1176 testDescr << "draw occluded " << primitiveTopologyStr[primitiveTopologyIdx]
1177 << "with " << controlFlagsStr[controlFlagIdx] << ", "
1178 << resultsModeStr[resultsModeIdx] << " results "
1179 << testAvailabilityStr[testAvailabilityIdx] << " availability bit as "
1180 << resultSizeStr[resultSizeIdx] << "bit variables,"
1181 << (testVector.discardHalf ? " discarding half of the fragments," : "")
1182 << "wait for results on" << waitStr[waitIdx];
1184 addChild(new QueryPoolOcclusionTest<OcclusionQueryTestInstance>(m_testCtx, testName.str().c_str(), testDescr.str().c_str(), testVector));
1193 // Test different strides
1195 const OcclusionQueryResultsMode resultsMode[] = { RESULTS_MODE_GET, RESULTS_MODE_COPY };
1196 const char* const resultsModeStr[] = { "get", "copy" };
1198 for (int resultsModeIdx = 0; resultsModeIdx < DE_LENGTH_OF_ARRAY(resultsMode); ++resultsModeIdx)
1200 const OcclusionQueryResultSize resultSizes[] = { RESULT_SIZE_32_BIT, RESULT_SIZE_64_BIT };
1201 const char* const resultSizeStr[] = { "32", "64" };
1203 const bool testAvailability[] = { false, true };
1204 const char* const testAvailabilityStr[] = { "without", "with" };
1206 for (int testAvailabilityIdx = 0; testAvailabilityIdx < DE_LENGTH_OF_ARRAY(testAvailability); ++testAvailabilityIdx)
1208 for (int resultSizeIdx = 0; resultSizeIdx < DE_LENGTH_OF_ARRAY(resultSizes); ++resultSizeIdx)
1210 const vk::VkDeviceSize resultSize = (resultSizes[resultSizeIdx] == RESULT_SIZE_32_BIT ? sizeof(deUint32) : sizeof(deUint64));
1212 // \todo [2015-12-18 scygan] Ensure only stride values aligned to resultSize are allowed. Otherwise test should be extended.
1213 const vk::VkDeviceSize strides[] =
1224 for (int strideIdx = 0; strideIdx < DE_LENGTH_OF_ARRAY(strides); strideIdx++)
1226 OcclusionQueryTestVector testVector = baseTestVector;
1227 testVector.queryResultsMode = resultsMode[resultsModeIdx];
1228 testVector.queryResultSize = resultSizes[resultSizeIdx];
1229 testVector.queryResultsAvailability = testAvailability[testAvailabilityIdx];
1230 testVector.queryResultsStride = strides[strideIdx];
1232 const vk::VkDeviceSize elementSize = (testVector.queryResultsAvailability ? resultSize * 2 : resultSize);
1234 if (elementSize > testVector.queryResultsStride)
1239 std::ostringstream testName;
1240 std::ostringstream testDescr;
1242 testName << resultsModeStr[resultsModeIdx]
1243 << "_results_size_" << resultSizeStr[resultSizeIdx]
1244 << "_stride_" << strides[strideIdx]
1245 << "_" << testAvailabilityStr[testAvailabilityIdx] << "_availability";
1247 testDescr << resultsModeStr[resultsModeIdx] << " results "
1248 << testAvailabilityStr[testAvailabilityIdx] << " availability bit as "
1249 << resultSizeStr[resultSizeIdx] << "bit variables, with stride" << strides[strideIdx];
1251 addChild(new QueryPoolOcclusionTest<OcclusionQueryTestInstance>(m_testCtx, testName.str().c_str(), testDescr.str().c_str(), testVector));