else return VK_BORDER_COLOR_FLOAT_CUSTOM_EXT;
}
+ // note: never reached
DE_FATAL("Unsupported border color");
return VK_BORDER_COLOR_MAX_ENUM;
}
void expect (char c)
{
- if (m_str[m_idx] != c || m_idx >= m_len)
+ if (m_idx >= m_len || m_str[m_idx] != c)
TCU_THROW(ResourceError, "Error parsing amber index file");
m_idx++;
TestParams (void)
{
+ allocationKind = ALLOCATION_KIND_DEDICATED;
+ extensionUse = EXTENSION_USE_NONE;
mipLevels = 1u;
singleCommand = DE_TRUE;
barrierCount = 1u;
{
// If deviceLUIDValid is VK_FALSE, the contents of deviceLUID and deviceNodeMask are undefined
// so they can only be compared when deviceLUIDValid is VK_TRUE.
- if ((deMemCmp(idProperties[0].deviceLUID, idProperties[1].deviceLUID, VK_UUID_SIZE) != 0) ||
+ if ((deMemCmp(idProperties[0].deviceLUID, idProperties[1].deviceLUID, VK_LUID_SIZE) != 0) ||
(idProperties[0].deviceNodeMask != idProperties[1].deviceNodeMask))
{
TCU_FAIL("Mismatch between VkPhysicalDeviceIDProperties");
log << TestLog::Message << performanceQueryProperties[0] << TestLog::EndMessage;
+ // TODO: this comparison is a no-op — it compares allowCommandBufferQueryCopies of element [0] with itself. The second index should most likely be [1] to actually cross-check the two property structs.
if (performanceQueryProperties[0].allowCommandBufferQueryCopies != performanceQueryProperties[0].allowCommandBufferQueryCopies)
{
TCU_FAIL("Mismatch between VkPhysicalDevicePerformanceQueryPropertiesKHR");
{
// If deviceLUIDValid is VK_FALSE, the contents of deviceLUID and deviceNodeMask are undefined
// so they can only be compared when deviceLUIDValid is VK_TRUE.
- if ((deMemCmp(idProperties.deviceLUID, vulkan11Properties.deviceLUID, VK_UUID_SIZE) != 0) ||
+ if ((deMemCmp(idProperties.deviceLUID, vulkan11Properties.deviceLUID, VK_LUID_SIZE) != 0) ||
(idProperties.deviceNodeMask != vulkan11Properties.deviceNodeMask))
{
TCU_FAIL("Mismatch between VkPhysicalDeviceIDProperties and VkPhysicalDeviceVulkan11Properties");
{
const float depth = access.getPixDepth(x, y);
const int mantissaBits = getTextureFormatMantissaBitDepth(format).x();
- const int threshold = 10 * 1 << (23 - mantissaBits);
+ const int threshold = (10 * 1) << (23 - mantissaBits);
DE_ASSERT(mantissaBits <= 23);
2u % arraySize,
};
- if (viewType == vk::VK_IMAGE_VIEW_TYPE_1D || viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY)
- return tcu::Vec4(coords[samplePosNdx].x() / (float)imageSize,
- (float)slices[samplePosNdx],
- 0.0f,
- 0.0f);
- else if (viewType == vk::VK_IMAGE_VIEW_TYPE_2D || viewType == vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY)
- return tcu::Vec4(coords[samplePosNdx].x() / (float)imageSize,
- coords[samplePosNdx].y() / (float)imageSize,
- (float)slices[samplePosNdx],
- 0.0f);
- else if (viewType == vk::VK_IMAGE_VIEW_TYPE_3D)
- return tcu::Vec4(coords[samplePosNdx].x() / (float)imageSize,
- coords[samplePosNdx].y() / (float)imageSize,
- coords[samplePosNdx].z() / (float)imageSize,
- 0.0f);
- else
+ switch (viewType)
{
- DE_FATAL("Impossible");
- return tcu::Vec4();
+ case vk::VK_IMAGE_VIEW_TYPE_1D:
+ case vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY:
+ return tcu::Vec4(coords[samplePosNdx].x() / (float)imageSize,
+ (float)slices[samplePosNdx],
+ 0.0f,
+ 0.0f);
+ case vk::VK_IMAGE_VIEW_TYPE_2D:
+ case vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY:
+ return tcu::Vec4(coords[samplePosNdx].x() / (float)imageSize,
+ coords[samplePosNdx].y() / (float)imageSize,
+ (float)slices[samplePosNdx],
+ 0.0f);
+ case vk::VK_IMAGE_VIEW_TYPE_3D:
+ return tcu::Vec4(coords[samplePosNdx].x() / (float)imageSize,
+ coords[samplePosNdx].y() / (float)imageSize,
+ coords[samplePosNdx].z() / (float)imageSize,
+ 0.0f);
+ default:
+ DE_FATAL("Impossible");
+ return tcu::Vec4();
}
}
};
ConditionalRenderingBaseTestInstance::ConditionalRenderingBaseTestInstance (Context& context)
- : TestInstance (context)
- , m_vki (m_context.getInstanceInterface())
- , m_vkd (m_context.getDeviceInterface())
- , m_device (m_context.getDevice())
- , m_physicalDevice (m_context.getPhysicalDevice())
- , m_queue (m_context.getUniversalQueue())
+ : TestInstance (context)
+ , m_vki (m_context.getInstanceInterface())
+ , m_vkd (m_context.getDeviceInterface())
+ , m_device (m_context.getDevice())
+ , m_physicalDevice (m_context.getPhysicalDevice())
+ , m_queue (m_context.getUniversalQueue())
+ , m_conditionalRenderingBufferOffset (0)
{
}
VkImageFormatProperties imageFormatProperties;
const auto& vki = context.getInstanceInterface();
const auto& vkd = context.getPhysicalDevice();
- const auto usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ const auto usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
if (vki.getPhysicalDeviceImageFormatProperties(vkd, m_format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, usage, 0u, &imageFormatProperties) == VK_ERROR_FORMAT_NOT_SUPPORTED)
{
TCU_THROW(NotSupportedError, "Format not supported");
struct DrawTypedTestSpec : public TestSpecBase
{
DrawTypedTestSpec()
- : testFirstInstanceNdx(false)
+ : drawType(DRAWTYPE_LAST)
+ , testFirstInstanceNdx(false)
, testIndirectCountExt(IndirectCountType::NONE)
{};
};
QuadDrawTestCommand::QuadDrawTestCommand (deUint32 x, deUint32 y, deUint32 width, deUint32 height, Vec4 color)
-: m_quad(x, y, width, height, color)
+: m_offset(0)
+, m_quad(x, y, width, height, color)
{
}
DepthStencilBaseCase (Context& context, const char* vertexShaderName, const char* fragmentShaderName)
: TestInstance (context)
, m_colorAttachmentFormat (vk::VK_FORMAT_R8G8B8A8_UNORM)
+ , m_depthStencilAttachmentFormat (vk::VK_FORMAT_UNDEFINED)
, m_topology (vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP)
, m_vk (context.getDeviceInterface())
, m_vertexShaderName (vertexShaderName)
DepthBiasBaseCase (Context& context, const char* vertexShaderName, const char* fragmentShaderName)
: TestInstance (context)
, m_colorAttachmentFormat (vk::VK_FORMAT_R8G8B8A8_UNORM)
+ , m_depthStencilAttachmentFormat (vk::VK_FORMAT_UNDEFINED)
, m_topology (vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP)
, m_vk (context.getDeviceInterface())
, m_vertexShaderName (vertexShaderName)
: TestInstance (context)
, m_primitiveType (primitiveType)
, m_name (name)
+ , m_numDrawVertices (0)
{
}
return false;
}
+ // Note: this is never reached
log << tcu::TestLog::Image("LayerContent", "Layer content", image);
return allPixelsOk;
VkExtent3D getDefaultDimensions (VkImageType type, bool array)
{
DE_ASSERT(type == VK_IMAGE_TYPE_2D || type == VK_IMAGE_TYPE_3D);
- DE_ASSERT(!array || VK_IMAGE_TYPE_2D);
+ DE_ASSERT(!array || type == VK_IMAGE_TYPE_2D);
constexpr VkExtent3D kDefault3D = { 32u, 48u, 56u };
constexpr VkExtent3D kDefault2DArray = kDefault3D;
: m_read (read)
, m_write (write)
, m_seed (seed)
+ , m_size (0)
{
}
CreateImage::CreateImage (vk::VkImageUsageFlags usage,
vk::VkSharingMode sharing)
- : m_usage (usage)
- , m_sharing (sharing)
+ : m_usage (usage)
+ , m_sharing (sharing)
+ , m_imageWidth (0)
+ , m_imageHeight (0)
{
}
, m_dstAccesses (dstAccesses)
, m_srcLayout (srcLayout)
, m_dstLayout (dstLayout)
+ , m_imageMemorySize (0)
{
}
class FillBuffer : public CmdCommand
{
public:
- FillBuffer (deUint32 value) : m_value(value) {}
+ FillBuffer (deUint32 value) : m_value(value), m_bufferSize(0) {}
~FillBuffer (void) {}
const char* getName (void) const { return "FillBuffer"; }
class UpdateBuffer : public CmdCommand
{
public:
- UpdateBuffer (deUint32 seed) : m_seed(seed) {}
+ UpdateBuffer (deUint32 seed) : m_seed(seed), m_bufferSize(0) {}
~UpdateBuffer (void) {}
const char* getName (void) const { return "UpdateBuffer"; }
class BufferCopyFromBuffer : public CmdCommand
{
public:
- BufferCopyFromBuffer (deUint32 seed) : m_seed(seed) {}
+ BufferCopyFromBuffer (deUint32 seed) : m_seed(seed), m_bufferSize(0) {}
~BufferCopyFromBuffer (void) {}
const char* getName (void) const { return "BufferCopyFromBuffer"; }
RenderVertexBuffer (deUint32 stride)
: m_stride(stride)
, m_name("RenderVertexBuffer" + de::toString(stride))
+ , m_bufferSize(0)
{}
~RenderVertexBuffer (void) {}
{
for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
{
- const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
-
if ((dstStage_ & m_allowedStages) == 0)
continue;
+ const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
+
+
// All operations are initially visible
m_invisibleOperations[dstStage] = 0;
for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
{
- const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
-
if ((srcStage_ & m_allowedStages) == 0)
continue;
+ const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
+
// There are no write operations that are not yet available
// initially.
for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
{
- const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
-
if ((dstAccess_ & m_allowedAccesses) == 0)
continue;
+ const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
+
m_unavailableWriteOperations[dstStage][srcStage][dstAccess] = 0;
}
}
for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
{
- const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
-
if ((dstStage_ & m_allowedStages) == 0)
continue;
+ const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
+
// Mark stage as incomplete for all stages
m_incompleteOperations[dstStage] |= stage;
// Mark write access from srcStage unavailable to all stages for all accesses
for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
{
- const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
-
if ((dstAccess_ & m_allowedAccesses) == 0)
continue;
+ const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
+
m_unavailableWriteOperations[dstStage][srcStage][dstAccess] |= access;
}
}
for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
{
- const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
-
if ((dstStage_ & m_allowedStages) == 0)
continue;
+ const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
+
// Make sure all previous operation are complete in all stages
if (m_incompleteOperations[dstStage])
{
// Make sure all write operations from all stages are available
for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
{
- const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
-
if ((srcStage_ & m_allowedStages) == 0)
continue;
+ const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
+
for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
{
- const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
-
if ((dstAccess_ & m_allowedAccesses) == 0)
continue;
+ const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
+
if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess])
{
dstStages |= dstStage_;
for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
{
- const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
-
if ((srcStage_ & srcStages) == 0)
continue;
+ const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
+
completedStages |= (~m_incompleteOperations[srcStage]);
}
for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
{
- const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
-
if ((dstStage_ & m_allowedStages) == 0)
continue;
+ const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
+
for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
{
- const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
-
if ((srcStage_ & m_allowedStages) == 0)
continue;
+ const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
+
for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
{
- const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
-
if ((dstAccess_ & m_allowedAccesses) == 0)
continue;
+ const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
+
if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess] != (getWriteAccessFlags() & m_allowedAccesses))
{
anyWriteAvailable = true;
for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
{
- const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
-
if ((dstStage_ & m_allowedStages) == 0)
continue;
+ const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
+
// All stages are incomplete after the barrier except each dstStage in it self.
m_incompleteOperations[dstStage] = m_allowedStages & (~dstStage_);
for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
{
- const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
-
if ((srcStage_ & m_allowedStages) == 0)
continue;
+ const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
+
// All write operations are available after layout transition
for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
{
- const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
-
if ((dstAccess_ & m_allowedAccesses) == 0)
continue;
+ const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
+
m_unavailableWriteOperations[dstStage][srcStage][dstAccess] = 0;
}
}
for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= srcStages; srcStage_ <<= 1)
{
- const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
-
if ((srcStage_ & srcStages) == 0)
continue;
+ const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
+
for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= dstStages; dstStage_ <<= 1)
{
- const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
-
if ((dstStage_ & dstStages) == 0)
continue;
+ const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
+
// Stages that have completed before srcStage have also completed before dstStage
m_incompleteOperations[dstStage] &= oldIncompleteOperations[srcStage];
for (vk::VkPipelineStageFlags sharedStage_ = 1; sharedStage_ <= m_allowedStages; sharedStage_ <<= 1)
{
- const PipelineStage sharedStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)sharedStage_);
-
if ((sharedStage_ & m_allowedStages) == 0)
continue;
+ const PipelineStage sharedStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)sharedStage_);
+
// Writes that are available in srcStage are also available in dstStage
for (vk::VkAccessFlags sharedAccess_ = 1; sharedAccess_ <= m_allowedAccesses; sharedAccess_ <<= 1)
{
- const Access sharedAccess = accessFlagToAccess((vk::VkAccessFlagBits)sharedAccess_);
-
if ((sharedAccess_ & m_allowedAccesses) == 0)
continue;
+ const Access sharedAccess = accessFlagToAccess((vk::VkAccessFlagBits)sharedAccess_);
+
m_unavailableWriteOperations[dstStage][sharedStage][sharedAccess] &= oldUnavailableWriteOperations[srcStage][sharedStage][sharedAccess];
}
}
for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
{
- const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
-
if ((srcStage_ & m_allowedStages) == 0)
continue;
+ const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
+
// Make srcAccesses from srcStage available in dstStage for dstAccess
for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
{
- const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
-
if ((dstAccess_ & m_allowedAccesses) == 0)
continue;
+ const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
+
if (((srcStage_ & srcStages) != 0) && ((dstAccess_ & dstAccesses) != 0))
m_unavailableWriteOperations[dstStage][srcStage][dstAccess] &= ~srcAccesses;
{
for (vk::VkPipelineStageFlags dstStage_ = 1; dstStage_ <= m_allowedStages; dstStage_ <<= 1)
{
- const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
-
if ((dstStage_ & m_allowedStages) == 0)
continue;
+ const PipelineStage dstStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)dstStage_);
+
// Some operations are not visible to some stages
if (m_invisibleOperations[dstStage] != 0)
return false;
for (vk::VkPipelineStageFlags srcStage_ = 1; srcStage_ <= m_allowedStages; srcStage_ <<= 1)
{
- const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
-
if ((srcStage_ & m_allowedStages) == 0)
continue;
+ const PipelineStage srcStage = pipelineStageFlagToPipelineStage((vk::VkPipelineStageFlagBits)srcStage_);
+
for (vk::VkAccessFlags dstAccess_ = 1; dstAccess_ <= m_allowedAccesses; dstAccess_ <<= 1)
{
- const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
-
if ((dstAccess_ & m_allowedAccesses) == 0)
continue;
+ const Access dstAccess = accessFlagToAccess((vk::VkAccessFlagBits)dstAccess_);
+
// Some write operations are not available yet
if (m_unavailableWriteOperations[dstStage][srcStage][dstAccess] != 0)
return false;
break;
case OP_SECONDARY_COMMAND_BUFFER_END:
- DE_ASSERT(state.stage == STAGE_SECONDARY_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
+ DE_ASSERT(state.stage == STAGE_COMMAND_BUFFER || state.stage == STAGE_SECONDARY_COMMAND_BUFFER);
state.stage = STAGE_COMMAND_BUFFER;
state.commandBufferIsEmpty = state.primaryCommandBufferIsEmpty;
break;
removeIllegalAccessFlags(srcAccesses, srcStages);
PipelineBarrier::Type type;
-
- if (op == OP_PIPELINE_BARRIER_IMAGE)
+ switch (op)
+ {
+ case OP_PIPELINE_BARRIER_IMAGE:
type = PipelineBarrier::TYPE_IMAGE;
- else if (op == OP_PIPELINE_BARRIER_BUFFER)
+ break;
+ case OP_PIPELINE_BARRIER_BUFFER:
type = PipelineBarrier::TYPE_BUFFER;
- else if (op == OP_PIPELINE_BARRIER_GLOBAL)
+ break;
+ case OP_PIPELINE_BARRIER_GLOBAL:
type = PipelineBarrier::TYPE_GLOBAL;
- else
- {
+ break;
+ default:
type = PipelineBarrier::TYPE_LAST;
DE_FATAL("Unknown op");
}
result.check(deIsPowerOfTwo64(static_cast<deUint64>(m_currentTestRequirements.alignment)) == DE_TRUE,
"VkMemoryRequirements alignment isn't power of two");
- if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT))
+ if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT))
{
result.check(m_currentTestRequirements.alignment >= limits.minTexelBufferOffsetAlignment,
"VkMemoryRequirements alignment doesn't respect minTexelBufferOffsetAlignment");
const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(vki, physicalDevice);
tcu::TestLog& log = context.getTestContext().getLog();
tcu::ResultCollector result (log, "ERROR: ");
- deUint32 errorCount = 0;
log << TestLog::Message << "Memory properties: " << memoryProperties << TestLog::EndMessage;
}
}
- if (errorCount > 1)
- return tcu::TestStatus(result.getResult(), "Failed " + de::toString(errorCount) + " cases.");
- else
- return tcu::TestStatus(result.getResult(), result.getMessage());
+ return tcu::TestStatus(result.getResult(), result.getMessage());
}
void checkSupportMultiplane (Context& context, ImageTestParams params)
: MultiViewRenderTestInstance (context, parameters)
, m_verticesPerPrimitive (4u)
, m_occlusionQueryFlags ((parameters.viewIndex == TEST_TYPE_QUERIES) * VK_QUERY_CONTROL_PRECISE_BIT)
+ , m_occlusionObjectsOffset (0)
{
// Generate the timestamp mask
const std::vector<VkQueueFamilyProperties> queueProperties = vk::getPhysicalDeviceQueueFamilyProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice());
WorkingData (void)
: numVertices ()
+ , colorBufferSize (0)
{
}
};
TestParams (void)
: numLayers ()
+ , sampleSource (SAMPLE_SOURCE_IMAGE)
, numColorSamples ()
, colorFormat ()
{
VkPhysicalDeviceFeatures features = m_context.getDeviceFeatures();
createShaderModule(vk, vkDevice, m_context.getBinaryCollection(), "color_vert", &m_vertexShaderModule);
- if (m_shaderFlags & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT || m_shaderFlags & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
+ if (m_shaderFlags & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT || m_shaderFlags & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
{
if (features.tessellationShader == VK_FALSE)
{
, m_vki (m_instance.getDriver())
, m_phyDevice (vk::chooseDevice(m_vki, m_instance, m_context.getTestContext().getCommandLine()))
, m_queueFamilyIndex (chooseProtectedMemQueueFamilyIndex(m_vki, m_phyDevice))
- , m_device (makeProtectedMemDevice(m_interface, m_instance, m_vki, m_phyDevice, m_queueFamilyIndex, ctx.getUsedApiVersion(), deviceExtensions, m_context.getTestContext().getCommandLine().isValidationEnabled()))
, m_allocator (createAllocator())
+ , m_device (makeProtectedMemDevice(m_interface, m_instance, m_vki, m_phyDevice, m_queueFamilyIndex, ctx.getUsedApiVersion(), deviceExtensions, m_context.getTestContext().getCommandLine().isValidationEnabled()))
, m_deviceDriver (m_context.getPlatformInterface(), m_instance, *m_device)
, m_queue (getProtectedQueue(m_deviceDriver, *m_device, m_queueFamilyIndex, 0))
{
, m_phyDevice (vk::chooseDevice(m_vki, m_instance, m_context.getTestContext().getCommandLine()))
, m_surface (vk::wsi::createSurface(m_vki, m_instance, wsiType, display, window))
, m_queueFamilyIndex (chooseProtectedMemQueueFamilyIndex(m_vki, m_phyDevice, *m_surface))
+ , m_allocator(createAllocator())
, m_device (makeProtectedMemDevice(m_interface, m_instance, m_vki, m_phyDevice, m_queueFamilyIndex, ctx.getUsedApiVersion(), deviceExtensions, m_context.getTestContext().getCommandLine().isValidationEnabled()))
- , m_allocator (createAllocator())
, m_deviceDriver (m_interface, m_instance, *m_device)
, m_queue (getProtectedQueue(m_deviceDriver, *m_device, m_queueFamilyIndex, 0))
{
vk::VkPhysicalDevice m_phyDevice;
const vk::Move<vk::VkSurfaceKHR> m_surface;
deUint32 m_queueFamilyIndex;
- vk::Move<vk::VkDevice> m_device;
const de::UniquePtr<vk::Allocator> m_allocator;
+ vk::Move<vk::VkDevice> m_device;
vk::DeviceDriver m_deviceDriver;
vk::VkQueue m_queue;
};
RENDER_WIDTH, RENDER_HEIGHT,
m_imageFormat,
vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT
- | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT
- | vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT);
+ | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
de::MovePtr<vk::BufferWithMemory> dstBuffer (makeBuffer(ctx,
PROTECTION_ENABLED,
queueFamilyIndex,
for (int x = 0; x < resultImage.getWidth(); ++x)
referenceImage.setPixel(x, y, backgroundColor);
- for (size_t renderAreaNdx = 0; result && renderAreaNdx < m_renderStart.size(); ++renderAreaNdx)
+ for (size_t renderAreaNdx = 0; renderAreaNdx < m_renderStart.size(); ++renderAreaNdx)
{
const int renderStart = m_renderStart[renderAreaNdx];
const int renderEnd = m_renderEnd[renderAreaNdx];
}
css << std::endl;
- for (size_t renderAreaNdx = 0; result && renderAreaNdx < m_renderStart.size(); ++renderAreaNdx)
+ for (size_t renderAreaNdx = 0; renderAreaNdx < m_renderStart.size(); ++renderAreaNdx)
{
const int renderStart = m_renderStart[renderAreaNdx];
const int renderEnd = m_renderEnd[renderAreaNdx];
}
css << std::endl;
- for (size_t renderAreaNdx = 0; result && renderAreaNdx < m_renderStart.size(); ++renderAreaNdx)
+ for (size_t renderAreaNdx = 0; renderAreaNdx < m_renderStart.size(); ++renderAreaNdx)
{
const int renderStart = m_renderStart[renderAreaNdx];
const int renderEnd = m_renderEnd[renderAreaNdx];
, m_uniformSetup (uniformSetup)
, m_attribFunc (attribFunc)
, m_sampleCount (VK_SAMPLE_COUNT_1_BIT)
+ , m_fuzzyCompare (false)
{
}
: TYPE_LAST;
ShaderEvalFunc evalFunc = DE_NULL;
- if (inScalarSize == 1) evalFunc = funcInfo.evalFuncScalar;
- else if (inScalarSize == 2) evalFunc = funcInfo.evalFuncVec2;
- else if (inScalarSize == 3) evalFunc = funcInfo.evalFuncVec3;
- else if (inScalarSize == 4) evalFunc = funcInfo.evalFuncVec4;
- else DE_ASSERT(false);
+ switch (inScalarSize)
+ {
+ case 1:
+ evalFunc = funcInfo.evalFuncScalar;
+ break;
+ case 2:
+ evalFunc = funcInfo.evalFuncVec2;
+ break;
+ case 3:
+ evalFunc = funcInfo.evalFuncVec3;
+ break;
+ case 4:
+ evalFunc = funcInfo.evalFuncVec4;
+ break;
+ default:
+ DE_ASSERT(false);
+ }
// Skip if no valid eval func.
if (evalFunc == DE_NULL)
: SparseBufferTestInstance (context, flags)
, m_bufferUsage (usage)
, m_minChunkSize (minChunkSize)
+ , m_perDrawBufferOffset (0)
+ , m_stagingBufferSize (0)
{
}
switch(chainOp)
{
case CHAIN_OP_ACCESS_CHAIN:
- specs["chainop"] = "OpAccessChain %_ptr_float_sb %dataInput %uint_0 %uint_0";
specs["chainop"] = "OpAccessChain %_ptr_float_sb %dataInput %uint_0";
break;
case CHAIN_OP_PTR_ACCESS_CHAIN:
flavorNames.push_back("EmulatingFP16");
flavorNames.push_back("DoubleCalc");
+ permutationsFlavorStart = 0;
+ permutationsFlavorEnd = flavorNames.size();
+
// flavorNames will be extended later
}
flavorNames.push_back("FloatCalc");
flavorNames.push_back("DoubleCalc");
+ permutationsFlavorStart = 0;
+ permutationsFlavorEnd = flavorNames.size();
+
// flavorNames will be extended later
}
, queueFamilyIndex (queueFamilyIndex_)
, binaryCollection (binaryCollection_)
, allocator (allocator_)
+ , vertices (0)
, numVertices (0)
+ , renderSize (0)
, waitEvent (false)
{
createFences(vkd, device, false, DE_LENGTH_OF_ARRAY(fences), fences);
PSID* ppEveryoneSID = (PSID*)((PBYTE)pSD + SECURITY_DESCRIPTOR_MIN_LENGTH);
PACL* ppACL = (PACL*)((PBYTE)ppEveryoneSID + sizeof(PSID*));
- InitializeSecurityDescriptor(pSD, SECURITY_DESCRIPTOR_REVISION);
+ bool res = InitializeSecurityDescriptor(pSD, SECURITY_DESCRIPTOR_REVISION);
+ DE_ASSERT(res);
SID_IDENTIFIER_AUTHORITY SIDAuthWorld = SECURITY_WORLD_SID_AUTHORITY;
AllocateAndInitializeSid(&SIDAuthWorld, 1, SECURITY_WORLD_RID, 0, 0, 0, 0, 0, 0, 0, ppEveryoneSID);
SetEntriesInAcl(1, &ea, NULL, ppACL);
- SetSecurityDescriptorDacl(pSD, TRUE, *ppACL, FALSE);
+ res = SetSecurityDescriptorDacl(pSD, TRUE, *ppACL, FALSE);
+ DE_ASSERT(res);
}
return pSD;
, m_imParams (testCaseData.imParams)
, m_samplerParams (testCaseData.samplerParams)
, m_sampleLookupSettings (testCaseData.sampleLookupSettings)
+ , m_numSamples (0)
, m_levels (testCaseData.pba)
, m_gen (gen.release())
{
FilterCase (void)
: textureIndex(-1)
+ , minCoord (0)
+ , maxCoord (0)
, ref (0.0f)
{
}
};
Swizzle2DTestParameters::Swizzle2DTestParameters (void)
- : componentMapping (makeComponentMappingRGBA())
+ : backingMode (TextureBinding::IMAGE_BACKING_MODE_LAST)
+ , componentMapping (makeComponentMappingRGBA())
, texCoordSwizzle (DE_NULL)
, texCoordMapping (DE_NULL)
{
, wrapS (tcu::Sampler::REPEAT_GL)
, format (VK_FORMAT_R8G8B8A8_UNORM)
, unnormal (false)
+ , aspectMask (VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM)
{
}
: wrapT (tcu::Sampler::REPEAT_GL)
, width (64)
, height (64)
+ , mipmaps (false)
{
}
, m_commandLine (0)
{}
- BuildSpirVAsmTask (void) : m_program(DE_NULL) {}
+ BuildSpirVAsmTask (void) : m_program(DE_NULL), m_commandLine(0) {}
void setCommandline (const tcu::CommandLine &commandLine)
{
vector<const char*> extensions;
if (!isExtensionSupported(supportedExtensions, RequiredExtension("VK_KHR_swapchain")))
- TCU_THROW(NotSupportedError, (string(extensions[0]) + " is not supported").c_str());
+ TCU_THROW(NotSupportedError, "VK_KHR_swapchain is not supported");
extensions.push_back("VK_KHR_swapchain");
if (isExtensionSupported(supportedExtensions, RequiredExtension("VK_EXT_hdr_metadata")))
&swapchainImages[imageNdx]))
continue;
else
+ {
+ VK_CHECK(vkd.deviceWaitIdle(device));
return tcu::TestStatus::fail("Colorspace comparison test failed");
- VK_CHECK(vkd.deviceWaitIdle(device));
+ }
}
catch (...)
{
}
}
+ VK_CHECK(vkd.deviceWaitIdle(device));
return tcu::TestStatus::pass("Colorspace comparison test succeeded");
}
std::vector<const char*> extensions;
if (!isExtensionSupported(supportedExtensions, RequiredExtension("VK_KHR_swapchain")))
- TCU_THROW(NotSupportedError, (std::string(extensions[0]) + " is not supported").c_str());
+ TCU_THROW(NotSupportedError, "VK_KHR_swapchain is not supported");
extensions.push_back("VK_KHR_swapchain");
if (isExtensionSupported(supportedExtensions, RequiredExtension("VK_EXT_full_screen_exclusive")))