bool pushDescriptor;
bool testRobustness2;
deUint32 imageDim[3]; // width, height, depth or layers
+ bool readOnly;
};
static bool formatIsR64(const VkFormat& f)
decls << "layout(" << outputimgqualif << ", set = 0, binding = 0) uniform " << imgprefix << "image2D image0_0;\n";
const char *vol = m_data.vol ? "volatile " : "";
+ const char *ro = m_data.readOnly ? "readonly " : "";
// Construct the declaration for the binding
switch (m_data.descriptorType)
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- decls << "layout(scalar, set = 0, binding = 1) " << vol << "buffer sbodef0_1 { " << bufType << " val[]; } ssbo0_1;\n";
- decls << "layout(scalar, set = 0, binding = 1) " << vol << "buffer sbodef0_1_pad { vec4 pad; " << bufType << " val[]; } ssbo0_1_pad;\n";
+ decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1 { " << bufType << " val[]; } ssbo0_1;\n";
+ decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1_pad { vec4 pad; " << bufType << " val[]; } ssbo0_1_pad;\n";
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
switch(format)
}
}
- if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
- m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
- m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
- m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
+ if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
+ m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
+ m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
+ m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
+ !m_data.readOnly)
{
for (int i = 0; i < numCoords; ++i)
{
{ 1, "fmt_qual", "" },
};
+ TestGroupCase readOnlyCases[] =
+ {
+ { 0, "readwrite", "" },
+ { 1, "readonly", "" },
+ };
+
for (int pushNdx = 0; pushNdx < DE_LENGTH_OF_ARRAY(pushCases); pushNdx++)
{
de::MovePtr<tcu::TestCaseGroup> pushGroup(new tcu::TestCaseGroup(testCtx, pushCases[pushNdx].name, pushCases[pushNdx].name));
for (int descNdx = 0; descNdx < numDescCases; descNdx++)
{
de::MovePtr<tcu::TestCaseGroup> descGroup(new tcu::TestCaseGroup(testCtx, descCases[descNdx].name, descCases[descNdx].name));
- for (int fmtQualNdx = 0; fmtQualNdx < DE_LENGTH_OF_ARRAY(fmtQualCases); fmtQualNdx++)
- {
- de::MovePtr<tcu::TestCaseGroup> fmtQualGroup(new tcu::TestCaseGroup(testCtx, fmtQualCases[fmtQualNdx].name, fmtQualCases[fmtQualNdx].name));
- // format qualifier is only used for storage image and storage texel buffers
- if (fmtQualCases[fmtQualNdx].count &&
- !(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE))
- continue;
+ for (int roNdx = 0; roNdx < DE_LENGTH_OF_ARRAY(readOnlyCases); roNdx++)
+ {
+ de::MovePtr<tcu::TestCaseGroup> rwGroup(new tcu::TestCaseGroup(testCtx, readOnlyCases[roNdx].name, readOnlyCases[roNdx].name));
- if (pushCases[pushNdx].count &&
- (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC || descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
+ // The "readonly" variant only applies to storage buffers (plain and dynamic)
+ if (readOnlyCases[roNdx].count != 0 &&
+ descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER &&
+ descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
continue;
- const bool isR64 = formatIsR64((VkFormat)fmtCases[fmtNdx].count);
- int numLenCases = robustness2 ? DE_LENGTH_OF_ARRAY((isR64 ? fullLenCases64Bit : fullLenCases32Bit)) : DE_LENGTH_OF_ARRAY(imgLenCases);
- TestGroupCase *lenCases = robustness2 ? (isR64 ? fullLenCases64Bit : fullLenCases32Bit) : imgLenCases;
-
- for (int lenNdx = 0; lenNdx < numLenCases; lenNdx++)
+ for (int fmtQualNdx = 0; fmtQualNdx < DE_LENGTH_OF_ARRAY(fmtQualCases); fmtQualNdx++)
{
- if (lenCases[lenNdx].count != ~0U)
- {
- bool bufferLen = lenCases[lenNdx].count != 0;
- bool bufferDesc = descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
- if (bufferLen != bufferDesc)
- continue;
+ de::MovePtr<tcu::TestCaseGroup> fmtQualGroup(new tcu::TestCaseGroup(testCtx, fmtQualCases[fmtQualNdx].name, fmtQualCases[fmtQualNdx].name));
- // Add template tests cases only for null_descriptor cases
- if (tempCases[tempNdx].count)
- continue;
- }
-
- if ((descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) &&
- ((lenCases[lenNdx].count % fmtSize) != 0) &&
- lenCases[lenNdx].count != ~0U)
- {
+ // format qualifier is only used for storage image and storage texel buffers
+ if (fmtQualCases[fmtQualNdx].count &&
+ !(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE))
continue;
- }
- // "volatile" only applies to storage images/buffers
- if (volCases[volNdx].count && !supportsStores(descCases[descNdx].count))
+ if (pushCases[pushNdx].count &&
+ (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC || descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
continue;
- de::MovePtr<tcu::TestCaseGroup> lenGroup(new tcu::TestCaseGroup(testCtx, lenCases[lenNdx].name, lenCases[lenNdx].name));
- for (int sampNdx = 0; sampNdx < DE_LENGTH_OF_ARRAY(sampCases); sampNdx++)
+ const bool isR64 = formatIsR64((VkFormat)fmtCases[fmtNdx].count);
+ int numLenCases = robustness2 ? DE_LENGTH_OF_ARRAY((isR64 ? fullLenCases64Bit : fullLenCases32Bit)) : DE_LENGTH_OF_ARRAY(imgLenCases);
+ TestGroupCase *lenCases = robustness2 ? (isR64 ? fullLenCases64Bit : fullLenCases32Bit) : imgLenCases;
+
+ for (int lenNdx = 0; lenNdx < numLenCases; lenNdx++)
{
- de::MovePtr<tcu::TestCaseGroup> sampGroup(new tcu::TestCaseGroup(testCtx, sampCases[sampNdx].name, sampCases[sampNdx].name));
- for (int viewNdx = 0; viewNdx < DE_LENGTH_OF_ARRAY(viewCases); viewNdx++)
+ if (lenCases[lenNdx].count != ~0U)
{
- if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_1D &&
- descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
- descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
- {
- // buffer descriptors don't have different dimensionalities. Only test "1D"
+ bool bufferLen = lenCases[lenNdx].count != 0;
+ bool bufferDesc = descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ if (bufferLen != bufferDesc)
continue;
- }
- if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D && viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D_ARRAY &&
- sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
- {
+ // Add template tests cases only for null_descriptor cases
+ if (tempCases[tempNdx].count)
continue;
- }
+ }
+
+ if ((descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) &&
+ ((lenCases[lenNdx].count % fmtSize) != 0) &&
+ lenCases[lenNdx].count != ~0U)
+ {
+ continue;
+ }
+
+ // "volatile" only applies to storage images/buffers
+ if (volCases[volNdx].count && !supportsStores(descCases[descNdx].count))
+ continue;
- de::MovePtr<tcu::TestCaseGroup> viewGroup(new tcu::TestCaseGroup(testCtx, viewCases[viewNdx].name, viewCases[viewNdx].name));
- for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(stageCases); stageNdx++)
+ de::MovePtr<tcu::TestCaseGroup> lenGroup(new tcu::TestCaseGroup(testCtx, lenCases[lenNdx].name, lenCases[lenNdx].name));
+ for (int sampNdx = 0; sampNdx < DE_LENGTH_OF_ARRAY(sampCases); sampNdx++)
+ {
+ de::MovePtr<tcu::TestCaseGroup> sampGroup(new tcu::TestCaseGroup(testCtx, sampCases[sampNdx].name, sampCases[sampNdx].name));
+ for (int viewNdx = 0; viewNdx < DE_LENGTH_OF_ARRAY(viewCases); viewNdx++)
{
- Stage currentStage = static_cast<Stage>(stageCases[stageNdx].count);
- VkFlags allShaderStages = VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
- VkFlags allPipelineStages = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
- if ((Stage)stageCases[stageNdx].count == STAGE_RAYGEN)
+ if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_1D &&
+ descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
+ descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
{
- allShaderStages |= VK_SHADER_STAGE_RAYGEN_BIT_NV;
- allPipelineStages |= VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV;
+ // buffer descriptors don't have different dimensionalities. Only test "1D"
+ continue;
}
- if (descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH &&
- currentStage != STAGE_VERTEX)
+ if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D && viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D_ARRAY &&
+ sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
+ {
continue;
+ }
- deUint32 imageDim[3] = {5, 11, 6};
- if (viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
- viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE)
- imageDim[1] = imageDim[0];
-
- CaseDef c =
+ de::MovePtr<tcu::TestCaseGroup> viewGroup(new tcu::TestCaseGroup(testCtx, viewCases[viewNdx].name, viewCases[viewNdx].name));
+ for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(stageCases); stageNdx++)
{
- (VkFormat)fmtCases[fmtNdx].count, // VkFormat format;
- currentStage, // Stage stage;
- allShaderStages, // VkFlags allShaderStages;
- allPipelineStages, // VkFlags allPipelineStages;
- (int)descCases[descNdx].count, // VkDescriptorType descriptorType;
- (VkImageViewType)viewCases[viewNdx].count, // VkImageViewType viewType;
- (VkSampleCountFlagBits)sampCases[sampNdx].count, // VkSampleCountFlagBits samples;
- (int)lenCases[lenNdx].count, // int bufferLen;
- (bool)unrollCases[unrollNdx].count, // bool unroll;
- (bool)volCases[volNdx].count, // bool vol;
- (bool)(lenCases[lenNdx].count == ~0U), // bool nullDescriptor
- (bool)tempCases[tempNdx].count, // bool useTemplate
- (bool)fmtQualCases[fmtQualNdx].count, // bool formatQualifier
- (bool)pushCases[pushNdx].count, // bool pushDescriptor;
- (bool)robustness2, // bool testRobustness2;
- { imageDim[0], imageDim[1], imageDim[2] }, // deUint32 imageDim[3];
- };
-
- viewGroup->addChild(new RobustnessExtsTestCase(testCtx, stageCases[stageNdx].name, stageCases[stageNdx].name, c));
+ Stage currentStage = static_cast<Stage>(stageCases[stageNdx].count);
+ VkFlags allShaderStages = VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
+ VkFlags allPipelineStages = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ if ((Stage)stageCases[stageNdx].count == STAGE_RAYGEN)
+ {
+ allShaderStages |= VK_SHADER_STAGE_RAYGEN_BIT_NV;
+ allPipelineStages |= VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV;
+ }
+
+ if (descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH &&
+ currentStage != STAGE_VERTEX)
+ continue;
+
+ deUint32 imageDim[3] = {5, 11, 6};
+ if (viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
+ viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE)
+ imageDim[1] = imageDim[0];
+
+ CaseDef c =
+ {
+ (VkFormat)fmtCases[fmtNdx].count, // VkFormat format;
+ currentStage, // Stage stage;
+ allShaderStages, // VkFlags allShaderStages;
+ allPipelineStages, // VkFlags allPipelineStages;
+ (int)descCases[descNdx].count, // VkDescriptorType descriptorType;
+ (VkImageViewType)viewCases[viewNdx].count, // VkImageViewType viewType;
+ (VkSampleCountFlagBits)sampCases[sampNdx].count, // VkSampleCountFlagBits samples;
+ (int)lenCases[lenNdx].count, // int bufferLen;
+ (bool)unrollCases[unrollNdx].count, // bool unroll;
+ (bool)volCases[volNdx].count, // bool vol;
+ (bool)(lenCases[lenNdx].count == ~0U), // bool nullDescriptor
+ (bool)tempCases[tempNdx].count, // bool useTemplate
+ (bool)fmtQualCases[fmtQualNdx].count, // bool formatQualifier
+ (bool)pushCases[pushNdx].count, // bool pushDescriptor;
+ (bool)robustness2, // bool testRobustness2;
+ { imageDim[0], imageDim[1], imageDim[2] }, // deUint32 imageDim[3];
+ (bool)(readOnlyCases[roNdx].count == 1), // bool readOnly;
+ };
+
+ viewGroup->addChild(new RobustnessExtsTestCase(testCtx, stageCases[stageNdx].name, stageCases[stageNdx].name, c));
+ }
+ sampGroup->addChild(viewGroup.release());
}
- sampGroup->addChild(viewGroup.release());
+ lenGroup->addChild(sampGroup.release());
}
- lenGroup->addChild(sampGroup.release());
+ fmtQualGroup->addChild(lenGroup.release());
}
- fmtQualGroup->addChild(lenGroup.release());
+ // Storage buffer tests are placed in separate readonly/readwrite subgroups;
+ // all other descriptor types are added directly to descGroup
+ if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
+ descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
+ rwGroup->addChild(fmtQualGroup.release());
+ } else {
+ descGroup->addChild(fmtQualGroup.release());
+ }
+ }
+ if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
+ descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
+ descGroup->addChild(rwGroup.release());
}
- descGroup->addChild(fmtQualGroup.release());
}
volGroup->addChild(descGroup.release());
}