external/vulkancts/modules/vulkan/vktTestGroupUtil.cpp \
external/vulkancts/modules/vulkan/vktTestPackage.cpp \
external/vulkancts/modules/vulkan/vktTestPackageEntry.cpp \
+ external/vulkancts/modules/vulkan/wsi/vktWsiDisplayTimingTests.cpp \
+ external/vulkancts/modules/vulkan/wsi/vktWsiIncrementalPresentTests.cpp \
external/vulkancts/modules/vulkan/wsi/vktWsiSurfaceTests.cpp \
external/vulkancts/modules/vulkan/wsi/vktWsiSwapchainTests.cpp \
external/vulkancts/modules/vulkan/wsi/vktWsiTests.cpp
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_declare
dEQP-VK.glsl.linkage.varying.rules.vertex_use_declare_fragment
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_use
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_1
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_2
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_3
dEQP-VK.glsl.linkage.varying.rules.differing_interpolation_2
dEQP-VK.glsl.linkage.varying.rules.differing_name_1
dEQP-VK.glsl.linkage.varying.rules.differing_name_2
dEQP-VK.wsi.android.swapchain.modify.resize
dEQP-VK.wsi.android.swapchain.destroy.null_handle
dEQP-VK.wsi.android.swapchain.get_images.incomplete
+dEQP-VK.wsi.android.incremental_present.scale_none.immediate.reference
+dEQP-VK.wsi.android.incremental_present.scale_none.immediate.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_none.mailbox.reference
+dEQP-VK.wsi.android.incremental_present.scale_none.mailbox.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_none.fifo.reference
+dEQP-VK.wsi.android.incremental_present.scale_none.fifo.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_none.fifo_relaxed.reference
+dEQP-VK.wsi.android.incremental_present.scale_none.fifo_relaxed.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_up.immediate.reference
+dEQP-VK.wsi.android.incremental_present.scale_up.immediate.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_up.mailbox.reference
+dEQP-VK.wsi.android.incremental_present.scale_up.mailbox.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_up.fifo.reference
+dEQP-VK.wsi.android.incremental_present.scale_up.fifo.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_up.fifo_relaxed.reference
+dEQP-VK.wsi.android.incremental_present.scale_up.fifo_relaxed.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_down.immediate.reference
+dEQP-VK.wsi.android.incremental_present.scale_down.immediate.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_down.mailbox.reference
+dEQP-VK.wsi.android.incremental_present.scale_down.mailbox.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_down.fifo.reference
+dEQP-VK.wsi.android.incremental_present.scale_down.fifo.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_down.fifo_relaxed.reference
+dEQP-VK.wsi.android.incremental_present.scale_down.fifo_relaxed.incremental_present
+dEQP-VK.wsi.android.display_timing.fifo.reference
+dEQP-VK.wsi.android.display_timing.fifo.display_timing
+dEQP-VK.wsi.android.display_timing.fifo_relaxed.reference
+dEQP-VK.wsi.android.display_timing.fifo_relaxed.display_timing
+dEQP-VK.wsi.android.display_timing.immediate.reference
+dEQP-VK.wsi.android.display_timing.immediate.display_timing
+dEQP-VK.wsi.android.display_timing.mailbox.reference
+dEQP-VK.wsi.android.display_timing.mailbox.display_timing
dEQP-VK.synchronization.smoke.fences
dEQP-VK.synchronization.smoke.semaphores
dEQP-VK.synchronization.smoke.events
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_declare
dEQP-VK.glsl.linkage.varying.rules.vertex_use_declare_fragment
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_use
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_1
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_2
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_3
dEQP-VK.glsl.linkage.varying.rules.differing_interpolation_2
dEQP-VK.glsl.linkage.varying.rules.differing_name_1
dEQP-VK.glsl.linkage.varying.rules.differing_name_2
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_declare
dEQP-VK.glsl.linkage.varying.rules.vertex_use_declare_fragment
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_use
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_1
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_2
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_3
dEQP-VK.glsl.linkage.varying.rules.differing_interpolation_2
dEQP-VK.glsl.linkage.varying.rules.differing_name_1
dEQP-VK.glsl.linkage.varying.rules.differing_name_2
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
bool func (bool a)
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
bool func (bvec2 a)
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
bool func (bvec3 a)
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
bool func (bvec4 a)
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
bool func (in bool a)
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
void func (out bool a)
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
void func (inout bool a)
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
bool func (const bool a)
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
bool func (bool a) { return !a; }
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
bool func (in bool a[4])
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
bool func (in bool a[4])
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
int func (float a, int b, bool c, int d)
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
int g;
""
end
- # differing precision tests
- case differing_precision_1
- version 310 es
- desc "varying declared as highp in vertex shader, but mediump in fragment shader"
- values
- {
- input float in0 = [ -1.25 | -25.55 | 1.0 | 2.25 | 3.4 | 16.0 ];
- output float out0 = [ -1.25 | -25.55 | 1.0 | 2.25 | 3.4 | 16.0 ];
- }
-
- vertex ""
- #version 310 es
- ${VERTEX_DECLARATIONS}
- layout(location = 0) out highp float var;
- void main()
- {
- var = in0;
- ${VERTEX_OUTPUT}
- }
- ""
- fragment ""
- #version 310 es
- precision highp float;
- ${FRAGMENT_DECLARATIONS}
- layout(location = 0) in mediump float var;
- void main()
- {
- out0 = var;
- ${FRAGMENT_OUTPUT}
- }
- ""
- end
-
- # differing precision tests
- case differing_precision_2
- version 310 es
- desc "varying declared as highp in vertex shader, but lowp in fragment shader"
- values
- {
- input float in0 = [ -1.25 | -25.56 | 1.0 | 2.25 | 3.4 | 16.0 ];
- output float out0 = [ -1.25 | -25.56 | 1.0 | 2.25 | 3.4 | 16.0 ];
- }
-
- vertex ""
- #version 310 es
- ${VERTEX_DECLARATIONS}
- layout(location = 0) out highp vec2 var;
- void main()
- {
- var = vec2(in0, 2.0*in0);
- ${VERTEX_OUTPUT}
- }
- ""
- fragment ""
- #version 310 es
- precision highp float;
- ${FRAGMENT_DECLARATIONS}
- layout(location = 0) in lowp vec2 var;
- void main()
- {
- out0 = var.y - var.x;
- ${FRAGMENT_OUTPUT}
- }
- ""
- end
-
- # differing precision tests
- case differing_precision_3
- version 310 es
- desc "varying declared as lowp in vertex shader, but mediump in fragment shader"
- values
- {
- input float in0 = [ -1.25 | -25.0 | 1.0 | 2.25 | 3.4 | 16.0 ];
- output float out0 = [ -1.25 | -25.0 | 1.0 | 2.25 | 3.4 | 16.0 ];
- }
-
- vertex ""
- #version 310 es
- ${VERTEX_DECLARATIONS}
- layout(location = 0) out lowp vec4 var;
- void main()
- {
- var = vec4(in0, 2.0*in0, -in0, -in0);
- ${VERTEX_OUTPUT}
- }
- ""
- fragment ""
- #version 310 es
- precision highp float;
- ${FRAGMENT_DECLARATIONS}
- layout(location = 0) in mediump vec4 var;
- void main()
- {
- out0 = var.x + var.y + var.z + var.w;
- ${FRAGMENT_OUTPUT}
- }
- ""
- end
-
# different interpolation
case differing_interpolation_2
version 310 es
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) out mediump float var;
+ layout(location = 0) out float var;
void main()
{
var = in0;
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) out mediump vec2 var;
+ layout(location = 0) out vec2 var;
void main()
{
var = in0;
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) out mediump vec3 var;
+ layout(location = 0) out vec3 var;
void main()
{
var = in0;
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) out mediump vec4 var;
+ layout(location = 0) out vec4 var;
void main()
{
var = in0;
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) out mediump mat2 var;
+ layout(location = 0) out mat2 var;
void main()
{
var = in0;
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) out mediump mat2x3 var;
+ layout(location = 0) out mat2x3 var;
void main()
{
var = in0;
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) out mediump mat2x4 var;
+ layout(location = 0) out mat2x4 var;
void main()
{
var = in0;
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) out mediump mat3x2 var;
+ layout(location = 0) out mat3x2 var;
void main()
{
var = in0;
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) out mediump mat3 var;
+ layout(location = 0) out mat3 var;
void main()
{
var = in0;
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) out mediump mat3x4 var;
+ layout(location = 0) out mat3x4 var;
void main()
{
var = in0;
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) out mediump mat4x2 var;
+ layout(location = 0) out mat4x2 var;
void main()
{
var = in0;
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) out mediump mat4x3 var;
+ layout(location = 0) out mat4x3 var;
void main()
{
var = in0;
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) out mediump mat4 var;
+ layout(location = 0) out mat4 var;
void main()
{
var = in0;
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) smooth out mediump vec4 var;
+ layout(location = 0) smooth out vec4 var;
void main()
{
var = in0;
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) centroid out mediump vec4 var;
+ layout(location = 0) centroid out vec4 var;
void main()
{
var = in0;
vertex ""
#version 310 es
${VERTEX_DECLARATIONS}
- layout(location = 0) flat out mediump vec4 var;
+ layout(location = 0) flat out vec4 var;
void main()
{
var = in0;
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
int a = -1;
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
void main()
{
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
int a = -1;
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
void main()
{
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
void main()
{
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
int a = 5;
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
int a = 5;
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
int a = 1;
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
void main()
{
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
struct S { int val; };
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
struct S { int val; };
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
int foo (int x) { return x; }
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
int a = -1;
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
struct S { int x; };
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
int func (int func) { return func; }
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
int func (int inp, int x) { { int x = 5; return inp + x - 5; } }
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
int func (int x);
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
both ""
#version 310 es
precision highp float;
+ precision highp int;
${DECLARATIONS}
if (record.data.reallocation.returnedPtr)
{
- DE_ASSERT(!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr));
- ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
- allocations.push_back(AllocationSlot(record, true));
+ if (!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr))
+ {
+ ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
+ allocations.push_back(AllocationSlot(record, true));
+ }
+ else
+ {
+ const size_t slotNdx = ptrToSlotIndex[record.data.reallocation.returnedPtr];
+ DE_ASSERT(!allocations[slotNdx].isLive);
+ allocations[slotNdx].isLive = true;
+ allocations[slotNdx].record = record;
+ }
}
}
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR = 1000059008,
VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = 1000080000,
+ VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR = 1000084000,
VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR = 1000085000,
VK_STRUCTURE_TYPE_OBJECT_TABLE_CREATE_INFO_NVX = 1000086000,
VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX = 1000086001,
VK_STRUCTURE_TYPE_CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX = 1000086003,
VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_LIMITS_NVX = 1000086004,
VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_FEATURES_NVX = 1000086005,
+ VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE = 1000092000,
};
enum VkSystemAllocationScope
enum VkColorSpaceKHR
{
- VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0,
-
- VK_COLOR_SPACE_KHR_LAST
+ VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0,
+ VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1000104001,
+ VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1000104002,
+ VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = 1000104003,
+ VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1000104004,
+ VK_COLOR_SPACE_BT709_LINEAR_EXT = 1000104005,
+ VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104006,
+ VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104007,
+ VK_COLOR_SPACE_HDR10_ST2084_EXT = 1000104008,
+ VK_COLOR_SPACE_DOLBYVISION_EXT = 1000104009,
+ VK_COLOR_SPACE_HDR10_HLG_EXT = 1000104010,
+ VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011,
+ VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT = 1000104012,
+ VK_COLOR_SPACE_PASS_THROUGH_EXT = 1000104013,
};
enum VkPresentModeKHR
virtual void destroyDescriptorUpdateTemplateKHR (VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator) const;
virtual void updateDescriptorSetWithTemplateKHR (VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void* pData) const;
virtual void cmdPushDescriptorSetWithTemplateKHR (VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, VkPipelineLayout layout, deUint32 set, const void* pData) const;
+virtual VkResult getRefreshCycleDurationGOOGLE (VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties) const;
+virtual VkResult getPastPresentationTimingGOOGLE (VkDevice device, VkSwapchainKHR swapchain, deUint32* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings) const;
{
m_vk.cmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
}
+
+VkResult DeviceDriver::getRefreshCycleDurationGOOGLE (VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties) const
+{
+ return m_vk.getRefreshCycleDurationGOOGLE(device, swapchain, pDisplayTimingProperties);
+}
+
+VkResult DeviceDriver::getPastPresentationTimingGOOGLE (VkDevice device, VkSwapchainKHR swapchain, deUint32* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings) const
+{
+ return m_vk.getPastPresentationTimingGOOGLE(device, swapchain, pPresentationTimingCount, pPresentationTimings);
+}
DestroyDescriptorUpdateTemplateKHRFunc destroyDescriptorUpdateTemplateKHR;
UpdateDescriptorSetWithTemplateKHRFunc updateDescriptorSetWithTemplateKHR;
CmdPushDescriptorSetWithTemplateKHRFunc cmdPushDescriptorSetWithTemplateKHR;
+GetRefreshCycleDurationGOOGLEFunc getRefreshCycleDurationGOOGLE;
+GetPastPresentationTimingGOOGLEFunc getPastPresentationTimingGOOGLE;
typedef VKAPI_ATTR void (VKAPI_CALL* DestroyDescriptorUpdateTemplateKHRFunc) (VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator);
typedef VKAPI_ATTR void (VKAPI_CALL* UpdateDescriptorSetWithTemplateKHRFunc) (VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void* pData);
typedef VKAPI_ATTR void (VKAPI_CALL* CmdPushDescriptorSetWithTemplateKHRFunc) (VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, VkPipelineLayout layout, deUint32 set, const void* pData);
+typedef VKAPI_ATTR VkResult (VKAPI_CALL* GetRefreshCycleDurationGOOGLEFunc) (VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties);
+typedef VKAPI_ATTR VkResult (VKAPI_CALL* GetPastPresentationTimingGOOGLEFunc) (VkDevice device, VkSwapchainKHR swapchain, deUint32* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings);
m_vk.destroyDescriptorUpdateTemplateKHR = (DestroyDescriptorUpdateTemplateKHRFunc) GET_PROC_ADDR("vkDestroyDescriptorUpdateTemplateKHR");
m_vk.updateDescriptorSetWithTemplateKHR = (UpdateDescriptorSetWithTemplateKHRFunc) GET_PROC_ADDR("vkUpdateDescriptorSetWithTemplateKHR");
m_vk.cmdPushDescriptorSetWithTemplateKHR = (CmdPushDescriptorSetWithTemplateKHRFunc) GET_PROC_ADDR("vkCmdPushDescriptorSetWithTemplateKHR");
+m_vk.getRefreshCycleDurationGOOGLE = (GetRefreshCycleDurationGOOGLEFunc) GET_PROC_ADDR("vkGetRefreshCycleDurationGOOGLE");
+m_vk.getPastPresentationTimingGOOGLE = (GetPastPresentationTimingGOOGLEFunc) GET_PROC_ADDR("vkGetPastPresentationTimingGOOGLE");
DE_UNREF(pData);
}
+VKAPI_ATTR VkResult VKAPI_CALL getRefreshCycleDurationGOOGLE (VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties)
+{
+	DE_UNREF(device);
+	DE_UNREF(swapchain);
+
+	// The null driver drives no real display: report a zero refresh duration
+	// rather than leaving the caller's output struct uninitialized.
+	pDisplayTimingProperties->refreshDuration = 0;
+	return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL getPastPresentationTimingGOOGLE (VkDevice device, VkSwapchainKHR swapchain, deUint32* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings)
+{
+	DE_UNREF(device);
+	DE_UNREF(swapchain);
+	DE_UNREF(pPresentationTimings);
+
+	// No presentation ever happens in the null driver. Follow the Vulkan
+	// two-call enumeration idiom and report zero available timing records,
+	// so callers querying the count never read an uninitialized value.
+	*pPresentationTimingCount = 0;
+	return VK_SUCCESS;
+}
+
static const tcu::StaticFunctionLibrary::Entry s_platformFunctions[] =
{
VK_NULL_FUNC_ENTRY(vkCreateInstance, createInstance),
VK_NULL_FUNC_ENTRY(vkDestroyDescriptorUpdateTemplateKHR, destroyDescriptorUpdateTemplateKHR),
VK_NULL_FUNC_ENTRY(vkUpdateDescriptorSetWithTemplateKHR, updateDescriptorSetWithTemplateKHR),
VK_NULL_FUNC_ENTRY(vkCmdPushDescriptorSetWithTemplateKHR, cmdPushDescriptorSetWithTemplateKHR),
+ VK_NULL_FUNC_ENTRY(vkGetRefreshCycleDurationGOOGLE, getRefreshCycleDurationGOOGLE),
+ VK_NULL_FUNC_ENTRY(vkGetPastPresentationTimingGOOGLE, getPastPresentationTimingGOOGLE),
};
std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceMemoryProperties2KHR& value);
std::ostream& operator<< (std::ostream& s, const VkSparseImageFormatProperties2KHR& value);
std::ostream& operator<< (std::ostream& s, const VkPhysicalDeviceSparseImageFormatInfo2KHR& value);
+std::ostream& operator<< (std::ostream& s, const VkRectLayerKHR& value);
+std::ostream& operator<< (std::ostream& s, const VkPresentRegionKHR& value);
+std::ostream& operator<< (std::ostream& s, const VkPresentRegionsKHR& value);
std::ostream& operator<< (std::ostream& s, const VkDebugReportCallbackCreateInfoEXT& value);
std::ostream& operator<< (std::ostream& s, const VkPipelineRasterizationStateRasterizationOrderAMD& value);
std::ostream& operator<< (std::ostream& s, const VkDebugMarkerObjectNameInfoEXT& value);
std::ostream& operator<< (std::ostream& s, const VkPhysicalDevicePushDescriptorPropertiesKHR& value);
std::ostream& operator<< (std::ostream& s, const VkDescriptorUpdateTemplateEntryKHR& value);
std::ostream& operator<< (std::ostream& s, const VkDescriptorUpdateTemplateCreateInfoKHR& value);
+std::ostream& operator<< (std::ostream& s, const VkRefreshCycleDurationGOOGLE& value);
+std::ostream& operator<< (std::ostream& s, const VkPastPresentationTimingGOOGLE& value);
+std::ostream& operator<< (std::ostream& s, const VkPresentTimeGOOGLE& value);
+std::ostream& operator<< (std::ostream& s, const VkPresentTimesInfoGOOGLE& value);
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR";
case VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT: return "VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT";
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: return "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR";
+ case VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR: return "VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR";
case VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR: return "VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR";
case VK_STRUCTURE_TYPE_OBJECT_TABLE_CREATE_INFO_NVX: return "VK_STRUCTURE_TYPE_OBJECT_TABLE_CREATE_INFO_NVX";
case VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX: return "VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX";
case VK_STRUCTURE_TYPE_CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX: return "VK_STRUCTURE_TYPE_CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX";
case VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_LIMITS_NVX: return "VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_LIMITS_NVX";
case VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_FEATURES_NVX: return "VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_FEATURES_NVX";
+ case VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE: return "VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE";
default: return DE_NULL;
}
}
{
switch (value)
{
- case VK_COLOR_SPACE_SRGB_NONLINEAR_KHR: return "VK_COLOR_SPACE_SRGB_NONLINEAR_KHR";
- default: return DE_NULL;
+ case VK_COLOR_SPACE_SRGB_NONLINEAR_KHR: return "VK_COLOR_SPACE_SRGB_NONLINEAR_KHR";
+ case VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT: return "VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT";
+ case VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT: return "VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT";
+ case VK_COLOR_SPACE_DCI_P3_LINEAR_EXT: return "VK_COLOR_SPACE_DCI_P3_LINEAR_EXT";
+ case VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT: return "VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT";
+ case VK_COLOR_SPACE_BT709_LINEAR_EXT: return "VK_COLOR_SPACE_BT709_LINEAR_EXT";
+ case VK_COLOR_SPACE_BT709_NONLINEAR_EXT: return "VK_COLOR_SPACE_BT709_NONLINEAR_EXT";
+ case VK_COLOR_SPACE_BT2020_LINEAR_EXT: return "VK_COLOR_SPACE_BT2020_LINEAR_EXT";
+ case VK_COLOR_SPACE_HDR10_ST2084_EXT: return "VK_COLOR_SPACE_HDR10_ST2084_EXT";
+ case VK_COLOR_SPACE_DOLBYVISION_EXT: return "VK_COLOR_SPACE_DOLBYVISION_EXT";
+ case VK_COLOR_SPACE_HDR10_HLG_EXT: return "VK_COLOR_SPACE_HDR10_HLG_EXT";
+ case VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT: return "VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT";
+ case VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT: return "VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT";
+ case VK_COLOR_SPACE_PASS_THROUGH_EXT: return "VK_COLOR_SPACE_PASS_THROUGH_EXT";
+ default: return DE_NULL;
}
}
return s;
}
+std::ostream& operator<< (std::ostream& s, const VkRectLayerKHR& value)
+{
+ s << "VkRectLayerKHR = {\n";
+ s << "\toffset = " << value.offset << '\n';
+ s << "\textent = " << value.extent << '\n';
+ s << "\tlayer = " << value.layer << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPresentRegionKHR& value)
+{
+ s << "VkPresentRegionKHR = {\n";
+ s << "\trectangleCount = " << value.rectangleCount << '\n';
+ s << "\tpRectangles = " << value.pRectangles << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPresentRegionsKHR& value)
+{
+ s << "VkPresentRegionsKHR = {\n";
+ s << "\tsType = " << value.sType << '\n';
+ s << "\tpNext = " << value.pNext << '\n';
+ s << "\tswapchainCount = " << value.swapchainCount << '\n';
+ s << "\tpRegions = " << value.pRegions << '\n';
+ s << '}';
+ return s;
+}
+
std::ostream& operator<< (std::ostream& s, const VkDebugReportCallbackCreateInfoEXT& value)
{
s << "VkDebugReportCallbackCreateInfoEXT = {\n";
s << '}';
return s;
}
+
+std::ostream& operator<< (std::ostream& s, const VkRefreshCycleDurationGOOGLE& value)
+{
+ s << "VkRefreshCycleDurationGOOGLE = {\n";
+ s << "\trefreshDuration = " << value.refreshDuration << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPastPresentationTimingGOOGLE& value)
+{
+ s << "VkPastPresentationTimingGOOGLE = {\n";
+ s << "\tpresentID = " << value.presentID << '\n';
+ s << "\tdesiredPresentTime = " << value.desiredPresentTime << '\n';
+ s << "\tactualPresentTime = " << value.actualPresentTime << '\n';
+ s << "\tearliestPresentTime = " << value.earliestPresentTime << '\n';
+ s << "\tpresentMargin = " << value.presentMargin << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPresentTimeGOOGLE& value)
+{
+ s << "VkPresentTimeGOOGLE = {\n";
+ s << "\tpresentID = " << value.presentID << '\n';
+ s << "\tdesiredPresentTime = " << value.desiredPresentTime << '\n';
+ s << '}';
+ return s;
+}
+
+std::ostream& operator<< (std::ostream& s, const VkPresentTimesInfoGOOGLE& value)
+{
+ s << "VkPresentTimesInfoGOOGLE = {\n";
+ s << "\tsType = " << value.sType << '\n';
+ s << "\tpNext = " << value.pNext << '\n';
+ s << "\tswapchainCount = " << value.swapchainCount << '\n';
+ s << "\tpTimes = " << value.pTimes << '\n';
+ s << '}';
+ return s;
+}
VkImageTiling tiling;
};
+struct VkRectLayerKHR
+{
+ VkOffset2D offset;
+ VkExtent2D extent;
+ deUint32 layer;
+};
+
+struct VkPresentRegionKHR
+{
+ deUint32 rectangleCount;
+ const VkRectLayerKHR* pRectangles;
+};
+
+struct VkPresentRegionsKHR
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUint32 swapchainCount;
+ const VkPresentRegionKHR* pRegions;
+};
+
struct VkDebugReportCallbackCreateInfoEXT
{
VkStructureType sType;
deUint32 set;
};
+struct VkRefreshCycleDurationGOOGLE
+{
+ deUint64 refreshDuration;
+};
+
+struct VkPastPresentationTimingGOOGLE
+{
+ deUint32 presentID;
+ deUint64 desiredPresentTime;
+ deUint64 actualPresentTime;
+ deUint64 earliestPresentTime;
+ deUint64 presentMargin;
+};
+
+struct VkPresentTimeGOOGLE
+{
+ deUint32 presentID;
+ deUint64 desiredPresentTime;
+};
+
+struct VkPresentTimesInfoGOOGLE
+{
+ VkStructureType sType;
+ const void* pNext;
+ deUint32 swapchainCount;
+ const VkPresentTimeGOOGLE* pTimes;
+};
+
return res;
}
+inline VkPresentRegionKHR makePresentRegionKHR (deUint32 rectangleCount, const VkRectLayerKHR* pRectangles)
+{
+ VkPresentRegionKHR res;
+ res.rectangleCount = rectangleCount;
+ res.pRectangles = pRectangles;
+ return res;
+}
+
inline VkIndirectCommandsTokenNVX makeIndirectCommandsTokenNVX (VkIndirectCommandsTokenTypeNVX tokenType, VkBuffer buffer, VkDeviceSize offset)
{
VkIndirectCommandsTokenNVX res;
res.stride = stride;
return res;
}
+
+inline VkRefreshCycleDurationGOOGLE makeRefreshCycleDurationGOOGLE (deUint64 refreshDuration)
+{
+ VkRefreshCycleDurationGOOGLE res;
+ res.refreshDuration = refreshDuration;
+ return res;
+}
+
+inline VkPastPresentationTimingGOOGLE makePastPresentationTimingGOOGLE (deUint32 presentID, deUint64 desiredPresentTime, deUint64 actualPresentTime, deUint64 earliestPresentTime, deUint64 presentMargin)
+{
+ VkPastPresentationTimingGOOGLE res;
+ res.presentID = presentID;
+ res.desiredPresentTime = desiredPresentTime;
+ res.actualPresentTime = actualPresentTime;
+ res.earliestPresentTime = earliestPresentTime;
+ res.presentMargin = presentMargin;
+ return res;
+}
+
+inline VkPresentTimeGOOGLE makePresentTimeGOOGLE (deUint32 presentID, deUint64 desiredPresentTime)
+{
+ VkPresentTimeGOOGLE res;
+ res.presentID = presentID;
+ res.desiredPresentTime = desiredPresentTime;
+ return res;
+}
virtual void destroyDescriptorUpdateTemplateKHR (VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator) const = 0;
virtual void updateDescriptorSetWithTemplateKHR (VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void* pData) const = 0;
virtual void cmdPushDescriptorSetWithTemplateKHR (VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, VkPipelineLayout layout, deUint32 set, const void* pData) const = 0;
+virtual VkResult getRefreshCycleDurationGOOGLE (VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties) const = 0;
+virtual VkResult getPastPresentationTimingGOOGLE (VkDevice device, VkSwapchainKHR swapchain, deUint32* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings) const = 0;
{
TestLog& log = context.getTestContext().getLog();
tcu::ResultCollector results (log);
- set<string> allowedExtensions;
+ set<string> allowedInstanceExtensions;
+ set<string> allowedDeviceExtensions;
// All known extensions should be added to the appropriate allowed set below:
// allowedInstanceExtensions.insert("VK_KHR_extension1");
// allowedDeviceExtensions.insert("VK_GOOGLE_extension1");
+ allowedDeviceExtensions.insert("VK_GOOGLE_display_timing");
// Instance extensions
checkExtensions(results,
- allowedExtensions,
+ allowedInstanceExtensions,
enumerateInstanceExtensionProperties(context.getPlatformInterface(), DE_NULL));
// Extensions exposed by instance layers
for (vector<VkLayerProperties>::const_iterator layer = layers.begin(); layer != layers.end(); ++layer)
{
checkExtensions(results,
- allowedExtensions,
+ allowedInstanceExtensions,
enumerateInstanceExtensionProperties(context.getPlatformInterface(), layer->layerName));
}
}
// Device extensions
checkExtensions(results,
- allowedExtensions,
+ allowedDeviceExtensions,
enumerateDeviceExtensionProperties(context.getInstanceInterface(), context.getPhysicalDevice(), DE_NULL));
// Extensions exposed by device layers
for (vector<VkLayerProperties>::const_iterator layer = layers.begin(); layer != layers.end(); ++layer)
{
checkExtensions(results,
- allowedExtensions,
+ allowedDeviceExtensions,
enumerateDeviceExtensionProperties(context.getInstanceInterface(), context.getPhysicalDevice(), layer->layerName));
}
}
tcu::clear(tcu::getSubregion(dst, dst.getWidth() / 2, dst.getHeight() / 2, dst.getWidth() - dst.getWidth() / 2, dst.getHeight() - dst.getHeight() / 2), c4);
}
-static const vk::VkDescriptorUpdateTemplateEntryKHR createTemplateBinding (uint32_t binding, uint32_t arrayElement, uint32_t descriptorCount, vk::VkDescriptorType descriptorType, size_t offset, size_t stride)
+static const vk::VkDescriptorUpdateTemplateEntryKHR createTemplateBinding (deUint32 binding, deUint32 arrayElement, deUint32 descriptorCount, vk::VkDescriptorType descriptorType, size_t offset, size_t stride)
{
const vk::VkDescriptorUpdateTemplateEntryKHR updateBinding =
{
: ((m_exitingStages & vk::VK_SHADER_STAGE_GEOMETRY_BIT) != 0u) ? ("geo")
: ((m_exitingStages & vk::VK_SHADER_STAGE_FRAGMENT_BIT) != 0u) ? ("frag")
: (DE_NULL);
+ const char* const fragColorPrec = ((m_exitingStages & vk::VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0) ? "highp" : "mediump";
const char* const versionDecl = glu::getGLSLVersionDeclaration(m_glslVersion);
std::ostringstream buf;
buf << versionDecl << "\n"
<< genExtensionDeclarations(vk::VK_SHADER_STAGE_VERTEX_BIT)
<< genResourceDeclarations(vk::VK_SHADER_STAGE_VERTEX_BIT, 0)
- << "layout(location = 0) out highp vec4 " << nextStageName << "_color;\n"
+ << "layout(location = 0) out " << fragColorPrec << " vec4 " << nextStageName << "_color;\n"
<< (onlyVS ? "" : "layout(location = 1) flat out highp int " + de::toString(nextStageName) + "_quadrant_id;\n")
<< genPerVertexBlock(vk::VK_SHADER_STAGE_VERTEX_BIT, m_glslVersion)
<< "void main (void)\n"
deMemcpy(indexAlloc->getHostPtr(), &(m_data.indexes[0]), bufferSize);
+ vk::flushMappedMemoryRange(m_vk, vkDevice, indexAlloc->getMemory(), indexAlloc->getOffset(), bufferSize);
+
m_vk.cmdBindIndexBuffer(*m_cmdBuffer, *indexBuffer, 0u, m_data.indexType);
m_vk.cmdDrawIndexed(*m_cmdBuffer, m_data.params.indexCount, m_data.params.instanceCount, m_data.params.firstIndex, m_data.params.vertexOffset, m_data.params.firstInstance);
m_vk.cmdEndRenderPass(*m_cmdBuffer);
VK_CHECK(vk.bindBufferMemory(vkDevice, *indirectBuffer, indirectAlloc->getMemory(), indirectAlloc->getOffset()));
deMemcpy(indirectAlloc->getHostPtr(), &(m_data.commands[0]), (size_t)indirectInfoSize);
+
+ vk::flushMappedMemoryRange(m_vk, vkDevice, indirectAlloc->getMemory(), indirectAlloc->getOffset(), indirectInfoSize);
}
// If multiDrawIndirect not supported execute single calls
VK_CHECK(vk.bindBufferMemory(vkDevice, *indirectBuffer, indirectAlloc->getMemory(), indirectAlloc->getOffset()));
deMemcpy(indirectAlloc->getHostPtr(), &(m_data.commands[0]), (size_t)indirectInfoSize);
+
+ vk::flushMappedMemoryRange(m_vk, vkDevice, indirectAlloc->getMemory(), indirectAlloc->getOffset(), indirectInfoSize);
}
const deUint32 bufferSize = (deUint32)(m_data.indexes.size() * sizeof(deUint32));
deMemcpy(indexAlloc->getHostPtr(), &(m_data.indexes[0]), bufferSize);
+ vk::flushMappedMemoryRange(m_vk, vkDevice, indexAlloc->getMemory(), indexAlloc->getOffset(), bufferSize);
+
m_vk.cmdBindIndexBuffer(*m_cmdBuffer, *indexBuffer, 0u, m_data.indexType);
// If multiDrawIndirect not supported execute single calls
m_context.getDevice(),
m_indirectBuffer->getBoundMemory().getMemory(),
m_indirectBuffer->getBoundMemory().getOffset(),
- dataSize);
+ dataSize + m_offsetInBuffer);
m_vk.cmdBindPipeline(*m_cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
m_context.getDevice(),
m_indirectBuffer->getBoundMemory().getMemory(),
m_indirectBuffer->getBoundMemory().getOffset(),
- dataSize);
+ dataSize + m_offsetInBuffer);
m_vk.cmdBindPipeline(*m_cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentPreCopyBarrier);
}
{
- const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(resolution.x(), resolution.y(), 0), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
+ const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(resolution.x(), resolution.y(), 1), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
vk.cmdCopyImageToBuffer(*cmdBuffer, *colorAttachmentImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, 1u, ©Region);
}
std::ostringstream src;
src << "#version 310 es\n"
<<"layout(location = 0) out mediump vec4 fragColor;\n"
- <<"layout(location = 0) in mediump vec4 v_frag_FragColor;\n"
+ <<"layout(location = 0) in highp vec4 v_frag_FragColor;\n"
<<"void main (void)\n"
<<"{\n"
<<" fragColor = v_frag_FragColor;\n"
std::ostringstream src;
src << "#version 310 es\n"
<< "layout(location = 0) out mediump vec4 fragColor;\n"
- << "layout(location = 0) in mediump vec4 v_frag_FragColor;\n"
+ << "layout(location = 0) in highp vec4 v_frag_FragColor;\n"
<< "void main (void)\n"
<< "{\n"
<< " fragColor = v_frag_FragColor;\n"
std::ostringstream src;
src << "#version 310 es\n"
<< "layout(location = 0) out mediump vec4 fragColor;\n"
- << "layout(location = 0) in mediump vec4 v_frag_FragColor;\n"
+ << "layout(location = 0) in highp vec4 v_frag_FragColor;\n"
<< "void main (void)\n"
<< "{\n"
<< " fragColor = v_frag_FragColor;\n"
src << "#version 310 es\n"
<<"layout(location = 0) out highp vec4 fragColor;\n";
if (m_varyingTestSpec.geometryOutputs >= GEOMETRY_ONE)
- src <<"layout(location = 0) in mediump vec4 v_frag_0;\n";
+ src <<"layout(location = 0) in highp vec4 v_frag_0;\n";
if (m_varyingTestSpec.geometryOutputs == GEOMETRY_TWO)
- src <<"layout(location = 1) in mediump vec4 v_frag_1;\n";
+ src <<"layout(location = 1) in highp vec4 v_frag_1;\n";
src <<"void main (void)\n"
<<"{\n";
const VkImageView srcImageView = **m_allSrcImageViews[layerNdx];
const VkImageView dstImageView = **m_allDstImageViews[layerNdx];
- const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, srcImageView, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+ const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, srcImageView, VK_IMAGE_LAYOUT_GENERAL);
const VkDescriptorImageInfo descriptorDstImageInfo = makeDescriptorImageInfo(DE_NULL, dstImageView, VK_IMAGE_LAYOUT_GENERAL);
DescriptorSetUpdateBuilder()
{
const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
m_imageSrc->get(), fullImageSubresourceRange);
const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_texture);
return MovePtr<tcu::Texture3DView>(new tcu::Texture3DView((int)levels.size(), &levels[0]));
}
+//! Validate a rendered result image against a reference lookup of 'texture'.
+//!
+//! Builds the tcu texture view matching 'imageViewType' and forwards to the
+//! corresponding view-specific validateResultImage() overload. Returns true
+//! when 'resultAccess' matches the reference within 'lookupPrecision';
+//! mismatching pixels are flagged in 'errorAccess'.
+bool validateResultImage (const TestTexture& texture,
+ const VkImageViewType imageViewType,
+ const VkImageSubresourceRange& subresource,
+ const tcu::Sampler& sampler,
+ const vk::VkComponentMapping& componentMapping,
+ const tcu::ConstPixelBufferAccess& coordAccess,
+ const tcu::Vec2& lodBounds,
+ const tcu::LookupPrecision& lookupPrecision,
+ const tcu::Vec4& lookupScale,
+ const tcu::Vec4& lookupBias,
+ const tcu::ConstPixelBufferAccess& resultAccess,
+ const tcu::PixelBufferAccess& errorAccess)
+{
+ // Backing storage for the per-level accesses referenced by the created view.
+ std::vector<tcu::ConstPixelBufferAccess> levels;
+
+ // Every case returns directly, so no break statements are needed.
+ switch (imageViewType)
+ {
+ case VK_IMAGE_VIEW_TYPE_1D:
+ {
+ UniquePtr<tcu::Texture1DView> texView(getTexture1DView(texture, subresource, levels));
+
+ return validateResultImage(*texView, sampler, componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess);
+ }
+
+ case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
+ {
+ UniquePtr<tcu::Texture1DArrayView> texView(getTexture1DArrayView(texture, subresource, levels));
+
+ return validateResultImage(*texView, sampler, componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess);
+ }
+
+ case VK_IMAGE_VIEW_TYPE_2D:
+ {
+ UniquePtr<tcu::Texture2DView> texView(getTexture2DView(texture, subresource, levels));
+
+ return validateResultImage(*texView, sampler, componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess);
+ }
+
+ case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
+ {
+ UniquePtr<tcu::Texture2DArrayView> texView(getTexture2DArrayView(texture, subresource, levels));
+
+ return validateResultImage(*texView, sampler, componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess);
+ }
+
+ case VK_IMAGE_VIEW_TYPE_CUBE:
+ {
+ UniquePtr<tcu::TextureCubeView> texView(getTextureCubeView(texture, subresource, levels));
+
+ return validateResultImage(*texView, sampler, componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess);
+ }
+
+ case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
+ {
+ UniquePtr<tcu::TextureCubeArrayView> texView(getTextureCubeArrayView(texture, subresource, levels));
+
+ return validateResultImage(*texView, sampler, componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess);
+ }
+
+ case VK_IMAGE_VIEW_TYPE_3D:
+ {
+ UniquePtr<tcu::Texture3DView> texView(getTexture3DView(texture, subresource, levels));
+
+ return validateResultImage(*texView, sampler, componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess);
+ }
+
+ default:
+ // Unknown/unsupported image view type.
+ DE_ASSERT(false);
+ return false;
+ }
+}
+
} // anonymous
tcu::TestStatus ImageSamplingInstance::verifyImage (void)
ReferenceRenderer refRenderer (m_renderSize.x(), m_renderSize.y(), 1, colorFormat, depthStencilFormat, &rrProgram);
bool compareOkAll = true;
+ bool anyWarnings = false;
tcu::Vec4 lookupScale (1.0f);
tcu::Vec4 lookupBias (0.0f);
tcu::TextureLevel errorMask (tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8), (int)m_renderSize.x(), (int)m_renderSize.y());
const tcu::PixelBufferAccess errorAccess = errorMask.getAccess();
+ const bool allowSnorm8Bug = m_texture->getTextureFormat().type == tcu::TextureFormat::SNORM_INT8 &&
+ (m_samplerParams.minFilter == VK_FILTER_LINEAR || m_samplerParams.magFilter == VK_FILTER_LINEAR);
+
tcu::LookupPrecision lookupPrecision;
// Set precision requirements - very low for these tests as
for (int imgNdx = 0; imgNdx < m_imageCount; ++imgNdx)
{
// Read back result image
- UniquePtr<tcu::TextureLevel> result(readColorAttachment(m_context.getDeviceInterface(),
- m_context.getDevice(),
- m_context.getUniversalQueue(),
- m_context.getUniversalQueueFamilyIndex(),
- m_context.getDefaultAllocator(),
- **m_colorImages[imgNdx],
- m_colorFormat,
- m_renderSize));
- const tcu::ConstPixelBufferAccess resultAccess = result->getAccess();
-
- bool compareOk = true;
-
- switch (m_imageViewType)
+ UniquePtr<tcu::TextureLevel> result (readColorAttachment(m_context.getDeviceInterface(),
+ m_context.getDevice(),
+ m_context.getUniversalQueue(),
+ m_context.getUniversalQueueFamilyIndex(),
+ m_context.getDefaultAllocator(),
+ **m_colorImages[imgNdx],
+ m_colorFormat,
+ m_renderSize));
+ const tcu::ConstPixelBufferAccess resultAccess = result->getAccess();
+ bool compareOk = validateResultImage(*m_texture,
+ m_imageViewType,
+ subresource,
+ sampler,
+ m_componentMapping,
+ coordAccess,
+ lodBounds,
+ lookupPrecision,
+ lookupScale,
+ lookupBias,
+ resultAccess,
+ errorAccess);
+
+ if (!compareOk && allowSnorm8Bug)
{
- case VK_IMAGE_VIEW_TYPE_1D:
- {
- std::vector<tcu::ConstPixelBufferAccess> levels;
- UniquePtr<tcu::Texture1DView> texView(getTexture1DView(*m_texture, subresource, levels));
-
- compareOk = validateResultImage(*texView, sampler, m_componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess);
- break;
- }
-
- case VK_IMAGE_VIEW_TYPE_1D_ARRAY:
- {
- std::vector<tcu::ConstPixelBufferAccess> levels;
- UniquePtr<tcu::Texture1DArrayView> texView(getTexture1DArrayView(*m_texture, subresource, levels));
-
- compareOk = validateResultImage(*texView, sampler, m_componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess);
- break;
- }
-
- case VK_IMAGE_VIEW_TYPE_2D:
- {
- std::vector<tcu::ConstPixelBufferAccess> levels;
- UniquePtr<tcu::Texture2DView> texView(getTexture2DView(*m_texture, subresource, levels));
-
- compareOk = validateResultImage(*texView, sampler, m_componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess);
- break;
- }
-
- case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
- {
- std::vector<tcu::ConstPixelBufferAccess> levels;
- UniquePtr<tcu::Texture2DArrayView> texView(getTexture2DArrayView(*m_texture, subresource, levels));
-
- compareOk = validateResultImage(*texView, sampler, m_componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess);
- break;
- }
-
- case VK_IMAGE_VIEW_TYPE_CUBE:
- {
- std::vector<tcu::ConstPixelBufferAccess> levels;
- UniquePtr<tcu::TextureCubeView> texView(getTextureCubeView(*m_texture, subresource, levels));
-
- compareOk = validateResultImage(*texView, sampler, m_componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess);
- break;
- }
-
- case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
- {
- std::vector<tcu::ConstPixelBufferAccess> levels;
- UniquePtr<tcu::TextureCubeArrayView> texView(getTextureCubeArrayView(*m_texture, subresource, levels));
+ // HW waiver (VK-GL-CTS issue: 229)
+ //
+ // Due to an error in bit replication of the fixed point SNORM values, linear filtered
+ // negative SNORM values will differ slightly from ideal precision in the last bit, moving
+ // the values towards 0.
+ //
+ // This occurs on all members of the PowerVR Rogue family of GPUs
+ tcu::LookupPrecision relaxedPrecision;
- compareOk = validateResultImage(*texView, sampler, m_componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess);
- break;
- }
-
- case VK_IMAGE_VIEW_TYPE_3D:
- {
- std::vector<tcu::ConstPixelBufferAccess> levels;
- UniquePtr<tcu::Texture3DView> texView(getTexture3DView(*m_texture, subresource, levels));
-
- compareOk = validateResultImage(*texView, sampler, m_componentMapping, coordAccess, lodBounds, lookupPrecision, lookupScale, lookupBias, resultAccess, errorAccess);
- break;
- }
+ relaxedPrecision.colorThreshold += tcu::Vec4(4.f / 255.f);
- default:
- DE_ASSERT(false);
+ m_context.getTestContext().getLog()
+ << tcu::TestLog::Message
+ << "Warning: Strict validation failed, re-trying with lower precision for SNORM8 format"
+ << tcu::TestLog::EndMessage;
+ anyWarnings = true;
+
+ compareOk = validateResultImage(*m_texture,
+ m_imageViewType,
+ subresource,
+ sampler,
+ m_componentMapping,
+ coordAccess,
+ lodBounds,
+ relaxedPrecision,
+ lookupScale,
+ lookupBias,
+ resultAccess,
+ errorAccess);
}
if (!compareOk)
}
if (compareOkAll)
- return tcu::TestStatus::pass("Result image matches reference");
+ {
+ if (anyWarnings)
+ return tcu::TestStatus(QP_TEST_RESULT_QUALITY_WARNING, "Inaccurate filtering results");
+ else
+ return tcu::TestStatus::pass("Result image matches reference");
+ }
else
return tcu::TestStatus::fail("Image mismatch");
}
TestInstance* ImageTest::createInstance (Context& context) const
{
tcu::UVec2 renderSize;
+ const VkPhysicalDeviceFeatures& features = context.getDeviceFeatures();
+
+ // Using an loop to index into an array of images requires shaderSampledImageArrayDynamicIndexing
+ if (m_imageCount > 1 && features.shaderSampledImageArrayDynamicIndexing == VK_FALSE)
+ {
+ TCU_THROW(NotSupportedError, "shaderSampledImageArrayDynamicIndexing feature is not supported");
+ }
if (m_imageViewType == VK_IMAGE_VIEW_TYPE_1D || m_imageViewType == VK_IMAGE_VIEW_TYPE_2D)
{
const VkBufferImageCopy bufImageCopy =
{
0u, // VkDeviceSize bufferOffset;
- (deUint32)m_bufSize, // deUint32 bufferRowLength;
+ (deUint32)m_imageWidth, // deUint32 bufferRowLength;
(deUint32)m_imageHeight, // deUint32 bufferImageHeight;
imgSubResCopy, // VkImageSubresourceCopy imageSubresource;
nullOffset, // VkOffset3D imageOffset;
const VkBufferImageCopy imgBufferCopy =
{
0u, // VkDeviceSize bufferOffset;
- (deUint32)m_bufSize, // deUint32 bufferRowLength;
+ (deUint32)m_imageWidth, // deUint32 bufferRowLength;
(deUint32)m_imageHeight, // deUint32 bufferImageHeight;
imgSubResCopy, // VkImageSubresourceCopy imageSubresource;
nullOffset, // VkOffset3D imageOffset;
subRange // VkImageSubresourceRange subresourceRange;
};
- vk.cmdPipelineBarrier(cmdBuffer, 0, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, DE_NULL, 0, DE_NULL, 1, &imageMemBarrier);
+ vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, DE_NULL, 0, DE_NULL, 1, &imageMemBarrier);
}
} // anonymous
#include "vktOpaqueTypeIndexingTests.hpp"
+#include "vkRefUtil.hpp"
+#include "vkImageUtil.hpp"
+#include "vkMemUtil.hpp"
+#include "vkTypeUtil.hpp"
+#include "vkQueryUtil.hpp"
+
#include "tcuTexture.hpp"
#include "tcuTestLog.hpp"
#include "tcuVectorUtil.hpp"
#include "tcuTextureUtil.hpp"
#include "deStringUtil.hpp"
+#include "deSharedPtr.hpp"
#include "deRandom.hpp"
#include "vktShaderExecutor.hpp"
namespace
{
+using de::UniquePtr;
+using de::MovePtr;
+using de::SharedPtr;
+using std::vector;
+
+using namespace vk;
+
+typedef SharedPtr<Unique<VkSampler> > VkSamplerSp;
+
+// Buffer helper
+
+// Owning wrapper for a VkBuffer backed by host-visible memory.
+// The Unique<VkBuffer>/UniquePtr<Allocation> members release both the buffer
+// and its allocation on destruction. See createUniformIndexBuffer() for usage.
+class Buffer
+{
+public:
+ Buffer (Context& context, VkBufferUsageFlags usage, size_t size);
+
+ // Raw Vulkan handle of the owned buffer.
+ VkBuffer getBuffer (void) const { return *m_buffer; }
+ // Host-mapped pointer into the backing allocation.
+ void* getHostPtr (void) const { return m_allocation->getHostPtr(); }
+ // Flush host writes to the device (whole allocation range).
+ void flush (void);
+ // Invalidate the mapped range before reading device writes on the host.
+ void invalidate (void);
+
+private:
+ const DeviceInterface& m_vkd;
+ const VkDevice m_device;
+ const Unique<VkBuffer> m_buffer;
+ const UniquePtr<Allocation> m_allocation;
+};
+
+typedef de::SharedPtr<Buffer> BufferSp;
+
+// Create a VkBuffer of the given size and usage flags
+// (no create flags, exclusive sharing, no queue family indices).
+Move<VkBuffer> createBuffer (const DeviceInterface& vkd, VkDevice device, VkDeviceSize size, VkBufferUsageFlags usageFlags)
+{
+ const VkBufferCreateInfo createInfo =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ DE_NULL,
+ (VkBufferCreateFlags)0,
+ size,
+ usageFlags,
+ VK_SHARING_MODE_EXCLUSIVE,
+ 0u,
+ DE_NULL
+ };
+ return createBuffer(vkd, device, &createInfo);
+}
+
+// Allocate host-visible memory satisfying the buffer's requirements and bind
+// it to the buffer. Returns the allocation, which the caller owns.
+MovePtr<Allocation> allocateAndBindMemory (const DeviceInterface& vkd, VkDevice device, Allocator& allocator, VkBuffer buffer)
+{
+ MovePtr<Allocation> alloc (allocator.allocate(getBufferMemoryRequirements(vkd, device, buffer), MemoryRequirement::HostVisible));
+
+ VK_CHECK(vkd.bindBufferMemory(device, buffer, alloc->getMemory(), alloc->getOffset()));
+
+ return alloc;
+}
+
+// Create the buffer and immediately back it with bound host-visible memory,
+// so getHostPtr() is valid for the lifetime of the object.
+Buffer::Buffer (Context& context, VkBufferUsageFlags usage, size_t size)
+ : m_vkd (context.getDeviceInterface())
+ , m_device (context.getDevice())
+ , m_buffer (createBuffer (context.getDeviceInterface(),
+ context.getDevice(),
+ (VkDeviceSize)size,
+ usage))
+ , m_allocation (allocateAndBindMemory (context.getDeviceInterface(),
+ context.getDevice(),
+ context.getDefaultAllocator(),
+ *m_buffer))
+{
+}
+
+// Flush the whole mapped range so host writes become visible to the device.
+void Buffer::flush (void)
+{
+ flushMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
+}
+
+// Invalidate the whole mapped range so device writes become visible to the host.
+void Buffer::invalidate (void)
+{
+ invalidateMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
+}
+
+// Create a uniform buffer containing 'numIndices' ints copied from 'indices'.
+// The data is flushed before returning, so the buffer is ready for device use.
+MovePtr<Buffer> createUniformIndexBuffer (Context& context, int numIndices, const int* indices)
+{
+ MovePtr<Buffer> buffer (new Buffer(context, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, sizeof(int)*numIndices));
+ int* const bufPtr = (int*)buffer->getHostPtr();
+
+ for (int ndx = 0; ndx < numIndices; ++ndx)
+ bufPtr[ndx] = indices[ndx];
+
+ buffer->flush();
+
+ return buffer;
+}
+
+// Tests
+
enum IndexExprType
{
INDEX_EXPR_TYPE_CONST_LITERAL = 0,
const glu::ShaderType shaderType,
const IndexExprType indexExprType);
virtual ~OpaqueTypeIndexingCase (void);
+
virtual void initPrograms (vk::SourceCollections& programCollection) const
{
- m_executor->setShaderSources(programCollection);
+ generateSources(m_shaderType, m_shaderSpec, programCollection);
}
- virtual TestInstance* createInstance (Context& context) const = 0;
- void init (void);
protected:
const char* m_name;
const glu::ShaderType m_shaderType;
const IndexExprType m_indexExprType;
ShaderSpec m_shaderSpec;
- de::MovePtr<ShaderExecutor> m_executor;
- UniformSetup* m_uniformSetup;
};
+// Store the test parameters; shader sources are generated later in
+// initPrograms() and the instance is created per-execution by subclasses.
+OpaqueTypeIndexingCase::OpaqueTypeIndexingCase (tcu::TestContext& testCtx,
+ const char* name,
+ const char* description,
+ const glu::ShaderType shaderType,
+ const IndexExprType indexExprType)
+ : TestCase (testCtx, name, description)
+ , m_name (name)
+ , m_shaderType (shaderType)
+ , m_indexExprType (indexExprType)
+{
+}
+
+// No owned resources beyond the base class; nothing to release.
+OpaqueTypeIndexingCase::~OpaqueTypeIndexingCase (void)
+{
+}
+
class OpaqueTypeIndexingTestInstance : public TestInstance
{
public:
OpaqueTypeIndexingTestInstance (Context& context,
const glu::ShaderType shaderType,
const ShaderSpec& shaderSpec,
- ShaderExecutor& executor,
const char* name,
- UniformSetup* uniformSetup,
const IndexExprType indexExprType);
virtual ~OpaqueTypeIndexingTestInstance (void);
const ShaderSpec& m_shaderSpec;
const char* m_name;
const IndexExprType m_indexExprType;
- ShaderExecutor& m_executor;
- UniformSetup* m_uniformSetup;
};
-OpaqueTypeIndexingCase::OpaqueTypeIndexingCase (tcu::TestContext& testCtx,
- const char* name,
- const char* description,
- const glu::ShaderType shaderType,
- const IndexExprType indexExprType)
- : TestCase (testCtx, name, description)
- , m_name (name)
- , m_shaderType (shaderType)
- , m_indexExprType (indexExprType)
- , m_executor (DE_NULL)
- , m_uniformSetup (new UniformSetup())
-{
-}
-
-OpaqueTypeIndexingCase::~OpaqueTypeIndexingCase (void)
-{
-}
-
-void OpaqueTypeIndexingCase::init (void)
-{
- DE_ASSERT(!m_executor);
-
- m_executor = de::MovePtr<ShaderExecutor>(createExecutor(m_shaderType, m_shaderSpec));
- m_testCtx.getLog() << *m_executor;
-}
-
OpaqueTypeIndexingTestInstance::OpaqueTypeIndexingTestInstance (Context& context,
const glu::ShaderType shaderType,
const ShaderSpec& shaderSpec,
- ShaderExecutor& executor,
const char* name,
- UniformSetup* uniformSetup,
const IndexExprType indexExprType)
: TestInstance (context)
, m_testCtx (context.getTestContext())
, m_shaderSpec (shaderSpec)
, m_name (name)
, m_indexExprType (indexExprType)
- , m_executor (executor)
- , m_uniformSetup (uniformSetup)
{
}
}
}
-static deUint32 getFirstFreeBindingLocation (const glu::ShaderType shaderType)
+static void declareUniformIndexVars (std::ostream& str, deUint32 bindingLocation, const char* varPrefix, int numVars)
{
- deUint32 location;
-
- switch (shaderType)
- {
- case glu::SHADERTYPE_TESSELLATION_CONTROL:
- case glu::SHADERTYPE_TESSELLATION_EVALUATION:
- case glu::SHADERTYPE_COMPUTE:
- // 0 - input buffer
- // 1 - output buffer
- location = 2u;
- break;
+ str << "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = " << bindingLocation << ", std140) uniform Indices\n{\n";
- default:
- location = 0u;
- break;
- }
-
- return location;
-}
-
-static void declareUniformIndexVars (std::ostream& str, const char* varPrefix, int numVars, deUint32& bindingLocation)
-{
for (int varNdx = 0; varNdx < numVars; varNdx++)
- str << "layout(set = 0, binding = " << bindingLocation++ << ") uniform buf" << varNdx << " { highp int " << varPrefix << varNdx << "; }" << ";\n";
-}
+ str << "\thighp int " << varPrefix << varNdx << ";\n";
-static void uploadUniformIndices (UniformSetup* uniformSetup, int numIndices, const int* indices, deUint32& bindingLocation)
-{
- for (int varNdx = 0; varNdx < numIndices; varNdx++)
- uniformSetup->addData(new UniformData<int>(bindingLocation++, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, indices[varNdx]));
+ str << "};\n";
}
static TextureType getTextureType (glu::DataType samplerType)
}
}
+//! Test image with 1-pixel dimensions and no mipmaps
+// Owns the image, its device memory, and a view over it; the constructor
+// also uploads a single color value into every layer (see definition below).
+class TestImage
+{
+public:
+ TestImage (Context& context, TextureType texType, tcu::TextureFormat format, const void* colorValue);
+
+ // View over the whole image, suitable for binding as a sampled image.
+ VkImageView getImageView (void) const { return *m_imageView; }
+
+private:
+ const Unique<VkImage> m_image;
+ const UniquePtr<Allocation> m_allocation;
+ const Unique<VkImageView> m_imageView;
+};
+
+// Create a 1x1x1, single-mip, optimal-tiling image for sampling and transfer-dst.
+// Cube textures get 6 array layers and the cube-compatible create flag.
+Move<VkImage> createTestImage (const DeviceInterface& vkd, VkDevice device, TextureType texType, tcu::TextureFormat format)
+{
+ const VkImageCreateInfo createInfo =
+ {
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+ DE_NULL,
+ (texType == TEXTURE_TYPE_CUBE ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : (VkImageCreateFlags)0),
+ getVkImageType(texType),
+ mapTextureFormat(format),
+ makeExtent3D(1, 1, 1),
+ 1u,
+ (texType == TEXTURE_TYPE_CUBE) ? 6u : 1u,
+ VK_SAMPLE_COUNT_1_BIT,
+ VK_IMAGE_TILING_OPTIMAL,
+ VK_IMAGE_USAGE_SAMPLED_BIT|VK_IMAGE_USAGE_TRANSFER_DST_BIT,
+ VK_SHARING_MODE_EXCLUSIVE,
+ 0u,
+ DE_NULL,
+ VK_IMAGE_LAYOUT_UNDEFINED
+ };
+
+ return createImage(vkd, device, &createInfo);
+}
+
+// Allocate device memory for the image (any memory type) and bind it.
+// Returns the allocation, which the caller owns.
+de::MovePtr<Allocation> allocateAndBindMemory (const DeviceInterface& vkd, VkDevice device, Allocator& allocator, VkImage image)
+{
+ de::MovePtr<Allocation> alloc = allocator.allocate(getImageMemoryRequirements(vkd, device, image), MemoryRequirement::Any);
+
+ VK_CHECK(vkd.bindImageMemory(device, image, alloc->getMemory(), alloc->getOffset()));
+
+ return alloc;
+}
+
+// Create an identity-swizzle view covering mip level 0 and all layers
+// (6 for cube textures, 1 otherwise). Depth formats select the depth aspect.
+Move<VkImageView> createTestImageView (const DeviceInterface& vkd, VkDevice device, VkImage image, TextureType texType, tcu::TextureFormat format)
+{
+ const bool isDepthImage = format.order == tcu::TextureFormat::D;
+ const VkImageViewCreateInfo createInfo =
+ {
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ DE_NULL,
+ (VkImageViewCreateFlags)0,
+ image,
+ getVkImageViewType(texType),
+ mapTextureFormat(format),
+ {
+ VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_IDENTITY,
+ VK_COMPONENT_SWIZZLE_IDENTITY,
+ },
+ {
+ (VkImageAspectFlags)(isDepthImage ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT),
+ 0u,
+ 1u,
+ 0u,
+ (texType == TEXTURE_TYPE_CUBE ? 6u : 1u)
+ }
+ };
+
+ return createImageView(vkd, device, &createInfo);
+}
+
+// Create the image, memory, and view, then synchronously upload 'colorValue'
+// into every layer: fill a host-visible staging buffer with one pixel per
+// layer, record a one-time command buffer that transitions the image to
+// TRANSFER_DST, copies the staging buffer in, transitions it to
+// SHADER_READ_ONLY, and wait on a fence before returning.
+TestImage::TestImage (Context& context, TextureType texType, tcu::TextureFormat format, const void* colorValue)
+ : m_image (createTestImage (context.getDeviceInterface(), context.getDevice(), texType, format))
+ , m_allocation (allocateAndBindMemory (context.getDeviceInterface(), context.getDevice(), context.getDefaultAllocator(), *m_image))
+ , m_imageView (createTestImageView (context.getDeviceInterface(), context.getDevice(), *m_image, texType, format))
+{
+ const DeviceInterface& vkd = context.getDeviceInterface();
+ const VkDevice device = context.getDevice();
+
+ const size_t pixelSize = (size_t)format.getPixelSize();
+ const deUint32 numLayers = (texType == TEXTURE_TYPE_CUBE) ? 6u : 1u;
+ const size_t numReplicas = (size_t)numLayers;
+ // One copy of the pixel value per layer.
+ const size_t stagingBufferSize = pixelSize*numReplicas;
+
+ const VkBufferCreateInfo stagingBufferInfo =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ DE_NULL,
+ (VkBufferCreateFlags)0u,
+ (VkDeviceSize)stagingBufferSize,
+ (VkBufferCreateFlags)VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // NOTE(review): cast should arguably be VkBufferUsageFlags; value is correct
+ VK_SHARING_MODE_EXCLUSIVE,
+ 0u,
+ DE_NULL,
+ };
+ const Unique<VkBuffer> stagingBuffer (createBuffer(vkd, device, &stagingBufferInfo));
+ const UniquePtr<Allocation> alloc (context.getDefaultAllocator().allocate(getBufferMemoryRequirements(vkd, device, *stagingBuffer), MemoryRequirement::HostVisible));
+
+ VK_CHECK(vkd.bindBufferMemory(device, *stagingBuffer, alloc->getMemory(), alloc->getOffset()));
+
+ // Replicate the single pixel value into each layer's slot of the staging buffer.
+ for (size_t ndx = 0; ndx < numReplicas; ++ndx)
+ deMemcpy((deUint8*)alloc->getHostPtr() + ndx*pixelSize, colorValue, pixelSize);
+
+ {
+ const VkCommandPoolCreateInfo cmdPoolInfo =
+ {
+ VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+ DE_NULL,
+ (VkCommandPoolCreateFlags)VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
+ context.getUniversalQueueFamilyIndex(),
+ };
+ const Unique<VkCommandPool> cmdPool (createCommandPool(vkd, device, &cmdPoolInfo));
+ const VkCommandBufferAllocateInfo allocInfo =
+ {
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ DE_NULL,
+ *cmdPool,
+ VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ 1u,
+ };
+ const Unique<VkCommandBuffer> cmdBuf (allocateCommandBuffer(vkd, device, &allocInfo));
+ const VkCommandBufferBeginInfo beginInfo =
+ {
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ DE_NULL,
+ (VkCommandBufferUsageFlags)VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
+ (const VkCommandBufferInheritanceInfo*)DE_NULL,
+ };
+ const VkImageAspectFlags imageAspect = (VkImageAspectFlags)(format.order == tcu::TextureFormat::D ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT);
+ // Copy the 1x1 region into all layers at once.
+ const VkBufferImageCopy copyInfo =
+ {
+ 0u,
+ 1u,
+ 1u,
+ {
+ imageAspect,
+ 0u,
+ 0u,
+ numLayers
+ },
+ { 0u, 0u, 0u },
+ { 1u, 1u, 1u }
+ };
+ // UNDEFINED -> TRANSFER_DST before the copy; host stage to transfer stage.
+ const VkImageMemoryBarrier preCopyBarrier =
+ {
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ DE_NULL,
+ (VkAccessFlags)0u,
+ (VkAccessFlags)VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_QUEUE_FAMILY_IGNORED,
+ VK_QUEUE_FAMILY_IGNORED,
+ *m_image,
+ {
+ imageAspect,
+ 0u,
+ 1u,
+ 0u,
+ numLayers
+ }
+ };
+ // TRANSFER_DST -> SHADER_READ_ONLY after the copy, for sampling in shaders.
+ const VkImageMemoryBarrier postCopyBarrier =
+ {
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ DE_NULL,
+ (VkAccessFlags)VK_ACCESS_TRANSFER_WRITE_BIT,
+ (VkAccessFlags)VK_ACCESS_SHADER_READ_BIT,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+ VK_QUEUE_FAMILY_IGNORED,
+ VK_QUEUE_FAMILY_IGNORED,
+ *m_image,
+ {
+ imageAspect,
+ 0u,
+ 1u,
+ 0u,
+ numLayers
+ }
+ };
+
+ VK_CHECK(vkd.beginCommandBuffer(*cmdBuf, &beginInfo));
+ vkd.cmdPipelineBarrier(*cmdBuf,
+ (VkPipelineStageFlags)VK_PIPELINE_STAGE_HOST_BIT,
+ (VkPipelineStageFlags)VK_PIPELINE_STAGE_TRANSFER_BIT,
+ (VkDependencyFlags)0u,
+ 0u,
+ (const VkMemoryBarrier*)DE_NULL,
+ 0u,
+ (const VkBufferMemoryBarrier*)DE_NULL,
+ 1u,
+ &preCopyBarrier);
+ vkd.cmdCopyBufferToImage(*cmdBuf, *stagingBuffer, *m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyInfo);
+ vkd.cmdPipelineBarrier(*cmdBuf,
+ (VkPipelineStageFlags)VK_PIPELINE_STAGE_TRANSFER_BIT,
+ (VkPipelineStageFlags)VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
+ (VkDependencyFlags)0u,
+ 0u,
+ (const VkMemoryBarrier*)DE_NULL,
+ 0u,
+ (const VkBufferMemoryBarrier*)DE_NULL,
+ 1u,
+ &postCopyBarrier);
+ VK_CHECK(vkd.endCommandBuffer(*cmdBuf));
+
+ {
+ const VkFenceCreateInfo fenceInfo =
+ {
+ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+ DE_NULL,
+ (VkFenceCreateFlags)0,
+ };
+ const Unique<VkFence> fence (createFence(vkd, device, &fenceInfo));
+ const VkSubmitInfo submitInfo =
+ {
+ VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ DE_NULL,
+ 0u,
+ (const VkSemaphore*)DE_NULL,
+ (const VkPipelineStageFlags*)DE_NULL,
+ 1u,
+ &cmdBuf.get(),
+ 0u,
+ (const VkSemaphore*)DE_NULL,
+ };
+
+ // Wait for completion so the staging buffer can be safely destroyed on scope exit.
+ VK_CHECK(vkd.queueSubmit(context.getUniversalQueue(), 1u, &submitInfo, *fence));
+ VK_CHECK(vkd.waitForFences(device, 1u, &fence.get(), VK_TRUE, ~0ull));
+ }
+ }
+}
+
+typedef SharedPtr<TestImage> TestImageSp;
+
// SamplerIndexingCaseInstance
class SamplerIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
SamplerIndexingCaseInstance (Context& context,
const glu::ShaderType shaderType,
const ShaderSpec& shaderSpec,
- ShaderExecutor& executor,
const char* name,
glu::DataType samplerType,
const IndexExprType indexExprType,
- UniformSetup* uniformSetup,
const std::vector<int>& lookupIndices);
virtual ~SamplerIndexingCaseInstance (void);
protected:
const glu::DataType m_samplerType;
- const std::vector<int>& m_lookupIndices;
+ const std::vector<int> m_lookupIndices;
};
SamplerIndexingCaseInstance::SamplerIndexingCaseInstance (Context& context,
const glu::ShaderType shaderType,
const ShaderSpec& shaderSpec,
- ShaderExecutor& executor,
const char* name,
glu::DataType samplerType,
const IndexExprType indexExprType,
- UniformSetup* uniformSetup,
const std::vector<int>& lookupIndices)
- : OpaqueTypeIndexingTestInstance (context, shaderType, shaderSpec, executor, name, uniformSetup, indexExprType)
+ : OpaqueTypeIndexingTestInstance (context, shaderType, shaderSpec, name, indexExprType)
, m_samplerType (samplerType)
, m_lookupIndices (lookupIndices)
{
const glu::DataType outputType = getSamplerOutputType(m_samplerType);
const tcu::TextureFormat texFormat = getSamplerTextureFormat(m_samplerType);
const int outLookupStride = numInvocations*getDataTypeScalarSize(outputType);
- std::vector<float> coords;
- std::vector<deUint32> outData;
- std::vector<deUint8> texData (numSamplers * texFormat.getPixelSize());
+ vector<float> coords;
+ vector<deUint32> outData;
+ vector<deUint8> texData (numSamplers * texFormat.getPixelSize());
const tcu::PixelBufferAccess refTexAccess (texFormat, numSamplers, 1, 1, &texData[0]);
de::Random rnd (deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
const TextureType texType = getTextureType(m_samplerType);
- const vk::VkImageType imageType = getVkImageType(texType);
- const vk::VkImageViewType imageViewType = getVkImageViewType(texType);
const tcu::Sampler::FilterMode filterMode = (isShadowSampler(m_samplerType) || isIntegerFormat(texFormat)) ? tcu::Sampler::NEAREST : tcu::Sampler::LINEAR;
// The shadow sampler with unnormalized coordinates is only used with the reference texture. Actual samplers in shaders use normalized coords.
: tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
filterMode, filterMode);
+ const DeviceInterface& vkd = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+ vector<TestImageSp> images;
+ vector<VkSamplerSp> samplers;
+ MovePtr<Buffer> indexBuffer;
+ Move<VkDescriptorSetLayout> extraResourcesLayout;
+ Move<VkDescriptorPool> extraResourcesSetPool;
+ Move<VkDescriptorSet> extraResourcesSet;
+
checkSupported(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
coords.resize(numInvocations * getDataTypeScalarSize(coordType));
outData.resize(numLookups*outLookupStride);
+ for (int ndx = 0; ndx < numSamplers; ++ndx)
{
- std::vector<void*> inputs;
- std::vector<void*> outputs;
- std::vector<int> expandedIndices;
- deUint32 bindingLocation = getFirstFreeBindingLocation(m_shaderType);
+ images.push_back(TestImageSp(new TestImage(m_context, texType, texFormat, &texData[ndx * texFormat.getPixelSize()])));
- inputs.push_back(&coords[0]);
+ {
+ tcu::Sampler samplerCopy (refSampler);
+ samplerCopy.normalizedCoords = true;
+
+ {
+ const VkSamplerCreateInfo samplerParams = mapSampler(samplerCopy, texFormat);
+ samplers.push_back(VkSamplerSp(new Unique<VkSampler>(createSampler(vkd, device, &samplerParams))));
+ }
+ }
+ }
+
+ if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
+ indexBuffer = createUniformIndexBuffer(m_context, numLookups, &m_lookupIndices[0]);
+
+ {
+ const VkDescriptorSetLayoutBinding bindings[] =
+ {
+ { 0u, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, (deUint32)numSamplers, VK_SHADER_STAGE_ALL, DE_NULL },
+ { (deUint32)numSamplers, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL }
+ };
+ const VkDescriptorSetLayoutCreateInfo layoutInfo =
+ {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ DE_NULL,
+ (VkDescriptorSetLayoutCreateFlags)0u,
+ DE_LENGTH_OF_ARRAY(bindings),
+ bindings,
+ };
+
+ extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
+ }
+ {
+ const VkDescriptorPoolSize poolSizes[] =
+ {
+ { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, (deUint32)numSamplers },
+ { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, }
+ };
+ const VkDescriptorPoolCreateInfo poolInfo =
{
- tcu::Sampler sampler = refSampler;
- sampler.normalizedCoords = true;
+ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+ DE_NULL,
+ (VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
+ 1u, // maxSets
+ DE_LENGTH_OF_ARRAY(poolSizes),
+ poolSizes,
+ };
+
+ extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
+ }
- m_uniformSetup->addData(new SamplerUniformData(bindingLocation++, (deUint32)numSamplers, sampler, texFormat, tcu::IVec3(1, 1, 1), imageType, imageViewType, &texData[0]));
+ {
+ const VkDescriptorSetAllocateInfo allocInfo =
+ {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+ DE_NULL,
+ *extraResourcesSetPool,
+ 1u,
+ &extraResourcesLayout.get(),
+ };
+
+ extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
+ }
+
+ {
+ vector<VkDescriptorImageInfo> imageInfos (numSamplers);
+ const VkWriteDescriptorSet descriptorWrite =
+ {
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ DE_NULL,
+ *extraResourcesSet,
+ 0u, // dstBinding
+ 0u, // dstArrayElement
+ (deUint32)numSamplers,
+ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ &imageInfos[0],
+ (const VkDescriptorBufferInfo*)DE_NULL,
+ (const VkBufferView*)DE_NULL,
+ };
+
+ for (int ndx = 0; ndx < numSamplers; ++ndx)
+ {
+ imageInfos[ndx].sampler = **samplers[ndx];
+ imageInfos[ndx].imageView = images[ndx]->getImageView();
+ imageInfos[ndx].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
+ vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
+ }
+
+ if (indexBuffer)
+ {
+ const VkDescriptorBufferInfo bufferInfo =
+ {
+ indexBuffer->getBuffer(),
+ 0u,
+ VK_WHOLE_SIZE
+ };
+ const VkWriteDescriptorSet descriptorWrite =
+ {
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ DE_NULL,
+ *extraResourcesSet,
+ (deUint32)numSamplers, // dstBinding
+ 0u, // dstArrayElement
+ 1u,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ (const VkDescriptorImageInfo*)DE_NULL,
+ &bufferInfo,
+ (const VkBufferView*)DE_NULL,
+ };
+
+ vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
+ }
+
+ {
+ std::vector<void*> inputs;
+ std::vector<void*> outputs;
+ std::vector<int> expandedIndices;
+ UniquePtr<ShaderExecutor> executor (createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
+
+ inputs.push_back(&coords[0]);
+
if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
{
expandedIndices.resize(numInvocations * m_lookupIndices.size());
for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
inputs.push_back(&expandedIndices[lookupNdx*numInvocations]);
}
- else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
- uploadUniformIndices(m_uniformSetup, numLookups, &m_lookupIndices[0], bindingLocation);
for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
outputs.push_back(&outData[outLookupStride*lookupNdx]);
- m_executor.setUniforms(m_uniformSetup);
-
- m_executor.execute(m_context, numInvocations, &inputs[0], &outputs[0]);
+ executor->execute(numInvocations, &inputs[0], &outputs[0], *extraResourcesSet);
}
{
return new SamplerIndexingCaseInstance(ctx,
m_shaderType,
m_shaderSpec,
- *m_executor,
m_name,
m_samplerType,
m_indexExprType,
- m_uniformSetup,
m_lookupIndices);
}
void SamplerIndexingCase::createShaderSpec (void)
{
de::Random rnd (deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
- deUint32 binding = getFirstFreeBindingLocation(m_shaderType);
const char* samplersName = "texSampler";
const char* coordsName = "coords";
const char* indicesPrefix = "index";
global << "const highp int indexBase = 1;\n";
global <<
- "layout(set = 0, binding = " << binding++ << ") uniform highp " << getDataTypeName(m_samplerType) << " " << samplersName << "[" << m_numSamplers << "];\n";
+ "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0) uniform highp " << getDataTypeName(m_samplerType) << " " << samplersName << "[" << m_numSamplers << "];\n";
if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
{
}
}
else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
- declareUniformIndexVars(global, indicesPrefix, m_numLookups, binding);
+ declareUniformIndexVars(global, (deUint32)m_numSamplers, indicesPrefix, m_numLookups);
for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
{
BlockArrayIndexingCaseInstance (Context& context,
const glu::ShaderType shaderType,
const ShaderSpec& shaderSpec,
- ShaderExecutor& executor,
const char* name,
BlockType blockType,
const IndexExprType indexExprType,
- UniformSetup* uniformSetup,
const std::vector<int>& readIndices,
const std::vector<deUint32>& inValues);
virtual ~BlockArrayIndexingCaseInstance (void);
BlockArrayIndexingCaseInstance::BlockArrayIndexingCaseInstance (Context& context,
const glu::ShaderType shaderType,
const ShaderSpec& shaderSpec,
- ShaderExecutor& executor,
const char* name,
BlockType blockType,
const IndexExprType indexExprType,
- UniformSetup* uniformSetup,
const std::vector<int>& readIndices,
const std::vector<deUint32>& inValues)
- : OpaqueTypeIndexingTestInstance (context, shaderType, shaderSpec, executor, name, uniformSetup, indexExprType)
+ : OpaqueTypeIndexingTestInstance (context, shaderType, shaderSpec, name, indexExprType)
, m_blockType (blockType)
, m_readIndices (readIndices)
, m_inValues (inValues)
const int numReads = NUM_READS;
std::vector<deUint32> outValues (numInvocations*numReads);
+ tcu::TestLog& log = m_context.getTestContext().getLog();
+ tcu::TestStatus testResult = tcu::TestStatus::pass("Pass");
+
+ std::vector<int> expandedIndices;
+ std::vector<void*> inputs;
+ std::vector<void*> outputs;
+ const VkBufferUsageFlags bufferUsage = m_blockType == BLOCKTYPE_UNIFORM ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
+ const VkDescriptorType descriptorType = m_blockType == BLOCKTYPE_UNIFORM ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+
+ const DeviceInterface& vkd = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+
+ // \note Using separate buffer per element - might want to test
+ // offsets & single buffer in the future.
+ vector<BufferSp> buffers (m_inValues.size());
+ MovePtr<Buffer> indexBuffer;
+
+ Move<VkDescriptorSetLayout> extraResourcesLayout;
+ Move<VkDescriptorPool> extraResourcesSetPool;
+ Move<VkDescriptorSet> extraResourcesSet;
+
+ checkSupported(descriptorType);
+
+ for (size_t bufferNdx = 0; bufferNdx < m_inValues.size(); ++bufferNdx)
{
- tcu::TestLog& log = m_context.getTestContext().getLog();
- tcu::TestStatus testResult = tcu::TestStatus::pass("Pass");
- std::vector<int> expandedIndices;
- std::vector<void*> inputs;
- std::vector<void*> outputs;
- deUint32 bindingLocation = getFirstFreeBindingLocation(m_shaderType);
- VkDescriptorType descriptorType = m_blockType == BLOCKTYPE_UNIFORM ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
+ buffers[bufferNdx] = BufferSp(new Buffer(m_context, bufferUsage, sizeof(deUint32)));
+ *(deUint32*)buffers[bufferNdx]->getHostPtr() = m_inValues[bufferNdx];
+ buffers[bufferNdx]->flush();
+ }
- checkSupported(descriptorType);
+ if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
+ indexBuffer = createUniformIndexBuffer(m_context, numReads, &m_readIndices[0]);
- m_uniformSetup->addData(new UniformArrayData<deUint32>(bindingLocation++, descriptorType, m_inValues));
+ {
+ const VkDescriptorSetLayoutBinding bindings[] =
+ {
+ { 0u, descriptorType, (deUint32)m_inValues.size(), VK_SHADER_STAGE_ALL, DE_NULL },
+ { (deUint32)m_inValues.size(), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL }
+ };
+ const VkDescriptorSetLayoutCreateInfo layoutInfo =
+ {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ DE_NULL,
+ (VkDescriptorSetLayoutCreateFlags)0u,
+ DE_LENGTH_OF_ARRAY(bindings),
+ bindings,
+ };
- if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
+ extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
+ }
+
+ {
+ const VkDescriptorPoolSize poolSizes[] =
+ {
+ { descriptorType, (deUint32)m_inValues.size() },
+ { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, }
+ };
+ const VkDescriptorPoolCreateInfo poolInfo =
{
- expandedIndices.resize(numInvocations * m_readIndices.size());
+ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+ DE_NULL,
+ (VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
+ 1u, // maxSets
+ DE_LENGTH_OF_ARRAY(poolSizes),
+ poolSizes,
+ };
- for (int readNdx = 0; readNdx < numReads; readNdx++)
- {
- int* dst = &expandedIndices[numInvocations*readNdx];
- std::fill(dst, dst+numInvocations, m_readIndices[readNdx]);
- }
+ extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
+ }
- for (int readNdx = 0; readNdx < numReads; readNdx++)
- inputs.push_back(&expandedIndices[readNdx*numInvocations]);
+ {
+ const VkDescriptorSetAllocateInfo allocInfo =
+ {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+ DE_NULL,
+ *extraResourcesSetPool,
+ 1u,
+ &extraResourcesLayout.get(),
+ };
+
+ extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
+ }
+
+ {
+ vector<VkDescriptorBufferInfo> bufferInfos (m_inValues.size());
+ const VkWriteDescriptorSet descriptorWrite =
+ {
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ DE_NULL,
+ *extraResourcesSet,
+ 0u, // dstBinding
+ 0u, // dstArrayElement
+ (deUint32)m_inValues.size(),
+ descriptorType,
+ (const VkDescriptorImageInfo*)DE_NULL,
+ &bufferInfos[0],
+ (const VkBufferView*)DE_NULL,
+ };
+
+ for (size_t ndx = 0; ndx < m_inValues.size(); ++ndx)
+ {
+ bufferInfos[ndx].buffer = buffers[ndx]->getBuffer();
+ bufferInfos[ndx].offset = 0u;
+ bufferInfos[ndx].range = VK_WHOLE_SIZE;
}
- else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
- uploadUniformIndices(m_uniformSetup, numReads, &m_readIndices[0], bindingLocation);
+
+ vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
+ }
+
+ if (indexBuffer)
+ {
+ const VkDescriptorBufferInfo bufferInfo =
+ {
+ indexBuffer->getBuffer(),
+ 0u,
+ VK_WHOLE_SIZE
+ };
+ const VkWriteDescriptorSet descriptorWrite =
+ {
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ DE_NULL,
+ *extraResourcesSet,
+ (deUint32)m_inValues.size(), // dstBinding
+ 0u, // dstArrayElement
+ 1u,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ (const VkDescriptorImageInfo*)DE_NULL,
+ &bufferInfo,
+ (const VkBufferView*)DE_NULL,
+ };
+
+ vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
+ }
+
+ if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
+ {
+ expandedIndices.resize(numInvocations * m_readIndices.size());
for (int readNdx = 0; readNdx < numReads; readNdx++)
- outputs.push_back(&outValues[readNdx*numInvocations]);
+ {
+ int* dst = &expandedIndices[numInvocations*readNdx];
+ std::fill(dst, dst+numInvocations, m_readIndices[readNdx]);
+ }
- m_executor.setUniforms(m_uniformSetup);
+ for (int readNdx = 0; readNdx < numReads; readNdx++)
+ inputs.push_back(&expandedIndices[readNdx*numInvocations]);
+ }
- m_executor.execute(m_context, numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0]);
+ for (int readNdx = 0; readNdx < numReads; readNdx++)
+ outputs.push_back(&outValues[readNdx*numInvocations]);
- for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
+ {
+ UniquePtr<ShaderExecutor> executor (createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
+
+ executor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0], *extraResourcesSet);
+ }
+
+ for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
+ {
+ for (int readNdx = 0; readNdx < numReads; readNdx++)
{
- for (int readNdx = 0; readNdx < numReads; readNdx++)
- {
- const deUint32 refValue = m_inValues[m_readIndices[readNdx]];
- const deUint32 resValue = outValues[readNdx*numInvocations + invocationNdx];
+ const deUint32 refValue = m_inValues[m_readIndices[readNdx]];
+ const deUint32 resValue = outValues[readNdx*numInvocations + invocationNdx];
- if (refValue != resValue)
- {
- log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx
- << ", read " << readNdx << ": expected "
- << tcu::toHex(refValue) << ", got " << tcu::toHex(resValue)
- << tcu::TestLog::EndMessage;
+ if (refValue != resValue)
+ {
+ log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx
+ << ", read " << readNdx << ": expected "
+ << tcu::toHex(refValue) << ", got " << tcu::toHex(resValue)
+ << tcu::TestLog::EndMessage;
- if (testResult.getCode() == QP_TEST_RESULT_PASS)
- testResult = tcu::TestStatus::fail("Invalid result value");
- }
+ if (testResult.getCode() == QP_TEST_RESULT_PASS)
+ testResult = tcu::TestStatus::fail("Invalid result value");
}
}
-
- return testResult;
}
+
+ return testResult;
}
class BlockArrayIndexingCase : public OpaqueTypeIndexingCase
return new BlockArrayIndexingCaseInstance(ctx,
m_shaderType,
m_shaderSpec,
- *m_executor,
m_name,
m_blockType,
m_indexExprType,
- m_uniformSetup,
m_readIndices,
m_inValues);
}
const int numInstances = BlockArrayIndexingCaseInstance::NUM_INSTANCES;
const int numReads = BlockArrayIndexingCaseInstance::NUM_READS;
de::Random rnd (deInt32Hash(m_shaderType) ^ deInt32Hash(m_blockType) ^ deInt32Hash(m_indexExprType));
- deUint32 binding = getFirstFreeBindingLocation(m_shaderType);
const char* blockName = "Block";
const char* instanceName = "block";
const char* indicesPrefix = "index";
global << "const highp int indexBase = 1;\n";
global <<
- "layout(set = 0, binding = " << binding++ << ") " << interfaceName << " " << blockName << "\n"
+ "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0) " << interfaceName << " " << blockName << "\n"
"{\n"
" highp uint value;\n"
"} " << instanceName << "[" << numInstances << "];\n";
}
}
else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
- declareUniformIndexVars(global, indicesPrefix, numReads, binding);
+ declareUniformIndexVars(global, (deUint32)m_inValues.size(), indicesPrefix, numReads);
for (int readNdx = 0; readNdx < numReads; readNdx++)
{
AtomicCounterIndexingCaseInstance (Context& context,
const glu::ShaderType shaderType,
const ShaderSpec& shaderSpec,
- ShaderExecutor& executor,
const char* name,
- UniformSetup* uniformSetup,
const std::vector<int>& opIndices,
const IndexExprType indexExprType);
virtual ~AtomicCounterIndexingCaseInstance (void);
AtomicCounterIndexingCaseInstance::AtomicCounterIndexingCaseInstance (Context& context,
const glu::ShaderType shaderType,
const ShaderSpec& shaderSpec,
- ShaderExecutor& executor,
const char* name,
- UniformSetup* uniformSetup,
const std::vector<int>& opIndices,
const IndexExprType indexExprType)
- : OpaqueTypeIndexingTestInstance (context, shaderType, shaderSpec, executor, name, uniformSetup, indexExprType)
+ : OpaqueTypeIndexingTestInstance (context, shaderType, shaderSpec, name, indexExprType)
, m_opIndices (opIndices)
{
}
std::vector<void*> inputs;
std::vector<void*> outputs;
std::vector<deUint32> outValues (numInvocations*numOps);
- deUint32 bindingLocation = getFirstFreeBindingLocation(m_shaderType);
- const deUint32 atomicCounterLocation = bindingLocation++;
+ const DeviceInterface& vkd = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+
+ // \note All atomic counters are packed into a single storage buffer here -
+ // might want to test separate per-counter buffers & offsets in the future.
+ Buffer atomicOpBuffer (m_context, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, sizeof(deUint32)*numCounters);
+ MovePtr<Buffer> indexBuffer;
+
+ Move<VkDescriptorSetLayout> extraResourcesLayout;
+ Move<VkDescriptorPool> extraResourcesSetPool;
+ Move<VkDescriptorSet> extraResourcesSet;
checkSupported(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+ deMemset(atomicOpBuffer.getHostPtr(), 0, sizeof(deUint32)*numCounters);
+ atomicOpBuffer.flush();
+
+ if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
+ indexBuffer = createUniformIndexBuffer(m_context, numOps, &m_opIndices[0]);
+
{
- DE_ASSERT(numCounters <= 4);
- // Add the atomic counters' base value, all zero.
- m_uniformSetup->addData(new UniformData<tcu::Mat4>(atomicCounterLocation, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, tcu::Mat4(0.0)));
+ const VkDescriptorSetLayoutBinding bindings[] =
+ {
+ { 0u, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL },
+ { 1u, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL }
+ };
+ const VkDescriptorSetLayoutCreateInfo layoutInfo =
+ {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ DE_NULL,
+ (VkDescriptorSetLayoutCreateFlags)0u,
+ DE_LENGTH_OF_ARRAY(bindings),
+ bindings,
+ };
- if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
+ extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
+ }
+
+ {
+ const VkDescriptorPoolSize poolSizes[] =
{
- expandedIndices.resize(numInvocations * m_opIndices.size());
+ { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u, },
+ { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, }
+ };
+ const VkDescriptorPoolCreateInfo poolInfo =
+ {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+ DE_NULL,
+ (VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
+ 1u, // maxSets
+ DE_LENGTH_OF_ARRAY(poolSizes),
+ poolSizes,
+ };
- for (int opNdx = 0; opNdx < numOps; opNdx++)
- {
- int* dst = &expandedIndices[numInvocations*opNdx];
- std::fill(dst, dst+numInvocations, m_opIndices[opNdx]);
- }
+ extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
+ }
- for (int opNdx = 0; opNdx < numOps; opNdx++)
- inputs.push_back(&expandedIndices[opNdx*numInvocations]);
+ {
+ const VkDescriptorSetAllocateInfo allocInfo =
+ {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+ DE_NULL,
+ *extraResourcesSetPool,
+ 1u,
+ &extraResourcesLayout.get(),
+ };
+
+ extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
+ }
+
+ {
+ const VkDescriptorBufferInfo bufferInfo =
+ {
+ atomicOpBuffer.getBuffer(),
+ 0u,
+ VK_WHOLE_SIZE
+ };
+ const VkWriteDescriptorSet descriptorWrite =
+ {
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ DE_NULL,
+ *extraResourcesSet,
+ 0u, // dstBinding
+ 0u, // dstArrayElement
+ 1u,
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+ (const VkDescriptorImageInfo*)DE_NULL,
+ &bufferInfo,
+ (const VkBufferView*)DE_NULL,
+ };
+
+ vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
+ }
+
+ if (indexBuffer)
+ {
+ const VkDescriptorBufferInfo bufferInfo =
+ {
+ indexBuffer->getBuffer(),
+ 0u,
+ VK_WHOLE_SIZE
+ };
+ const VkWriteDescriptorSet descriptorWrite =
+ {
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ DE_NULL,
+ *extraResourcesSet,
+ 1u, // dstBinding
+ 0u, // dstArrayElement
+ 1u,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ (const VkDescriptorImageInfo*)DE_NULL,
+ &bufferInfo,
+ (const VkBufferView*)DE_NULL,
+ };
+
+ vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
+ }
+
+ if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
+ {
+ expandedIndices.resize(numInvocations * m_opIndices.size());
+
+ for (int opNdx = 0; opNdx < numOps; opNdx++)
+ {
+ int* dst = &expandedIndices[numInvocations*opNdx];
+ std::fill(dst, dst+numInvocations, m_opIndices[opNdx]);
}
- else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
- uploadUniformIndices(m_uniformSetup, numOps, &m_opIndices[0], bindingLocation);
for (int opNdx = 0; opNdx < numOps; opNdx++)
- outputs.push_back(&outValues[opNdx*numInvocations]);
+ inputs.push_back(&expandedIndices[opNdx*numInvocations]);
+ }
- m_executor.setUniforms(m_uniformSetup);
+ for (int opNdx = 0; opNdx < numOps; opNdx++)
+ outputs.push_back(&outValues[opNdx*numInvocations]);
+
+ {
+ UniquePtr<ShaderExecutor> executor (createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
- m_executor.execute(m_context, numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0]);
+ executor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0], *extraResourcesSet);
}
{
// Read counter values
{
- const void* mapPtr = m_executor.getBufferPtr(atomicCounterLocation);
+ const void* mapPtr = atomicOpBuffer.getHostPtr();
DE_ASSERT(mapPtr != DE_NULL);
+ atomicOpBuffer.invalidate();
std::copy((const deUint32*)mapPtr, (const deUint32*)mapPtr + numCounters, &counterValues[0]);
}
return new AtomicCounterIndexingCaseInstance(ctx,
m_shaderType,
m_shaderSpec,
- *m_executor,
m_name,
- m_uniformSetup,
m_opIndices,
m_indexExprType);
}
{
const int numCounters = AtomicCounterIndexingCaseInstance::NUM_COUNTERS;
const int numOps = AtomicCounterIndexingCaseInstance::NUM_OPS;
- deUint32 binding = getFirstFreeBindingLocation(m_shaderType);
de::Random rnd (deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
for (int opNdx = 0; opNdx < numOps; opNdx++)
global << "const highp int indexBase = 1;\n";
global <<
- "layout(set = 0, binding = " << binding++ << ") buffer AtomicBuffer { highp uint counter[" << numCounters << "]; };\n";
+ "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0, std430) buffer AtomicBuffer { highp uint counter[" << numCounters << "]; };\n";
if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
{
}
}
else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
- declareUniformIndexVars(global, indicesPrefix, numOps, binding);
+ declareUniformIndexVars(global, 1, indicesPrefix, numOps);
for (int opNdx = 0; opNdx < numOps; opNdx++)
{
{
public:
BuiltinPrecisionCaseTestInstance (Context& context,
- const CaseContext caseCtx,
- ShaderExecutor& executor,
- const Variables<In, Out> variables,
- const Samplings<In>& samplings,
- const StatementP stmt)
+ const CaseContext caseCtx,
+ const ShaderSpec& shaderSpec,
+ const Variables<In, Out> variables,
+ const Samplings<In>& samplings,
+ const StatementP stmt)
: TestInstance (context)
, m_caseCtx (caseCtx)
- , m_executor (executor)
, m_variables (variables)
, m_samplings (samplings)
, m_stmt (stmt)
+ , m_executor (createExecutor(context, caseCtx.shaderType, shaderSpec))
{
}
virtual tcu::TestStatus iterate (void);
protected:
CaseContext m_caseCtx;
- ShaderExecutor& m_executor;
Variables<In, Out> m_variables;
const Samplings<In>& m_samplings;
StatementP m_stmt;
+ de::UniquePtr<ShaderExecutor> m_executor;
};
template<class In, class Out>
default: break;
}
- m_executor.execute(m_context, int(numValues), inputArr, outputArr);
+ m_executor->execute(int(numValues), inputArr, outputArr);
// Initialize environment with dummy values so we don't need to bind in inner loop.
{
: TestCase (context.testContext, name.c_str(), name.c_str())
, m_ctx (context)
, m_extension (extension)
- , m_executor (DE_NULL)
{
}
virtual void initPrograms (vk::SourceCollections& programCollection) const
{
- m_executor->setShaderSources(programCollection);
+ generateSources(m_ctx.shaderType, m_spec, programCollection);
}
const FloatFormat& getFormat (void) const { return m_ctx.floatFormat; }
return Symbol(variable.getName(), getVarTypeOf<T>(m_ctx.precision));
}
- CaseContext m_ctx;
- const string m_extension;
- ShaderSpec m_spec;
- de::MovePtr<ShaderExecutor> m_executor;
+ CaseContext m_ctx;
+ const string m_extension;
+ ShaderSpec m_spec;
};
template <typename In, typename Out>
}
m_spec.source = de::toString(stmt);
-
- m_executor = de::MovePtr<ShaderExecutor>(createExecutor(m_ctx.shaderType, m_spec));
}
template <typename T>
virtual TestInstance* createInstance (Context& context) const
{
- return new BuiltinPrecisionCaseTestInstance<In, Out>(context, m_ctx, *m_executor, m_variables, getSamplings(), m_stmt);
+ return new BuiltinPrecisionCaseTestInstance<In, Out>(context, m_ctx, m_spec, m_variables, getSamplings(), m_stmt);
}
protected:
}
virtual TestInstance* createInstance (Context& context) const
{
- return new BuiltinPrecisionCaseTestInstance<In, Out>(context, m_ctx, *m_executor, m_variables, getSamplings(), m_stmt);
+ return new BuiltinPrecisionCaseTestInstance<In, Out>(context, m_ctx, m_spec, m_variables, getSamplings(), m_stmt);
}
protected:
~CommonFunctionCase (void);
virtual void initPrograms (vk::SourceCollections& programCollection) const
{
- m_executor->setShaderSources(programCollection);
+ generateSources(m_shaderType, m_spec, programCollection);
}
virtual TestInstance* createInstance (Context& context) const = 0;
- void init (void);
-
protected:
- CommonFunctionCase (const CommonFunctionCase& other);
- CommonFunctionCase& operator= (const CommonFunctionCase& other);
+ CommonFunctionCase (const CommonFunctionCase&);
+ CommonFunctionCase& operator= (const CommonFunctionCase&);
const glu::ShaderType m_shaderType;
ShaderSpec m_spec;
const int m_numValues;
- de::MovePtr<ShaderExecutor> m_executor;
};
CommonFunctionCase::CommonFunctionCase (tcu::TestContext& testCtx, const char* name, const char* description, glu::ShaderType shaderType)
: TestCase (testCtx, name, description)
, m_shaderType (shaderType)
, m_numValues (100)
- , m_executor (DE_NULL)
{
}
{
}
-void CommonFunctionCase::init (void)
-{
- DE_ASSERT(!m_executor);
-
- m_executor = de::MovePtr<ShaderExecutor>(createExecutor(m_shaderType, m_spec));
- m_testCtx.getLog() << *m_executor;
-}
-
// CommonFunctionTestInstance
class CommonFunctionTestInstance : public TestInstance
{
public:
- CommonFunctionTestInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
+ CommonFunctionTestInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
: TestInstance (context)
, m_shaderType (shaderType)
, m_spec (spec)
, m_numValues (numValues)
, m_name (name)
- , m_executor (executor)
+ , m_executor (createExecutor(context, shaderType, spec))
{
}
virtual tcu::TestStatus iterate (void);
virtual bool compare (const void* const* inputs, const void* const* outputs) = 0;
const glu::ShaderType m_shaderType;
- ShaderSpec m_spec;
+ const ShaderSpec m_spec;
const int m_numValues;
+ // \todo [2017-03-07 pyry] Hack used to generate seeds for test cases - get rid of this.
const char* m_name;
std::ostringstream m_failMsg; //!< Comparison failure help message.
- ShaderExecutor& m_executor;
+ de::UniquePtr<ShaderExecutor> m_executor;
};
tcu::TestStatus CommonFunctionTestInstance::iterate (void)
getInputValues(m_numValues, &inputPointers[0]);
// Execute shader.
- m_executor.execute(m_context, m_numValues, &inputPointers[0], &outputPointers[0]);
+ m_executor->execute(m_numValues, &inputPointers[0], &outputPointers[0]);
// Compare results.
{
class AbsCaseInstance : public CommonFunctionTestInstance
{
public:
- AbsCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ AbsCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
m_spec.source = "out0 = abs(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new AbsCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new AbsCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class SignCaseInstance : public CommonFunctionTestInstance
{
public:
- SignCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ SignCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
m_spec.source = "out0 = sign(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new SignCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new SignCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class RoundEvenCaseInstance : public CommonFunctionTestInstance
{
public:
- RoundEvenCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ RoundEvenCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance(context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
m_spec.source = "out0 = roundEven(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new RoundEvenCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new RoundEvenCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class ModfCaseInstance : public CommonFunctionTestInstance
{
public:
- ModfCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ ModfCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance(context, shaderType, spec, numValues, name)
{
}
m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("out1", glu::VarType(baseType, precision)));
m_spec.source = "out0 = modf(in0, out1);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new ModfCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new ModfCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class IsnanCaseInstance : public CommonFunctionTestInstance
{
public:
- IsnanCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ IsnanCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("out0", glu::VarType(boolType, glu::PRECISION_LAST)));
m_spec.source = "out0 = isnan(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new IsnanCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new IsnanCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class IsinfCaseInstance : public CommonFunctionTestInstance
{
public:
- IsinfCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ IsinfCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance(context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("out0", glu::VarType(boolType, glu::PRECISION_LAST)));
m_spec.source = "out0 = isinf(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new IsinfCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new IsinfCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class FloatBitsToUintIntCaseInstance : public CommonFunctionTestInstance
{
public:
- FloatBitsToUintIntCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ FloatBitsToUintIntCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("out0", glu::VarType(intType, glu::PRECISION_HIGHP)));
m_spec.source = outIsSigned ? "out0 = floatBitsToInt(in0);" : "out0 = floatBitsToUint(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new FloatBitsToUintIntCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new FloatBitsToUintIntCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class FloatBitsToIntCaseInstance : public FloatBitsToUintIntCaseInstance
{
public:
- FloatBitsToIntCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : FloatBitsToUintIntCaseInstance (context, shaderType, spec, executor, numValues, name)
+ FloatBitsToIntCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : FloatBitsToUintIntCaseInstance (context, shaderType, spec, numValues, name)
{
}
};
class FloatBitsToUintCaseInstance : public FloatBitsToUintIntCaseInstance
{
public:
- FloatBitsToUintCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : FloatBitsToUintIntCaseInstance (context, shaderType, spec, executor, numValues, name)
+ FloatBitsToUintCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : FloatBitsToUintIntCaseInstance (context, shaderType, spec, numValues, name)
{
}
};
class BitsToFloatCaseInstance : public CommonFunctionTestInstance
{
public:
- BitsToFloatCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ BitsToFloatCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, glu::PRECISION_HIGHP)));
m_spec.outputs.push_back(Symbol("out0", glu::VarType(floatType, glu::PRECISION_HIGHP)));
m_spec.source = inIsSigned ? "out0 = intBitsToFloat(in0);" : "out0 = uintBitsToFloat(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new BitsToFloatCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new BitsToFloatCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class FloorCaseInstance : public CommonFunctionTestInstance
{
public:
- FloorCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ FloorCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
m_spec.source = "out0 = floor(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new FloorCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new FloorCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class TruncCaseInstance : public CommonFunctionTestInstance
{
public:
- TruncCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ TruncCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
m_spec.source = "out0 = trunc(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new TruncCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new TruncCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class RoundCaseInstance : public CommonFunctionTestInstance
{
public:
- RoundCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ RoundCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
m_spec.source = "out0 = round(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new RoundCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new RoundCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class CeilCaseInstance : public CommonFunctionTestInstance
{
public:
- CeilCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ CeilCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
m_spec.source = "out0 = ceil(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new CeilCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new CeilCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class FractCaseInstance : public CommonFunctionTestInstance
{
public:
- FractCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ FractCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("in0", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, precision)));
m_spec.source = "out0 = fract(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new FractCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new FractCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class FrexpCaseInstance : public CommonFunctionTestInstance
{
public:
- FrexpCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ FrexpCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, glu::PRECISION_HIGHP)));
m_spec.outputs.push_back(Symbol("out1", glu::VarType(intType, glu::PRECISION_HIGHP)));
m_spec.source = "out0 = frexp(in0, out1);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new FrexpCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new FrexpCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class LdexpCaseInstance : public CommonFunctionTestInstance
{
public:
- LdexpCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ LdexpCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("in1", glu::VarType(intType, glu::PRECISION_HIGHP)));
m_spec.outputs.push_back(Symbol("out0", glu::VarType(baseType, glu::PRECISION_HIGHP)));
m_spec.source = "out0 = ldexp(in0, in1);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new LdexpCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new LdexpCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class FmaCaseInstance : public CommonFunctionTestInstance
{
public:
- FmaCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : CommonFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ FmaCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : CommonFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
class FmaCase : public CommonFunctionCase
{
public:
- void init (void)
- {
- CommonFunctionCase::init();
- }
-
FmaCase (tcu::TestContext& testCtx, glu::DataType baseType, glu::Precision precision, glu::ShaderType shaderType)
: CommonFunctionCase (testCtx, getCommonFuncCaseName(baseType, precision, shaderType).c_str(), "fma", shaderType)
{
m_spec.outputs.push_back(Symbol("res", glu::VarType(baseType, precision)));
m_spec.source = "res = fma(a, b, c);";
m_spec.globalDeclarations = "#extension GL_EXT_gpu_shader5 : require\n";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new FmaCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new FmaCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
*//*--------------------------------------------------------------------*/
#include "vktShaderExecutor.hpp"
-#include <map>
-#include <sstream>
-#include <iostream>
-
-#include "tcuVector.hpp"
-#include "tcuTestLog.hpp"
-#include "tcuFormatUtil.hpp"
-#include "tcuTextureUtil.hpp"
-#include "deUniquePtr.hpp"
-#include "deStringUtil.hpp"
-#include "deSharedPtr.hpp"
#include "vkMemUtil.hpp"
#include "vkRef.hpp"
-#include "vkPlatform.hpp"
#include "vkPrograms.hpp"
-#include "vkStrUtil.hpp"
#include "vkRefUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkQueryUtil.hpp"
-#include "vkDeviceUtil.hpp"
-#include "vkImageUtil.hpp"
+#include "vkBuilderUtil.hpp"
#include "gluShaderUtil.hpp"
+#include "tcuVector.hpp"
+#include "tcuTestLog.hpp"
+#include "tcuTextureUtil.hpp"
+
+#include "deUniquePtr.hpp"
+#include "deStringUtil.hpp"
+#include "deSharedPtr.hpp"
+
+#include <map>
+#include <sstream>
+#include <iostream>
+
using std::vector;
using namespace vk;
DEFAULT_RENDER_HEIGHT = 100,
};
+// Common typedefs
+
+typedef de::SharedPtr<Unique<VkImage> > VkImageSp;
+typedef de::SharedPtr<Unique<VkImageView> > VkImageViewSp;
+typedef de::SharedPtr<Unique<VkBuffer> > VkBufferSp;
+typedef de::SharedPtr<Allocation> AllocationSp;
+
// Shader utilities
static VkClearValue getDefaultClearColor (void)
return makeClearValueColorF32(0.125f, 0.25f, 0.5f, 1.0f);
}
-static void checkSupported (const Context& ctx, glu::ShaderType shaderType)
-{
- const VkPhysicalDeviceFeatures& features = ctx.getDeviceFeatures();
-
- if (shaderType == glu::SHADERTYPE_GEOMETRY && !features.geometryShader)
- TCU_THROW(NotSupportedError, "Geometry shader type not supported by device");
- else if (shaderType == glu::SHADERTYPE_TESSELLATION_CONTROL && !features.tessellationShader)
- TCU_THROW(NotSupportedError, "Tessellation shader type not supported by device");
- else if (shaderType == glu::SHADERTYPE_TESSELLATION_EVALUATION && !features.tessellationShader)
- TCU_THROW(NotSupportedError, "Tessellation shader type not supported by device");
-}
-
-static std::string generateEmptyFragmentSource ()
+static std::string generateEmptyFragmentSource (void)
{
std::ostringstream src;
class FragmentOutExecutor : public ShaderExecutor
{
public:
- FragmentOutExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType);
+ FragmentOutExecutor (Context& context, glu::ShaderType shaderType, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout);
virtual ~FragmentOutExecutor (void);
- virtual void execute (const Context& ctx,
- int numValues,
+ virtual void execute (int numValues,
const void* const* inputs,
- void* const* outputs);
+ void* const* outputs,
+ VkDescriptorSet extraResources);
protected:
+ const glu::ShaderType m_shaderType;
const FragmentOutputLayout m_outputLayout;
+
private:
- void bindAttributes (const Context& ctx,
- Allocator& memAlloc,
- int numValues,
+ void bindAttributes (int numValues,
const void* const* inputs);
- void addAttribute (const Context& ctx,
- Allocator& memAlloc,
- deUint32 bindingLocation,
+ void addAttribute (deUint32 bindingLocation,
VkFormat format,
deUint32 sizePerElement,
deUint32 count,
// reinit render data members
virtual void clearRenderData (void);
- typedef de::SharedPtr<Unique<VkImage> > VkImageSp;
- typedef de::SharedPtr<Unique<VkImageView> > VkImageViewSp;
- typedef de::SharedPtr<Unique<VkBuffer> > VkBufferSp;
- typedef de::SharedPtr<de::UniquePtr<Allocation> > AllocationSp;
+ const VkDescriptorSetLayout m_extraResourcesLayout;
std::vector<VkVertexInputBindingDescription> m_vertexBindingDescriptions;
std::vector<VkVertexInputAttributeDescription> m_vertexAttributeDescriptions;
return ret;
}
-FragmentOutExecutor::FragmentOutExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType)
- : ShaderExecutor (shaderSpec, shaderType)
- , m_outputLayout (computeFragmentOutputLayout(m_shaderSpec.outputs))
+FragmentOutExecutor::FragmentOutExecutor (Context& context, glu::ShaderType shaderType, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout)
+ : ShaderExecutor (context, shaderSpec)
+ , m_shaderType (shaderType)
+ , m_outputLayout (computeFragmentOutputLayout(m_shaderSpec.outputs))
+ , m_extraResourcesLayout (extraResourcesLayout)
{
}
}
}
-void FragmentOutExecutor::addAttribute (const Context& ctx, Allocator& memAlloc, deUint32 bindingLocation, VkFormat format, deUint32 sizePerElement, deUint32 count, const void* dataPtr)
+void FragmentOutExecutor::addAttribute (deUint32 bindingLocation, VkFormat format, deUint32 sizePerElement, deUint32 count, const void* dataPtr)
{
// Add binding specification
- const deUint32 binding = (deUint32)m_vertexBindingDescriptions.size();
- const VkVertexInputBindingDescription bindingDescription =
+ const deUint32 binding = (deUint32)m_vertexBindingDescriptions.size();
+ const VkVertexInputBindingDescription bindingDescription =
{
binding,
sizePerElement,
m_vertexAttributeDescriptions.push_back(attributeDescription);
// Upload data to buffer
- const VkDevice vkDevice = ctx.getDevice();
- const DeviceInterface& vk = ctx.getDeviceInterface();
- const deUint32 queueFamilyIndex = ctx.getUniversalQueueFamilyIndex();
+ const VkDevice vkDevice = m_context.getDevice();
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
- const VkDeviceSize inputSize = sizePerElement * count;
- const VkBufferCreateInfo vertexBufferParams =
+ const VkDeviceSize inputSize = sizePerElement * count;
+ const VkBufferCreateInfo vertexBufferParams =
{
VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
DE_NULL, // const void* pNext;
&queueFamilyIndex // const deUint32* pQueueFamilyIndices;
};
- Move<VkBuffer> buffer = createBuffer(vk, vkDevice, &vertexBufferParams);
- de::MovePtr<Allocation> alloc = memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
+ Move<VkBuffer> buffer = createBuffer(vk, vkDevice, &vertexBufferParams);
+ de::MovePtr<Allocation> alloc = m_context.getDefaultAllocator().allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
+
VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, alloc->getMemory(), alloc->getOffset()));
deMemcpy(alloc->getHostPtr(), dataPtr, (size_t)inputSize);
flushMappedMemoryRange(vk, vkDevice, alloc->getMemory(), alloc->getOffset(), inputSize);
m_vertexBuffers.push_back(de::SharedPtr<Unique<VkBuffer> >(new Unique<VkBuffer>(buffer)));
- m_vertexBufferAllocs.push_back(de::SharedPtr<de::UniquePtr<Allocation> >(new de::UniquePtr<Allocation>(alloc)));
+ m_vertexBufferAllocs.push_back(AllocationSp(alloc.release()));
}
-void FragmentOutExecutor::bindAttributes (const Context& ctx, Allocator& memAlloc, int numValues, const void* const* inputs)
+void FragmentOutExecutor::bindAttributes (int numValues, const void* const* inputs)
{
// Input attributes
for (int inputNdx = 0; inputNdx < (int)m_shaderSpec.inputs.size(); inputNdx++)
// add attributes, in case of matrix every column is binded as an attribute
for (int attrNdx = 0; attrNdx < numAttrsToAdd; attrNdx++)
{
- addAttribute(ctx, memAlloc, (deUint32)m_vertexBindingDescriptions.size(), format, elementSize * vecSize, numValues, ptr);
+ addAttribute((deUint32)m_vertexBindingDescriptions.size(), format, elementSize * vecSize, numValues, ptr);
}
}
}
m_vertexBufferAllocs.clear();
}
-void FragmentOutExecutor::execute (const Context& ctx, int numValues, const void* const* inputs, void* const* outputs)
+static Move<VkDescriptorSetLayout> createEmptyDescriptorSetLayout (const DeviceInterface& vkd, VkDevice device)
+{
+ const VkDescriptorSetLayoutCreateInfo createInfo =
+ {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ DE_NULL,
+ (VkDescriptorSetLayoutCreateFlags)0,
+ 0u,
+ DE_NULL,
+ };
+ return createDescriptorSetLayout(vkd, device, &createInfo);
+}
+
+static Move<VkDescriptorPool> createDummyDescriptorPool (const DeviceInterface& vkd, VkDevice device)
+{
+ const VkDescriptorPoolSize dummySize =
+ {
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ 1u,
+ };
+ const VkDescriptorPoolCreateInfo createInfo =
+ {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+ DE_NULL,
+ (VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
+ 1u,
+ 1u,
+ &dummySize
+ };
+ return createDescriptorPool(vkd, device, &createInfo);
+}
+
+static Move<VkDescriptorSet> allocateSingleDescriptorSet (const DeviceInterface& vkd, VkDevice device, VkDescriptorPool pool, VkDescriptorSetLayout layout)
{
- checkSupported(ctx, m_shaderType);
+ const VkDescriptorSetAllocateInfo allocInfo =
+ {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+ DE_NULL,
+ pool,
+ 1u,
+ &layout,
+ };
+ return allocateDescriptorSet(vkd, device, &allocInfo);
+}
- const VkDevice vkDevice = ctx.getDevice();
- const DeviceInterface& vk = ctx.getDeviceInterface();
- const VkQueue queue = ctx.getUniversalQueue();
- const deUint32 queueFamilyIndex = ctx.getUniversalQueueFamilyIndex();
- Allocator& memAlloc = ctx.getDefaultAllocator();
+void FragmentOutExecutor::execute (int numValues, const void* const* inputs, void* const* outputs, VkDescriptorSet extraResources)
+{
+ const VkDevice vkDevice = m_context.getDevice();
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const VkQueue queue = m_context.getUniversalQueue();
+ const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
+ Allocator& memAlloc = m_context.getDefaultAllocator();
const deUint32 renderSizeX = de::min(static_cast<deUint32>(DEFAULT_RENDER_WIDTH), (deUint32)numValues);
const deUint32 renderSizeY = ((deUint32)numValues / renderSizeX) + (((deUint32)numValues % renderSizeX != 0) ? 1u : 0u);
Move<VkFence> fence;
- Move<VkDescriptorPool> descriptorPool;
- Move<VkDescriptorSetLayout> descriptorSetLayout;
- Move<VkDescriptorSet> descriptorSet;
+ Unique<VkDescriptorSetLayout> emptyDescriptorSetLayout (createEmptyDescriptorSetLayout(vk, vkDevice));
+ Unique<VkDescriptorPool> dummyDescriptorPool (createDummyDescriptorPool(vk, vkDevice));
+ Unique<VkDescriptorSet> emptyDescriptorSet (allocateSingleDescriptorSet(vk, vkDevice, *dummyDescriptorPool, *emptyDescriptorSetLayout));
clearRenderData();
positions = computeVertexPositions(numValues, renderSize.cast<int>());
// Bind attributes
- addAttribute(ctx, memAlloc, 0u, VK_FORMAT_R32G32_SFLOAT, sizeof(tcu::Vec2), (deUint32)positions.size(), &positions[0]);
- bindAttributes(ctx, memAlloc, numValues, inputs);
+ addAttribute(0u, VK_FORMAT_R32G32_SFLOAT, sizeof(tcu::Vec2), (deUint32)positions.size(), &positions[0]);
+ bindAttributes(numValues, inputs);
// Create color images
{
{
de::MovePtr<Allocation> colorImageAlloc = memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *((const VkImage*) colorImages.back().get())), MemoryRequirement::Any);
VK_CHECK(vk.bindImageMemory(vkDevice, colorImages.back().get()->get(), colorImageAlloc->getMemory(), colorImageAlloc->getOffset()));
- colorImageAllocs.push_back(de::SharedPtr<de::UniquePtr<Allocation> >(new de::UniquePtr<Allocation>(colorImageAlloc)));
+ colorImageAllocs.push_back(de::SharedPtr<Allocation>(colorImageAlloc.release()));
attachments.push_back(colorAttachmentDescription);
colorBlendAttachmentStates.push_back(colorBlendAttachmentState);
- const VkAttachmentReference colorAttachmentReference = {
+ const VkAttachmentReference colorAttachmentReference =
+ {
(deUint32) (colorImages.size() - 1), // deUint32 attachment;
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
};
framebuffer = createFramebuffer(vk, vkDevice, &framebufferParams);
}
- // Create descriptors
+ // Create pipeline layout
{
- addUniforms(vkDevice, vk, queue, queueFamilyIndex, memAlloc);
-
- descriptorSetLayout = m_descriptorSetLayoutBuilder.build(vk, vkDevice);
- if (!m_uniformInfos.empty())
- descriptorPool = m_descriptorPoolBuilder.build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
- else
- {
- const VkDescriptorPoolSize poolSizeCount = { vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1 };
- const VkDescriptorPoolCreateInfo createInfo =
- {
- VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
- DE_NULL,
- VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
- 1u,
- 1u,
- &poolSizeCount,
- };
-
- descriptorPool = createDescriptorPool(vk, vkDevice, &createInfo);
- }
-
- const VkDescriptorSetAllocateInfo allocInfo =
+ const VkDescriptorSetLayout setLayouts[] =
{
- VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
- DE_NULL,
- *descriptorPool,
- 1u,
- &*descriptorSetLayout
+ *emptyDescriptorSetLayout,
+ m_extraResourcesLayout
};
-
- descriptorSet = allocateDescriptorSet(vk, vkDevice, &allocInfo);
-
- // Update descriptors
- {
- vk::DescriptorSetUpdateBuilder descriptorSetUpdateBuilder;
-
- uploadUniforms(descriptorSetUpdateBuilder, *descriptorSet);
-
- descriptorSetUpdateBuilder.update(vk, vkDevice);
- }
- }
-
- // Create pipeline layout
- {
- const VkPipelineLayoutCreateInfo pipelineLayoutParams =
+ const VkPipelineLayoutCreateInfo pipelineLayoutParams =
{
VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
DE_NULL, // const void* pNext;
(VkPipelineLayoutCreateFlags)0, // VkPipelineLayoutCreateFlags flags;
- 1, // deUint32 descriptorSetCount;
- &*descriptorSetLayout, // const VkDescriptorSetLayout* pSetLayouts;
+ (m_extraResourcesLayout != 0 ? 2u : 0u), // deUint32 descriptorSetCount;
+ setLayouts, // const VkDescriptorSetLayout* pSetLayouts;
0u, // deUint32 pushConstantRangeCount;
DE_NULL // const VkPushConstantRange* pPushConstantRanges;
};
// Create shaders
{
- vertexShaderModule = createShaderModule(vk, vkDevice, ctx.getBinaryCollection().get("vert"), 0);
- fragmentShaderModule = createShaderModule(vk, vkDevice, ctx.getBinaryCollection().get("frag"), 0);
+ vertexShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get("vert"), 0);
+ fragmentShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get("frag"), 0);
if (useGeometryShader)
{
- geometryShaderModule = createShaderModule(vk, vkDevice, ctx.getBinaryCollection().get("geom"), 0);
+ geometryShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get("geom"), 0);
}
}
vk.cmdBeginRenderPass(*cmdBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline);
- vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);
+
+ if (m_extraResourcesLayout != 0)
+ {
+ DE_ASSERT(extraResources != 0);
+ const VkDescriptorSet descriptorSets[] = { *emptyDescriptorSet, extraResources };
+ vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, DE_LENGTH_OF_ARRAY(descriptorSets), descriptorSets, 0u, DE_NULL);
+ }
+ else
+ DE_ASSERT(extraResources == 0);
const deUint32 numberOfVertexAttributes = (deUint32)m_vertexBuffers.size();
class VertexShaderExecutor : public FragmentOutExecutor
{
public:
- VertexShaderExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType);
+ VertexShaderExecutor (Context& context, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout);
virtual ~VertexShaderExecutor (void);
- virtual void log (tcu::TestLog& dst) const { /* TODO */ (void)dst;}
-
- virtual void setShaderSources (SourceCollections& programCollection) const;
-
+ static void generateSources (const ShaderSpec& shaderSpec, SourceCollections& dst);
};
-VertexShaderExecutor::VertexShaderExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType)
- : FragmentOutExecutor (shaderSpec, shaderType)
+VertexShaderExecutor::VertexShaderExecutor (Context& context, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout)
+ : FragmentOutExecutor(context, glu::SHADERTYPE_VERTEX, shaderSpec, extraResourcesLayout)
{
}
{
}
-void VertexShaderExecutor::setShaderSources (SourceCollections& programCollection) const
+void VertexShaderExecutor::generateSources (const ShaderSpec& shaderSpec, SourceCollections& programCollection)
{
- programCollection.glslSources.add("vert") << glu::VertexSource(generateVertexShader(m_shaderSpec, "a_", "vtx_out_"));
+ const FragmentOutputLayout outputLayout (computeFragmentOutputLayout(shaderSpec.outputs));
+
+ programCollection.glslSources.add("vert") << glu::VertexSource(generateVertexShader(shaderSpec, "a_", "vtx_out_"));
/* \todo [2015-09-11 hegedusd] set useIntOutputs parameter if needed. */
- programCollection.glslSources.add("frag") << glu::FragmentSource(generatePassthroughFragmentShader(m_shaderSpec, false, m_outputLayout.locationMap, "vtx_out_", "o_"));
+ programCollection.glslSources.add("frag") << glu::FragmentSource(generatePassthroughFragmentShader(shaderSpec, false, outputLayout.locationMap, "vtx_out_", "o_"));
}
// GeometryShaderExecutor
class GeometryShaderExecutor : public FragmentOutExecutor
{
public:
- GeometryShaderExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType);
+ GeometryShaderExecutor (Context& context, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout);
virtual ~GeometryShaderExecutor (void);
- virtual void log (tcu::TestLog& dst) const { /* TODO */ (void)dst; }
-
- virtual void setShaderSources (SourceCollections& programCollection) const;
+ static void generateSources (const ShaderSpec& shaderSpec, SourceCollections& programCollection);
};
-GeometryShaderExecutor::GeometryShaderExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType)
- : FragmentOutExecutor (shaderSpec, shaderType)
+GeometryShaderExecutor::GeometryShaderExecutor (Context& context, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout)
+ : FragmentOutExecutor(context, glu::SHADERTYPE_GEOMETRY, shaderSpec, extraResourcesLayout)
{
+ const VkPhysicalDeviceFeatures& features = context.getDeviceFeatures();
+
+ if (!features.geometryShader)
+ TCU_THROW(NotSupportedError, "Geometry shader type not supported by device");
}
GeometryShaderExecutor::~GeometryShaderExecutor (void)
{
}
-void GeometryShaderExecutor::setShaderSources (SourceCollections& programCollection) const
+void GeometryShaderExecutor::generateSources (const ShaderSpec& shaderSpec, SourceCollections& programCollection)
{
- programCollection.glslSources.add("vert") << glu::VertexSource(generatePassthroughVertexShader(m_shaderSpec.inputs, "a_", "vtx_out_"));
+ const FragmentOutputLayout outputLayout (computeFragmentOutputLayout(shaderSpec.outputs));
- programCollection.glslSources.add("geom") << glu::GeometrySource(generateGeometryShader(m_shaderSpec, "vtx_out_", "geom_out_"));
+ programCollection.glslSources.add("vert") << glu::VertexSource(generatePassthroughVertexShader(shaderSpec.inputs, "a_", "vtx_out_"));
+
+ programCollection.glslSources.add("geom") << glu::GeometrySource(generateGeometryShader(shaderSpec, "vtx_out_", "geom_out_"));
/* \todo [2015-09-18 rsipka] set useIntOutputs parameter if needed. */
- programCollection.glslSources.add("frag") << glu::FragmentSource(generatePassthroughFragmentShader(m_shaderSpec, false, m_outputLayout.locationMap, "geom_out_", "o_"));
+ programCollection.glslSources.add("frag") << glu::FragmentSource(generatePassthroughFragmentShader(shaderSpec, false, outputLayout.locationMap, "geom_out_", "o_"));
}
class FragmentShaderExecutor : public FragmentOutExecutor
{
public:
- FragmentShaderExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType);
+ FragmentShaderExecutor (Context& context, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout);
virtual ~FragmentShaderExecutor (void);
- virtual void log (tcu::TestLog& dst) const { /* TODO */ (void)dst; }
-
- virtual void setShaderSources (SourceCollections& programCollection) const;
+ static void generateSources (const ShaderSpec& shaderSpec, SourceCollections& programCollection);
};
-FragmentShaderExecutor::FragmentShaderExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType)
- : FragmentOutExecutor (shaderSpec, shaderType)
+FragmentShaderExecutor::FragmentShaderExecutor (Context& context, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout)
+ : FragmentOutExecutor(context, glu::SHADERTYPE_FRAGMENT, shaderSpec, extraResourcesLayout)
{
}
{
}
-void FragmentShaderExecutor::setShaderSources (SourceCollections& programCollection) const
+void FragmentShaderExecutor::generateSources (const ShaderSpec& shaderSpec, SourceCollections& programCollection)
{
- programCollection.glslSources.add("vert") << glu::VertexSource(generatePassthroughVertexShader(m_shaderSpec.inputs, "a_", "vtx_out_"));
+ const FragmentOutputLayout outputLayout (computeFragmentOutputLayout(shaderSpec.outputs));
+
+ programCollection.glslSources.add("vert") << glu::VertexSource(generatePassthroughVertexShader(shaderSpec.inputs, "a_", "vtx_out_"));
/* \todo [2015-09-11 hegedusd] set useIntOutputs parameter if needed. */
- programCollection.glslSources.add("frag") << glu::FragmentSource(generateFragmentShader(m_shaderSpec, false, m_outputLayout.locationMap, "vtx_out_", "o_"));
+ programCollection.glslSources.add("frag") << glu::FragmentSource(generateFragmentShader(shaderSpec, false, outputLayout.locationMap, "vtx_out_", "o_"));
}
// Shared utilities for compute and tess executors
class BufferIoExecutor : public ShaderExecutor
{
public:
- BufferIoExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType);
+ BufferIoExecutor (Context& context, const ShaderSpec& shaderSpec);
virtual ~BufferIoExecutor (void);
- virtual void log (tcu::TestLog& dst) const { /* TODO */ (void)dst; }
-
protected:
enum
{
OUTPUT_BUFFER_BINDING = 1,
};
- void initBuffers (const Context& ctx, int numValues);
+ void initBuffers (int numValues);
VkBuffer getInputBuffer (void) const { return *m_inputBuffer; }
VkBuffer getOutputBuffer (void) const { return *m_outputBuffer; }
deUint32 getInputStride (void) const { return getLayoutStride(m_inputLayout); }
deUint32 getOutputStride (void) const { return getLayoutStride(m_outputLayout); }
- void uploadInputBuffer (const Context& ctx, const void* const* inputPtrs, int numValues);
- void readOutputBuffer (const Context& ctx, void* const* outputPtrs, int numValues);
+ void uploadInputBuffer (const void* const* inputPtrs, int numValues);
+ void readOutputBuffer (void* const* outputPtrs, int numValues);
static void declareBufferBlocks (std::ostream& src, const ShaderSpec& spec);
static void generateExecBufferIo(std::ostream& src, const ShaderSpec& spec, const char* invocationNdxName);
vector<VarLayout> m_outputLayout;
};
-BufferIoExecutor::BufferIoExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType)
- : ShaderExecutor (shaderSpec, shaderType)
+BufferIoExecutor::BufferIoExecutor (Context& context, const ShaderSpec& shaderSpec)
+ : ShaderExecutor(context, shaderSpec)
{
computeVarLayout(m_shaderSpec.inputs, &m_inputLayout);
computeVarLayout(m_shaderSpec.outputs, &m_outputLayout);
throw tcu::InternalError("Unsupported type");
}
-void BufferIoExecutor::uploadInputBuffer (const Context& ctx, const void* const* inputPtrs, int numValues)
+void BufferIoExecutor::uploadInputBuffer (const void* const* inputPtrs, int numValues)
{
- const VkDevice vkDevice = ctx.getDevice();
- const DeviceInterface& vk = ctx.getDeviceInterface();
+ const VkDevice vkDevice = m_context.getDevice();
+ const DeviceInterface& vk = m_context.getDeviceInterface();
const deUint32 inputStride = getLayoutStride(m_inputLayout);
const int inputBufferSize = inputStride * numValues;
flushMappedMemoryRange(vk, vkDevice, m_inputAlloc->getMemory(), m_inputAlloc->getOffset(), inputBufferSize);
}
-void BufferIoExecutor::readOutputBuffer (const Context& ctx, void* const* outputPtrs, int numValues)
+void BufferIoExecutor::readOutputBuffer (void* const* outputPtrs, int numValues)
{
- const VkDevice vkDevice = ctx.getDevice();
- const DeviceInterface& vk = ctx.getDeviceInterface();
+ const VkDevice vkDevice = m_context.getDevice();
+ const DeviceInterface& vk = m_context.getDeviceInterface();
const deUint32 outputStride = getLayoutStride(m_outputLayout);
const int outputBufferSize = numValues * outputStride;
}
}
-void BufferIoExecutor::initBuffers (const Context& ctx, int numValues)
+void BufferIoExecutor::initBuffers (int numValues)
{
const deUint32 inputStride = getLayoutStride(m_inputLayout);
const deUint32 outputStride = getLayoutStride(m_outputLayout);
const size_t outputBufferSize = numValues * outputStride;
// Upload data to buffer
- const VkDevice vkDevice = ctx.getDevice();
- const DeviceInterface& vk = ctx.getDeviceInterface();
- const deUint32 queueFamilyIndex = ctx.getUniversalQueueFamilyIndex();
- Allocator& memAlloc = ctx.getDefaultAllocator();
+ const VkDevice vkDevice = m_context.getDevice();
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
+ Allocator& memAlloc = m_context.getDefaultAllocator();
const VkBufferCreateInfo inputBufferParams =
{
class ComputeShaderExecutor : public BufferIoExecutor
{
public:
- ComputeShaderExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType);
+ ComputeShaderExecutor (Context& context, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout);
virtual ~ComputeShaderExecutor (void);
- virtual void setShaderSources (SourceCollections& programCollection) const;
+ static void generateSources (const ShaderSpec& shaderSpec, SourceCollections& programCollection);
- virtual void execute (const Context& ctx, int numValues, const void* const* inputs, void* const* outputs);
+ virtual void execute (int numValues, const void* const* inputs, void* const* outputs, VkDescriptorSet extraResources);
protected:
static std::string generateComputeShader (const ShaderSpec& spec);
+
+private:
+ const VkDescriptorSetLayout m_extraResourcesLayout;
};
-ComputeShaderExecutor::ComputeShaderExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType)
- : BufferIoExecutor (shaderSpec, shaderType)
+ComputeShaderExecutor::ComputeShaderExecutor(Context& context, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout)
+ : BufferIoExecutor (context, shaderSpec)
+ , m_extraResourcesLayout (extraResourcesLayout)
{
}
return src.str();
}
-void ComputeShaderExecutor::setShaderSources (SourceCollections& programCollection) const
+void ComputeShaderExecutor::generateSources (const ShaderSpec& shaderSpec, SourceCollections& programCollection)
{
- programCollection.glslSources.add("compute") << glu::ComputeSource(generateComputeShader(m_shaderSpec));
+ programCollection.glslSources.add("compute") << glu::ComputeSource(generateComputeShader(shaderSpec));
}
-void ComputeShaderExecutor::execute (const Context& ctx, int numValues, const void* const* inputs, void* const* outputs)
+void ComputeShaderExecutor::execute (int numValues, const void* const* inputs, void* const* outputs, VkDescriptorSet extraResources)
{
- checkSupported(ctx, m_shaderType);
+ const VkDevice vkDevice = m_context.getDevice();
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const VkQueue queue = m_context.getUniversalQueue();
+ const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
- const VkDevice vkDevice = ctx.getDevice();
- const DeviceInterface& vk = ctx.getDeviceInterface();
- const VkQueue queue = ctx.getUniversalQueue();
- const deUint32 queueFamilyIndex = ctx.getUniversalQueueFamilyIndex();
- Allocator& memAlloc = ctx.getDefaultAllocator();
+ DescriptorPoolBuilder descriptorPoolBuilder;
+ DescriptorSetLayoutBuilder descriptorSetLayoutBuilder;
Move<VkShaderModule> computeShaderModule;
Move<VkPipeline> computePipeline;
Move<VkDescriptorPool> descriptorPool;
Move<VkDescriptorSetLayout> descriptorSetLayout;
Move<VkDescriptorSet> descriptorSet;
+ const deUint32 numDescriptorSets = (m_extraResourcesLayout != 0) ? 2u : 1u;
Move<VkFence> fence;
- initBuffers(ctx, numValues);
+ DE_ASSERT((m_extraResourcesLayout != 0) == (extraResources != 0));
+
+ initBuffers(numValues);
// Setup input buffer & copy data
- uploadInputBuffer(ctx, inputs, numValues);
+ uploadInputBuffer(inputs, numValues);
// Create command pool
{
(const VkCommandBufferInheritanceInfo*)DE_NULL,
};
- m_descriptorSetLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT);
- m_descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
- m_descriptorSetLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT);
- m_descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
-
- addUniforms(vkDevice, vk, queue, queueFamilyIndex, memAlloc);
+ descriptorSetLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT);
+ descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+ descriptorSetLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT);
+ descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
- descriptorSetLayout = m_descriptorSetLayoutBuilder.build(vk, vkDevice);
- descriptorPool = m_descriptorPoolBuilder.build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
+ descriptorSetLayout = descriptorSetLayoutBuilder.build(vk, vkDevice);
+ descriptorPool = descriptorPoolBuilder.build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
const VkDescriptorSetAllocateInfo allocInfo =
{
// Create pipeline layout
{
- const VkPipelineLayoutCreateInfo pipelineLayoutParams =
+ const VkDescriptorSetLayout descriptorSetLayouts[] =
+ {
+ *descriptorSetLayout,
+ m_extraResourcesLayout
+ };
+ const VkPipelineLayoutCreateInfo pipelineLayoutParams =
{
VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
DE_NULL, // const void* pNext;
(VkPipelineLayoutCreateFlags)0, // VkPipelineLayoutCreateFlags flags;
- 1u, // deUint32 CdescriptorSetCount;
- &*descriptorSetLayout, // const VkDescriptorSetLayout* pSetLayouts;
+	numDescriptorSets,						// deUint32					descriptorSetCount;
+ descriptorSetLayouts, // const VkDescriptorSetLayout* pSetLayouts;
0u, // deUint32 pushConstantRangeCount;
DE_NULL // const VkPushConstantRange* pPushConstantRanges;
};
// Create shaders
{
- computeShaderModule = createShaderModule(vk, vkDevice, ctx.getBinaryCollection().get("compute"), 0);
+ computeShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get("compute"), 0);
}
// create pipeline
fence = createFence(vk, vkDevice, &fenceParams);
}
- const int maxValuesPerInvocation = ctx.getDeviceProperties().limits.maxComputeWorkGroupSize[0];
- int curOffset = 0;
- const deUint32 inputStride = getInputStride();
- const deUint32 outputStride = getOutputStride();
+ const int maxValuesPerInvocation = m_context.getDeviceProperties().limits.maxComputeWorkGroupSize[0];
+ int curOffset = 0;
+ const deUint32 inputStride = getInputStride();
+ const deUint32 outputStride = getOutputStride();
while (curOffset < numValues)
{
- Move<VkCommandBuffer> cmdBuffer;
- const int numToExec = de::min(maxValuesPerInvocation, numValues-curOffset);
+ Move<VkCommandBuffer> cmdBuffer;
+ const int numToExec = de::min(maxValuesPerInvocation, numValues-curOffset);
// Update descriptors
{
descriptorSetUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding((deUint32)INPUT_BUFFER_BINDING), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &inputDescriptorBufferInfo);
}
- uploadUniforms(descriptorSetUpdateBuilder, *descriptorSet);
-
descriptorSetUpdateBuilder.update(vk, vkDevice);
}
VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline);
- vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);
+ {
+ const VkDescriptorSet descriptorSets[] = { *descriptorSet, extraResources };
+ vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, numDescriptorSets, descriptorSets, 0u, DE_NULL);
+ }
vk.cmdDispatch(*cmdBuffer, numToExec, 1, 1);
}
// Read back data
- readOutputBuffer(ctx, outputs, numValues);
+ readOutputBuffer(outputs, numValues);
}
// Tessellation utils
class TessellationExecutor : public BufferIoExecutor
{
public:
- TessellationExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType);
- virtual ~TessellationExecutor (void);
+ TessellationExecutor (Context& context, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout);
+ virtual ~TessellationExecutor (void);
+
+ void renderTess (deUint32 numValues, deUint32 vertexCount, deUint32 patchControlPoints, VkDescriptorSet extraResources);
- void renderTess (const Context& ctx, deUint32 numValues, deUint32 vertexCount, deUint32 patchControlPoints);
+private:
+ const VkDescriptorSetLayout m_extraResourcesLayout;
};
-TessellationExecutor::TessellationExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType)
- : BufferIoExecutor (shaderSpec, shaderType)
+TessellationExecutor::TessellationExecutor (Context& context, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout)
+ : BufferIoExecutor (context, shaderSpec)
+ , m_extraResourcesLayout (extraResourcesLayout)
{
+ const VkPhysicalDeviceFeatures& features = context.getDeviceFeatures();
+
+ if (!features.tessellationShader)
+ TCU_THROW(NotSupportedError, "Tessellation shader is not supported by device");
}
TessellationExecutor::~TessellationExecutor (void)
{
}
-void TessellationExecutor::renderTess (const Context& ctx, deUint32 numValues, deUint32 vertexCount, deUint32 patchControlPoints)
+void TessellationExecutor::renderTess (deUint32 numValues, deUint32 vertexCount, deUint32 patchControlPoints, VkDescriptorSet extraResources)
{
const size_t inputBufferSize = numValues * getInputStride();
- const VkDevice vkDevice = ctx.getDevice();
- const DeviceInterface& vk = ctx.getDeviceInterface();
- const VkQueue queue = ctx.getUniversalQueue();
- const deUint32 queueFamilyIndex = ctx.getUniversalQueueFamilyIndex();
- Allocator& memAlloc = ctx.getDefaultAllocator();
+ const VkDevice vkDevice = m_context.getDevice();
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const VkQueue queue = m_context.getUniversalQueue();
+ const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
+ Allocator& memAlloc = m_context.getDefaultAllocator();
const tcu::UVec2 renderSize (DEFAULT_RENDER_WIDTH, DEFAULT_RENDER_HEIGHT);
Move<VkDescriptorPool> descriptorPool;
Move<VkDescriptorSetLayout> descriptorSetLayout;
Move<VkDescriptorSet> descriptorSet;
+ const deUint32 numDescriptorSets = (m_extraResourcesLayout != 0) ? 2u : 1u;
+
+ DE_ASSERT((m_extraResourcesLayout != 0) == (extraResources != 0));
// Create color image
{
// Create descriptors
{
- m_descriptorSetLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL);
- m_descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
- m_descriptorSetLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL);
- m_descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+ DescriptorPoolBuilder descriptorPoolBuilder;
+ DescriptorSetLayoutBuilder descriptorSetLayoutBuilder;
- addUniforms(vkDevice, vk, queue, queueFamilyIndex, memAlloc);
+ descriptorSetLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL);
+ descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
+ descriptorSetLayoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_ALL);
+ descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
- descriptorSetLayout = m_descriptorSetLayoutBuilder.build(vk, vkDevice);
- descriptorPool = m_descriptorPoolBuilder.build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
+ descriptorSetLayout = descriptorSetLayoutBuilder.build(vk, vkDevice);
+ descriptorPool = descriptorPoolBuilder.build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
const VkDescriptorSetAllocateInfo allocInfo =
{
descriptorSetUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding((deUint32)INPUT_BUFFER_BINDING), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &inputDescriptorBufferInfo);
}
- uploadUniforms(descriptorSetUpdateBuilder, *descriptorSet);
-
descriptorSetUpdateBuilder.update(vk, vkDevice);
}
}
// Create pipeline layout
{
+ const VkDescriptorSetLayout descriptorSetLayouts[] =
+ {
+ *descriptorSetLayout,
+ m_extraResourcesLayout
+ };
const VkPipelineLayoutCreateInfo pipelineLayoutParams =
{
VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
DE_NULL, // const void* pNext;
(VkPipelineLayoutCreateFlags)0, // VkPipelineLayoutCreateFlags flags;
- 1u, // deUint32 descriptorSetCount;
- &*descriptorSetLayout, // const VkDescriptorSetLayout* pSetLayouts;
+ numDescriptorSets, // deUint32 descriptorSetCount;
+ descriptorSetLayouts, // const VkDescriptorSetLayout* pSetLayouts;
0u, // deUint32 pushConstantRangeCount;
DE_NULL // const VkPushConstantRange* pPushConstantRanges;
};
// Create shader modules
{
- vertexShaderModule = createShaderModule(vk, vkDevice, ctx.getBinaryCollection().get("vert"), 0);
- tessControlShaderModule = createShaderModule(vk, vkDevice, ctx.getBinaryCollection().get("tess_control"), 0);
- tessEvalShaderModule = createShaderModule(vk, vkDevice, ctx.getBinaryCollection().get("tess_eval"), 0);
- fragmentShaderModule = createShaderModule(vk, vkDevice, ctx.getBinaryCollection().get("frag"), 0);
+ vertexShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get("vert"), 0);
+ tessControlShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get("tess_control"), 0);
+ tessEvalShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get("tess_eval"), 0);
+ fragmentShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get("frag"), 0);
}
// Create pipeline
vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline);
- vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);
+ {
+ const VkDescriptorSet descriptorSets[] = { *descriptorSet, extraResources };
+ vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, numDescriptorSets, descriptorSets, 0u, DE_NULL);
+ }
vk.cmdDraw(*cmdBuffer, vertexCount, 1, 0, 0);
class TessControlExecutor : public TessellationExecutor
{
public:
- TessControlExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType);
+ TessControlExecutor (Context& context, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout);
virtual ~TessControlExecutor (void);
- virtual void setShaderSources (SourceCollections& programCollection) const;
+ static void generateSources (const ShaderSpec& shaderSpec, SourceCollections& programCollection);
- virtual void execute (const Context& ctx, int numValues, const void* const* inputs, void* const* outputs);
+ virtual void execute (int numValues, const void* const* inputs, void* const* outputs, VkDescriptorSet extraResources);
protected:
static std::string generateTessControlShader (const ShaderSpec& shaderSpec);
};
-TessControlExecutor::TessControlExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType)
- : TessellationExecutor (shaderSpec, shaderType)
+TessControlExecutor::TessControlExecutor (Context& context, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout)
+ : TessellationExecutor(context, shaderSpec, extraResourcesLayout)
{
}
return src.str();
}
-void TessControlExecutor::setShaderSources (SourceCollections& programCollection) const
+void TessControlExecutor::generateSources (const ShaderSpec& shaderSpec, SourceCollections& programCollection)
{
programCollection.glslSources.add("vert") << glu::VertexSource(generateVertexShaderForTess());
- programCollection.glslSources.add("tess_control") << glu::TessellationControlSource(generateTessControlShader(m_shaderSpec));
+ programCollection.glslSources.add("tess_control") << glu::TessellationControlSource(generateTessControlShader(shaderSpec));
programCollection.glslSources.add("tess_eval") << glu::TessellationEvaluationSource(generateEmptyTessEvalShader());
programCollection.glslSources.add("frag") << glu::FragmentSource(generateEmptyFragmentSource());
}
-void TessControlExecutor::execute (const Context& ctx, int numValues, const void* const* inputs, void* const* outputs)
+void TessControlExecutor::execute (int numValues, const void* const* inputs, void* const* outputs, VkDescriptorSet extraResources)
{
const deUint32 patchSize = 3;
- checkSupported(ctx, m_shaderType);
-
- initBuffers(ctx, numValues);
+ initBuffers(numValues);
// Setup input buffer & copy data
- uploadInputBuffer(ctx, inputs, numValues);
+ uploadInputBuffer(inputs, numValues);
- renderTess(ctx, numValues, patchSize * numValues, patchSize);
+ renderTess(numValues, patchSize * numValues, patchSize, extraResources);
// Read back data
- readOutputBuffer(ctx, outputs, numValues);
+ readOutputBuffer(outputs, numValues);
}
// TessEvaluationExecutor
class TessEvaluationExecutor : public TessellationExecutor
{
public:
- TessEvaluationExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType);
+ TessEvaluationExecutor (Context& context, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout);
virtual ~TessEvaluationExecutor (void);
- virtual void setShaderSources (SourceCollections& programCollection) const;
+ static void generateSources (const ShaderSpec& shaderSpec, SourceCollections& programCollection);
- virtual void execute (const Context& ctx, int numValues, const void* const* inputs, void* const* outputs);
+ virtual void execute (int numValues, const void* const* inputs, void* const* outputs, VkDescriptorSet extraResources);
protected:
static std::string generateTessEvalShader (const ShaderSpec& shaderSpec);
};
-TessEvaluationExecutor::TessEvaluationExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType)
- : TessellationExecutor (shaderSpec, shaderType)
+TessEvaluationExecutor::TessEvaluationExecutor (Context& context, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout)
+ : TessellationExecutor (context, shaderSpec, extraResourcesLayout)
{
}
return src.str();
}
-void TessEvaluationExecutor::setShaderSources (SourceCollections& programCollection) const
+void TessEvaluationExecutor::generateSources (const ShaderSpec& shaderSpec, SourceCollections& programCollection)
{
programCollection.glslSources.add("vert") << glu::VertexSource(generateVertexShaderForTess());
programCollection.glslSources.add("tess_control") << glu::TessellationControlSource(generatePassthroughTessControlShader());
- programCollection.glslSources.add("tess_eval") << glu::TessellationEvaluationSource(generateTessEvalShader(m_shaderSpec));
+ programCollection.glslSources.add("tess_eval") << glu::TessellationEvaluationSource(generateTessEvalShader(shaderSpec));
programCollection.glslSources.add("frag") << glu::FragmentSource(generateEmptyFragmentSource());
}
-void TessEvaluationExecutor::execute (const Context& ctx, int numValues, const void* const* inputs, void* const* outputs)
+void TessEvaluationExecutor::execute (int numValues, const void* const* inputs, void* const* outputs, VkDescriptorSet extraResources)
{
- checkSupported(ctx, m_shaderType);
-
const int patchSize = 2;
const int alignedValues = deAlign32(numValues, patchSize);
// Initialize buffers with aligned value count to make room for padding
- initBuffers(ctx, alignedValues);
+ initBuffers(alignedValues);
// Setup input buffer & copy data
- uploadInputBuffer(ctx, inputs, numValues);
+ uploadInputBuffer(inputs, numValues);
- renderTess(ctx, (deUint32)alignedValues, (deUint32)alignedValues, (deUint32)patchSize);
+ renderTess((deUint32)alignedValues, (deUint32)alignedValues, (deUint32)patchSize, extraResources);
// Read back data
- readOutputBuffer(ctx, outputs, numValues);
+ readOutputBuffer(outputs, numValues);
}
} // anonymous
// ShaderExecutor
-ShaderExecutor::ShaderExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType)
- : m_shaderSpec (shaderSpec)
- , m_shaderType (shaderType)
-{
-}
-
ShaderExecutor::~ShaderExecutor (void)
{
}
// Utilities
-ShaderExecutor* createExecutor (glu::ShaderType shaderType, const ShaderSpec& shaderSpec)
+void generateSources (glu::ShaderType shaderType, const ShaderSpec& shaderSpec, vk::SourceCollections& dst)
{
switch (shaderType)
{
- case glu::SHADERTYPE_VERTEX: return new VertexShaderExecutor (shaderSpec, shaderType);
- case glu::SHADERTYPE_TESSELLATION_CONTROL: return new TessControlExecutor (shaderSpec, shaderType);
- case glu::SHADERTYPE_TESSELLATION_EVALUATION: return new TessEvaluationExecutor (shaderSpec, shaderType);
- case glu::SHADERTYPE_GEOMETRY: return new GeometryShaderExecutor (shaderSpec, shaderType);
- case glu::SHADERTYPE_FRAGMENT: return new FragmentShaderExecutor (shaderSpec, shaderType);
- case glu::SHADERTYPE_COMPUTE: return new ComputeShaderExecutor (shaderSpec, shaderType);
+ case glu::SHADERTYPE_VERTEX: VertexShaderExecutor::generateSources (shaderSpec, dst); break;
+ case glu::SHADERTYPE_TESSELLATION_CONTROL: TessControlExecutor::generateSources (shaderSpec, dst); break;
+ case glu::SHADERTYPE_TESSELLATION_EVALUATION: TessEvaluationExecutor::generateSources (shaderSpec, dst); break;
+ case glu::SHADERTYPE_GEOMETRY: GeometryShaderExecutor::generateSources (shaderSpec, dst); break;
+ case glu::SHADERTYPE_FRAGMENT: FragmentShaderExecutor::generateSources (shaderSpec, dst); break;
+ case glu::SHADERTYPE_COMPUTE: ComputeShaderExecutor::generateSources (shaderSpec, dst); break;
default:
- throw tcu::InternalError("Unsupported shader type");
- }
-}
-
-de::MovePtr<ShaderExecutor::BufferUniform> ShaderExecutor::createBufferUniform (const VkDevice& vkDevice,
- const DeviceInterface& vk,
- const VkQueue /*queue*/,
- const deUint32 queueFamilyIndex,
- Allocator& memAlloc,
- deUint32 bindingLocation,
- VkDescriptorType descriptorType,
- deUint32 size,
- const void* dataPtr)
-{
- DE_ASSERT(descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
-
- VkImageUsageFlags usage = descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
-
- const VkBufferCreateInfo uniformBufferParams =
- {
- VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- 0u, // VkBufferCreateFlags flags;
- size, // VkDeviceSize size;
- usage, // VkBufferUsageFlags usage;
- VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
- 1u, // deUint32 queueFamilyIndexCount;
- &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
- };
-
- Move<VkBuffer> buffer = createBuffer(vk, vkDevice, &uniformBufferParams);
- de::MovePtr<Allocation> alloc = memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
- VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, alloc->getMemory(), alloc->getOffset()));
-
- deMemcpy(alloc->getHostPtr(), dataPtr, size);
- flushMappedMemoryRange(vk, vkDevice, alloc->getMemory(), alloc->getOffset(), size);
-
- de::MovePtr<BufferUniform> uniformInfo(new BufferUniform());
- uniformInfo->type = descriptorType;
- uniformInfo->descriptor = makeDescriptorBufferInfo(*buffer, 0u, size);
- uniformInfo->location = bindingLocation;
- uniformInfo->buffer = VkBufferSp(new Unique<VkBuffer>(buffer));
- uniformInfo->alloc = AllocationSp(alloc.release());
-
- return uniformInfo;
-}
-
-void ShaderExecutor::setupUniformData (const VkDevice& vkDevice,
- const DeviceInterface& vk,
- const VkQueue queue,
- const deUint32 queueFamilyIndex,
- Allocator& memAlloc,
- deUint32 bindingLocation,
- VkDescriptorType descriptorType,
- deUint32 size,
- const void* dataPtr)
-{
- de::MovePtr<BufferUniform> uniform = createBufferUniform(vkDevice, vk, queue, queueFamilyIndex, memAlloc, bindingLocation, descriptorType, size, dataPtr);
-
- m_descriptorSetLayoutBuilder.addSingleBinding(descriptorType, VK_SHADER_STAGE_ALL);
- m_descriptorPoolBuilder.addType(descriptorType);
-
- m_uniformInfos.push_back(UniformInfoSp(new de::UniquePtr<UniformInfo>(uniform)));
-}
-
-void ShaderExecutor::setupUniformArray (const VkDevice& vkDevice,
- const DeviceInterface& vk,
- const VkQueue queue,
- const deUint32 queueFamilyIndex,
- Allocator& memAlloc,
- deUint32 bindingLocation,
- VkDescriptorType descriptorType,
- deUint32 arraySize,
- deUint32 size,
- const void* dataPtr)
-{
- DE_ASSERT(arraySize > 0);
-
- de::MovePtr<BufferArrayUniform> bufferArray (new BufferArrayUniform());
-
- bufferArray->type = descriptorType;
- bufferArray->location = bindingLocation;
-
- for (deUint32 ndx = 0; ndx < arraySize; ++ndx)
- {
- const void* bufferData = ((deUint8*)dataPtr) + (ndx * size);
- de::MovePtr<BufferUniform> uniform = createBufferUniform(vkDevice, vk, queue, queueFamilyIndex, memAlloc, bindingLocation, descriptorType, size, bufferData);
-
- bufferArray->uniforms.push_back(BufferUniformSp(new de::UniquePtr<BufferUniform>(uniform)));
+ TCU_THROW(InternalError, "Unsupported shader type");
}
-
- m_descriptorSetLayoutBuilder.addArrayBinding(descriptorType, arraySize, VK_SHADER_STAGE_ALL);
- m_descriptorPoolBuilder.addType(descriptorType, arraySize);
-
- m_uniformInfos.push_back(UniformInfoSp(new de::UniquePtr<UniformInfo>(bufferArray)));
-}
-
-void ShaderExecutor::setupSamplerData (const VkDevice& vkDevice,
- const DeviceInterface& vk,
- const VkQueue queue,
- const deUint32 queueFamilyIndex,
- Allocator& memAlloc,
- deUint32 bindingLocation,
- deUint32 numSamplers,
- const tcu::Sampler& refSampler,
- const tcu::TextureFormat& texFormat,
- const tcu::IVec3& texSize,
- VkImageType imageType,
- VkImageViewType imageViewType,
- const void* data)
-{
- DE_ASSERT(numSamplers > 0);
-
- de::MovePtr<SamplerArrayUniform> samplers (new SamplerArrayUniform());
-
- samplers->type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
- samplers->location = bindingLocation;
-
- for (deUint32 ndx = 0; ndx < numSamplers; ++ndx)
- {
- const int offset = ndx * texSize.x() * texSize.y() * texSize.z() * texFormat.getPixelSize();
- const void* samplerData = ((deUint8*)data) + offset;
- de::MovePtr<SamplerUniform> uniform = createSamplerUniform(vkDevice, vk, queue, queueFamilyIndex, memAlloc, bindingLocation, refSampler, texFormat, texSize, imageType, imageViewType, samplerData);
-
- samplers->uniforms.push_back(SamplerUniformSp(new de::UniquePtr<SamplerUniform>(uniform)));
- }
-
- m_descriptorSetLayoutBuilder.addArraySamplerBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, numSamplers, VK_SHADER_STAGE_ALL, DE_NULL);
- m_descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, numSamplers);
-
- m_uniformInfos.push_back(UniformInfoSp(new de::UniquePtr<UniformInfo>(samplers)));
-}
-
-void ShaderExecutor::addSamplerUniform (deUint32 bindingLocation,
- VkImageView imageView,
- VkSampler sampler)
-{
- de::MovePtr<UnmanagedSamplerUniform> samplerUniform(new UnmanagedSamplerUniform());
-
- const VkDescriptorImageInfo descriptor =
- {
- sampler,
- imageView,
- VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
- };
-
- samplerUniform->type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
- samplerUniform->location = bindingLocation;
- samplerUniform->descriptor = descriptor;
-
- m_uniformInfos.push_back(UniformInfoSp(new de::UniquePtr<UniformInfo>(samplerUniform)));
-
- m_descriptorSetLayoutBuilder.addSingleSamplerBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_SHADER_STAGE_ALL, DE_NULL);
- m_descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1);
-}
-
-const void* ShaderExecutor::getBufferPtr (const deUint32 bindingLocation) const
-{
- std::vector<UniformInfoSp>::const_iterator it = m_uniformInfos.begin();
- for (; it != m_uniformInfos.end(); it++)
- {
- const UniformInfo* uniformInfo = it->get()->get();
- if (uniformInfo->getType() == UniformInfo::UNIFORM_TYPE_BUFFER && uniformInfo->location == bindingLocation)
- {
- const BufferUniform* bufferUniform = static_cast<const BufferUniform*>(uniformInfo);
- return bufferUniform->alloc->getHostPtr();
- }
- }
-
- return DE_NULL;
}
-void ShaderExecutor::addUniforms (const VkDevice& vkDevice, const DeviceInterface& vk, const VkQueue queue, const deUint32 queueFamilyIndex, Allocator& memAlloc)
+ShaderExecutor* createExecutor (Context& context, glu::ShaderType shaderType, const ShaderSpec& shaderSpec, VkDescriptorSetLayout extraResourcesLayout)
{
- if (!m_uniformSetup)
- return;
-
- for (std::vector<UniformDataSp>::const_iterator it = m_uniformSetup->uniforms().begin(); it != m_uniformSetup->uniforms().end(); ++it)
- {
- const UniformDataBase* uniformData = it->get()->get();
- uniformData->setup(*this, vkDevice, vk, queue, queueFamilyIndex, memAlloc);
- }
-}
-
-void ShaderExecutor::uploadUniforms (DescriptorSetUpdateBuilder& descriptorSetUpdateBuilder, VkDescriptorSet descriptorSet)
-{
- for (std::vector<UniformInfoSp>::const_iterator it = m_uniformInfos.begin(); it != m_uniformInfos.end(); ++it)
- {
- const UniformInfo* uniformInfo = it->get()->get();
- UniformInfo::UniformType uniformType = uniformInfo->getType();
-
- if (uniformType == UniformInfo::UNIFORM_TYPE_BUFFER_ARRAY)
- {
- const BufferArrayUniform* arrayInfo = static_cast<const BufferArrayUniform*>(uniformInfo);
- std::vector<VkDescriptorBufferInfo> descriptors;
-
- for (std::vector<BufferUniformSp>::const_iterator ait = arrayInfo->uniforms.begin(); ait != arrayInfo->uniforms.end(); ++ait)
- {
- descriptors.push_back(ait->get()->get()->descriptor);
- }
-
- descriptorSetUpdateBuilder.writeArray(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(uniformInfo->location), uniformInfo->type, (deUint32)descriptors.size(), &descriptors[0]);
- }
- else if (uniformType == UniformInfo::UNIFORM_TYPE_SAMPLER_ARRAY)
- {
- const SamplerArrayUniform* arrayInfo = static_cast<const SamplerArrayUniform*>(uniformInfo);
- std::vector<VkDescriptorImageInfo> descriptors;
-
- for (std::vector<SamplerUniformSp>::const_iterator ait = arrayInfo->uniforms.begin(); ait != arrayInfo->uniforms.end(); ++ait)
- {
- descriptors.push_back(ait->get()->get()->descriptor);
- }
-
- descriptorSetUpdateBuilder.writeArray(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(uniformInfo->location), uniformInfo->type, (deUint32)descriptors.size(), &descriptors[0]);
- }
- else if (uniformType == UniformInfo::UNIFORM_TYPE_BUFFER)
- {
- const BufferUniform* bufferUniform = static_cast<const BufferUniform*>(uniformInfo);
- descriptorSetUpdateBuilder.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(bufferUniform->location), bufferUniform->type, &bufferUniform->descriptor);
- }
- else if (uniformType == UniformInfo::UNIFORM_TYPE_SAMPLER)
- {
- const SamplerUniform* samplerUniform = static_cast<const SamplerUniform*>(uniformInfo);
- descriptorSetUpdateBuilder.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(samplerUniform->location), samplerUniform->type, &samplerUniform->descriptor);
- }
- else if (uniformType == UniformInfo::UNIFORM_TYPE_UNMANAGED_SAMPLER)
- {
- const UnmanagedSamplerUniform* samplerUniform = static_cast<const UnmanagedSamplerUniform*>(uniformInfo);
- descriptorSetUpdateBuilder.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(samplerUniform->location), samplerUniform->type, &samplerUniform->descriptor);
- }
- }
-}
-
-void ShaderExecutor::uploadImage (const VkDevice& vkDevice,
- const DeviceInterface& vk,
- const VkQueue queue,
- const deUint32 queueFamilyIndex,
- Allocator& memAlloc,
- const tcu::TextureFormat& texFormat,
- const tcu::IVec3& texSize,
- const void* data,
- const deUint32 arraySize,
- const VkImageAspectFlags aspectMask,
- VkImage destImage)
-{
- const deUint32 unalignedTextureSize = texSize.x() * texSize.y() * texSize.z() * texFormat.getPixelSize();
- const deUint32 alignedTextureSize = deAlign32(unalignedTextureSize, 4u);
- deUint32 bufferSize;
- Move<VkBuffer> buffer;
- de::MovePtr<Allocation> bufferAlloc;
- Move<VkCommandPool> cmdPool;
- Move<VkCommandBuffer> cmdBuffer;
- Move<VkFence> fence;
- std::vector<deUint32> levelDataSizes;
-
- // Calculate buffer size
- bufferSize = arraySize * alignedTextureSize;
-
- // Create source buffer
- {
- const VkBufferCreateInfo bufferParams =
- {
- VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- 0u, // VkBufferCreateFlags flags;
- bufferSize, // VkDeviceSize size;
- VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
- VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
- 0u, // deUint32 queueFamilyIndexCount;
- DE_NULL, // const deUint32* pQueueFamilyIndices;
- };
-
- buffer = createBuffer(vk, vkDevice, &bufferParams);
- bufferAlloc = memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
- VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
- }
-
- // Create command pool and buffer
- {
- const VkCommandPoolCreateInfo cmdPoolParams =
- {
- VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, // VkCommandPoolCreateFlags flags;
- queueFamilyIndex, // deUint32 queueFamilyIndex;
- };
-
- cmdPool = createCommandPool(vk, vkDevice, &cmdPoolParams);
-
- const VkCommandBufferAllocateInfo cmdBufferAllocateInfo =
- {
- VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- *cmdPool, // VkCommandPool commandPool;
- VK_COMMAND_BUFFER_LEVEL_PRIMARY, // VkCommandBufferLevel level;
- 1u, // deUint32 bufferCount;
- };
-
- cmdBuffer = allocateCommandBuffer(vk, vkDevice, &cmdBufferAllocateInfo);
- }
-
- // Create fence
- {
- const VkFenceCreateInfo fenceParams =
- {
- VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- 0u // VkFenceCreateFlags flags;
- };
-
- fence = createFence(vk, vkDevice, &fenceParams);
- }
-
- // Barriers for copying buffer to image
- const VkBufferMemoryBarrier preBufferBarrier =
- {
- VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
- VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
- VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
- VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
- *buffer, // VkBuffer buffer;
- 0u, // VkDeviceSize offset;
- bufferSize // VkDeviceSize size;
- };
-
- const VkImageMemoryBarrier preImageBarrier =
- {
- VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- 0u, // VkAccessFlags srcAccessMask;
- VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
- VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
- VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
- VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
- destImage, // VkImage image;
- { // VkImageSubresourceRange subresourceRange;
- aspectMask, // VkImageAspect aspect;
- 0u, // deUint32 baseMipLevel;
- 1u, // deUint32 mipLevels;
- 0u, // deUint32 baseArraySlice;
- arraySize // deUint32 arraySize;
- }
- };
-
- const VkImageMemoryBarrier postImageBarrier =
- {
- VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
- VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
- VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
- VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout newLayout;
- VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
- VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
- destImage, // VkImage image;
- { // VkImageSubresourceRange subresourceRange;
- aspectMask, // VkImageAspect aspect;
- 0u, // deUint32 baseMipLevel;
- 1u, // deUint32 mipLevels;
- 0u, // deUint32 baseArraySlice;
- arraySize // deUint32 arraySize;
- }
- };
-
- const VkCommandBufferBeginInfo cmdBufferBeginInfo =
- {
- VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
- (const VkCommandBufferInheritanceInfo*)DE_NULL,
- };
-
- std::vector<VkBufferImageCopy> copyRegions;
-
- {
- deUint32 layerDataOffset = 0;
-
- for (deUint32 layerNdx = 0; layerNdx < arraySize; ++layerNdx)
- {
- const VkBufferImageCopy layerRegion =
- {
- layerDataOffset, // VkDeviceSize bufferOffset;
- (deUint32)texSize.x(), // deUint32 bufferRowLength;
- (deUint32)texSize.y(), // deUint32 bufferImageHeight;
- { // VkImageSubresourceLayers imageSubresource;
- aspectMask,
- 0u,
- (deUint32)layerNdx,
- 1u
- },
- { 0u, 0u, 0u }, // VkOffset3D imageOffset;
- { // VkExtent3D imageExtent;
- (deUint32)texSize.x(),
- (deUint32)texSize.y(),
- (deUint32)texSize.z()
- }
- };
-
- copyRegions.push_back(layerRegion);
- layerDataOffset += alignedTextureSize;
- }
- }
-
- // Write buffer data
+ switch (shaderType)
{
- deUint8* destPtr = (deUint8*)bufferAlloc->getHostPtr();
- deUint32 levelOffset = 0;
-
- for (deUint32 layerNdx = 0; layerNdx < arraySize; ++layerNdx)
- {
- tcu::ConstPixelBufferAccess access (texFormat, texSize, data);
- tcu::PixelBufferAccess destAccess (texFormat, texSize, destPtr + levelOffset);
-
- tcu::copy(destAccess, access);
- levelOffset += alignedTextureSize;
- }
+ case glu::SHADERTYPE_VERTEX: return new VertexShaderExecutor (context, shaderSpec, extraResourcesLayout);
+ case glu::SHADERTYPE_TESSELLATION_CONTROL: return new TessControlExecutor (context, shaderSpec, extraResourcesLayout);
+ case glu::SHADERTYPE_TESSELLATION_EVALUATION: return new TessEvaluationExecutor (context, shaderSpec, extraResourcesLayout);
+ case glu::SHADERTYPE_GEOMETRY: return new GeometryShaderExecutor (context, shaderSpec, extraResourcesLayout);
+ case glu::SHADERTYPE_FRAGMENT: return new FragmentShaderExecutor (context, shaderSpec, extraResourcesLayout);
+ case glu::SHADERTYPE_COMPUTE: return new ComputeShaderExecutor (context, shaderSpec, extraResourcesLayout);
+ default:
+ TCU_THROW(InternalError, "Unsupported shader type");
}
-
- flushMappedMemoryRange(vk, vkDevice, bufferAlloc->getMemory(), bufferAlloc->getOffset(), bufferSize);
-
- // Copy buffer to image
- VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
- vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &preBufferBarrier, 1, &preImageBarrier);
- vk.cmdCopyBufferToImage(*cmdBuffer, *buffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (deUint32)copyRegions.size(), copyRegions.data());
- vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
- VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
-
- const VkSubmitInfo submitInfo =
- {
- VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- 0u, // deUint32 waitSemaphoreCount;
- DE_NULL, // const VkSemaphore* pWaitSemaphores;
- DE_NULL,
- 1u, // deUint32 commandBufferCount;
- &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
- 0u, // deUint32 signalSemaphoreCount;
- DE_NULL // const VkSemaphore* pSignalSemaphores;
- };
-
- VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
- VK_CHECK(vk.waitForFences(vkDevice, 1, &fence.get(), true, ~(0ull) /* infinity */));
-}
-
-de::MovePtr<ShaderExecutor::SamplerUniform> ShaderExecutor::createSamplerUniform (const VkDevice& vkDevice,
- const DeviceInterface& vk,
- const VkQueue queue,
- const deUint32 queueFamilyIndex,
- Allocator& memAlloc,
- deUint32 bindingLocation,
- const tcu::Sampler& refSampler,
- const tcu::TextureFormat& texFormat,
- const tcu::IVec3& texSize,
- VkImageType imageType,
- VkImageViewType imageViewType,
- const void* data)
-{
- const VkFormat format = mapTextureFormat(texFormat);
- const bool isCube = imageViewType == VK_IMAGE_VIEW_TYPE_CUBE;
- const bool isShadowSampler = texFormat == tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
- const VkImageCreateFlags imageFlags = isCube ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : (VkImageCreateFlags)0;
- const deUint32 arraySize = isCube ? 6u : 1u;
- const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
- VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
- Move<VkImage> vkTexture;
- de::MovePtr<Allocation> allocation;
-
- if (isShadowSampler)
- imageUsage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
-
- // Create image
- const VkImageCreateInfo imageParams =
- {
- VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- imageFlags, // VkImageCreateFlags flags;
- imageType, // VkImageType imageType;
- format, // VkFormat format;
- { // VkExtent3D extent;
- (deUint32)texSize.x(),
- (deUint32)texSize.y(),
- (deUint32)texSize.z()
- },
- 1u, // deUint32 mipLevels;
- arraySize, // deUint32 arrayLayers;
- VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
- VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
- imageUsage, // VkImageUsageFlags usage;
- VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
- 1u, // deUint32 queueFamilyIndexCount;
- &queueFamilyIndex, // const deUint32* pQueueFamilyIndices;
- VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
- };
-
- vkTexture = createImage(vk, vkDevice, &imageParams);
- allocation = memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *vkTexture), MemoryRequirement::Any);
- VK_CHECK(vk.bindImageMemory(vkDevice, *vkTexture, allocation->getMemory(), allocation->getOffset()));
-
- // Upload texture data
- uploadImage(vkDevice, vk, queue, queueFamilyIndex, memAlloc, texFormat, texSize, data, arraySize, aspectMask, *vkTexture);
-
- // Create sampler
- const VkSamplerCreateInfo samplerParams = mapSampler(refSampler, texFormat);
- Move<VkSampler> sampler = createSampler(vk, vkDevice, &samplerParams);
-
- const VkImageViewCreateInfo viewParams =
- {
- VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
- NULL, // const voide* pNexŧ;
- 0u, // VkImageViewCreateFlags flags;
- *vkTexture, // VkImage image;
- imageViewType, // VkImageViewType viewType;
- format, // VkFormat format;
- {
- VK_COMPONENT_SWIZZLE_R, // VkComponentSwizzle r;
- VK_COMPONENT_SWIZZLE_G, // VkComponentSwizzle g;
- VK_COMPONENT_SWIZZLE_B, // VkComponentSwizzle b;
- VK_COMPONENT_SWIZZLE_A // VkComponentSwizzle a;
- }, // VkComponentMapping components;
- {
- aspectMask, // VkImageAspectFlags aspectMask;
- 0, // deUint32 baseMipLevel;
- 1, // deUint32 mipLevels;
- 0, // deUint32 baseArraySlice;
- arraySize // deUint32 arraySize;
- } // VkImageSubresourceRange subresourceRange;
- };
-
- Move<VkImageView> imageView = createImageView(vk, vkDevice, &viewParams);
-
- const VkDescriptorImageInfo descriptor =
- {
- sampler.get(), // VkSampler sampler;
- imageView.get(), // VkImageView imageView;
- VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL // VkImageLayout imageLayout;
- };
-
- de::MovePtr<SamplerUniform> uniform(new SamplerUniform());
- uniform->type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
- uniform->descriptor = descriptor;
- uniform->location = bindingLocation;
- uniform->image = VkImageSp(new Unique<VkImage>(vkTexture));
- uniform->imageView = VkImageViewSp(new Unique<VkImageView>(imageView));
- uniform->sampler = VkSamplerSp(new Unique<VkSampler>(sampler));
- uniform->alloc = AllocationSp(allocation.release());
-
- return uniform;
-}
-
-SamplerUniformData::SamplerUniformData (deUint32 bindingLocation,
- deUint32 numSamplers,
- const tcu::Sampler& refSampler,
- const tcu::TextureFormat& texFormat,
- const tcu::IVec3& texSize,
- VkImageType imageType,
- VkImageViewType imageViewType,
- const void* data)
- : UniformDataBase (bindingLocation)
- , m_numSamplers (numSamplers)
- , m_refSampler (refSampler)
- , m_texFormat (texFormat)
- , m_texSize (texSize)
- , m_imageType (imageType)
- , m_imageViewType (imageViewType)
- , m_data (data)
-{
-}
-
-SamplerUniformData::~SamplerUniformData (void)
-{
-}
-
-void SamplerUniformData::setup (ShaderExecutor& executor, const VkDevice& vkDevice, const DeviceInterface& vk, const VkQueue queue, const deUint32 queueFamilyIndex, Allocator& memAlloc) const
-{
- executor.setupSamplerData(vkDevice, vk, queue, queueFamilyIndex, memAlloc, m_bindingLocation, m_numSamplers, m_refSampler, m_texFormat, m_texSize, m_imageType, m_imageViewType, m_data);
}
} // shaderexecutor
* \brief Vulkan ShaderExecutor
*//*--------------------------------------------------------------------*/
-#include "deSharedPtr.hpp"
-
+#include "tcuDefs.hpp"
#include "vktTestCase.hpp"
-#include "vkMemUtil.hpp"
-#include "vkBuilderUtil.hpp"
-
#include "gluVarType.hpp"
-#include "tcuTexture.hpp"
-
#include <vector>
+#include <string>
namespace vkt
{
namespace shaderexecutor
{
-using namespace vk;
-
//! Shader input / output variable declaration.
struct Symbol
{
ShaderSpec (void) {}
};
-// UniformSetup
-
-class UniformDataBase;
-class ShaderExecutor;
-
-typedef de::SharedPtr<de::UniquePtr<UniformDataBase> > UniformDataSp;
-
-class UniformSetup
+enum
{
-public:
- UniformSetup (void) {}
- virtual ~UniformSetup (void) {}
-
- void addData (UniformDataBase* uniformData)
- {
- m_uniforms.push_back(UniformDataSp(new de::UniquePtr<UniformDataBase>(uniformData)));
- }
-
- const std::vector<UniformDataSp>& uniforms (void) const
- {
- return m_uniforms;
- }
-
-private:
- UniformSetup (const UniformSetup&); // not allowed!
- UniformSetup& operator= (const UniformSetup&); // not allowed!
-
- std::vector<UniformDataSp> m_uniforms;
+ //! Descriptor set index for additional resources
+ EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX = 1,
};
//! Base class for shader executor.
public:
virtual ~ShaderExecutor (void);
- //! Log executor details (program etc.).
- virtual void log (tcu::TestLog& log) const = 0;
-
//! Execute
- virtual void execute (const Context& ctx, int numValues, const void* const* inputs, void* const* outputs) = 0;
-
- virtual void setShaderSources (SourceCollections& programCollection) const = 0;
-
- void setUniforms (const UniformSetup* uniformSetup)
- {
- m_uniformSetup = de::MovePtr<const UniformSetup>(uniformSetup);
- };
-
- void setupUniformData (const VkDevice& vkDevice,
- const DeviceInterface& vk,
- const VkQueue queue,
- const deUint32 queueFamilyIndex,
- Allocator& memAlloc,
- deUint32 bindingLocation,
- VkDescriptorType descriptorType,
- deUint32 size,
- const void* dataPtr);
-
- void setupUniformArray (const VkDevice& vkDevice,
- const DeviceInterface& vk,
- const VkQueue queue,
- const deUint32 queueFamilyIndex,
- Allocator& memAlloc,
- deUint32 bindingLocation,
- VkDescriptorType descriptorType,
- deUint32 arraySize,
- deUint32 size,
- const void* dataPtr);
-
- void setupSamplerData (const VkDevice& vkDevice,
- const DeviceInterface& vk,
- const VkQueue queue,
- const deUint32 queueFamilyIndex,
- Allocator& memAlloc,
- deUint32 bindingLocation,
- deUint32 numSamplers,
- const tcu::Sampler& refSampler,
- const tcu::TextureFormat& texFormat,
- const tcu::IVec3& texSize,
- VkImageType imageType,
- VkImageViewType imageViewType,
- const void* data);
-
- void addSamplerUniform (deUint32 bindingLocation,
- VkImageView imageView,
- VkSampler sampler);
-
- const void* getBufferPtr (const deUint32 bindingLocation) const;
+ virtual void execute (int numValues, const void* const* inputs, void* const* outputs, vk::VkDescriptorSet extraResources = (vk::VkDescriptorSet)0) = 0;
protected:
- ShaderExecutor (const ShaderSpec& shaderSpec, glu::ShaderType shaderType);
-
- void addUniforms (const VkDevice& vkDevice, const DeviceInterface& vk, const VkQueue queue, const deUint32 queueFamilyIndex, Allocator& memAlloc);
-
- void uploadUniforms (DescriptorSetUpdateBuilder& descriptorSetUpdateBuilder, VkDescriptorSet descriptorSet);
-
- class UniformInfo;
- typedef de::SharedPtr<de::UniquePtr<UniformInfo> > UniformInfoSp;
- class BufferUniform;
- typedef de::SharedPtr<de::UniquePtr<BufferUniform> > BufferUniformSp;
- class SamplerUniform;
- typedef de::SharedPtr<de::UniquePtr<SamplerUniform> > SamplerUniformSp;
-
- typedef de::SharedPtr<Unique<VkBuffer> > VkBufferSp;
- typedef de::SharedPtr<Unique<VkImage> > VkImageSp;
- typedef de::SharedPtr<Unique<VkImageView> > VkImageViewSp;
- typedef de::SharedPtr<Unique<VkSampler> > VkSamplerSp;
- typedef de::SharedPtr<Allocation> AllocationSp;
-
- class UniformInfo
- {
- public:
- enum UniformType
- {
- UNIFORM_TYPE_BUFFER = 0,
- UNIFORM_TYPE_SAMPLER,
- UNIFORM_TYPE_UNMANAGED_SAMPLER,
- UNIFORM_TYPE_BUFFER_ARRAY,
- UNIFORM_TYPE_SAMPLER_ARRAY,
-
- UNIFORM_TYPE_LAST
- };
-
- UniformInfo (void) {}
- virtual ~UniformInfo (void) {}
- virtual UniformType getType (void) const = 0;
-
- VkDescriptorType type;
- deUint32 location;
- };
-
- class BufferUniform : public UniformInfo
- {
- public:
- BufferUniform (void) {}
- virtual ~BufferUniform (void) {}
- virtual UniformType getType (void) const { return UNIFORM_TYPE_BUFFER; }
-
- VkBufferSp buffer;
- AllocationSp alloc;
- VkDescriptorBufferInfo descriptor;
- };
-
- class SamplerUniform : public UniformInfo
- {
- public:
- SamplerUniform (void) {}
- virtual ~SamplerUniform (void) {}
- virtual UniformType getType (void) const { return UNIFORM_TYPE_SAMPLER; }
-
- VkImageSp image;
- VkImageViewSp imageView;
- VkSamplerSp sampler;
- AllocationSp alloc;
- VkDescriptorImageInfo descriptor;
- };
-
- class UnmanagedSamplerUniform : public UniformInfo
- {
- public:
- UnmanagedSamplerUniform (void) {}
- virtual ~UnmanagedSamplerUniform (void) {}
- virtual UniformType getType (void) const { return UNIFORM_TYPE_UNMANAGED_SAMPLER; }
-
- VkImageView imageView;
- VkSampler sampler;
- VkDescriptorImageInfo descriptor;
- };
-
- class BufferArrayUniform : public UniformInfo
- {
- public:
- BufferArrayUniform (void) {}
- virtual ~BufferArrayUniform (void) {}
- virtual UniformType getType (void) const { return UNIFORM_TYPE_BUFFER_ARRAY; }
-
- std::vector<BufferUniformSp> uniforms;
- };
-
- class SamplerArrayUniform : public UniformInfo
- {
- public:
- SamplerArrayUniform (void) {}
- virtual ~SamplerArrayUniform (void) {}
- virtual UniformType getType (void) const { return UNIFORM_TYPE_SAMPLER_ARRAY; }
-
- std::vector<SamplerUniformSp> uniforms;
- };
-
- void uploadImage (const VkDevice& vkDevice,
- const DeviceInterface& vk,
- const VkQueue queue,
- const deUint32 queueFamilyIndex,
- Allocator& memAlloc,
- const tcu::TextureFormat& texFormat,
- const tcu::IVec3& texSize,
- const void* data,
- const deUint32 arraySize,
- const VkImageAspectFlags aspectMask,
- VkImage destImage);
+ ShaderExecutor (Context& context, const ShaderSpec& shaderSpec)
+ : m_context (context)
+ , m_shaderSpec (shaderSpec)
+ {}
- de::MovePtr<SamplerUniform> createSamplerUniform (const VkDevice& vkDevice,
- const DeviceInterface& vk,
- const VkQueue queue,
- const deUint32 queueFamilyIndex,
- Allocator& memAlloc,
- deUint32 bindingLocation,
- const tcu::Sampler& refSampler,
- const tcu::TextureFormat& texFormat,
- const tcu::IVec3& texSize,
- VkImageType imageType,
- VkImageViewType imageViewType,
- const void* data);
-
- de::MovePtr<BufferUniform> createBufferUniform (const VkDevice& vkDevice,
- const DeviceInterface& vk,
- const VkQueue queue,
- const deUint32 queueFamilyIndex,
- Allocator& memAlloc,
- deUint32 bindingLocation,
- VkDescriptorType descriptorType,
- deUint32 size,
- const void* dataPtr);
-
- const ShaderSpec m_shaderSpec;
- const glu::ShaderType m_shaderType;
-
- std::vector<UniformInfoSp> m_uniformInfos;
- de::MovePtr<const UniformSetup> m_uniformSetup;
- DescriptorSetLayoutBuilder m_descriptorSetLayoutBuilder;
- DescriptorPoolBuilder m_descriptorPoolBuilder;
-
-};
-
-inline tcu::TestLog& operator<< (tcu::TestLog& log, const ShaderExecutor* executor) { executor->log(log); return log; }
-inline tcu::TestLog& operator<< (tcu::TestLog& log, const ShaderExecutor& executor) { executor.log(log); return log; }
-
-ShaderExecutor* createExecutor(glu::ShaderType shaderType, const ShaderSpec& shaderSpec);
-
-class UniformDataBase
-{
-public:
- UniformDataBase (deUint32 bindingLocation)
- : m_bindingLocation (bindingLocation)
- {
- }
- virtual ~UniformDataBase (void) {}
- virtual void setup (ShaderExecutor&, const VkDevice&, const DeviceInterface&, const VkQueue, const deUint32, Allocator&) const = 0;
-
-protected:
- const deUint32 m_bindingLocation;
-};
-
-template<typename T>
-class UniformData : public UniformDataBase
-{
-public:
- UniformData (deUint32 bindingLocation, VkDescriptorType descriptorType, const T data);
- virtual ~UniformData (void);
- virtual void setup (ShaderExecutor& executor, const VkDevice& vkDevice, const DeviceInterface& vk, const VkQueue queue, const deUint32 queueFamilyIndex, Allocator& memAlloc) const;
+ Context& m_context;
+ const ShaderSpec m_shaderSpec;
private:
- VkDescriptorType m_descriptorType;
- T m_data;
+ ShaderExecutor (const ShaderExecutor&);
+ ShaderExecutor& operator= (const ShaderExecutor&);
};
-template<typename T>
-UniformData<T>::UniformData (deUint32 bindingLocation, VkDescriptorType descriptorType, const T data)
- : UniformDataBase (bindingLocation)
- , m_descriptorType (descriptorType)
- , m_data (data)
-{
-}
-
-template<typename T>
-UniformData<T>::~UniformData (void)
-{
-}
-
-template<typename T>
-void UniformData<T>::setup (ShaderExecutor& executor, const VkDevice& vkDevice, const DeviceInterface& vk, const VkQueue queue, const deUint32 queueFamilyIndex, Allocator& memAlloc) const
-{
- executor.setupUniformData(vkDevice, vk, queue, queueFamilyIndex, memAlloc, m_bindingLocation, m_descriptorType, sizeof(T), &m_data);
-}
-
-template<typename T>
-class UniformArrayData : public UniformDataBase
-{
-public:
- UniformArrayData (deUint32 bindingLocation, VkDescriptorType descriptorType, const std::vector<T>& data);
- virtual ~UniformArrayData (void);
- virtual void setup (ShaderExecutor& executor, const VkDevice& vkDevice, const DeviceInterface& vk, const VkQueue queue, const deUint32 queueFamilyIndex, Allocator& memAlloc) const;
-
-private:
- VkDescriptorType m_descriptorType;
- std::vector<T> m_data;
-};
-
-template<typename T>
-UniformArrayData<T>::UniformArrayData (deUint32 bindingLocation, VkDescriptorType descriptorType, const std::vector<T>& data)
- : UniformDataBase (bindingLocation)
- , m_descriptorType (descriptorType)
- , m_data (data)
-{
-}
-
-template<typename T>
-UniformArrayData<T>::~UniformArrayData (void)
-{
-}
-
-template<typename T>
-void UniformArrayData<T>::setup (ShaderExecutor& executor, const VkDevice& vkDevice, const DeviceInterface& vk, const VkQueue queue, const deUint32 queueFamilyIndex, Allocator& memAlloc) const
-{
- DE_ASSERT(!m_data.empty());
- executor.setupUniformArray(vkDevice, vk, queue, queueFamilyIndex, memAlloc, m_bindingLocation, m_descriptorType, (deUint32)m_data.size(), sizeof(T), &m_data[0]);
-}
-
-class SamplerUniformData : public UniformDataBase
-{
-public:
- SamplerUniformData (deUint32 bindingLocation,
- deUint32 numSamplers,
- const tcu::Sampler& refSampler,
- const tcu::TextureFormat& texFormat,
- const tcu::IVec3& texSize,
- VkImageType imageType,
- VkImageViewType imageViewType,
- const void* data);
- virtual ~SamplerUniformData (void);
- virtual void setup (ShaderExecutor& executor, const VkDevice& vkDevice, const DeviceInterface& vk, const VkQueue queue, const deUint32 queueFamilyIndex, Allocator& memAlloc) const;
-
-private:
- deUint32 m_numSamplers;
- const tcu::Sampler m_refSampler;
- const tcu::TextureFormat m_texFormat;
- const tcu::IVec3 m_texSize;
- VkImageType m_imageType;
- VkImageViewType m_imageViewType;
- const void* m_data;
-};
+void generateSources (glu::ShaderType shaderType, const ShaderSpec& shaderSpec, vk::SourceCollections& dst);
+ShaderExecutor* createExecutor (Context& context, glu::ShaderType shaderType, const ShaderSpec& shaderSpec, vk::VkDescriptorSetLayout extraResourcesLayout = (vk::VkDescriptorSetLayout)0);
} // shaderexecutor
} // vkt
virtual void initPrograms (vk::SourceCollections& programCollection) const
{
- m_executor->setShaderSources(programCollection);
+ generateSources(m_shaderType, m_spec, programCollection);
}
virtual TestInstance* createInstance (Context& context) const = 0;
- virtual void init (void);
protected:
IntegerFunctionCase (const IntegerFunctionCase& other);
ShaderSpec m_spec;
- de::MovePtr<ShaderExecutor> m_executor;
-
const int m_numValues;
};
IntegerFunctionCase::IntegerFunctionCase (tcu::TestContext& testCtx, const char* name, const char* description, glu::ShaderType shaderType)
: TestCase (testCtx, name, description)
, m_shaderType (shaderType)
- , m_executor (DE_NULL)
, m_numValues (100)
{
}
{
}
-void IntegerFunctionCase::init (void)
-{
- DE_ASSERT(!m_executor);
-
- m_executor = de::MovePtr<ShaderExecutor>(createExecutor(m_shaderType, m_spec));
- m_testCtx.getLog() << *m_executor;
-}
-
// IntegerFunctionTestInstance
class IntegerFunctionTestInstance : public TestInstance
{
public:
- IntegerFunctionTestInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
+ IntegerFunctionTestInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
: TestInstance (context)
, m_shaderType (shaderType)
, m_spec (spec)
, m_numValues (numValues)
, m_name (name)
- , m_executor (executor)
+ , m_executor (createExecutor(context, m_shaderType, m_spec))
{
}
virtual tcu::TestStatus iterate (void);
std::ostringstream m_failMsg; //!< Comparison failure help message.
- ShaderExecutor& m_executor;
+ de::UniquePtr<ShaderExecutor> m_executor;
};
tcu::TestStatus IntegerFunctionTestInstance::iterate (void)
getInputValues(m_numValues, &inputPointers[0]);
// Execute shader.
- m_executor.execute(m_context, m_numValues, &inputPointers[0], &outputPointers[0]);
+ m_executor->execute(m_numValues, &inputPointers[0], &outputPointers[0]);
// Compare results.
{
class UaddCarryCaseInstance : public IntegerFunctionTestInstance
{
public:
- UaddCarryCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : IntegerFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ UaddCarryCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : IntegerFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.outputs.push_back(Symbol("sum", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("carry", glu::VarType(baseType, glu::PRECISION_LOWP)));
m_spec.source = "sum = uaddCarry(x, y, carry);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new UaddCarryCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new UaddCarryCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class UsubBorrowCaseInstance : public IntegerFunctionTestInstance
{
public:
- UsubBorrowCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : IntegerFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ UsubBorrowCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : IntegerFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.outputs.push_back(Symbol("diff", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("carry", glu::VarType(baseType, glu::PRECISION_LOWP)));
m_spec.source = "diff = usubBorrow(x, y, carry);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new UsubBorrowCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new UsubBorrowCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class UmulExtendedCaseInstance : public IntegerFunctionTestInstance
{
public:
- UmulExtendedCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : IntegerFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ UmulExtendedCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : IntegerFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.outputs.push_back(Symbol("msb", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("lsb", glu::VarType(baseType, precision)));
m_spec.source = "umulExtended(x, y, msb, lsb);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new UmulExtendedCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new UmulExtendedCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class ImulExtendedCaseInstance : public IntegerFunctionTestInstance
{
public:
- ImulExtendedCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : IntegerFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ ImulExtendedCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : IntegerFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.outputs.push_back(Symbol("msb", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("lsb", glu::VarType(baseType, precision)));
m_spec.source = "imulExtended(x, y, msb, lsb);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new ImulExtendedCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new ImulExtendedCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class BitfieldExtractCaseInstance : public IntegerFunctionTestInstance
{
public:
- BitfieldExtractCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : IntegerFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ BitfieldExtractCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : IntegerFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("bits", glu::VarType(glu::TYPE_INT, precision)));
m_spec.outputs.push_back(Symbol("extracted", glu::VarType(baseType, precision)));
m_spec.source = "extracted = bitfieldExtract(value, offset, bits);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new BitfieldExtractCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new BitfieldExtractCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class BitfieldInsertCaseInstance : public IntegerFunctionTestInstance
{
public:
- BitfieldInsertCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : IntegerFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ BitfieldInsertCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : IntegerFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("bits", glu::VarType(glu::TYPE_INT, precision)));
m_spec.outputs.push_back(Symbol("result", glu::VarType(baseType, precision)));
m_spec.source = "result = bitfieldInsert(base, insert, offset, bits);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new BitfieldInsertCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new BitfieldInsertCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class BitfieldReverseCaseInstance : public IntegerFunctionTestInstance
{
public:
- BitfieldReverseCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : IntegerFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ BitfieldReverseCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : IntegerFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("value", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("result", glu::VarType(baseType, glu::PRECISION_HIGHP)));
m_spec.source = "result = bitfieldReverse(value);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new BitfieldReverseCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new BitfieldReverseCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class BitCountCaseInstance : public IntegerFunctionTestInstance
{
public:
- BitCountCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : IntegerFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ BitCountCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : IntegerFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("value", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("count", glu::VarType(intType, glu::PRECISION_MEDIUMP)));
m_spec.source = "count = bitCount(value);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new BitCountCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new BitCountCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class FindLSBCaseInstance : public IntegerFunctionTestInstance
{
public:
- FindLSBCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : IntegerFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ FindLSBCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : IntegerFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("value", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("lsb", glu::VarType(intType, glu::PRECISION_LOWP)));
m_spec.source = "lsb = findLSB(value);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new FindLSBCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new FindLSBCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
class findMSBCaseInstance : public IntegerFunctionTestInstance
{
public:
- findMSBCaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, ShaderExecutor& executor, int numValues, const char* name)
- : IntegerFunctionTestInstance (context, shaderType, spec, executor, numValues, name)
+ findMSBCaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, int numValues, const char* name)
+ : IntegerFunctionTestInstance (context, shaderType, spec, numValues, name)
{
}
m_spec.inputs.push_back(Symbol("value", glu::VarType(baseType, precision)));
m_spec.outputs.push_back(Symbol("msb", glu::VarType(intType, glu::PRECISION_LOWP)));
m_spec.source = "msb = findMSB(value);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new findMSBCaseInstance(ctx, m_shaderType, m_spec, *m_executor, m_numValues, getName());
+ return new findMSBCaseInstance(ctx, m_shaderType, m_spec, m_numValues, getName());
}
};
virtual void initPrograms (vk::SourceCollections& programCollection) const
{
- m_executor->setShaderSources(programCollection);
+ generateSources(m_shaderType, m_spec, programCollection);
}
- virtual TestInstance* createInstance (Context& context) const = 0;
- void init (void);
protected:
const glu::ShaderType m_shaderType;
ShaderSpec m_spec;
- de::SharedPtr<ShaderExecutor> m_executor;
private:
ShaderPackingFunctionCase (const ShaderPackingFunctionCase& other);
ShaderPackingFunctionCase::ShaderPackingFunctionCase (tcu::TestContext& testCtx, const char* name, const char* description, glu::ShaderType shaderType)
: TestCase (testCtx, name, description)
, m_shaderType (shaderType)
- , m_executor (DE_NULL)
{
}
{
}
-void ShaderPackingFunctionCase::init (void)
-{
- DE_ASSERT(!m_executor);
-
- m_executor = de::SharedPtr<ShaderExecutor>(createExecutor(m_shaderType, m_spec));
- m_testCtx.getLog() << *m_executor;
-}
-
// ShaderPackingFunctionTestInstance
class ShaderPackingFunctionTestInstance : public TestInstance
{
public:
- ShaderPackingFunctionTestInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, de::SharedPtr<ShaderExecutor> executor, const char* name)
+ ShaderPackingFunctionTestInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, const char* name)
: TestInstance (context)
, m_testCtx (context.getTestContext())
, m_shaderType (shaderType)
, m_spec (spec)
, m_name (name)
- , m_executor (executor)
+ , m_executor (createExecutor(context, m_shaderType, m_spec))
{
}
virtual tcu::TestStatus iterate (void) = 0;
const glu::ShaderType m_shaderType;
ShaderSpec m_spec;
const char* m_name;
- de::SharedPtr<ShaderExecutor> m_executor;
+ de::UniquePtr<ShaderExecutor> m_executor;
};
// Test cases
class PackSnorm2x16CaseInstance: public ShaderPackingFunctionTestInstance
{
public:
- PackSnorm2x16CaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, glu::Precision precision, de::SharedPtr<ShaderExecutor> executor, const char* name)
- : ShaderPackingFunctionTestInstance (context, shaderType, spec, executor, name)
+ PackSnorm2x16CaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, glu::Precision precision, const char* name)
+ : ShaderPackingFunctionTestInstance (context, shaderType, spec, name)
, m_precision (precision)
{
}
const void* in = &inputs[0];
void* out = &outputs[0];
- m_executor->execute(m_context, (int)inputs.size(), &in, &out);
+ m_executor->execute((int)inputs.size(), &in, &out);
}
// Verify
m_spec.outputs.push_back(Symbol("out0", glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
m_spec.source = "out0 = packSnorm2x16(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new PackSnorm2x16CaseInstance(ctx, m_shaderType, m_spec, m_precision, m_executor, getName());
+ return new PackSnorm2x16CaseInstance(ctx, m_shaderType, m_spec, m_precision, getName());
}
private:
class UnpackSnorm2x16CaseInstance : public ShaderPackingFunctionTestInstance
{
public:
- UnpackSnorm2x16CaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, de::SharedPtr<ShaderExecutor> executor, const char* name)
- : ShaderPackingFunctionTestInstance (context, shaderType, spec, executor, name)
+ UnpackSnorm2x16CaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, const char* name)
+ : ShaderPackingFunctionTestInstance (context, shaderType, spec, name)
{
}
const void* in = &inputs[0];
void* out = &outputs[0];
- m_executor->execute(m_context, (int)inputs.size(), &in, &out);
+ m_executor->execute((int)inputs.size(), &in, &out);
}
// Verify
m_spec.outputs.push_back(Symbol("out0", glu::VarType(glu::TYPE_FLOAT_VEC2, glu::PRECISION_HIGHP)));
m_spec.source = "out0 = unpackSnorm2x16(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new UnpackSnorm2x16CaseInstance(ctx, m_shaderType, m_spec, m_executor, getName());
+ return new UnpackSnorm2x16CaseInstance(ctx, m_shaderType, m_spec, getName());
}
};
class PackUnorm2x16CaseInstance : public ShaderPackingFunctionTestInstance
{
public:
- PackUnorm2x16CaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, glu::Precision precision, de::SharedPtr<ShaderExecutor> executor, const char* name)
- : ShaderPackingFunctionTestInstance (context, shaderType, spec, executor, name)
+ PackUnorm2x16CaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, glu::Precision precision, const char* name)
+ : ShaderPackingFunctionTestInstance (context, shaderType, spec, name)
, m_precision (precision)
{
}
const void* in = &inputs[0];
void* out = &outputs[0];
- m_executor->execute(m_context, (int)inputs.size(), &in, &out);
+ m_executor->execute((int)inputs.size(), &in, &out);
}
// Verify
m_spec.outputs.push_back(Symbol("out0", glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
m_spec.source = "out0 = packUnorm2x16(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new PackUnorm2x16CaseInstance(ctx, m_shaderType, m_spec, m_precision, m_executor, getName());
+ return new PackUnorm2x16CaseInstance(ctx, m_shaderType, m_spec, m_precision, getName());
}
private:
class UnpackUnorm2x16CaseInstance : public ShaderPackingFunctionTestInstance
{
public:
- UnpackUnorm2x16CaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, de::SharedPtr<ShaderExecutor> executor, const char* name)
- : ShaderPackingFunctionTestInstance (context, shaderType, spec, executor, name)
+ UnpackUnorm2x16CaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, const char* name)
+ : ShaderPackingFunctionTestInstance (context, shaderType, spec, name)
{
}
const void* in = &inputs[0];
void* out = &outputs[0];
- m_executor->execute(m_context, (int)inputs.size(), &in, &out);
+ m_executor->execute((int)inputs.size(), &in, &out);
}
// Verify
m_spec.outputs.push_back(Symbol("out0", glu::VarType(glu::TYPE_FLOAT_VEC2, glu::PRECISION_HIGHP)));
m_spec.source = "out0 = unpackUnorm2x16(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new UnpackUnorm2x16CaseInstance(ctx, m_shaderType, m_spec, m_executor, getName());
+ return new UnpackUnorm2x16CaseInstance(ctx, m_shaderType, m_spec, getName());
}
};
class PackHalf2x16CaseInstance : public ShaderPackingFunctionTestInstance
{
public:
- PackHalf2x16CaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, de::SharedPtr<ShaderExecutor> executor, const char* name)
- : ShaderPackingFunctionTestInstance (context, shaderType, spec, executor, name)
+ PackHalf2x16CaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, const char* name)
+ : ShaderPackingFunctionTestInstance (context, shaderType, spec, name)
{
}
const void* in = &inputs[0];
void* out = &outputs[0];
- m_executor->execute(m_context, (int)inputs.size(), &in, &out);
+ m_executor->execute((int)inputs.size(), &in, &out);
}
// Verify
m_spec.outputs.push_back(Symbol("out0", glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
m_spec.source = "out0 = packHalf2x16(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new PackHalf2x16CaseInstance(ctx, m_shaderType, m_spec, m_executor, getName());
+ return new PackHalf2x16CaseInstance(ctx, m_shaderType, m_spec, getName());
}
};
class UnpackHalf2x16CaseInstance : public ShaderPackingFunctionTestInstance
{
public:
- UnpackHalf2x16CaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, de::SharedPtr<ShaderExecutor> executor, const char* name)
- : ShaderPackingFunctionTestInstance (context, shaderType, spec, executor, name)
+ UnpackHalf2x16CaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, const char* name)
+ : ShaderPackingFunctionTestInstance (context, shaderType, spec, name)
{
}
const void* in = &inputs[0];
void* out = &outputs[0];
- m_executor->execute(m_context, (int)inputs.size(), &in, &out);
+ m_executor->execute((int)inputs.size(), &in, &out);
}
// Verify
m_spec.outputs.push_back(Symbol("out0", glu::VarType(glu::TYPE_FLOAT_VEC2, glu::PRECISION_MEDIUMP)));
m_spec.source = "out0 = unpackHalf2x16(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new UnpackHalf2x16CaseInstance(ctx, m_shaderType, m_spec, m_executor, getName());
+ return new UnpackHalf2x16CaseInstance(ctx, m_shaderType, m_spec, getName());
}
};
class PackSnorm4x8CaseInstance : public ShaderPackingFunctionTestInstance
{
public:
- PackSnorm4x8CaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, glu::Precision precision, de::SharedPtr<ShaderExecutor> executor, const char* name)
- : ShaderPackingFunctionTestInstance (context, shaderType, spec, executor, name)
+ PackSnorm4x8CaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, glu::Precision precision, const char* name)
+ : ShaderPackingFunctionTestInstance (context, shaderType, spec, name)
, m_precision (precision)
{
}
const void* in = &inputs[0];
void* out = &outputs[0];
- m_executor->execute(m_context, (int)inputs.size(), &in, &out);
+ m_executor->execute((int)inputs.size(), &in, &out);
}
// Verify
m_spec.outputs.push_back(Symbol("out0", glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
m_spec.source = "out0 = packSnorm4x8(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new PackSnorm4x8CaseInstance(ctx, m_shaderType, m_spec, m_precision, m_executor, getName());
+ return new PackSnorm4x8CaseInstance(ctx, m_shaderType, m_spec, m_precision, getName());
}
private:
class UnpackSnorm4x8CaseInstance : public ShaderPackingFunctionTestInstance
{
public:
- UnpackSnorm4x8CaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, de::SharedPtr<ShaderExecutor> executor, const char* name)
- : ShaderPackingFunctionTestInstance (context, shaderType, spec, executor, name)
+ UnpackSnorm4x8CaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, const char* name)
+ : ShaderPackingFunctionTestInstance (context, shaderType, spec, name)
{
}
const void* in = &inputs[0];
void* out = &outputs[0];
- m_executor->execute(m_context, (int)inputs.size(), &in, &out);
+ m_executor->execute((int)inputs.size(), &in, &out);
}
// Verify
m_spec.outputs.push_back(Symbol("out0", glu::VarType(glu::TYPE_FLOAT_VEC4, glu::PRECISION_HIGHP)));
m_spec.source = "out0 = unpackSnorm4x8(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new UnpackSnorm4x8CaseInstance(ctx, m_shaderType, m_spec, m_executor, getName());
+ return new UnpackSnorm4x8CaseInstance(ctx, m_shaderType, m_spec, getName());
}
};
class PackUnorm4x8CaseInstance : public ShaderPackingFunctionTestInstance
{
public:
- PackUnorm4x8CaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, glu::Precision precision, de::SharedPtr<ShaderExecutor> executor, const char* name)
- : ShaderPackingFunctionTestInstance (context, shaderType, spec, executor, name)
+ PackUnorm4x8CaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, glu::Precision precision, const char* name)
+ : ShaderPackingFunctionTestInstance (context, shaderType, spec, name)
, m_precision (precision)
{
}
const void* in = &inputs[0];
void* out = &outputs[0];
- m_executor->execute(m_context, (int)inputs.size(), &in, &out);
+ m_executor->execute((int)inputs.size(), &in, &out);
}
// Verify
m_spec.outputs.push_back(Symbol("out0", glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
m_spec.source = "out0 = packUnorm4x8(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new PackUnorm4x8CaseInstance(ctx, m_shaderType, m_spec, m_precision, m_executor, getName());
+ return new PackUnorm4x8CaseInstance(ctx, m_shaderType, m_spec, m_precision, getName());
}
private:
class UnpackUnorm4x8CaseInstance : public ShaderPackingFunctionTestInstance
{
public:
- UnpackUnorm4x8CaseInstance (Context& context, glu::ShaderType shaderType, ShaderSpec spec, de::SharedPtr<ShaderExecutor> executor, const char* name)
- : ShaderPackingFunctionTestInstance (context, shaderType, spec, executor, name)
+ UnpackUnorm4x8CaseInstance (Context& context, glu::ShaderType shaderType, const ShaderSpec& spec, const char* name)
+ : ShaderPackingFunctionTestInstance (context, shaderType, spec, name)
{
}
const void* in = &inputs[0];
void* out = &outputs[0];
- m_executor->execute(m_context, (int)inputs.size(), &in, &out);
+ m_executor->execute((int)inputs.size(), &in, &out);
}
// Verify
m_spec.outputs.push_back(Symbol("out0", glu::VarType(glu::TYPE_FLOAT_VEC4, glu::PRECISION_HIGHP)));
m_spec.source = "out0 = unpackUnorm4x8(in0);";
- init();
}
TestInstance* createInstance (Context& ctx) const
{
- return new UnpackUnorm4x8CaseInstance(ctx, m_shaderType, m_spec, m_executor, getName());
+ return new UnpackUnorm4x8CaseInstance(ctx, m_shaderType, m_spec, getName());
}
};
}
}
+std::string getImageComponentTypeName (const tcu::TextureFormat& format)
+{
+ switch (tcu::getTextureChannelClass(format.type))
+ {
+ case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
+ return "%type_uint";
+ case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
+ return "%type_int";
+ default:
+ DE_ASSERT(0);
+ return "";
+ }
+}
+
+std::string getImageComponentVec4TypeName (const tcu::TextureFormat& format)
+{
+ switch (tcu::getTextureChannelClass(format.type))
+ {
+ case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
+ return "%type_uvec4";
+ case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
+ return "%type_ivec4";
+ default:
+ DE_ASSERT(0);
+ return "";
+ }
+}
+
std::string getOpTypeImageSparse (const ImageType imageType,
const tcu::TextureFormat& format,
const std::string& componentType,
SPARSE_SPIRV_FUNCTION_TYPE_LAST
};
-std::string getOpTypeImageComponent (const tcu::TextureFormat& format);
+std::string getOpTypeImageComponent (const tcu::TextureFormat& format);
+std::string getImageComponentTypeName (const tcu::TextureFormat& format);
+std::string getImageComponentVec4TypeName (const tcu::TextureFormat& format);
std::string getOpTypeImageSparse (const ImageType imageType,
const tcu::TextureFormat& format,
// Create fragment shader
std::ostringstream fs;
+ const std::string typeImgComp = getImageComponentTypeName(m_format);
+ const std::string typeImgCompVec4 = getImageComponentVec4TypeName(m_format);
+
fs << "OpCapability Shader\n"
<< "OpCapability SampledCubeArray\n"
<< "OpCapability ImageCubeArray\n"
<< "%type_vec2 = OpTypeVector %type_float 2\n"
<< "%type_vec3 = OpTypeVector %type_float 3\n"
<< "%type_vec4 = OpTypeVector %type_float 4\n"
+ << "%type_ivec4 = OpTypeVector %type_int 4\n"
+ << "%type_uvec4 = OpTypeVector %type_uint 4\n"
<< "%type_uniformblock = OpTypeStruct %type_uint %type_vec2\n"
- << "%type_img_comp = " << getOpTypeImageComponent(m_format) << "\n"
- << "%type_img_comp_vec4 = OpTypeVector %type_img_comp 4\n"
- << "%type_struct_int_img_comp_vec4 = OpTypeStruct %type_int %type_img_comp_vec4\n"
+ << "%type_struct_int_img_comp_vec4 = OpTypeStruct %type_int " << typeImgCompVec4 << "\n"
<< "%type_input_vec3 = OpTypePointer Input %type_vec3\n"
<< "%type_input_float = OpTypePointer Input %type_float\n"
- << "%type_output_img_comp_vec4 = OpTypePointer Output %type_img_comp_vec4\n"
+ << "%type_output_img_comp_vec4 = OpTypePointer Output " << typeImgCompVec4 << "\n"
<< "%type_output_uint = OpTypePointer Output %type_uint\n"
<< "%type_function_int = OpTypePointer Function %type_int\n"
- << "%type_function_img_comp = OpTypePointer Function %type_img_comp\n"
- << "%type_function_img_comp_vec4 = OpTypePointer Function %type_img_comp_vec4\n"
+ << "%type_function_img_comp_vec4 = OpTypePointer Function " << typeImgCompVec4 << "\n"
<< "%type_function_int_img_comp_vec4 = OpTypePointer Function %type_struct_int_img_comp_vec4\n"
<< "%type_pushconstant_uniformblock = OpTypePointer PushConstant %type_uniformblock\n"
<< "%type_pushconstant_uniformblock_member_lod = OpTypePointer PushConstant %type_uint\n"
<< "%type_pushconstant_uniformblock_member_size = OpTypePointer PushConstant %type_vec2\n"
- << "%type_image_sparse = " << getOpTypeImageSparse(m_imageType, m_format, "%type_img_comp", true) << "\n"
+ << "%type_image_sparse = " << getOpTypeImageSparse(m_imageType, m_format, typeImgComp, true) << "\n"
<< "%type_sampled_image_sparse = OpTypeSampledImage %type_image_sparse\n"
<< "%type_uniformconst_image_sparse = OpTypePointer UniformConstant %type_sampled_image_sparse\n"
<< sparseImageOpString("%local_sparse_op_result", "%type_struct_int_img_comp_vec4", "%local_image_sparse", coordString, "%local_uniformblock_member_float_lod") << "\n"
// Load texel value
- << "%local_img_comp_vec4 = OpCompositeExtract %type_img_comp_vec4 %local_sparse_op_result 1\n"
+ << "%local_img_comp_vec4 = OpCompositeExtract " << typeImgCompVec4 << " %local_sparse_op_result 1\n"
<< "OpStore %output_texel %local_img_comp_vec4\n"
std::ostringstream src;
+ const std::string typeImgComp = getImageComponentTypeName(m_format);
+ const std::string typeImgCompVec4 = getImageComponentVec4TypeName(m_format);
+
// Bias the coord value by half a texel, so we sample from center of 2x2 gather rectangle
src << "%local_image_width = OpCompositeExtract %type_float %local_uniformblock_member_size 0\n";
src << "%local_gather_residency_code = OpCompositeExtract %type_int %local_sparse_gather_result_x 0\n";
- src << "%local_gather_texels_x = OpCompositeExtract %type_img_comp_vec4 %local_sparse_gather_result_x 1\n";
- src << "%local_gather_texels_y = OpCompositeExtract %type_img_comp_vec4 %local_sparse_gather_result_y 1\n";
- src << "%local_gather_texels_z = OpCompositeExtract %type_img_comp_vec4 %local_sparse_gather_result_z 1\n";
- src << "%local_gather_texels_w = OpCompositeExtract %type_img_comp_vec4 %local_sparse_gather_result_w 1\n";
+ src << "%local_gather_texels_x = OpCompositeExtract " << typeImgCompVec4 << " %local_sparse_gather_result_x 1\n";
+ src << "%local_gather_texels_y = OpCompositeExtract " << typeImgCompVec4 << " %local_sparse_gather_result_y 1\n";
+ src << "%local_gather_texels_z = OpCompositeExtract " << typeImgCompVec4 << " %local_sparse_gather_result_z 1\n";
+ src << "%local_gather_texels_w = OpCompositeExtract " << typeImgCompVec4 << " %local_sparse_gather_result_w 1\n";
- src << "%local_gather_primary_texel_x = OpCompositeExtract %type_img_comp %local_gather_texels_x 3\n";
- src << "%local_gather_primary_texel_y = OpCompositeExtract %type_img_comp %local_gather_texels_y 3\n";
- src << "%local_gather_primary_texel_z = OpCompositeExtract %type_img_comp %local_gather_texels_z 3\n";
- src << "%local_gather_primary_texel_w = OpCompositeExtract %type_img_comp %local_gather_texels_w 3\n";
+ src << "%local_gather_primary_texel_x = OpCompositeExtract " << typeImgComp << " %local_gather_texels_x 3\n";
+ src << "%local_gather_primary_texel_y = OpCompositeExtract " << typeImgComp << " %local_gather_texels_y 3\n";
+ src << "%local_gather_primary_texel_z = OpCompositeExtract " << typeImgComp << " %local_gather_texels_z 3\n";
+ src << "%local_gather_primary_texel_w = OpCompositeExtract " << typeImgComp << " %local_gather_texels_w 3\n";
- src << "%local_gather_primary_texel = OpCompositeConstruct %type_img_comp_vec4 %local_gather_primary_texel_x %local_gather_primary_texel_y %local_gather_primary_texel_z %local_gather_primary_texel_w\n";
+ src << "%local_gather_primary_texel = OpCompositeConstruct " << typeImgCompVec4 << " %local_gather_primary_texel_x %local_gather_primary_texel_y %local_gather_primary_texel_z %local_gather_primary_texel_w\n";
src << resultVariable << " = OpCompositeConstruct " << resultType << " %local_gather_residency_code %local_gather_primary_texel\n";
return src.str();
// Create compute program
std::ostringstream src;
+ const std::string typeImgComp = getImageComponentTypeName(m_format);
+ const std::string typeImgCompVec4 = getImageComponentVec4TypeName(m_format);
+ const std::string typeImageSparse = getSparseImageTypeName();
+ const std::string typeUniformConstImageSparse = getUniformConstSparseImageTypeName();
+
src << "OpCapability Shader\n"
<< "OpCapability ImageCubeArray\n"
<< "OpCapability SparseResidency\n"
<< "%type_uint = OpTypeInt 32 0\n"
<< "%type_ivec2 = OpTypeVector %type_int 2\n"
<< "%type_ivec3 = OpTypeVector %type_int 3\n"
+ << "%type_ivec4 = OpTypeVector %type_int 4\n"
<< "%type_uvec3 = OpTypeVector %type_uint 3\n"
<< "%type_uvec4 = OpTypeVector %type_uint 4\n"
- << "%type_img_comp = " << getOpTypeImageComponent(m_format) << "\n"
- << "%type_img_comp_vec4 = OpTypeVector %type_img_comp 4\n"
- << "%type_struct_int_img_comp_vec4 = OpTypeStruct %type_int %type_img_comp_vec4\n"
+ << "%type_struct_int_img_comp_vec4 = OpTypeStruct %type_int " << typeImgCompVec4 << "\n"
<< "%type_input_uint = OpTypePointer Input %type_uint\n"
<< "%type_input_uvec3 = OpTypePointer Input %type_uvec3\n"
<< "%type_function_int = OpTypePointer Function %type_int\n"
- << "%type_function_img_comp_vec4 = OpTypePointer Function %type_img_comp_vec4\n"
+ << "%type_function_img_comp_vec4 = OpTypePointer Function " << typeImgCompVec4 << "\n"
<< "%type_void = OpTypeVoid\n"
<< "%type_void_func = OpTypeFunction %type_void\n"
- // Sparse image type declaration
- << sparseImageTypeDecl("%type_image_sparse", "%type_img_comp")
- << "%type_uniformconst_image_sparse = OpTypePointer UniformConstant %type_image_sparse\n"
+ // Sparse image without sampler type declaration
+ << "%type_image_sparse = " << getOpTypeImageSparse(m_imageType, m_format, typeImgComp, false) << "\n"
+ << "%type_uniformconst_image_sparse = OpTypePointer UniformConstant %type_image_sparse\n"
- // Texels image type declaration
- << "%type_image_texels = " << getOpTypeImageSparse(m_imageType, m_format, "%type_img_comp", false) << "\n"
- << "%type_uniformconst_image_texels = OpTypePointer UniformConstant %type_image_texels\n"
+ // Sparse image with sampler type declaration
+ << "%type_image_sparse_with_sampler = " << getOpTypeImageSparse(m_imageType, m_format, typeImgComp, true) << "\n"
+ << "%type_uniformconst_image_sparse_with_sampler = OpTypePointer UniformConstant %type_image_sparse_with_sampler\n"
// Residency image type declaration
<< "%type_image_residency = " << getOpTypeImageResidency(m_imageType) << "\n"
<< "%type_uniformconst_image_residency = OpTypePointer UniformConstant %type_image_residency\n"
// Declare sparse image variable
- << "%uniform_image_sparse = OpVariable %type_uniformconst_image_sparse UniformConstant\n"
+ << "%uniform_image_sparse = OpVariable " << typeUniformConstImageSparse << " UniformConstant\n"
// Declare output image variable for storing texels
- << "%uniform_image_texels = OpVariable %type_uniformconst_image_texels UniformConstant\n"
+ << "%uniform_image_texels = OpVariable %type_uniformconst_image_sparse UniformConstant\n"
// Declare output image variable for storing residency information
<< "%uniform_image_residency = OpVariable %type_uniformconst_image_residency UniformConstant\n"
<< "%label_in_range_z = OpLabel\n"
// Load sparse image
- << "%local_image_sparse = OpLoad %type_image_sparse %uniform_image_sparse\n"
+ << "%local_image_sparse = OpLoad " << typeImageSparse << " %uniform_image_sparse\n"
// Call OpImageSparse*
<< sparseImageOpString("%local_sparse_op_result", "%type_struct_int_img_comp_vec4", "%local_image_sparse", coordString, "%constant_int_0") << "\n"
// Load the texel from the sparse image to local variable for OpImageSparse*
- << "%local_img_comp_vec4 = OpCompositeExtract %type_img_comp_vec4 %local_sparse_op_result 1\n"
+ << "%local_img_comp_vec4 = OpCompositeExtract " << typeImgCompVec4 << " %local_sparse_op_result 1\n"
// Load residency code for OpImageSparse*
<< "%local_residency_code = OpCompositeExtract %type_int %local_sparse_op_result 0\n"
// End Call OpImageSparse*
// Load texels image
- << "%local_image_texels = OpLoad %type_image_texels %uniform_image_texels\n"
+ << "%local_image_texels = OpLoad %type_image_sparse %uniform_image_texels\n"
// Write the texel to output image via OpImageWrite
<< "OpImageWrite %local_image_texels " << coordString << " %local_img_comp_vec4\n"
programCollection.spirvAsmSources.add("compute") << src.str();
}
-std::string SparseCaseOpImageSparseFetch::sparseImageTypeDecl (const std::string& imageType, const std::string& componentType) const
+std::string SparseCaseOpImageSparseFetch::getSparseImageTypeName (void) const
{
- std::ostringstream src;
-
- src << imageType << " = " << getOpTypeImageSparse(m_imageType, m_format, componentType, true) << "\n";
+ return "%type_image_sparse_with_sampler";
+}
- return src.str();
+std::string SparseCaseOpImageSparseFetch::getUniformConstSparseImageTypeName (void) const
+{
+ return "%type_uniformconst_image_sparse_with_sampler";
}
std::string SparseCaseOpImageSparseFetch::sparseImageOpString (const std::string& resultVariable,
return src.str();
}
-std::string SparseCaseOpImageSparseRead::sparseImageTypeDecl (const std::string& imageType, const std::string& componentType) const
+std::string SparseCaseOpImageSparseRead::getSparseImageTypeName (void) const
{
- std::ostringstream src;
-
- src << imageType << " = " << getOpTypeImageSparse(m_imageType, m_format, componentType, false) << "\n";
+ return "%type_image_sparse";
+}
- return src.str();
+std::string SparseCaseOpImageSparseRead::getUniformConstSparseImageTypeName (void) const
+{
+ return "%type_uniformconst_image_sparse";
}
std::string SparseCaseOpImageSparseRead::sparseImageOpString (const std::string& resultVariable,
void initPrograms (vk::SourceCollections& programCollection) const;
- virtual std::string sparseImageTypeDecl (const std::string& imageType,
- const std::string& componentType) const = 0;
+ virtual std::string getSparseImageTypeName (void) const = 0;
+ virtual std::string getUniformConstSparseImageTypeName (void) const = 0;
virtual std::string sparseImageOpString (const std::string& resultVariable,
const std::string& resultType,
TestInstance* createInstance (Context& context) const;
- std::string sparseImageTypeDecl (const std::string& imageType,
- const std::string& componentType) const;
+ std::string getSparseImageTypeName (void) const;
+ std::string getUniformConstSparseImageTypeName (void) const;
std::string sparseImageOpString (const std::string& resultVariable,
const std::string& resultType,
TestInstance* createInstance (Context& context) const;
- std::string sparseImageTypeDecl (const std::string& imageType,
- const std::string& componentType) const;
+ std::string getSparseImageTypeName (void) const;
+ std::string getUniformConstSparseImageTypeName (void) const;
std::string sparseImageOpString (const std::string& resultVariable,
const std::string& resultType,
#include "deUniquePtr.hpp"
#include "tcuStringTemplate.hpp"
-#include <cmath>
#include "vktSpvAsmComputeShaderCase.hpp"
#include "vktSpvAsmComputeShaderTestUtil.hpp"
#include "vktTestCaseUtil.hpp"
}
vk.cmdDraw(cmdBuffer, static_cast<deUint32>(dataSizeBytes / sizeof(tcu::UVec4)), 1u, 0u, 0u);
+
+ const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_outputBuffer, 0u, m_resource.getBuffer().size);
+ vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
+
endRenderPass(vk, cmdBuffer);
}
0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentPreCopyBarrier);
}
{
- const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 0), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
+ const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 1), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
vk.cmdCopyImageToBuffer(*cmdBuffer, *colorAttachmentImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, 1u, ©Region);
}
{
{
std::ostringstream src;
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_310_ES) << "\n"
- << "layout(location = 0) flat in mediump vec4 v_color;\n"
+ << "layout(location = 0) flat in highp vec4 v_color;\n"
<< "layout(location = 0) out mediump vec4 fragColor;\n"
<< "\n"
<< "void main (void)\n"
}
{
const VkImageSubresourceLayers subresourceLayers = makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, m_params.numLayers);
- const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 0), subresourceLayers);
+ const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 1), subresourceLayers);
vk.cmdCopyImageToBuffer(*cmdBuffer, *colorAttachmentImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, 1u, ©Region);
}
{
std::ostringstream src;
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_310_ES) << "\n"
<< "\n"
- << "layout(location = 0) in mediump vec4 v_fragment_color;\n"
+ << "layout(location = 0) in highp vec4 v_fragment_color;\n"
<< "layout(location = 0) out mediump vec4 fragColor;\n"
<< "void main (void)\n"
<< "{\n"
0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentPreCopyBarrier);
}
{
- const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 0), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
+ const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 1), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
vk.cmdCopyImageToBuffer(*cmdBuffer, *colorAttachmentImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, colorBuffer[pipelineNdx]->get(), 1u, ©Region);
}
{
0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentPreCopyBarrier);
}
{
- const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 0), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
+ const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 1), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
vk.cmdCopyImageToBuffer(*cmdBuffer, *colorAttachmentImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, 1u, ©Region);
}
{
0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentPreCopyBarrier);
}
{
- const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 0), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
+ const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 1), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
vk.cmdCopyImageToBuffer(*cmdBuffer, *colorAttachmentImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, 1u, ©Region);
}
{
0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentPreCopyBarrier);
}
{
- const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 0), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
+ const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 1), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
vk.cmdCopyImageToBuffer(*cmdBuffer, *colorAttachmentImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, 1u, ©Region);
}
{
0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentPreCopyBarrier);
}
{
- const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 0), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
+ const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 1), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
vk.cmdCopyImageToBuffer(*cmdBuffer, *colorAttachmentImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, 1u, ©Region);
}
{
0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentPreCopyBarrier);
}
{
- const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 0), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
+ const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 1), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
vk.cmdCopyImageToBuffer(*cmdBuffer, *colorAttachmentImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, 1u, ©Region);
}
{
0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentPreCopyBarrier);
}
{
- const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 0), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
+ const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(renderSize.x(), renderSize.y(), 1), makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
vk.cmdCopyImageToBuffer(*cmdBuffer, *colorAttachmentImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *colorBuffer, 1u, ©Region);
}
{
const SampleLookupSettings& sampleLookupSettings,
int coordBits,
int mipmapBits,
- const std::vector<tcu::ConstPixelBufferAccess>& pba)
-// \todo [2016-07-29 collinbaker] Get rid of magic numbers
- : m_internalFormat (-14, 15, 10, true) // fp16 format
- , m_imParams (imParams)
+ const tcu::FloatFormat& conversionPrecision,
+ const tcu::FloatFormat& filteringPrecision,
+ const std::vector<tcu::ConstPixelBufferAccess>& levels)
+ : m_imParams (imParams)
, m_samplerParams (samplerParams)
, m_sampleLookupSettings (sampleLookupSettings)
, m_coordBits (coordBits)
, m_mipmapBits (mipmapBits)
+ , m_conversionPrecision (conversionPrecision)
+ , m_filteringPrecision (filteringPrecision)
, m_unnormalizedDim (calcUnnormalizedDim(imParams.dim))
- , m_pba (pba)
+ , m_levels (levels)
{
}
{
DE_ASSERT(compNdx >= 0 && compNdx < 3);
- return coord[compNdx] < 0 || coord[compNdx] >= m_pba[level].getSize()[compNdx];
+ return coord[compNdx] < 0 || coord[compNdx] >= m_levels[level].getSize()[compNdx];
}
void SampleVerifier::fetchTexelWrapped (const IVec3& coord,
if (m_imParams.dim == IMG_DIM_1D)
{
- pixelPtr = m_pba[level].getPixelPtr(coord[0], layer, 0);
+ pixelPtr = m_levels[level].getPixelPtr(coord[0], layer, 0);
}
else if (m_imParams.dim == IMG_DIM_2D || m_imParams.dim == IMG_DIM_CUBE)
{
- pixelPtr = m_pba[level].getPixelPtr(coord[0], coord[1], layer);
+ pixelPtr = m_levels[level].getPixelPtr(coord[0], coord[1], layer);
}
else
{
- pixelPtr = m_pba[level].getPixelPtr(coord[0], coord[1], coord[2]);
+ pixelPtr = m_levels[level].getPixelPtr(coord[0], coord[1], coord[2]);
}
- convertFormat(pixelPtr, mapVkFormat(m_imParams.format), m_internalFormat, resultMin, resultMax);
+ convertFormat(pixelPtr, mapVkFormat(m_imParams.format), m_conversionPrecision, resultMin, resultMax);
+
+#if defined(DE_DEBUG)
+ // Make sure tcuTexture agrees
+ const tcu::ConstPixelBufferAccess& levelAccess = m_levels[level];
+ const tcu::Vec4 refPix = (m_imParams.dim == IMG_DIM_1D) ? levelAccess.getPixel(coord[0], layer, 0)
+ : (m_imParams.dim == IMG_DIM_2D || m_imParams.dim == IMG_DIM_CUBE) ? levelAccess.getPixel(coord[0], coord[1], layer)
+ : levelAccess.getPixel(coord[0], coord[1], coord[2]);
+
+ for (int c = 0; c < 4; c++)
+ DE_ASSERT(de::inRange(refPix[c], resultMin[c], resultMax[c]));
+#endif
}
void SampleVerifier::fetchTexel (const IVec3& coordIn,
int newFace = 0;
wrapCubemapEdge(coord.swizzle(0, 1),
- m_pba[level].getSize().swizzle(0, 1),
+ m_levels[level].getSize().swizzle(0, 1),
arrayFace,
newCoord,
newFace);
IVec2 cornerCoords[3];
wrapCubemapCorner(coord.swizzle(0, 1),
- m_pba[level].getSize().swizzle(0, 1),
+ m_levels[level].getSize().swizzle(0, 1),
arrayFace,
faces[1],
faces[2],
if (isSrgb)
{
- cornerTexels[ndx] += sRGBToLinear(m_pba[level].getPixel(cornerCoords[ndx][0], cornerCoords[ndx][1], cornerLayer));
+ cornerTexels[ndx] += sRGBToLinear(m_levels[level].getPixel(cornerCoords[ndx][0], cornerCoords[ndx][1], cornerLayer));
}
else
{
- cornerTexels[ndx] += m_pba[level].getPixel(cornerCoords[ndx][0], cornerCoords[ndx][1], cornerLayer);
+ cornerTexels[ndx] += m_levels[level].getPixel(cornerCoords[ndx][0], cornerCoords[ndx][1], cornerLayer);
}
}
for (int compNdx = 0; compNdx < 3; ++compNdx)
{
- const int size = m_pba[level].getSize()[compNdx];
+ const int size = m_levels[level].getSize()[compNdx];
coord[compNdx] = wrapTexelCoord(coord[compNdx], size, wrappingModes[compNdx]);
}
for (int i = 0; i < 2; ++i)
{
- const Interval weightInterval = m_internalFormat.roundOut(Interval(i == 0 ? 1.0f - weight : weight), false);
+ const Interval weightInterval = m_filteringPrecision.roundOut(Interval(i == 0 ? 1.0f - weight : weight), false);
for (int compNdx = 0; compNdx < 4; ++compNdx)
{
const Interval texelInterval(false, texelsMin[i][compNdx], texelsMax[i][compNdx]);
- resultIntervals[compNdx] = m_internalFormat.roundOut(resultIntervals[compNdx] + weightInterval * texelInterval, false);
+ resultIntervals[compNdx] = m_filteringPrecision.roundOut(resultIntervals[compNdx] + weightInterval * texelInterval, false);
}
}
for (int i = 0; i < 2; ++i)
{
- const Interval iWeightInterval = m_internalFormat.roundOut(Interval(i == 0 ? 1.0f - weights[1] : weights[1]), false);
+ const Interval iWeightInterval = m_filteringPrecision.roundOut(Interval(i == 0 ? 1.0f - weights[1] : weights[1]), false);
for (int j = 0; j < 2; ++j)
{
- const Interval jWeightInterval = m_internalFormat.roundOut(iWeightInterval * Interval(j == 0 ? 1.0f - weights[0] : weights[0]), false);
+ const Interval jWeightInterval = m_filteringPrecision.roundOut(iWeightInterval * Interval(j == 0 ? 1.0f - weights[0] : weights[0]), false);
for (int compNdx = 0; compNdx < 4; ++compNdx)
{
const Interval texelInterval(false, texelsMin[2 * i + j][compNdx], texelsMax[2 * i + j][compNdx]);
- resultIntervals[compNdx] = m_internalFormat.roundOut(resultIntervals[compNdx] + jWeightInterval * texelInterval, false);
+ resultIntervals[compNdx] = m_filteringPrecision.roundOut(resultIntervals[compNdx] + jWeightInterval * texelInterval, false);
}
}
}
for (int i = 0; i < 2; ++i)
{
- const Interval iWeightInterval = m_internalFormat.roundOut(Interval(i == 0 ? 1.0f - weights[2] : weights[2]), false);
+ const Interval iWeightInterval = m_filteringPrecision.roundOut(Interval(i == 0 ? 1.0f - weights[2] : weights[2]), false);
for (int j = 0; j < 2; ++j)
{
- const Interval jWeightInterval = m_internalFormat.roundOut(iWeightInterval * Interval(j == 0 ? 1.0f - weights[1] : weights[1]), false);
+ const Interval jWeightInterval = m_filteringPrecision.roundOut(iWeightInterval * Interval(j == 0 ? 1.0f - weights[1] : weights[1]), false);
for (int k = 0; k < 2; ++k)
{
- const Interval kWeightInterval = m_internalFormat.roundOut(jWeightInterval * Interval(k == 0 ? 1.0f - weights[0] : weights[0]), false);
+ const Interval kWeightInterval = m_filteringPrecision.roundOut(jWeightInterval * Interval(k == 0 ? 1.0f - weights[0] : weights[0]), false);
for (int compNdx = 0; compNdx < 4; ++compNdx)
{
const Interval texelInterval(false, texelsMin[4 * i + 2 * j + k][compNdx], texelsMax[4 * i + 2 * j + k][compNdx]);
- resultIntervals[compNdx] = m_internalFormat.roundOut(resultIntervals[compNdx] + kWeightInterval * texelInterval, false);
+ resultIntervals[compNdx] = m_filteringPrecision.roundOut(resultIntervals[compNdx] + kWeightInterval * texelInterval, false);
}
}
}
const Interval idealSampleHi(false, idealSampleHiMin[compNdx], idealSampleHiMax[compNdx]);
const Interval idealSample
- = m_internalFormat.roundOut(Interval(weight) * idealSampleLo + Interval(1.0f - weight) * idealSampleHi, false);
+ = m_filteringPrecision.roundOut(Interval(weight) * idealSampleLo + Interval(1.0f - weight) * idealSampleHi, false);
idealSampleMin[compNdx] = (float)idealSample.lo();
idealSampleMax[compNdx] = (float)idealSample.hi();
const FloatFormat coordFormat(-32, 32, 16, true);
calcUnnormalizedCoordRange(coord,
- m_pba[level].getSize(),
+ m_levels[level].getSize(),
coordFormat,
unnormalizedCoordMin[0],
unnormalizedCoordMax[0]);
if (mipmapFilter == VK_SAMPLER_MIPMAP_MODE_LINEAR)
{
calcUnnormalizedCoordRange(coord,
- m_pba[level+1].getSize(),
+ m_levels[level+1].getSize(),
coordFormat,
unnormalizedCoordMin[1],
unnormalizedCoordMax[1]);
const SampleLookupSettings& sampleLookupSettings,
int coordBits,
int mipmapBits,
- const std::vector<tcu::ConstPixelBufferAccess>& pba);
+ const tcu::FloatFormat& conversionPrecision,
+ const tcu::FloatFormat& filteringPrecision,
+ const std::vector<tcu::ConstPixelBufferAccess>& levels);
bool verifySample (const SampleArguments& args,
const tcu::Vec4& result) const;
deInt32& stepMin,
deInt32& stepMax) const;
- const tcu::FloatFormat m_internalFormat;
-
const ImageViewParameters& m_imParams;
const SamplerParameters& m_samplerParams;
const SampleLookupSettings& m_sampleLookupSettings;
const int m_coordBits;
const int m_mipmapBits;
+ const tcu::FloatFormat m_conversionPrecision;
+ const tcu::FloatFormat m_filteringPrecision;
const int m_unnormalizedDim;
- const std::vector<tcu::ConstPixelBufferAccess>& m_pba;
+ const std::vector<tcu::ConstPixelBufferAccess>& m_levels;
};
} // texture
const BaseType mask = (BaseType) (((BaseType) 1 << (BaseType) numBits) - (BaseType) 1);
- return mask & (pack >> (BaseType) (8 * (int) sizeof(BaseType) - bitOffset - numBits));
+ return mask & (pack >> (BaseType) (8 * (int) sizeof(BaseType) - bitOffset - numBits));
}
deUint64 readChannel (const void* ptr,
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkStrUtil.hpp"
+#include "vkTypeUtil.hpp"
+#include "vkQueryUtil.hpp"
+#include "vkMemUtil.hpp"
#include "tcuTexLookupVerifier.hpp"
#include "tcuTestLog.hpp"
namespace
{
+tcu::FloatFormat getConversionPrecision (VkFormat format)
+{
+ const tcu::FloatFormat reallyLow (0, 0, 8, false, tcu::YES);
+ const tcu::FloatFormat fp16 (-14, 15, 10, false);
+ const tcu::FloatFormat fp32 (-126, 127, 23, true);
+
+ switch (format)
+ {
+ case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+ case VK_FORMAT_R5G6B5_UNORM_PACK16:
+ case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+ return reallyLow;
+
+ case VK_FORMAT_R8_UNORM:
+ case VK_FORMAT_R8_SNORM:
+ case VK_FORMAT_R8G8_UNORM:
+ case VK_FORMAT_R8G8_SNORM:
+ case VK_FORMAT_R8G8B8A8_UNORM:
+ case VK_FORMAT_R8G8B8A8_SNORM:
+ case VK_FORMAT_B8G8R8A8_UNORM:
+ case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+ case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+ case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+ return fp16;
+
+ case VK_FORMAT_R16_SFLOAT:
+ case VK_FORMAT_R16G16_SFLOAT:
+ case VK_FORMAT_R16G16B16A16_SFLOAT:
+ return fp16;
+
+ case VK_FORMAT_R32_SFLOAT:
+ case VK_FORMAT_R32G32_SFLOAT:
+ case VK_FORMAT_R32G32B32A32_SFLOAT:
+ return fp32;
+
+ default:
+ DE_FATAL("Precision not defined for format");
+ return fp32;
+ }
+}
+
+tcu::FloatFormat getFilteringPrecision (VkFormat format)
+{
+ const tcu::FloatFormat reallyLow (0, 0, 6, false, tcu::YES);
+ const tcu::FloatFormat low (0, 0, 7, false, tcu::YES);
+ const tcu::FloatFormat fp16 (-14, 15, 10, false);
+ const tcu::FloatFormat fp32 (-126, 127, 23, true);
+
+ switch (format)
+ {
+ case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
+ case VK_FORMAT_R5G6B5_UNORM_PACK16:
+ case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
+ return reallyLow;
+
+ case VK_FORMAT_R8_UNORM:
+ case VK_FORMAT_R8_SNORM:
+ case VK_FORMAT_R8G8_UNORM:
+ case VK_FORMAT_R8G8_SNORM:
+ case VK_FORMAT_R8G8B8A8_UNORM:
+ case VK_FORMAT_R8G8B8A8_SNORM:
+ case VK_FORMAT_B8G8R8A8_UNORM:
+ case VK_FORMAT_A8B8G8R8_UNORM_PACK32:
+ case VK_FORMAT_A8B8G8R8_SNORM_PACK32:
+ case VK_FORMAT_A2B10G10R10_UNORM_PACK32:
+ return low;
+
+ case VK_FORMAT_R16_SFLOAT:
+ case VK_FORMAT_R16G16_SFLOAT:
+ case VK_FORMAT_R16G16B16A16_SFLOAT:
+ return fp16;
+
+ case VK_FORMAT_R32_SFLOAT:
+ case VK_FORMAT_R32G32_SFLOAT:
+ case VK_FORMAT_R32G32B32A32_SFLOAT:
+ return fp32;
+
+ default:
+ DE_FATAL("Precision not defined for format");
+ return fp32;
+ }
+}
+
using namespace shaderexecutor;
string genSamplerDeclaration(const ImageViewParameters& imParams,
virtual std::vector<SampleArguments> getSampleArgs (void) const = 0;
protected:
- DataGenerator (void) {}
+ DataGenerator (void) {}
};
class TextureFilteringTestInstance : public TestInstance
{
public:
- TextureFilteringTestInstance (Context& ctx,
- const TestCaseData& testCaseData,
- ShaderExecutor& shaderExecutor,
- de::MovePtr<DataGenerator> gen)
-
- : TestInstance (ctx)
- , m_imParams (testCaseData.imParams)
- , m_samplerParams (testCaseData.samplerParams)
- , m_sampleLookupSettings (testCaseData.sampleLookupSettings)
- , m_shaderExecutor (shaderExecutor)
- , m_ctx (ctx)
- , m_vki (m_ctx.getInstanceInterface())
- , m_vkd (m_ctx.getDeviceInterface())
- , m_instance (m_ctx.getInstance())
- , m_physicalDevice (m_ctx.getPhysicalDevice())
- , m_device (m_ctx.getDevice())
- , m_uqfi (m_ctx.getUniversalQueueFamilyIndex())
- , m_pba (testCaseData.pba)
- , m_gen (gen.release())
- {
- for (deUint8 compNdx = 0; compNdx < 3; ++compNdx)
- {
- DE_ASSERT(m_imParams.size[compNdx] > 0);
- }
-
- m_imExtent.width = m_imParams.size[0];
- m_imExtent.height = m_imParams.size[1];
- m_imExtent.depth = m_imParams.size[2];
- }
+ TextureFilteringTestInstance (Context& ctx,
+ const TestCaseData& testCaseData,
+ const ShaderSpec& shaderSpec,
+ de::MovePtr<DataGenerator> gen);
- virtual TestStatus iterate (void)
- {
- return runTest();
- }
+ virtual TestStatus iterate (void) { return runTest(); }
protected:
- TestStatus runTest (void);
- bool isSupported (void);
- void createResources (void);
- void execute (void);
- bool verify (void);
+ TestStatus runTest (void);
+ bool isSupported (void);
+ void createResources (void);
+ void execute (void);
+ bool verify (void);
- tcu::Sampler mapTcuSampler (void);
+ tcu::Sampler mapTcuSampler (void) const;
- const ImageViewParameters& m_imParams;
- const SamplerParameters& m_samplerParams;
- const SampleLookupSettings& m_sampleLookupSettings;
+ const glu::ShaderType m_shaderType;
+ const ShaderSpec m_shaderSpec;
+ const ImageViewParameters m_imParams;
+ const SamplerParameters m_samplerParams;
+ const SampleLookupSettings m_sampleLookupSettings;
std::vector<SampleArguments> m_sampleArguments;
deUint32 m_numSamples;
- ShaderExecutor& m_shaderExecutor;
- Context& m_ctx;
- const InstanceInterface& m_vki;
- const DeviceInterface& m_vkd;
- VkInstance m_instance;
- VkPhysicalDevice m_physicalDevice;
- VkDevice m_device;
- deUint32 m_uqfi;
-
- VkExtent3D m_imExtent;
-
- int m_coordBits;
- int m_mipmapBits;
-
de::MovePtr<Allocation> m_imAllocation;
Move<VkImage> m_im;
Move<VkImageView> m_imView;
Move<VkSampler> m_sampler;
- std::vector<ConstPixelBufferAccess> m_pba;
+ Move<VkDescriptorSetLayout> m_extraResourcesLayout;
+ Move<VkDescriptorPool> m_extraResourcesPool;
+ Move<VkDescriptorSet> m_extraResourcesSet;
+
+ de::MovePtr<ShaderExecutor> m_executor;
+
+ std::vector<ConstPixelBufferAccess> m_levels;
de::MovePtr<DataGenerator> m_gen;
std::vector<Vec4> m_resultSamples;
std::vector<Vec4> m_resultCoords;
};
+TextureFilteringTestInstance::TextureFilteringTestInstance (Context& ctx,
+ const TestCaseData& testCaseData,
+ const ShaderSpec& shaderSpec,
+ de::MovePtr<DataGenerator> gen)
+ : TestInstance (ctx)
+ , m_shaderType (testCaseData.shaderType)
+ , m_shaderSpec (shaderSpec)
+ , m_imParams (testCaseData.imParams)
+ , m_samplerParams (testCaseData.samplerParams)
+ , m_sampleLookupSettings (testCaseData.sampleLookupSettings)
+ , m_levels (testCaseData.pba)
+ , m_gen (gen.release())
+{
+ for (deUint8 compNdx = 0; compNdx < 3; ++compNdx)
+ DE_ASSERT(m_imParams.size[compNdx] > 0);
+}
+
TestStatus TextureFilteringTestInstance::runTest (void)
{
if (!isSupported())
- {
TCU_THROW(NotSupportedError, "Unsupported combination of filtering and image format");
- }
TCU_CHECK(m_gen->generate());
- m_pba = m_gen->getPba();
+ m_levels = m_gen->getPba();
m_sampleArguments = m_gen->getSampleArgs();
m_numSamples = (deUint32)m_sampleArguments.size();
createResources();
- initializeImage(m_ctx, m_im.get(), &m_pba[0], m_imParams);
-
- m_shaderExecutor.addSamplerUniform(0, m_imView.get(), m_sampler.get());
+ initializeImage(m_context, m_im.get(), &m_levels[0], m_imParams);
deUint64 startTime, endTime;
execute();
endTime = deGetMicroseconds();
- m_ctx.getTestContext().getLog() << TestLog::Message
- << "Execution time: "
- << endTime - startTime
- << "us"
- << TestLog::EndMessage;
+ m_context.getTestContext().getLog() << TestLog::Message
+ << "Execution time: "
+ << endTime - startTime
+ << "us"
+ << TestLog::EndMessage;
startTime = deGetMicroseconds();
bool result = verify();
endTime = deGetMicroseconds();
- m_ctx.getTestContext().getLog() << TestLog::Message
- << "Verification time: "
- << endTime - startTime
- << "us"
- << TestLog::EndMessage;
+ m_context.getTestContext().getLog() << TestLog::Message
+ << "Verification time: "
+ << endTime - startTime
+ << "us"
+ << TestLog::EndMessage;
if (result)
{
{
// \todo [2016-06-24 collinbaker] Handle cubemaps
- m_coordBits = (deUint8) m_ctx.getDeviceProperties().limits.subTexelPrecisionBits;
- m_mipmapBits = (deUint8) m_ctx.getDeviceProperties().limits.mipmapPrecisionBits;
+ const int coordBits = (int)m_context.getDeviceProperties().limits.subTexelPrecisionBits;
+ const int mipmapBits = (int)m_context.getDeviceProperties().limits.mipmapPrecisionBits;
+ const int maxPrintedFailures = 5;
+ int failCount = 0;
- SampleVerifier verifier(m_imParams,
- m_samplerParams,
- m_sampleLookupSettings,
- m_coordBits,
- m_mipmapBits,
- m_pba);
+ const SampleVerifier verifier (m_imParams,
+ m_samplerParams,
+ m_sampleLookupSettings,
+ coordBits,
+ mipmapBits,
+ getConversionPrecision(m_imParams.format),
+ getFilteringPrecision(m_imParams.format),
+ m_levels);
- const int maxPrintedFailures = 5;
- int failCount = 0;
for (deUint32 sampleNdx = 0; sampleNdx < m_numSamples; ++sampleNdx)
{
std::string report;
verifier.verifySampleReport(m_sampleArguments[sampleNdx], m_resultSamples[sampleNdx], report);
- m_ctx.getTestContext().getLog()
+ m_context.getTestContext().getLog()
<< TestLog::Section("Failed sample", "Failed sample")
<< TestLog::Message
<< "Sample " << sampleNdx << ".\n"
}
}
- m_ctx.getTestContext().getLog()
+ m_context.getTestContext().getLog()
<< TestLog::Message
<< "Passed " << m_numSamples - failCount << " out of " << m_numSamples << "."
<< TestLog::EndMessage;
reinterpret_cast<void*>(&resultCoordsTemp[0])
};
- m_shaderExecutor.execute(m_ctx, m_numSamples, inputs, outputs);
+ m_executor->execute(m_numSamples, inputs, outputs, *m_extraResourcesSet);
m_resultSamples.resize(m_numSamples);
m_resultCoords .resize(m_numSamples);
{
// Create VkImage
- const VkImageCreateFlags imCreateFlags =
- (m_imParams.dim == IMG_DIM_CUBE) ? VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0;
-
- const VkImageCreateInfo imCreateInfo =
- {
- VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
- DE_NULL, // pNext
- imCreateFlags, // flags
- mapImageType(m_imParams.dim), // imageType
- m_imParams.format, // format
- m_imExtent, // extent
- (deUint32)m_imParams.levels, // mipLevels
- (deUint32)m_imParams.arrayLayers, // arrayLayers
- VK_SAMPLE_COUNT_1_BIT, // samples
- VK_IMAGE_TILING_OPTIMAL, // tiling
- VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, // usage
- VK_SHARING_MODE_EXCLUSIVE, // sharingMode
- 1, // queueFamilyIndexCount
- &m_uqfi, // pQueueFamilyIndices
- VK_IMAGE_LAYOUT_UNDEFINED // initialLayout
+ const DeviceInterface& vkd = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+
+ const deUint32 queueFamily = m_context.getUniversalQueueFamilyIndex();
+ const VkImageCreateFlags imCreateFlags =(m_imParams.dim == IMG_DIM_CUBE) ? VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0;
+
+ const VkImageCreateInfo imCreateInfo =
+ {
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+ DE_NULL,
+ imCreateFlags,
+ mapImageType(m_imParams.dim),
+ m_imParams.format,
+ makeExtent3D(m_imParams.size[0], m_imParams.size[1], m_imParams.size[2]),
+ (deUint32)m_imParams.levels,
+ (deUint32)m_imParams.arrayLayers,
+ VK_SAMPLE_COUNT_1_BIT,
+ VK_IMAGE_TILING_OPTIMAL,
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
+ VK_SHARING_MODE_EXCLUSIVE,
+ 1,
+ &queueFamily,
+ VK_IMAGE_LAYOUT_UNDEFINED
};
- m_im = createImage(m_vkd, m_device, &imCreateInfo);
+ m_im = createImage(vkd, device, &imCreateInfo);
// Allocate memory for image
VkMemoryRequirements imMemReq;
- m_vkd.getImageMemoryRequirements(m_device, m_im.get(), &imMemReq);
+ vkd.getImageMemoryRequirements(device, m_im.get(), &imMemReq);
- m_imAllocation = m_ctx.getDefaultAllocator().allocate(imMemReq, MemoryRequirement::Any);
- VK_CHECK(m_vkd.bindImageMemory(m_device, m_im.get(), m_imAllocation->getMemory(), m_imAllocation->getOffset()));
+ m_imAllocation = m_context.getDefaultAllocator().allocate(imMemReq, MemoryRequirement::Any);
+ VK_CHECK(vkd.bindImageMemory(device, m_im.get(), m_imAllocation->getMemory(), m_imAllocation->getOffset()));
// Create VkImageView
// \todo [2016-06-23 collinbaker] Pick aspectMask based on image type (i.e. support depth and/or stencil images)
- VkImageSubresourceRange imViewSubresourceRange =
+ DE_ASSERT(m_imParams.dim != IMG_DIM_CUBE); // \todo Support cube maps
+ const VkImageSubresourceRange imViewSubresourceRange =
{
VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
0, // baseMipLevel
(deUint32)m_imParams.arrayLayers // layerCount
};
- if (m_imParams.dim == IMG_DIM_CUBE)
- {
- imViewSubresourceRange.layerCount *= 6;
- }
-
const VkComponentMapping imViewCompMap =
{
VK_COMPONENT_SWIZZLE_R,
imViewSubresourceRange // subresourceRange
};
- m_imView = createImageView(m_vkd, m_device, &imViewCreateInfo);
+ m_imView = createImageView(vkd, device, &imViewCreateInfo);
// Create VkSampler
const VkSamplerCreateInfo samplerCreateInfo = mapSamplerCreateInfo(m_samplerParams);
- m_sampler = createSampler(m_vkd, m_device, &samplerCreateInfo);
-}
+ m_sampler = createSampler(vkd, device, &samplerCreateInfo);
-bool TextureFilteringTestInstance::isSupported (void)
-{
- const VkImageCreateFlags imCreateFlags =
- (m_imParams.dim == IMG_DIM_CUBE) ? VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0;
+ // Create additional descriptors
- VkImageFormatProperties imFormatProperties;
- VkFormatProperties formatProperties;
+ {
+ const VkDescriptorSetLayoutBinding bindings[] =
+ {
+ { 0u, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1u, VK_SHADER_STAGE_ALL, DE_NULL },
+ };
+ const VkDescriptorSetLayoutCreateInfo layoutInfo =
+ {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ DE_NULL,
+ (VkDescriptorSetLayoutCreateFlags)0u,
+ DE_LENGTH_OF_ARRAY(bindings),
+ bindings,
+ };
- m_vki.getPhysicalDeviceImageFormatProperties(m_physicalDevice,
- m_imParams.format,
- mapImageType(m_imParams.dim),
- VK_IMAGE_TILING_OPTIMAL,
- VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
- imCreateFlags,
- &imFormatProperties);
+ m_extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
+ }
+
+ {
+ const VkDescriptorPoolSize poolSizes[] =
+ {
+ { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1u },
+ };
+ const VkDescriptorPoolCreateInfo poolInfo =
+ {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+ DE_NULL,
+ (VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
+ 1u, // maxSets
+ DE_LENGTH_OF_ARRAY(poolSizes),
+ poolSizes,
+ };
- m_vki.getPhysicalDeviceFormatProperties( m_physicalDevice,
- m_imParams.format,
- &formatProperties);
+ m_extraResourcesPool = createDescriptorPool(vkd, device, &poolInfo);
+ }
- // \todo [2016-06-23 collinbaker] Check image parameters against imFormatProperties
+ {
+ const VkDescriptorSetAllocateInfo allocInfo =
+ {
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+ DE_NULL,
+ *m_extraResourcesPool,
+ 1u,
+ &m_extraResourcesLayout.get(),
+ };
- VkFormatFeatureFlags reqImFeatures = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
+ m_extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
+ }
- if (m_samplerParams.minFilter == VK_FILTER_LINEAR ||
- m_samplerParams.magFilter == VK_FILTER_LINEAR ||
- m_samplerParams.mipmapFilter == VK_SAMPLER_MIPMAP_MODE_LINEAR)
{
- reqImFeatures |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
+ const VkDescriptorImageInfo imageInfo =
+ {
+ *m_sampler,
+ *m_imView,
+ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ };
+ const VkWriteDescriptorSet descriptorWrite =
+ {
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ DE_NULL,
+ *m_extraResourcesSet,
+ 0u, // dstBinding
+ 0u, // dstArrayElement
+ 1u,
+ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
+ &imageInfo,
+ (const VkDescriptorBufferInfo*)DE_NULL,
+ (const VkBufferView*)DE_NULL,
+ };
+
+ vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
+ }
+
+ m_executor = de::MovePtr<ShaderExecutor>(createExecutor(m_context, m_shaderType, m_shaderSpec, *m_extraResourcesLayout));
+}
+
+VkFormatFeatureFlags getRequiredFormatFeatures (const SamplerParameters& samplerParams)
+{
+ VkFormatFeatureFlags features = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
+
+ if (samplerParams.minFilter == VK_FILTER_LINEAR ||
+ samplerParams.magFilter == VK_FILTER_LINEAR ||
+ samplerParams.mipmapFilter == VK_SAMPLER_MIPMAP_MODE_LINEAR)
+ {
+ features |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
}
+ return features;
+}
+
+bool TextureFilteringTestInstance::isSupported (void)
+{
+ const VkImageCreateFlags imCreateFlags = (m_imParams.dim == IMG_DIM_CUBE) ? VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0;
+ const VkFormatFeatureFlags reqImFeatures = getRequiredFormatFeatures(m_samplerParams);
+
+ const VkImageFormatProperties imFormatProperties = getPhysicalDeviceImageFormatProperties(m_context.getInstanceInterface(),
+ m_context.getPhysicalDevice(),
+ m_imParams.format,
+ mapImageType(m_imParams.dim),
+ VK_IMAGE_TILING_OPTIMAL,
+ VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
+ imCreateFlags);
+ const VkFormatProperties formatProperties = getPhysicalDeviceFormatProperties(m_context.getInstanceInterface(),
+ m_context.getPhysicalDevice(),
+ m_imParams.format);
+
+ // \todo [2016-06-23 collinbaker] Check image parameters against imFormatProperties
+ DE_UNREF(imFormatProperties);
+
return (formatProperties.optimalTilingFeatures & reqImFeatures) == reqImFeatures;
}
class TextureFilteringTestCase : public TestCase
{
public:
- TextureFilteringTestCase (tcu::TestContext& testCtx,
- const char* name,
- const char* description)
+ TextureFilteringTestCase (tcu::TestContext& testCtx,
+ const char* name,
+ const char* description)
: TestCase(testCtx, name, description)
{
}
- void init (void);
+ void initSpec (void);
virtual void initPrograms (vk::SourceCollections& programCollection) const
{
- DE_ASSERT(m_executor);
- m_executor->setShaderSources(programCollection);
+ generateSources(m_testCaseData.shaderType, m_shaderSpec, programCollection);
}
virtual de::MovePtr<DataGenerator> createGenerator (void) const = 0;
virtual TestInstance* createInstance (Context& ctx) const
{
- return new TextureFilteringTestInstance(ctx, m_testCaseData, *m_executor, createGenerator());
+ return new TextureFilteringTestInstance(ctx, m_testCaseData, m_shaderSpec, createGenerator());
}
protected:
de::MovePtr<ShaderExecutor> m_executor;
TestCaseData m_testCaseData;
+ ShaderSpec m_shaderSpec;
};
-void TextureFilteringTestCase::init (void)
+void TextureFilteringTestCase::initSpec (void)
{
- ShaderSpec shaderSpec;
- shaderSpec.source = genLookupCode(m_testCaseData.imParams,
- m_testCaseData.samplerParams,
- m_testCaseData.sampleLookupSettings);
- shaderSpec.source += "\nsampledCoord = coord;";
-
- shaderSpec.outputs.push_back(Symbol("result", glu::VarType(glu::TYPE_FLOAT_VEC4, glu::PRECISION_HIGHP)));
- shaderSpec.outputs.push_back(Symbol("sampledCoord", glu::VarType(glu::TYPE_FLOAT_VEC4, glu::PRECISION_HIGHP)));
- shaderSpec.inputs .push_back(Symbol("coord", glu::VarType(glu::TYPE_FLOAT_VEC4, glu::PRECISION_HIGHP)));
- shaderSpec.inputs .push_back(Symbol("layer", glu::VarType(glu::TYPE_FLOAT, glu::PRECISION_HIGHP)));
- shaderSpec.inputs .push_back(Symbol("dRef", glu::VarType(glu::TYPE_FLOAT, glu::PRECISION_HIGHP)));
- shaderSpec.inputs .push_back(Symbol("dPdx", glu::VarType(glu::TYPE_FLOAT_VEC4, glu::PRECISION_HIGHP)));
- shaderSpec.inputs .push_back(Symbol("dPdy", glu::VarType(glu::TYPE_FLOAT_VEC4, glu::PRECISION_HIGHP)));
- shaderSpec.inputs .push_back(Symbol("lod", glu::VarType(glu::TYPE_FLOAT, glu::PRECISION_HIGHP)));
-
- shaderSpec.globalDeclarations = "layout(set=0, binding=0) uniform highp ";
- shaderSpec.globalDeclarations += genSamplerDeclaration(m_testCaseData.imParams,
+ m_shaderSpec.source = genLookupCode(m_testCaseData.imParams,
+ m_testCaseData.samplerParams,
+ m_testCaseData.sampleLookupSettings);
+ m_shaderSpec.source += "\nsampledCoord = coord;";
+
+ m_shaderSpec.outputs.push_back(Symbol("result", glu::VarType(glu::TYPE_FLOAT_VEC4, glu::PRECISION_HIGHP)));
+ m_shaderSpec.outputs.push_back(Symbol("sampledCoord", glu::VarType(glu::TYPE_FLOAT_VEC4, glu::PRECISION_HIGHP)));
+ m_shaderSpec.inputs .push_back(Symbol("coord", glu::VarType(glu::TYPE_FLOAT_VEC4, glu::PRECISION_HIGHP)));
+ m_shaderSpec.inputs .push_back(Symbol("layer", glu::VarType(glu::TYPE_FLOAT, glu::PRECISION_HIGHP)));
+ m_shaderSpec.inputs .push_back(Symbol("dRef", glu::VarType(glu::TYPE_FLOAT, glu::PRECISION_HIGHP)));
+ m_shaderSpec.inputs .push_back(Symbol("dPdx", glu::VarType(glu::TYPE_FLOAT_VEC4, glu::PRECISION_HIGHP)));
+ m_shaderSpec.inputs .push_back(Symbol("dPdy", glu::VarType(glu::TYPE_FLOAT_VEC4, glu::PRECISION_HIGHP)));
+ m_shaderSpec.inputs .push_back(Symbol("lod", glu::VarType(glu::TYPE_FLOAT, glu::PRECISION_HIGHP)));
+
+ m_shaderSpec.globalDeclarations = "layout(set=" + de::toString((int)EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX) + ", binding=0) uniform highp ";
+ m_shaderSpec.globalDeclarations += genSamplerDeclaration(m_testCaseData.imParams,
m_testCaseData.samplerParams);
- shaderSpec.globalDeclarations += " testSampler;";
-
- m_executor = de::MovePtr<ShaderExecutor>(createExecutor(m_testCaseData.shaderType, shaderSpec));
- DE_ASSERT(m_executor);
-
- m_testCtx.getLog() << *m_executor;
+ m_shaderSpec.globalDeclarations += " testSampler;";
}
class Texture2DGradientTestCase : public TextureFilteringTestCase
{
public:
- Texture2DGradientTestCase (TestContext& testCtx,
- const char* name,
- const char* desc,
- TextureFormat format,
- IVec3 dimensions,
- VkFilter magFilter,
- VkFilter minFilter,
- VkSamplerMipmapMode mipmapFilter,
- VkSamplerAddressMode wrappingMode,
- bool useDerivatives)
+ Texture2DGradientTestCase (TestContext& testCtx,
+ const char* name,
+ const char* desc,
+ TextureFormat format,
+ IVec3 dimensions,
+ VkFilter magFilter,
+ VkFilter minFilter,
+ VkSamplerMipmapMode mipmapFilter,
+ VkSamplerAddressMode wrappingMode,
+ bool useDerivatives)
: TextureFilteringTestCase (testCtx, name, desc)
, m_format (format)
, m_useDerivatives (useDerivatives)
{
m_testCaseData = genTestCaseData();
- init();
+ initSpec();
}
protected:
const SamplerParameters samplerParameters =
{
- m_magFilter, // magFilter
- m_minFilter, // minFilter
- m_mipmapFilter, // mipmapFilter
- m_wrappingMode, // wrappingModeU
- m_wrappingMode, // wrappingModeV
- m_wrappingMode, // wrappingModeW
- VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE, // borderColor
- 0.0f, // lodBias
- -1.0f, // minLod
- 50.0f, // maxLod
- false, // isUnnormalized
- false // isCompare
+ m_magFilter,
+ m_minFilter,
+ m_mipmapFilter,
+ m_wrappingMode,
+ m_wrappingMode,
+ m_wrappingMode,
+ VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE,
+ 0.0f,
+ -1.0f,
+ 50.0f,
+ false,
+ false
};
const deUint8 numLevels = (deUint8) (1 + deLog2Floor32(de::max(m_dimensions[0],
const ImageViewParameters imParameters =
{
- IMG_DIM_2D, // dim
- mapTextureFormat(m_format), // format
- m_dimensions, // size
- numLevels, // levels
- false, // isArrayed
- 1, // arrayLayers
+ IMG_DIM_2D,
+ mapTextureFormat(m_format),
+ m_dimensions,
+ numLevels,
+ false,
+ 1,
};
const TestCaseData data =
de::MovePtr<TestCaseGroup> tests(
new TestCaseGroup(testCtx, "formats", "Various image formats"));
- VkFormat formats[] =
+ const VkFormat formats[] =
{
VK_FORMAT_B4G4R4A4_UNORM_PACK16,
VK_FORMAT_R5G6B5_UNORM_PACK16,
deUint32 getInputAttachmentCount (void) const { return (deUint32)m_inputAttachments.size(); }
deUint32 getInputAttachmentIndex (deUint32 attachmentNdx) const { return m_inputAttachments[attachmentNdx].getAttachment(); }
+ VkImageLayout getInputAttachmentLayout (deUint32 attachmentNdx) const { return m_inputAttachments[attachmentNdx].getImageLayout(); }
deUint32 getColorAttachmentCount (void) const { return (deUint32)m_colorAttachments.size(); }
VkImageLayout getColorAttachmentLayout (deUint32 attachmentNdx) const { return m_colorAttachments[attachmentNdx].getImageLayout(); }
for (deUint32 inputAttachmentNdx = 0; inputAttachmentNdx < renderInfo.getInputAttachmentCount(); inputAttachmentNdx++)
{
- const Attachment attachmentInfo = attachmentInfos[renderInfo.getInputAttachmentIndex(inputAttachmentNdx)];
- const tcu::TextureFormat format = mapVkFormat(attachmentInfo.getFormat());
- const bool isDepthFormat = tcu::hasDepthComponent(format.order);
- const bool isStencilFormat = tcu::hasStencilComponent(format.order);
+ const Attachment attachmentInfo = attachmentInfos[renderInfo.getInputAttachmentIndex(inputAttachmentNdx)];
+ const tcu::TextureFormat format = mapVkFormat(attachmentInfo.getFormat());
+ const bool isDepthFormat = tcu::hasDepthComponent(format.order);
+ const bool isStencilFormat = tcu::hasStencilComponent(format.order);
+ const VkImageLayout inputAttachmentLayout = renderInfo.getInputAttachmentLayout(inputAttachmentNdx);
if (isDepthFormat && isStencilFormat)
{
{
(VkSampler)0,
attachmentViews[renderInfo.getInputAttachmentIndex(inputAttachmentNdx)].first,
- VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ inputAttachmentLayout
};
imageInfos[bindingIndex] = imageInfo;
{
(VkSampler)0,
attachmentViews[renderInfo.getInputAttachmentIndex(inputAttachmentNdx)].second,
- VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ inputAttachmentLayout
};
imageInfos[bindingIndex] = imageInfo;
{
(VkSampler)0,
attachmentViews[renderInfo.getInputAttachmentIndex(inputAttachmentNdx)].first,
- VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+ inputAttachmentLayout
};
imageInfos[bindingIndex] = imageInfo;
if (m_renderInfo.getDepthStencilAttachmentIndex() && (m_renderInfo.getInputAttachmentIndex(inputAttachmentNdx) == *m_renderInfo.getDepthStencilAttachmentIndex()))
{
- const VkImageMemoryBarrier barrier =
- {
- VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType;
- DE_NULL, // pNext;
-
- VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, // srcAccessMask
- VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, // dstAccessMask
-
- VK_IMAGE_LAYOUT_GENERAL, // oldLayout
- VK_IMAGE_LAYOUT_GENERAL, // newLayout;
-
- VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex;
- VK_QUEUE_FAMILY_IGNORED, // destQueueFamilyIndex;
-
- m_depthStencilAttachmentImage, // image;
- { // subresourceRange;
- VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT, // aspect;
- 0, // baseMipLevel;
- 1, // mipLevels;
- 0, // baseArraySlice;
- 1 // arraySize;
- }
- };
+ const tcu::TextureFormat format = mapVkFormat(m_renderInfo.getDepthStencilAttachment()->getFormat());
+ const bool hasDepth = hasDepthComponent(format.order);
+ const bool hasStencil = hasStencilComponent(format.order);
+ const VkImageMemoryBarrier barrier =
+ {
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType;
+ DE_NULL, // pNext;
+
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, // srcAccessMask
+ VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, // dstAccessMask
+
+ VK_IMAGE_LAYOUT_GENERAL, // oldLayout
+ VK_IMAGE_LAYOUT_GENERAL, // newLayout;
+
+ VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex;
+ VK_QUEUE_FAMILY_IGNORED, // destQueueFamilyIndex;
+
+ m_depthStencilAttachmentImage, // image;
+ { // subresourceRange;
+ (hasDepth ? (VkImageAspectFlags)VK_IMAGE_ASPECT_DEPTH_BIT : 0u)
+ | (hasStencil ? (VkImageAspectFlags)VK_IMAGE_ASPECT_STENCIL_BIT : 0u), // aspect;
+ 0, // baseMipLevel;
+ 1, // mipLevels;
+ 0, // baseArraySlice;
+ 1 // arraySize;
+ }
+ };
- srcStages |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
- dstStages |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
+ srcStages |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
+ dstStages |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
- selfDeps.push_back(barrier);
+ selfDeps.push_back(barrier);
}
}
vktWsiSurfaceTests.hpp
vktWsiSwapchainTests.cpp
vktWsiSwapchainTests.hpp
+ vktWsiIncrementalPresentTests.cpp
+ vktWsiIncrementalPresentTests.hpp
+ vktWsiDisplayTimingTests.cpp
+ vktWsiDisplayTimingTests.hpp
)
set(DEQP_VK_WSI_LIBS
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2017 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Tests for VK_GOOGLE_display_timing
+ *//*--------------------------------------------------------------------*/
+
+#include "vktWsiDisplayTimingTests.hpp"
+
+#include "vktTestCaseUtil.hpp"
+#include "vktTestGroupUtil.hpp"
+#include "vkRefUtil.hpp"
+#include "vkWsiPlatform.hpp"
+#include "vkWsiUtil.hpp"
+#include "vkQueryUtil.hpp"
+#include "vkDeviceUtil.hpp"
+#include "vkPlatform.hpp"
+#include "vkTypeUtil.hpp"
+#include "vkPrograms.hpp"
+
+#include "vkWsiUtil.hpp"
+
+#include "tcuPlatform.hpp"
+#include "tcuResultCollector.hpp"
+#include "deClock.h"
+
+#include <vector>
+#include <string>
+
+using std::vector;
+using std::string;
+
+using tcu::Maybe;
+using tcu::UVec2;
+using tcu::TestLog;
+
+namespace vk
+{
+
+Move<VkSemaphore> createSemaphore (const DeviceInterface& vk,
+ VkDevice device,
+ VkSemaphoreCreateFlags flags = (VkSemaphoreCreateFlags)0,
+ const VkAllocationCallbacks* pAllocator = DE_NULL)
+{
+ const VkSemaphoreCreateInfo createInfo =
+ {
+ VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+ DE_NULL,
+
+ flags
+ };
+
+ return createSemaphore(vk, device, &createInfo, pAllocator);
+}
+
+Move<VkFence> createFence (const DeviceInterface& vk,
+ VkDevice device,
+ VkFenceCreateFlags flags = (VkFenceCreateFlags)0,
+ const VkAllocationCallbacks* pAllocator = DE_NULL)
+{
+ const VkFenceCreateInfo createInfo =
+ {
+ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+ DE_NULL,
+
+ flags
+ };
+
+ return createFence(vk, device, &createInfo, pAllocator);
+}
+
+VkQueue getDeviceQueue (const DeviceInterface& vkd, VkDevice device, deUint32 queueFamilyIndex, deUint32 queueIndex)
+{
+ VkQueue queue;
+
+ vkd.getDeviceQueue(device, queueFamilyIndex, queueIndex, &queue);
+
+ return queue;
+}
+
+} // vk
+
+namespace vkt
+{
+namespace wsi
+{
+namespace
+{
+static const deUint64 MILLISECOND = 1000ull * 1000ull;
+static const deUint64 SECOND = 1000ull * MILLISECOND;
+
+typedef vector<vk::VkExtensionProperties> Extensions;
+
+void checkAllSupported (const Extensions& supportedExtensions, const vector<string>& requiredExtensions)
+{
+ for (vector<string>::const_iterator requiredExtName = requiredExtensions.begin();
+ requiredExtName != requiredExtensions.end();
+ ++requiredExtName)
+ {
+ if (!isExtensionSupported(supportedExtensions, vk::RequiredExtension(*requiredExtName)))
+ TCU_THROW(NotSupportedError, (*requiredExtName + " is not supported").c_str());
+ }
+}
+
+vk::Move<vk::VkInstance> createInstanceWithWsi (const vk::PlatformInterface& vkp,
+ const Extensions& supportedExtensions,
+ vk::wsi::Type wsiType)
+{
+ vector<string> extensions;
+
+ extensions.push_back("VK_KHR_surface");
+ extensions.push_back(getExtensionName(wsiType));
+
+ checkAllSupported(supportedExtensions, extensions);
+
+ return vk::createDefaultInstance(vkp, vector<string>(), extensions);
+}
+
+vk::VkPhysicalDeviceFeatures getDeviceNullFeatures (void)
+{
+ vk::VkPhysicalDeviceFeatures features;
+ deMemset(&features, 0, sizeof(features));
+ return features;
+}
+
+deUint32 getNumQueueFamilyIndices (const vk::InstanceInterface& vki, vk::VkPhysicalDevice physicalDevice)
+{
+ deUint32 numFamilies = 0;
+
+ vki.getPhysicalDeviceQueueFamilyProperties(physicalDevice, &numFamilies, DE_NULL);
+
+ return numFamilies;
+}
+
+vector<deUint32> getSupportedQueueFamilyIndices (const vk::InstanceInterface& vki, vk::VkPhysicalDevice physicalDevice, vk::VkSurfaceKHR surface)
+{
+ const deUint32 numTotalFamilyIndices = getNumQueueFamilyIndices(vki, physicalDevice);
+ vector<deUint32> supportedFamilyIndices;
+
+ for (deUint32 queueFamilyNdx = 0; queueFamilyNdx < numTotalFamilyIndices; ++queueFamilyNdx)
+ {
+ if (vk::wsi::getPhysicalDeviceSurfaceSupport(vki, physicalDevice, queueFamilyNdx, surface) == VK_TRUE)
+ supportedFamilyIndices.push_back(queueFamilyNdx);
+ }
+
+ return supportedFamilyIndices;
+}
+
+deUint32 chooseQueueFamilyIndex (const vk::InstanceInterface& vki, vk::VkPhysicalDevice physicalDevice, vk::VkSurfaceKHR surface)
+{
+ const vector<deUint32> supportedFamilyIndices = getSupportedQueueFamilyIndices(vki, physicalDevice, surface);
+
+ if (supportedFamilyIndices.empty())
+ TCU_THROW(NotSupportedError, "Device doesn't support presentation");
+
+ return supportedFamilyIndices[0];
+}
+
+// Create a device with VK_KHR_swapchain enabled, plus VK_GOOGLE_display_timing
+// when the test configuration requires it. Throws NotSupportedError if a
+// required (i.e. actually enabled) extension is missing.
+vk::Move<vk::VkDevice> createDeviceWithWsi (const vk::InstanceInterface&		vki,
+											vk::VkPhysicalDevice				physicalDevice,
+											const Extensions&					supportedExtensions,
+											const deUint32						queueFamilyIndex,
+											bool								requiresDisplayTiming,
+											const vk::VkAllocationCallbacks*	pAllocator = DE_NULL)
+{
+	const float							queuePriorities[]	= { 1.0f };
+	const vk::VkDeviceQueueCreateInfo	queueInfos[]		=
+	{
+		{
+			vk::VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
+			DE_NULL,
+			(vk::VkDeviceQueueCreateFlags)0,
+			queueFamilyIndex,
+			DE_LENGTH_OF_ARRAY(queuePriorities),
+			&queuePriorities[0]
+		}
+	};
+	const vk::VkPhysicalDeviceFeatures	features			= getDeviceNullFeatures();
+	// Order matters: the first numExtensions entries are enabled.
+	const char* const					extensions[]		=
+	{
+		"VK_KHR_swapchain",
+		"VK_GOOGLE_display_timing"
+	};
+	const deUint32						numExtensions		= requiresDisplayTiming ? 2u : 1u;
+
+	const vk::VkDeviceCreateInfo		deviceParams		=
+	{
+		vk::VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+		DE_NULL,
+		(vk::VkDeviceCreateFlags)0,
+		DE_LENGTH_OF_ARRAY(queueInfos),
+		&queueInfos[0],
+		0u,						// enabledLayerCount
+		DE_NULL,				// ppEnabledLayerNames
+		numExtensions,
+		DE_ARRAY_BEGIN(extensions),
+		&features
+	};
+
+	// Only check support for the extensions that are actually enabled; checking
+	// DE_LENGTH_OF_ARRAY(extensions) entries unconditionally would reject devices
+	// without VK_GOOGLE_display_timing even for tests that never use it.
+	for (deUint32 ndx = 0; ndx < numExtensions; ++ndx)
+	{
+		if (!isExtensionSupported(supportedExtensions, vk::RequiredExtension(extensions[ndx])))
+			TCU_THROW(NotSupportedError, (string(extensions[ndx]) + " is not supported").c_str());
+	}
+
+	return createDevice(vki, physicalDevice, &deviceParams, pAllocator);
+}
+
+de::MovePtr<vk::wsi::Display> createDisplay (const vk::Platform& platform,
+ const Extensions& supportedExtensions,
+ vk::wsi::Type wsiType)
+{
+ try
+ {
+ return de::MovePtr<vk::wsi::Display>(platform.createWsiDisplay(wsiType));
+ }
+ catch (const tcu::NotSupportedError& e)
+ {
+ if (isExtensionSupported(supportedExtensions, vk::RequiredExtension(getExtensionName(wsiType))))
+ {
+ // If VK_KHR_{platform}_surface was supported, vk::Platform implementation
+ // must support creating native display & window for that WSI type.
+ throw tcu::TestError(e.getMessage());
+ }
+ else
+ throw;
+ }
+}
+
+de::MovePtr<vk::wsi::Window> createWindow (const vk::wsi::Display& display, const Maybe<UVec2>& initialSize)
+{
+ try
+ {
+ return de::MovePtr<vk::wsi::Window>(display.createWindow(initialSize));
+ }
+ catch (const tcu::NotSupportedError& e)
+ {
+ // See createDisplay - assuming that wsi::Display was supported platform port
+ // should also support creating a window.
+ throw tcu::TestError(e.getMessage());
+ }
+}
+
+void initSemaphores (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ std::vector<vk::VkSemaphore>& semaphores)
+{
+ for (size_t ndx = 0; ndx < semaphores.size(); ndx++)
+ semaphores[ndx] = createSemaphore(vkd, device).disown();
+}
+
+void deinitSemaphores (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ std::vector<vk::VkSemaphore>& semaphores)
+{
+ for (size_t ndx = 0; ndx < semaphores.size(); ndx++)
+ {
+ if (semaphores[ndx] != (vk::VkSemaphore)0)
+ vkd.destroySemaphore(device, semaphores[ndx], DE_NULL);
+
+ semaphores[ndx] = (vk::VkSemaphore)0;
+ }
+
+ semaphores.clear();
+}
+
+void initFences (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ std::vector<vk::VkFence>& fences)
+{
+ for (size_t ndx = 0; ndx < fences.size(); ndx++)
+ fences[ndx] = createFence(vkd, device).disown();
+}
+
+void deinitFences (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ std::vector<vk::VkFence>& fences)
+{
+ for (size_t ndx = 0; ndx < fences.size(); ndx++)
+ {
+ if (fences[ndx] != (vk::VkFence)0)
+ vkd.destroyFence(device, fences[ndx], DE_NULL);
+
+ fences[ndx] = (vk::VkFence)0;
+ }
+
+ fences.clear();
+}
+
+void cmdRenderFrame (const vk::DeviceInterface& vkd,
+ vk::VkCommandBuffer commandBuffer,
+ vk::VkPipelineLayout pipelineLayout,
+ vk::VkPipeline pipeline,
+ size_t frameNdx,
+ deUint32 quadCount)
+{
+ const deUint32 frameNdxValue = (deUint32)frameNdx;
+
+ vkd.cmdPushConstants(commandBuffer, pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, 4u, &frameNdxValue);
+ vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+ vkd.cmdDraw(commandBuffer, quadCount * 6u, 1u, 0u, 0u);
+}
+
+vk::Move<vk::VkCommandBuffer> createCommandBuffer (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ vk::VkCommandPool commandPool,
+ vk::VkPipelineLayout pipelineLayout,
+ vk::VkRenderPass renderPass,
+ vk::VkFramebuffer framebuffer,
+ vk::VkPipeline pipeline,
+ size_t frameNdx,
+ deUint32 quadCount,
+ deUint32 imageWidth,
+ deUint32 imageHeight)
+{
+ const vk::VkCommandBufferAllocateInfo allocateInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ DE_NULL,
+
+ commandPool,
+ vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ 1
+ };
+ const vk::VkCommandBufferBeginInfo beginInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ DE_NULL,
+ 0u,
+ DE_NULL
+ };
+
+ vk::Move<vk::VkCommandBuffer> commandBuffer (vk::allocateCommandBuffer(vkd, device, &allocateInfo));
+ VK_CHECK(vkd.beginCommandBuffer(*commandBuffer, &beginInfo));
+
+ {
+ const vk::VkClearValue clearValue = vk::makeClearValueColorF32(0.25f, 0.50f, 0.75f, 1.00f);
+ const vk::VkRenderPassBeginInfo renderPassBeginInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+ DE_NULL,
+
+ renderPass,
+ framebuffer,
+
+ {
+ { (deInt32)0, (deInt32)0 },
+ { imageWidth, imageHeight }
+ },
+ 1u,
+ &clearValue
+ };
+ vkd.cmdBeginRenderPass(*commandBuffer, &renderPassBeginInfo, vk::VK_SUBPASS_CONTENTS_INLINE);
+ }
+
+ cmdRenderFrame(vkd, *commandBuffer, pipelineLayout, pipeline, frameNdx, quadCount);
+
+ vkd.cmdEndRenderPass(*commandBuffer);
+
+ VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
+ return commandBuffer;
+}
+
+void deinitCommandBuffers (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ vk::VkCommandPool commandPool,
+ std::vector<vk::VkCommandBuffer>& commandBuffers)
+{
+ for (size_t ndx = 0; ndx < commandBuffers.size(); ndx++)
+ {
+ if (commandBuffers[ndx] != (vk::VkCommandBuffer)0)
+ vkd.freeCommandBuffers(device, commandPool, 1u, &commandBuffers[ndx]);
+
+ commandBuffers[ndx] = (vk::VkCommandBuffer)0;
+ }
+
+ commandBuffers.clear();
+}
+
+vk::Move<vk::VkCommandPool> createCommandPool (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ deUint32 queueFamilyIndex)
+{
+ const vk::VkCommandPoolCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ queueFamilyIndex
+ };
+
+ return vk::createCommandPool(vkd, device, &createInfo);
+}
+
+vk::Move<vk::VkFramebuffer> createFramebuffer (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ vk::VkRenderPass renderPass,
+ vk::VkImageView imageView,
+ deUint32 width,
+ deUint32 height)
+{
+ const vk::VkFramebufferCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ DE_NULL,
+
+ 0u,
+ renderPass,
+ 1u,
+ &imageView,
+ width,
+ height,
+ 1u
+ };
+
+ return vk::createFramebuffer(vkd, device, &createInfo);
+}
+
+// Create one framebuffer per swapchain image view. Ownership of the raw
+// handles is transferred to the caller via 'framebuffers' (released with
+// deinitFramebuffers). 'framebuffers' must already be sized to match.
+void initFramebuffers (const vk::DeviceInterface&			vkd,
+					   vk::VkDevice							device,
+					   vk::VkRenderPass						renderPass,
+					   const std::vector<vk::VkImageView>&	imageViews,
+					   deUint32								width,
+					   deUint32								height,
+					   std::vector<vk::VkFramebuffer>&		framebuffers)
+{
+	DE_ASSERT(framebuffers.size() == imageViews.size());
+
+	// Pass the view vector by const reference (was by value) to avoid a
+	// needless copy; matches the style of initImageViews above.
+	for (size_t ndx = 0; ndx < framebuffers.size(); ndx++)
+		framebuffers[ndx] = createFramebuffer(vkd, device, renderPass, imageViews[ndx], width, height).disown();
+}
+
+void deinitFramebuffers (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ std::vector<vk::VkFramebuffer>& framebuffers)
+{
+ for (size_t ndx = 0; ndx < framebuffers.size(); ndx++)
+ {
+ if (framebuffers[ndx] != (vk::VkFramebuffer)0)
+ vkd.destroyFramebuffer(device, framebuffers[ndx], DE_NULL);
+
+ framebuffers[ndx] = (vk::VkFramebuffer)0;
+ }
+
+ framebuffers.clear();
+}
+
+vk::Move<vk::VkImageView> createImageView (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ vk::VkImage image,
+ vk::VkFormat format)
+{
+ const vk::VkImageViewCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ DE_NULL,
+
+ 0u,
+ image,
+ vk::VK_IMAGE_VIEW_TYPE_2D,
+ format,
+ vk::makeComponentMappingRGBA(),
+ {
+ vk::VK_IMAGE_ASPECT_COLOR_BIT,
+ 0u,
+ 1u,
+ 0u,
+ 1u
+ }
+ };
+
+ return vk::createImageView(vkd, device, &createInfo, DE_NULL);
+}
+
+void initImageViews (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ const std::vector<vk::VkImage>& images,
+ vk::VkFormat format,
+ std::vector<vk::VkImageView>& imageViews)
+{
+ DE_ASSERT(images.size() == imageViews.size());
+
+ for (size_t ndx = 0; ndx < imageViews.size(); ndx++)
+ imageViews[ndx] = createImageView(vkd, device, images[ndx], format).disown();
+}
+
+void deinitImageViews (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ std::vector<vk::VkImageView>& imageViews)
+{
+ for (size_t ndx = 0; ndx < imageViews.size(); ndx++)
+ {
+ if (imageViews[ndx] != (vk::VkImageView)0)
+ vkd.destroyImageView(device, imageViews[ndx], DE_NULL);
+
+ imageViews[ndx] = (vk::VkImageView)0;
+ }
+
+ imageViews.clear();
+}
+
+vk::Move<vk::VkRenderPass> createRenderPass (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ vk::VkFormat format)
+{
+ const vk::VkAttachmentDescription attachments[] =
+ {
+ {
+ 0u,
+ format,
+ vk::VK_SAMPLE_COUNT_1_BIT,
+
+ vk::VK_ATTACHMENT_LOAD_OP_LOAD,
+ vk::VK_ATTACHMENT_STORE_OP_STORE,
+
+ vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,
+
+ vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ vk::VK_IMAGE_LAYOUT_PRESENT_SRC_KHR
+ }
+ };
+ const vk::VkAttachmentReference colorAttachmentRefs[] =
+ {
+ {
+ 0u,
+ vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
+ }
+ };
+ const vk::VkSubpassDescription subpasses[] =
+ {
+ {
+ 0u,
+ vk::VK_PIPELINE_BIND_POINT_GRAPHICS,
+ 0u,
+ DE_NULL,
+
+ DE_LENGTH_OF_ARRAY(colorAttachmentRefs),
+ colorAttachmentRefs,
+ DE_NULL,
+
+ DE_NULL,
+ 0u,
+ DE_NULL
+ }
+ };
+
+ const vk::VkRenderPassCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ DE_NULL,
+ 0u,
+
+ DE_LENGTH_OF_ARRAY(attachments),
+ attachments,
+
+ DE_LENGTH_OF_ARRAY(subpasses),
+ subpasses,
+
+ 0u,
+ DE_NULL
+ };
+
+ return vk::createRenderPass(vkd, device, &createInfo);
+}
+
+vk::Move<vk::VkPipeline> createPipeline (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ vk::VkRenderPass renderPass,
+ vk::VkPipelineLayout layout,
+ vk::VkShaderModule vertexShaderModule,
+ vk::VkShaderModule fragmentShaderModule,
+ deUint32 width,
+ deUint32 height)
+{
+ const vk::VkSpecializationInfo shaderSpecialization =
+ {
+ 0u,
+ DE_NULL,
+ 0,
+ DE_NULL
+ };
+ const vk::VkPipelineShaderStageCreateInfo stages[] =
+ {
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ vk::VK_SHADER_STAGE_VERTEX_BIT,
+ vertexShaderModule,
+ "main",
+ &shaderSpecialization
+ },
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ vk::VK_SHADER_STAGE_FRAGMENT_BIT,
+ fragmentShaderModule,
+ "main",
+ &shaderSpecialization
+ }
+ };
+ const vk::VkPipelineVertexInputStateCreateInfo vertexInputState =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ 0u,
+ DE_NULL,
+ 0u,
+ DE_NULL
+ };
+ const vk::VkPipelineInputAssemblyStateCreateInfo inputAssemblyState =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
+ VK_FALSE
+ };
+ const vk::VkViewport viewports[] =
+ {
+ {
+ 0.0f, 0.0f,
+ (float)width, (float)height,
+ 0.0f, 1.0f
+ }
+ };
+ const vk::VkRect2D scissors[] =
+ {
+ {
+ { 0u, 0u },
+ { width, height }
+ }
+ };
+ const vk::VkPipelineViewportStateCreateInfo viewportState =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+
+ DE_LENGTH_OF_ARRAY(viewports),
+ viewports,
+ DE_LENGTH_OF_ARRAY(scissors),
+ scissors
+ };
+	const vk::VkPipelineRasterizationStateCreateInfo	rasterizationState	=
+	{
+		// Fixed sType: was VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, which is
+		// invalid for this structure.
+		vk::VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+		DE_NULL,
+		0u,
+		VK_TRUE,	// depthClampEnable; NOTE(review): device features are all zero - confirm depthClamp feature is not actually needed here
+		VK_FALSE,	// rasterizerDiscardEnable
+		vk::VK_POLYGON_MODE_FILL,
+		vk::VK_CULL_MODE_NONE,
+		vk::VK_FRONT_FACE_CLOCKWISE,
+		VK_FALSE,	// depthBiasEnable
+		0.0f,
+		0.0f,
+		0.0f,
+		1.0f		// lineWidth
+	};
+ const vk::VkSampleMask sampleMask = ~0u;
+ const vk::VkPipelineMultisampleStateCreateInfo multisampleState =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ vk::VK_SAMPLE_COUNT_1_BIT,
+ VK_FALSE,
+ 0.0f,
+ &sampleMask,
+ VK_FALSE,
+ VK_FALSE
+ };
+ const vk::VkPipelineDepthStencilStateCreateInfo depthStencilState =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ DE_FALSE,
+ DE_FALSE,
+ vk::VK_COMPARE_OP_ALWAYS,
+ DE_FALSE,
+ DE_FALSE,
+ {
+ vk::VK_STENCIL_OP_KEEP,
+ vk::VK_STENCIL_OP_KEEP,
+ vk::VK_STENCIL_OP_KEEP,
+ vk::VK_COMPARE_OP_ALWAYS,
+ 0u,
+ 0u,
+ 0u,
+ },
+ {
+ vk::VK_STENCIL_OP_KEEP,
+ vk::VK_STENCIL_OP_KEEP,
+ vk::VK_STENCIL_OP_KEEP,
+ vk::VK_COMPARE_OP_ALWAYS,
+ 0u,
+ 0u,
+ 0u,
+ },
+ 0.0f,
+ 1.0f
+ };
+ const vk::VkPipelineColorBlendAttachmentState attachmentBlendState =
+ {
+ VK_FALSE,
+ vk::VK_BLEND_FACTOR_ONE,
+ vk::VK_BLEND_FACTOR_ZERO,
+ vk::VK_BLEND_OP_ADD,
+ vk::VK_BLEND_FACTOR_ONE,
+ vk::VK_BLEND_FACTOR_ZERO,
+ vk::VK_BLEND_OP_ADD,
+ (vk::VK_COLOR_COMPONENT_R_BIT|
+ vk::VK_COLOR_COMPONENT_G_BIT|
+ vk::VK_COLOR_COMPONENT_B_BIT|
+ vk::VK_COLOR_COMPONENT_A_BIT),
+ };
+ const vk::VkPipelineColorBlendStateCreateInfo blendState =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ DE_FALSE,
+ vk::VK_LOGIC_OP_COPY,
+ 1u,
+ &attachmentBlendState,
+ { 0.0f, 0.0f, 0.0f, 0.0f }
+ };
+ const vk::VkPipelineDynamicStateCreateInfo dynamicState =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+
+ 0u,
+ DE_NULL
+ };
+ const vk::VkGraphicsPipelineCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+
+ DE_LENGTH_OF_ARRAY(stages),
+ stages,
+ &vertexInputState,
+ &inputAssemblyState,
+ DE_NULL,
+ &viewportState,
+ &rasterizationState,
+ &multisampleState,
+ &depthStencilState,
+ &blendState,
+ &dynamicState,
+ layout,
+ renderPass,
+ 0u,
+ DE_NULL,
+ 0u
+ };
+
+ return vk::createGraphicsPipeline(vkd, device, DE_NULL, &createInfo);
+}
+
+vk::Move<vk::VkPipelineLayout> createPipelineLayout (const vk::DeviceInterface& vkd,
+ vk::VkDevice device)
+{
+ const vk::VkPushConstantRange pushConstants[] =
+ {
+ {
+ vk::VK_SHADER_STAGE_FRAGMENT_BIT,
+ 0u,
+ 4u
+ }
+ };
+ const vk::VkPipelineLayoutCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ DE_NULL,
+ 0u,
+
+ 0u,
+ DE_NULL,
+
+ DE_LENGTH_OF_ARRAY(pushConstants),
+ pushConstants
+ };
+
+ return vk::createPipelineLayout(vkd, device, &createInfo);
+}
+
+struct TestConfig
+{
+ vk::wsi::Type wsiType;
+ bool useDisplayTiming;
+ vk::VkPresentModeKHR presentMode;
+};
+
+// Test instance for VK_GOOGLE_display_timing: renders a fixed number of frames
+// to a swapchain and, when m_useDisplayTiming is set, supplies desired present
+// times via VkPresentTimesInfoGOOGLE and validates the feedback returned by
+// vkGetPastPresentationTimingGOOGLE.
+// NOTE(review): member declaration order is load-bearing - the constructor's
+// init list creates instance -> driver -> device -> queue in this order.
+class DisplayTimingTestInstance : public TestInstance
+{
+public:
+ DisplayTimingTestInstance (Context& context, const TestConfig& testConfig);
+ ~DisplayTimingTestInstance (void);
+
+ // Renders one frame per call; returns incomplete() until m_frameCount frames are done.
+ tcu::TestStatus iterate (void);
+
+private:
+ // Test configuration.
+ const bool m_useDisplayTiming;
+ const deUint32 m_quadCount;
+ // Instance-level objects (created once for the whole test).
+ const vk::PlatformInterface& m_vkp;
+ const Extensions m_instanceExtensions;
+ const vk::Unique<vk::VkInstance> m_instance;
+ const vk::InstanceDriver m_vki;
+ const vk::VkPhysicalDevice m_physicalDevice;
+ const de::UniquePtr<vk::wsi::Display> m_nativeDisplay;
+ const de::UniquePtr<vk::wsi::Window> m_nativeWindow;
+ const vk::Unique<vk::VkSurfaceKHR> m_surface;
+
+ // Device-level objects.
+ const deUint32 m_queueFamilyIndex;
+ const Extensions m_deviceExtensions;
+ const vk::Unique<vk::VkDevice> m_device;
+ const vk::DeviceDriver m_vkd;
+ const vk::VkQueue m_queue;
+
+ // Objects that survive swapchain re-creation.
+ const vk::Unique<vk::VkCommandPool> m_commandPool;
+ const vk::Unique<vk::VkShaderModule> m_vertexShaderModule;
+ const vk::Unique<vk::VkShaderModule> m_fragmentShaderModule;
+ const vk::Unique<vk::VkPipelineLayout> m_pipelineLayout;
+
+ // Surface queries captured at construction time.
+ const vk::VkSurfaceCapabilitiesKHR m_surfaceProperties;
+ const vector<vk::VkSurfaceFormatKHR> m_surfaceFormats;
+ const vector<vk::VkPresentModeKHR> m_presentModes;
+
+ tcu::ResultCollector m_resultCollector;
+
+ // Per-swapchain state; torn down and rebuilt on VK_ERROR_OUT_OF_DATE_KHR.
+ vk::Move<vk::VkSwapchainKHR> m_swapchain;
+ std::vector<vk::VkImage> m_swapchainImages;
+
+ vk::Move<vk::VkRenderPass> m_renderPass;
+ vk::Move<vk::VkPipeline> m_pipeline;
+
+ std::vector<vk::VkImageView> m_swapchainImageViews;
+ std::vector<vk::VkFramebuffer> m_framebuffers;
+ std::vector<vk::VkCommandBuffer> m_commandBuffers;
+ std::vector<vk::VkSemaphore> m_acquireSemaphores;
+ std::vector<vk::VkSemaphore> m_renderSemaphores;
+ std::vector<vk::VkFence> m_fences;
+
+ // Spare semaphores rotated with the per-image ones each frame (see render()).
+ vk::VkSemaphore m_freeAcquireSemaphore;
+ vk::VkSemaphore m_freeRenderSemaphore;
+
+ vk::VkSwapchainCreateInfoKHR m_swapchainConfig;
+
+ const size_t m_frameCount;
+ size_t m_frameNdx;
+
+ // Tolerated out-of-date retries before the test is failed.
+ const size_t m_maxOutOfDateCount;
+ size_t m_outOfDateCount;
+
+ // presentID -> CPU timestamp of the vkQueuePresentKHR call (ns).
+ std::map<deUint32, deUint64> m_queuePresentTimes;
+
+ // VK_GOOGLE_display_timing bookkeeping.
+ vk::VkRefreshCycleDurationGOOGLE m_rcDuration;
+ deUint64 m_refreshDurationMultiplier;
+ deUint64 m_targetIPD;
+ deUint64 m_prevDesiredPresentTime;
+ deUint32 m_nextPresentID;
+ deUint32 m_ignoreThruPresentID;
+ // Set when the test deliberately asks for image 80 one second early (see render()).
+ bool m_ExpectImage80Late;
+
+ void initSwapchainResources (void);
+ void deinitSwapchainResources (void);
+ void render (void);
+};
+
+// Builds a VkSwapchainCreateInfoKHR for the given surface using the requested
+// present mode. Throws NotSupportedError if the mode is not offered by the
+// surface. Picks the first supported transform, composite alpha and format.
+vk::VkSwapchainCreateInfoKHR createSwapchainConfig (vk::VkSurfaceKHR surface,
+ deUint32 queueFamilyIndex,
+ const vk::VkSurfaceCapabilitiesKHR& properties,
+ const vector<vk::VkSurfaceFormatKHR>& formats,
+ const vector<vk::VkPresentModeKHR>& presentModes,
+ vk::VkPresentModeKHR presentMode)
+{
+ const deUint32 imageLayers = 1u;
+ const vk::VkImageUsageFlags imageUsage = properties.supportedUsageFlags;
+ const vk::VkBool32 clipped = VK_FALSE;
+
+ // 0xFFFFFFFF means the surface size is determined by the swapchain; in that
+ // case pick the midpoint of the supported range, capped at 1024.
+ // NOTE(review): the de::min(1024u, ...) cap assumes minImageExtent <= 1024;
+ // TODO confirm for platforms with larger minimum extents.
+ const deUint32 imageWidth = (properties.currentExtent.width != 0xFFFFFFFFu)
+ ? properties.currentExtent.width
+ : de::min(1024u, properties.minImageExtent.width + ((properties.maxImageExtent.width - properties.minImageExtent.width) / 2));
+ const deUint32 imageHeight = (properties.currentExtent.height != 0xFFFFFFFFu)
+ ? properties.currentExtent.height
+ : de::min(1024u, properties.minImageExtent.height + ((properties.maxImageExtent.height - properties.minImageExtent.height) / 2));
+ const vk::VkExtent2D imageSize = { imageWidth, imageHeight };
+
+ // Reject unsupported present modes up front.
+ {
+ size_t presentModeNdx;
+
+ for (presentModeNdx = 0; presentModeNdx < presentModes.size(); presentModeNdx++)
+ {
+ if (presentModes[presentModeNdx] == presentMode)
+ break;
+ }
+
+ if (presentModeNdx == presentModes.size())
+ TCU_THROW(NotSupportedError, "Present mode not supported");
+ }
+
+ // Pick the first supported transform, alpha, and format:
+ vk::VkSurfaceTransformFlagsKHR transform;
+ for (transform = 1u; transform <= properties.supportedTransforms; transform = transform << 1u)
+ {
+ if ((properties.supportedTransforms & transform) != 0)
+ break;
+ }
+
+ vk::VkCompositeAlphaFlagsKHR alpha;
+ for (alpha = 1u; alpha <= properties.supportedCompositeAlpha; alpha = alpha << 1u)
+ {
+ if ((alpha & properties.supportedCompositeAlpha) != 0)
+ break;
+ }
+
+ {
+ const vk::VkSurfaceTransformFlagBitsKHR preTransform = (vk::VkSurfaceTransformFlagBitsKHR)transform;
+ const vk::VkCompositeAlphaFlagBitsKHR compositeAlpha = (vk::VkCompositeAlphaFlagBitsKHR)alpha;
+ const vk::VkFormat imageFormat = formats[0].format;
+ const vk::VkColorSpaceKHR imageColorSpace = formats[0].colorSpace;
+ // Field order follows the VkSwapchainCreateInfoKHR member order.
+ const vk::VkSwapchainCreateInfoKHR createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
+ DE_NULL,
+ 0u,
+ surface,
+ properties.minImageCount,
+ imageFormat,
+ imageColorSpace,
+ imageSize,
+ imageLayers,
+ imageUsage,
+ vk::VK_SHARING_MODE_EXCLUSIVE,
+ 1u,
+ &queueFamilyIndex,
+ preTransform,
+ compositeAlpha,
+ presentMode,
+ clipped,
+ (vk::VkSwapchainKHR)0
+ };
+
+ return createInfo;
+ }
+}
+
+// Creates all long-lived objects (instance, surface, device, shader modules,
+// pipeline layout) and captures surface capabilities. Swapchain-specific
+// resources are created lazily in initSwapchainResources().
+// NOTE: init-list order must follow the member declaration order in the class.
+DisplayTimingTestInstance::DisplayTimingTestInstance (Context& context, const TestConfig& testConfig)
+ : TestInstance (context)
+ , m_useDisplayTiming (testConfig.useDisplayTiming)
+ , m_quadCount (16u)
+ , m_vkp (context.getPlatformInterface())
+ , m_instanceExtensions (vk::enumerateInstanceExtensionProperties(m_vkp, DE_NULL))
+ , m_instance (createInstanceWithWsi(m_vkp, m_instanceExtensions, testConfig.wsiType))
+ , m_vki (m_vkp, *m_instance)
+ , m_physicalDevice (vk::chooseDevice(m_vki, *m_instance, context.getTestContext().getCommandLine()))
+ , m_nativeDisplay (createDisplay(context.getTestContext().getPlatform().getVulkanPlatform(), m_instanceExtensions, testConfig.wsiType))
+ , m_nativeWindow (createWindow(*m_nativeDisplay, tcu::nothing<UVec2>()))
+ , m_surface (vk::wsi::createSurface(m_vki, *m_instance, testConfig.wsiType, *m_nativeDisplay, *m_nativeWindow))
+
+ , m_queueFamilyIndex (chooseQueueFamilyIndex(m_vki, m_physicalDevice, *m_surface))
+ , m_deviceExtensions (vk::enumerateDeviceExtensionProperties(m_vki, m_physicalDevice, DE_NULL))
+ , m_device (createDeviceWithWsi(m_vki, m_physicalDevice, m_deviceExtensions, m_queueFamilyIndex, testConfig.useDisplayTiming))
+ , m_vkd (m_vki, *m_device)
+ , m_queue (getDeviceQueue(m_vkd, *m_device, m_queueFamilyIndex, 0u))
+
+ , m_commandPool (createCommandPool(m_vkd, *m_device, m_queueFamilyIndex))
+ , m_vertexShaderModule (vk::createShaderModule(m_vkd, *m_device, context.getBinaryCollection().get("quad-vert"), 0u))
+ , m_fragmentShaderModule (vk::createShaderModule(m_vkd, *m_device, context.getBinaryCollection().get("quad-frag"), 0u))
+ , m_pipelineLayout (createPipelineLayout(m_vkd, *m_device))
+
+ , m_surfaceProperties (vk::wsi::getPhysicalDeviceSurfaceCapabilities(m_vki, m_physicalDevice, *m_surface))
+ , m_surfaceFormats (vk::wsi::getPhysicalDeviceSurfaceFormats(m_vki, m_physicalDevice, *m_surface))
+ , m_presentModes (vk::wsi::getPhysicalDeviceSurfacePresentModes(m_vki, m_physicalDevice, *m_surface))
+
+ , m_freeAcquireSemaphore ((vk::VkSemaphore)0)
+ , m_freeRenderSemaphore ((vk::VkSemaphore)0)
+
+ , m_swapchainConfig (createSwapchainConfig(*m_surface, m_queueFamilyIndex, m_surfaceProperties, m_surfaceFormats, m_presentModes, testConfig.presentMode))
+
+ // 5 seconds worth of frames at a nominal 60Hz.
+ , m_frameCount (60u * 5u)
+ , m_frameNdx (0u)
+
+ , m_maxOutOfDateCount (20u)
+ , m_outOfDateCount (0u)
+ , m_ExpectImage80Late (false)
+{
+ // Log the surface capabilities for easier debugging of failures.
+ {
+ const tcu::ScopedLogSection surfaceInfo (m_context.getTestContext().getLog(), "SurfaceCapabilities", "SurfaceCapabilities");
+ m_context.getTestContext().getLog() << TestLog::Message << m_surfaceProperties << TestLog::EndMessage;
+ }
+}
+
+// Destroys any remaining swapchain resources; safe to call even if
+// deinitSwapchainResources() already ran (all handles are nulled/cleared).
+DisplayTimingTestInstance::~DisplayTimingTestInstance (void)
+{
+ deinitSwapchainResources();
+}
+
+// Creates the swapchain and all per-swapchain resources (render pass,
+// pipeline, image views, framebuffers, semaphores, fences) and, when display
+// timing is in use, resets the timing bookkeeping and queries the display's
+// refresh cycle duration.
+void DisplayTimingTestInstance::initSwapchainResources (void)
+{
+ // Number of frames that may be in flight simultaneously.
+ const size_t fenceCount = 6;
+ const deUint32 imageWidth = m_swapchainConfig.imageExtent.width;
+ const deUint32 imageHeight = m_swapchainConfig.imageExtent.height;
+ const vk::VkFormat imageFormat = m_swapchainConfig.imageFormat;
+
+ m_swapchain = vk::createSwapchainKHR(m_vkd, *m_device, &m_swapchainConfig);
+ m_swapchainImages = vk::wsi::getSwapchainImages(m_vkd, *m_device, *m_swapchain);
+
+ m_renderPass = createRenderPass(m_vkd, *m_device, imageFormat);
+ m_pipeline = createPipeline(m_vkd, *m_device, *m_renderPass, *m_pipelineLayout, *m_vertexShaderModule, *m_fragmentShaderModule, imageWidth, imageHeight);
+
+ // One view/framebuffer/semaphore pair per swapchain image; handles start null
+ // so a partially-failed init can still be torn down safely.
+ m_swapchainImageViews = std::vector<vk::VkImageView>(m_swapchainImages.size(), (vk::VkImageView)0);
+ m_framebuffers = std::vector<vk::VkFramebuffer>(m_swapchainImages.size(), (vk::VkFramebuffer)0);
+ m_acquireSemaphores = std::vector<vk::VkSemaphore>(m_swapchainImages.size(), (vk::VkSemaphore)0);
+ m_renderSemaphores = std::vector<vk::VkSemaphore>(m_swapchainImages.size(), (vk::VkSemaphore)0);
+
+ m_fences = std::vector<vk::VkFence>(fenceCount, (vk::VkFence)0);
+ m_commandBuffers = std::vector<vk::VkCommandBuffer>(m_fences.size(), (vk::VkCommandBuffer)0);
+
+ m_freeAcquireSemaphore = (vk::VkSemaphore)0;
+ m_freeRenderSemaphore = (vk::VkSemaphore)0;
+
+ // Ownership of the disowned semaphores is released in deinitSwapchainResources().
+ m_freeAcquireSemaphore = createSemaphore(m_vkd, *m_device).disown();
+ m_freeRenderSemaphore = createSemaphore(m_vkd, *m_device).disown();
+
+ initImageViews(m_vkd, *m_device, m_swapchainImages, imageFormat, m_swapchainImageViews);
+ initFramebuffers(m_vkd, *m_device, *m_renderPass, m_swapchainImageViews, imageWidth, imageHeight, m_framebuffers);
+ initSemaphores(m_vkd, *m_device, m_acquireSemaphores);
+ initSemaphores(m_vkd, *m_device, m_renderSemaphores);
+
+ initFences(m_vkd, *m_device, m_fences);
+
+ if (m_useDisplayTiming)
+ {
+ // This portion should do interesting bits
+ m_queuePresentTimes = std::map<deUint32, deUint64>();
+
+ // Ask the display for its refresh cycle; used as the base target IPD.
+ m_vkd.getRefreshCycleDurationGOOGLE(*m_device, *m_swapchain, &m_rcDuration);
+
+ m_refreshDurationMultiplier = 1u;
+ m_targetIPD = m_rcDuration.refreshDuration;
+ m_prevDesiredPresentTime = 0u;
+ m_nextPresentID = 0u;
+ m_ignoreThruPresentID = 0u;
+ }
+}
+
+// Tears down everything created in initSwapchainResources(). Waits for the
+// queue to idle first so nothing is destroyed while still in use. Destruction
+// order matters: dependent objects go before the swapchain itself.
+void DisplayTimingTestInstance::deinitSwapchainResources (void)
+{
+ VK_CHECK(m_vkd.queueWaitIdle(m_queue));
+
+ if (m_freeAcquireSemaphore != (vk::VkSemaphore)0)
+ {
+ m_vkd.destroySemaphore(*m_device, m_freeAcquireSemaphore, DE_NULL);
+ m_freeAcquireSemaphore = (vk::VkSemaphore)0;
+ }
+
+ if (m_freeRenderSemaphore != (vk::VkSemaphore)0)
+ {
+ m_vkd.destroySemaphore(*m_device, m_freeRenderSemaphore, DE_NULL);
+ m_freeRenderSemaphore = (vk::VkSemaphore)0;
+ }
+
+ deinitSemaphores(m_vkd, *m_device, m_acquireSemaphores);
+ deinitSemaphores(m_vkd, *m_device, m_renderSemaphores);
+ deinitFences(m_vkd, *m_device, m_fences);
+ deinitCommandBuffers(m_vkd, *m_device, *m_commandPool, m_commandBuffers);
+ deinitFramebuffers(m_vkd, *m_device, m_framebuffers);
+ deinitImageViews(m_vkd, *m_device, m_swapchainImageViews);
+
+ // Swapchain images are owned by the swapchain; just forget the handles.
+ m_swapchainImages.clear();
+
+ // Assigning empty Move<> objects destroys the held handles.
+ m_swapchain = vk::Move<vk::VkSwapchainKHR>();
+ m_renderPass = vk::Move<vk::VkRenderPass>();
+ m_pipeline = vk::Move<vk::VkPipeline>();
+
+}
+
+// Fetches all available past-presentation timing records for the swapchain
+// using the standard Vulkan two-call pattern (count query, then data query).
+vector<vk::VkPastPresentationTimingGOOGLE> getPastPresentationTiming (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ vk::VkSwapchainKHR swapchain)
+{
+ deUint32 count = 0;
+
+ // First call: how many records are pending?
+ vkd.getPastPresentationTimingGOOGLE(device, swapchain, &count, DE_NULL);
+
+ vector<vk::VkPastPresentationTimingGOOGLE> timings (count);
+
+ // Second call: fetch the actual records (skip when there are none).
+ if (!timings.empty())
+ vkd.getPastPresentationTimingGOOGLE(device, swapchain, &count, &timings[0]);
+
+ return timings;
+}
+
+// Renders and presents a single frame. When display timing is enabled this
+// also analyzes past-presentation feedback (adjusting the target inter-present
+// duration when frames are late/early) and attaches a VkPresentTimeGOOGLE to
+// the present.
+// FIX: restored "&currentAcquireSemaphore"/"&currentRenderSemaphore" where the
+// patch text had been corrupted to the mojibake "¤t..." (HTML-entity damage of
+// "&current..."), which is not valid C++.
+void DisplayTimingTestInstance::render (void)
+{
+ const deUint64 foreverNs = ~0x0ull;
+ const vk::VkFence fence = m_fences[m_frameNdx % m_fences.size()];
+ const deUint32 width = m_swapchainConfig.imageExtent.width;
+ const deUint32 height = m_swapchainConfig.imageExtent.height;
+ tcu::TestLog& log = m_context.getTestContext().getLog();
+
+ // Throttle execution: once every fence slot is in flight, wait for the
+ // oldest frame and recycle its command buffer.
+ if (m_frameNdx >= m_fences.size())
+ {
+ VK_CHECK(m_vkd.waitForFences(*m_device, 1u, &fence, VK_TRUE, foreverNs));
+ VK_CHECK(m_vkd.resetFences(*m_device, 1u, &fence));
+
+ m_vkd.freeCommandBuffers(*m_device, *m_commandPool, 1u, &m_commandBuffers[m_frameNdx % m_commandBuffers.size()]);
+ m_commandBuffers[m_frameNdx % m_commandBuffers.size()] = (vk::VkCommandBuffer)0;
+ }
+
+ vk::VkSemaphore currentAcquireSemaphore = m_freeAcquireSemaphore;
+ vk::VkSemaphore currentRenderSemaphore = m_freeRenderSemaphore;
+ deUint32 imageIndex;
+
+ // Acquire next image
+ VK_CHECK(m_vkd.acquireNextImageKHR(*m_device, *m_swapchain, foreverNs, currentAcquireSemaphore, fence, &imageIndex));
+
+ // Create command buffer
+ m_commandBuffers[m_frameNdx % m_commandBuffers.size()] = createCommandBuffer(m_vkd, *m_device, *m_commandPool, *m_pipelineLayout, *m_renderPass, m_framebuffers[imageIndex], *m_pipeline, m_frameNdx, m_quadCount, width, height).disown();
+
+ // Obtain timing data from previous frames
+ if (m_useDisplayTiming)
+ {
+ const vector<vk::VkPastPresentationTimingGOOGLE> pastPresentationTimings (getPastPresentationTiming(m_vkd, *m_device, *m_swapchain));
+ bool isEarly = false;
+ bool isLate = false;
+
+ for (size_t pastPresentationInfoNdx = 0 ; pastPresentationInfoNdx < pastPresentationTimings.size(); pastPresentationInfoNdx++)
+ {
+ // An image can never be displayed before the present was queued.
+ if (m_queuePresentTimes[pastPresentationTimings[pastPresentationInfoNdx].presentID] > pastPresentationTimings[pastPresentationInfoNdx].actualPresentTime)
+ {
+ m_resultCollector.fail("Image with PresentID " + de::toString(pastPresentationTimings[pastPresentationInfoNdx].presentID) + "was displayed before vkQueuePresentKHR was called.");
+ }
+ if (!m_ignoreThruPresentID)
+ {
+ // This is the first time that we've received an
+ // actualPresentTime for this swapchain. In order to not
+ // perceive these early frames as "late", we need to sync-up
+ // our future desiredPresentTime's with the
+ // actualPresentTime(s) that we're receiving now.
+ const deInt64 multiple = m_nextPresentID - pastPresentationTimings.back().presentID;
+
+ m_prevDesiredPresentTime = pastPresentationTimings.back().actualPresentTime + (multiple * m_targetIPD);
+ m_ignoreThruPresentID = pastPresentationTimings[pastPresentationInfoNdx].presentID + 1;
+ }
+ else if (pastPresentationTimings[pastPresentationInfoNdx].presentID > m_ignoreThruPresentID)
+ {
+ // Late: actual present happened more than one refresh (plus 1ms
+ // slack) after the requested time.
+ if (pastPresentationTimings[pastPresentationInfoNdx].actualPresentTime > (pastPresentationTimings[pastPresentationInfoNdx].desiredPresentTime + m_rcDuration.refreshDuration + MILLISECOND))
+ {
+ const deUint64 actual = pastPresentationTimings[pastPresentationInfoNdx].actualPresentTime;
+ const deUint64 desired = pastPresentationTimings[pastPresentationInfoNdx].desiredPresentTime;
+ const deUint64 rdur = m_rcDuration.refreshDuration;
+ const deUint64 diff1 = actual - (desired + rdur);
+ const deUint64 diff2 = actual - desired;
+
+ log << TestLog::Message << "Image PresentID " << pastPresentationTimings[pastPresentationInfoNdx].presentID << " was " << diff1 << " nsec late." << TestLog::EndMessage;
+ if (m_ExpectImage80Late && (pastPresentationTimings[pastPresentationInfoNdx].presentID == 80))
+ {
+ // Image 80 was deliberately requested 1 second early; being
+ // late by roughly that amount is the expected outcome.
+ if (diff1 > (SECOND / 2))
+ log << TestLog::Message << "\tNote: Image PresentID 80 was expected to be late by approximately 1 second." << TestLog::EndMessage;
+ else
+ m_resultCollector.fail("Image PresentID 80 was not late by approximately 1 second, as expected.");
+ }
+ log << TestLog::Message << "\t\t actualPresentTime = " << actual << " nsec" << TestLog::EndMessage;
+ log << TestLog::Message << "\t\t - desiredPresentTime= " << desired << " nsec" << TestLog::EndMessage;
+ log << TestLog::Message << "\t\t =========================================" << TestLog::EndMessage;
+ log << TestLog::Message << "\t\t diff = " << diff2 << " nsec" << TestLog::EndMessage;
+ log << TestLog::Message << "\t\t - refreshDuration = " << rdur << " nsec" << TestLog::EndMessage;
+ log << TestLog::Message << "\t\t =========================================" << TestLog::EndMessage;
+ log << TestLog::Message << "\t\t diff = " << diff1 << " nsec" << TestLog::EndMessage;
+
+ isLate = true;
+ }
+ // Early: the image could have been shown sooner by a comfortable margin.
+ else if ((pastPresentationTimings[pastPresentationInfoNdx].actualPresentTime > pastPresentationTimings[pastPresentationInfoNdx].earliestPresentTime) &&
+ (pastPresentationTimings[pastPresentationInfoNdx].presentMargin > (2 * MILLISECOND)))
+ {
+ const deUint64 actual = pastPresentationTimings[pastPresentationInfoNdx].actualPresentTime;
+ const deUint64 earliest = pastPresentationTimings[pastPresentationInfoNdx].earliestPresentTime;
+ const deUint64 diff = actual - earliest;
+
+ log << TestLog::Message << "Image PresentID " << pastPresentationTimings[pastPresentationInfoNdx].presentID << " can be presented " << diff << " nsec earlier." << TestLog::EndMessage;
+ log << TestLog::Message << "\t\t actualPresentTime = " << actual << " nsec" << TestLog::EndMessage;
+ log << TestLog::Message << "\t\t -earliestPresentTime= " << earliest << " nsec" << TestLog::EndMessage;
+ log << TestLog::Message << "\t\t =========================================" << TestLog::EndMessage;
+ log << TestLog::Message << "\t\t diff = " << diff << " nsec" << TestLog::EndMessage;
+
+ isEarly = true;
+ }
+ }
+ }
+ // Preference is given to late presents over early presents:
+ if (isLate)
+ {
+ // Demonstrate how to slow down the frame rate if a frame is late,
+ // but don't go too slow (for test time reasons):
+ if (++m_refreshDurationMultiplier > 2)
+ m_refreshDurationMultiplier = 2;
+ else
+ log << TestLog::Message << "Increasing multiplier." << TestLog::EndMessage;
+ }
+ else if (isEarly)
+ {
+ // Demonstrate how to speed up the frame rate if a frame is early,
+ // but don't let the multiplier hit zero:
+ if (--m_refreshDurationMultiplier == 0)
+ m_refreshDurationMultiplier = 1;
+ else
+ log << TestLog::Message << "Decreasing multiplier." << TestLog::EndMessage;
+ }
+ m_targetIPD = m_rcDuration.refreshDuration * m_refreshDurationMultiplier;
+ }
+
+ // Submit command buffer
+ {
+ const vk::VkPipelineStageFlags dstStageMask = vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ const vk::VkSubmitInfo submitInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ DE_NULL,
+ 1u,
+ &currentAcquireSemaphore,
+ &dstStageMask,
+ 1u,
+ &m_commandBuffers[m_frameNdx % m_commandBuffers.size()],
+ 1u,
+ &currentRenderSemaphore
+ };
+
+ VK_CHECK(m_vkd.queueSubmit(m_queue, 1u, &submitInfo, (vk::VkFence)0));
+ }
+
+ // Present frame
+ if (m_useDisplayTiming)
+ {
+ // This portion should do interesting bits
+
+ // Initially, mirror reference to move things along
+ vk::VkResult result;
+ vk::VkPresentTimeGOOGLE presentTime =
+ {
+ ++m_nextPresentID,
+ m_prevDesiredPresentTime
+ };
+ // Record the current time, to record as the time of the vkQueuePresentKHR() call:
+ const deUint64 curtimeNano = deGetMicroseconds() * 1000;
+ m_queuePresentTimes[m_nextPresentID] = curtimeNano;
+
+ deUint64 desiredPresentTime = 0u;
+ if (m_prevDesiredPresentTime == 0)
+ {
+ // This must be the first present for this swapchain. Find out the
+ // current time, as the basis for desiredPresentTime:
+ if (curtimeNano != 0)
+ {
+ presentTime.desiredPresentTime = curtimeNano;
+ presentTime.desiredPresentTime += (m_targetIPD / 2);
+ }
+ else
+ {
+ // Since we didn't find out the current time, don't give a
+ // desiredPresentTime:
+ presentTime.desiredPresentTime = 0;
+ }
+ }
+ else
+ {
+ desiredPresentTime = m_prevDesiredPresentTime + m_targetIPD;
+ // NOTE(review): desiredPresentTime is only stored into
+ // m_prevDesiredPresentTime below; presentTime.desiredPresentTime
+ // keeps the value captured at initialization, so the requested time
+ // lags one frame behind - confirm this is intentional.
+ if (presentTime.presentID == 80)
+ {
+ // Test if desiredPresentTime is 1 second earlier (i.e. before the previous image could have been presented)
+ presentTime.desiredPresentTime -= SECOND;
+ m_ExpectImage80Late = true;
+ }
+ }
+ m_prevDesiredPresentTime = desiredPresentTime;
+
+ const vk::VkPresentTimesInfoGOOGLE presentTimesInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE,
+ DE_NULL,
+ 1u,
+ &presentTime
+ };
+ const vk::VkPresentInfoKHR presentInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
+ &presentTimesInfo,
+ 1u,
+ &currentRenderSemaphore,
+ 1u,
+ &*m_swapchain,
+ &imageIndex,
+ &result
+ };
+
+ VK_CHECK(m_vkd.queuePresentKHR(m_queue, &presentInfo));
+ VK_CHECK(result);
+ }
+ else
+ {
+ // Reference path: plain present without timing information.
+ vk::VkResult result;
+ const vk::VkPresentInfoKHR presentInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
+ DE_NULL,
+ 1u,
+ &currentRenderSemaphore,
+ 1u,
+ &*m_swapchain,
+ &imageIndex,
+ &result
+ };
+
+ VK_CHECK(m_vkd.queuePresentKHR(m_queue, &presentInfo));
+ VK_CHECK(result);
+ }
+
+ // Rotate the semaphores used for this image into the "free" slots so the
+ // next frame never reuses a semaphore that may still be pending.
+ {
+ m_freeAcquireSemaphore = m_acquireSemaphores[imageIndex];
+ m_acquireSemaphores[imageIndex] = currentAcquireSemaphore;
+
+ m_freeRenderSemaphore = m_renderSemaphores[imageIndex];
+ m_renderSemaphores[imageIndex] = currentRenderSemaphore;
+ }
+}
+
+// One test iteration: lazily (re)creates swapchain resources on frame 0,
+// renders a frame, and handles VK_ERROR_OUT_OF_DATE_KHR by rebuilding the
+// swapchain (up to m_maxOutOfDateCount times). Completes after m_frameCount
+// consecutive successful frames.
+tcu::TestStatus DisplayTimingTestInstance::iterate (void)
+{
+ // Initialize swapchain specific resources
+ // Render test
+ try
+ {
+ if (m_frameNdx == 0)
+ {
+ // Only log the config on the first attempt, not after out-of-date restarts.
+ if (m_outOfDateCount == 0)
+ m_context.getTestContext().getLog() << tcu::TestLog::Message << "Swapchain: " << m_swapchainConfig << tcu::TestLog::EndMessage;
+
+ initSwapchainResources();
+ }
+
+ render();
+ }
+ catch (const vk::Error& error)
+ {
+ if (error.getError() == vk::VK_ERROR_OUT_OF_DATE_KHR)
+ {
+ if (m_outOfDateCount < m_maxOutOfDateCount)
+ {
+ // Restart the frame loop with a fresh swapchain.
+ m_context.getTestContext().getLog() << TestLog::Message << "Frame " << m_frameNdx << ": Swapchain out of date. Recreating resources." << TestLog::EndMessage;
+ deinitSwapchainResources();
+ m_frameNdx = 0;
+ m_outOfDateCount++;
+
+ return tcu::TestStatus::incomplete();
+ }
+ else
+ {
+ m_context.getTestContext().getLog() << TestLog::Message << "Frame " << m_frameNdx << ": Swapchain out of date." << TestLog::EndMessage;
+ m_resultCollector.fail("Received too many VK_ERROR_OUT_OF_DATE_KHR errors. Received " + de::toString(m_outOfDateCount) + ", max " + de::toString(m_maxOutOfDateCount));
+ }
+ }
+ else
+ {
+ // Any other Vulkan error fails the test immediately.
+ m_resultCollector.fail(error.what());
+ }
+
+ deinitSwapchainResources();
+
+ return tcu::TestStatus(m_resultCollector.getResult(), m_resultCollector.getMessage());
+ }
+
+ m_frameNdx++;
+
+ if (m_frameNdx >= m_frameCount)
+ {
+ deinitSwapchainResources();
+
+ return tcu::TestStatus(m_resultCollector.getResult(), m_resultCollector.getMessage());
+ }
+ else
+ return tcu::TestStatus::incomplete();
+}
+
+// GLSL sources for the test: a full-screen-quad vertex shader and a fragment
+// shader producing a frame-index-dependent bit pattern so consecutive frames
+// are visually distinct.
+struct Programs
+{
+ static void init (vk::SourceCollections& dst, TestConfig)
+ {
+ // Vertex positions are derived from gl_VertexIndex (two triangles covering the viewport).
+ // NOTE(review): the bare "highp float;" line looks like it was meant to be
+ // "precision highp float;" - confirm against the original shader source.
+ dst.glslSources.add("quad-vert") << glu::VertexSource(
+ "#version 450\n"
+ "out gl_PerVertex {\n"
+ "\tvec4 gl_Position;\n"
+ "};\n"
+ "highp float;\n"
+ "void main (void) {\n"
+ "\tgl_Position = vec4(((gl_VertexIndex + 2) / 3) % 2 == 0 ? -1.0 : 1.0,\n"
+ "\t ((gl_VertexIndex + 1) / 3) % 2 == 0 ? -1.0 : 1.0, 0.0, 1.0);\n"
+ "}\n");
+ // Fragment color is a function of the frame index (push constant) and
+ // the fragment coordinate, extracted bit by bit into RGB.
+ dst.glslSources.add("quad-frag") << glu::FragmentSource(
+ "#version 310 es\n"
+ "layout(location = 0) out highp vec4 o_color;\n"
+ "layout(push_constant) uniform PushConstant {\n"
+ "\thighp uint frameNdx;\n"
+ "} pushConstants;\n"
+ "void main (void)\n"
+ "{\n"
+ "\thighp uint frameNdx = pushConstants.frameNdx;\n"
+ "\thighp uint x = frameNdx + uint(gl_FragCoord.x);\n"
+ "\thighp uint y = frameNdx + uint(gl_FragCoord.y);\n"
+ "\thighp uint r = 128u * bitfieldExtract(x, 0, 1)\n"
+ "\t + 64u * bitfieldExtract(y, 1, 1)\n"
+ "\t + 32u * bitfieldExtract(x, 3, 1);\n"
+ "\thighp uint g = 128u * bitfieldExtract(y, 0, 1)\n"
+ "\t + 64u * bitfieldExtract(x, 2, 1)\n"
+ "\t + 32u * bitfieldExtract(y, 3, 1);\n"
+ "\thighp uint b = 128u * bitfieldExtract(x, 1, 1)\n"
+ "\t + 64u * bitfieldExtract(y, 2, 1)\n"
+ "\t + 32u * bitfieldExtract(x, 4, 1);\n"
+ "\to_color = vec4(float(r) / 255.0, float(g) / 255.0, float(b) / 255.0, 1.0);\n"
+ "}\n");
+ }
+};
+
+} // anonymous
+
+// Registers the display-timing test tree for one WSI type: one group per
+// present mode, each containing a "reference" case (no display timing) and a
+// "display_timing" case (extension in use).
+void createDisplayTimingTests (tcu::TestCaseGroup* testGroup, vk::wsi::Type wsiType)
+{
+ const struct
+ {
+ vk::VkPresentModeKHR mode;
+ const char* name;
+ } presentModes[] =
+ {
+ { vk::VK_PRESENT_MODE_FIFO_KHR, "fifo" },
+ { vk::VK_PRESENT_MODE_FIFO_RELAXED_KHR, "fifo_relaxed" },
+ { vk::VK_PRESENT_MODE_IMMEDIATE_KHR, "immediate" },
+ { vk::VK_PRESENT_MODE_MAILBOX_KHR, "mailbox" },
+ };
+
+ for (size_t presentModeNdx = 0; presentModeNdx < DE_LENGTH_OF_ARRAY(presentModes); presentModeNdx++)
+ {
+ de::MovePtr<tcu::TestCaseGroup> presentModeGroup (new tcu::TestCaseGroup(testGroup->getTestContext(), presentModes[presentModeNdx].name, presentModes[presentModeNdx].name));
+
+ // ref == 0 builds the reference case, ref == 1 the display-timing case.
+ for (size_t ref = 0; ref < 2; ref++)
+ {
+ const bool isReference = (ref == 0);
+ const char* const name = isReference ? "reference" : "display_timing";
+ TestConfig config;
+
+ config.wsiType = wsiType;
+ config.useDisplayTiming = !isReference;
+ config.presentMode = presentModes[presentModeNdx].mode;
+
+ presentModeGroup->addChild(new vkt::InstanceFactory1<DisplayTimingTestInstance, TestConfig, Programs>(testGroup->getTestContext(), tcu::NODETYPE_SELF_VALIDATE, name, name, Programs(), config));
+ }
+
+ testGroup->addChild(presentModeGroup.release());
+ }
+}
+
+} // wsi
+} // vkt
--- /dev/null
+#ifndef _VKTWSIDISPLAYTIMINGTESTS_HPP
+#define _VKTWSIDISPLAYTIMINGTESTS_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2017 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Tests for VK_GOOGLE_display_timing
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuTestCase.hpp"
+#include "vkDefs.hpp"
+
+namespace vkt
+{
+namespace wsi
+{
+
+void createDisplayTimingTests (tcu::TestCaseGroup* testGroup, vk::wsi::Type wsiType);
+
+} // wsi
+} // vkt
+
+#endif // _VKTWSIDISPLAYTIMINGTESTS_HPP
--- /dev/null
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Tests for incremental present extension
+ *//*--------------------------------------------------------------------*/
+
+#include "vktWsiIncrementalPresentTests.hpp"
+
+#include "vktTestCaseUtil.hpp"
+#include "vktTestGroupUtil.hpp"
+#include "vkRefUtil.hpp"
+#include "vkWsiPlatform.hpp"
+#include "vkWsiUtil.hpp"
+#include "vkQueryUtil.hpp"
+#include "vkDeviceUtil.hpp"
+#include "vkPlatform.hpp"
+#include "vkTypeUtil.hpp"
+#include "vkPrograms.hpp"
+
+#include "vkWsiUtil.hpp"
+
+#include "tcuPlatform.hpp"
+#include "tcuResultCollector.hpp"
+
+#include <vector>
+#include <string>
+
+using std::vector;
+using std::string;
+
+using tcu::Maybe;
+using tcu::UVec2;
+using tcu::TestLog;
+
+namespace vkt
+{
+namespace wsi
+{
+namespace
+{
+// Swapchain scaling modes exercised by the incremental-present tests
+// (presumably mapped to the scale_none/scale_up/scale_down test groups -
+// usage is outside this chunk; confirm against the test registration code).
+enum Scaling
+{
+ SCALING_NONE,
+ SCALING_UP,
+ SCALING_DOWN
+};
+
+typedef vector<vk::VkExtensionProperties> Extensions;
+
+// Throws NotSupportedError naming the first required extension that is not
+// present in the supported-extension list.
+void checkAllSupported (const Extensions& supportedExtensions, const vector<string>& requiredExtensions)
+{
+ for (size_t extNdx = 0; extNdx < requiredExtensions.size(); ++extNdx)
+ {
+ const string& extName = requiredExtensions[extNdx];
+
+ if (!isExtensionSupported(supportedExtensions, vk::RequiredExtension(extName)))
+ TCU_THROW(NotSupportedError, (extName + " is not supported").c_str());
+ }
+}
+
+// Creates a Vulkan instance with VK_KHR_surface plus the platform-specific
+// surface extension for wsiType; throws NotSupportedError if either is missing.
+vk::Move<vk::VkInstance> createInstanceWithWsi (const vk::PlatformInterface& vkp,
+ const Extensions& supportedExtensions,
+ vk::wsi::Type wsiType)
+{
+ vector<string> extensions;
+
+ extensions.push_back("VK_KHR_surface");
+ extensions.push_back(getExtensionName(wsiType));
+
+ checkAllSupported(supportedExtensions, extensions);
+
+ // No layers are enabled (empty first list).
+ return vk::createDefaultInstance(vkp, vector<string>(), extensions);
+}
+
+// Returns a VkPhysicalDeviceFeatures struct with every feature disabled
+// (all members zeroed) - the tests request no optional device features.
+vk::VkPhysicalDeviceFeatures getDeviceNullFeatures (void)
+{
+ vk::VkPhysicalDeviceFeatures features;
+ deMemset(&features, 0, sizeof(features));
+ return features;
+}
+
+// Returns the number of queue families exposed by the physical device.
+deUint32 getNumQueueFamilyIndices (const vk::InstanceInterface& vki, vk::VkPhysicalDevice physicalDevice)
+{
+ // Passing a null properties pointer makes the driver report only the count.
+ deUint32 familyCount = 0;
+ vki.getPhysicalDeviceQueueFamilyProperties(physicalDevice, &familyCount, DE_NULL);
+ return familyCount;
+}
+
+// Returns the indices of all queue families that can present to the surface.
+vector<deUint32> getSupportedQueueFamilyIndices (const vk::InstanceInterface& vki, vk::VkPhysicalDevice physicalDevice, vk::VkSurfaceKHR surface)
+{
+ vector<deUint32> presentableFamilies;
+ const deUint32 familyCount = getNumQueueFamilyIndices(vki, physicalDevice);
+
+ for (deUint32 familyNdx = 0; familyNdx < familyCount; ++familyNdx)
+ {
+ if (vk::wsi::getPhysicalDeviceSurfaceSupport(vki, physicalDevice, familyNdx, surface) != VK_FALSE)
+ presentableFamilies.push_back(familyNdx);
+ }
+
+ return presentableFamilies;
+}
+
+// Picks a queue family capable of presenting to the surface (arbitrarily the
+// first one reported); throws NotSupportedError if none exists.
+deUint32 chooseQueueFamilyIndex (const vk::InstanceInterface& vki, vk::VkPhysicalDevice physicalDevice, vk::VkSurfaceKHR surface)
+{
+ const vector<deUint32> presentableFamilies (getSupportedQueueFamilyIndices(vki, physicalDevice, surface));
+
+ if (presentableFamilies.empty())
+ TCU_THROW(NotSupportedError, "Device doesn't support presentation");
+
+ return presentableFamilies[0];
+}
+
+// Creates a logical device enabling VK_KHR_swapchain and, when
+// requiresIncrementalPresent is set, VK_KHR_incremental_present.
+// FIX: the extension-support check previously required
+// VK_KHR_incremental_present unconditionally, which would mark the reference
+// (non-incremental) test variants NotSupported on devices lacking the
+// extension even though deviceParams only enables it when required. The check
+// now skips the optional extension when it is not requested.
+vk::Move<vk::VkDevice> createDeviceWithWsi (const vk::InstanceInterface& vki,
+ vk::VkPhysicalDevice physicalDevice,
+ const Extensions& supportedExtensions,
+ const deUint32 queueFamilyIndex,
+ bool requiresIncrementalPresent,
+ const vk::VkAllocationCallbacks* pAllocator = DE_NULL)
+{
+ const float queuePriorities[] = { 1.0f };
+ const vk::VkDeviceQueueCreateInfo queueInfos[] =
+ {
+ {
+ vk::VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
+ DE_NULL,
+ (vk::VkDeviceQueueCreateFlags)0,
+ queueFamilyIndex,
+ DE_LENGTH_OF_ARRAY(queuePriorities),
+ &queuePriorities[0]
+ }
+ };
+ const vk::VkPhysicalDeviceFeatures features = getDeviceNullFeatures();
+ // extensions[0] is always enabled; extensions[1] only when incremental
+ // present is required (see enabledExtensionCount below).
+ const char* const extensions[] =
+ {
+ "VK_KHR_swapchain",
+ "VK_KHR_incremental_present"
+ };
+
+ const vk::VkDeviceCreateInfo deviceParams =
+ {
+ vk::VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+ DE_NULL,
+ (vk::VkDeviceCreateFlags)0,
+ DE_LENGTH_OF_ARRAY(queueInfos),
+ &queueInfos[0],
+ 0u,
+ DE_NULL,
+ requiresIncrementalPresent ? 2u : 1u,
+ DE_ARRAY_BEGIN(extensions),
+ &features
+ };
+
+ for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(extensions); ++ndx)
+ {
+ // VK_KHR_incremental_present is optional for the reference tests.
+ if (!requiresIncrementalPresent && ndx == 1)
+ continue;
+
+ if (!isExtensionSupported(supportedExtensions, vk::RequiredExtension(extensions[ndx])))
+ TCU_THROW(NotSupportedError, (string(extensions[ndx]) + " is not supported").c_str());
+ }
+
+ return createDevice(vki, physicalDevice, &deviceParams, pAllocator);
+}
+
+// Creates the native display for the WSI type. A NotSupportedError from the
+// platform is escalated to TestError when the corresponding surface extension
+// is advertised (the platform is then obliged to support it).
+de::MovePtr<vk::wsi::Display> createDisplay (const vk::Platform& platform,
+ const Extensions& supportedExtensions,
+ vk::wsi::Type wsiType)
+{
+ try
+ {
+ return de::MovePtr<vk::wsi::Display>(platform.createWsiDisplay(wsiType));
+ }
+ catch (const tcu::NotSupportedError& e)
+ {
+ if (isExtensionSupported(supportedExtensions, vk::RequiredExtension(getExtensionName(wsiType))))
+ {
+ // If VK_KHR_{platform}_surface was supported, vk::Platform implementation
+ // must support creating native display & window for that WSI type.
+ throw tcu::TestError(e.getMessage());
+ }
+ else
+ throw;
+ }
+}
+
+// Creates a native window on the given display; a missing window capability
+// at this point is a platform-port bug, hence TestError rather than
+// NotSupported.
+de::MovePtr<vk::wsi::Window> createWindow (const vk::wsi::Display& display, const Maybe<UVec2>& initialSize)
+{
+ try
+ {
+ return de::MovePtr<vk::wsi::Window>(display.createWindow(initialSize));
+ }
+ catch (const tcu::NotSupportedError& e)
+ {
+ // See createDisplay - assuming that wsi::Display was supported platform port
+ // should also support creating a window.
+ throw tcu::TestError(e.getMessage());
+ }
+}
+
+// Convenience wrapper: returns the device queue handle by value instead of
+// through vkGetDeviceQueue's out-parameter.
+vk::VkQueue getDeviceQueue (const vk::DeviceInterface& vkd, vk::VkDevice device, deUint32 queueFamilyIndex, deUint32 queueIndex)
+{
+ vk::VkQueue queueHandle = (vk::VkQueue)0;
+
+ vkd.getDeviceQueue(device, queueFamilyIndex, queueIndex, &queueHandle);
+
+ return queueHandle;
+}
+
+// Creates a default (binary, unsignaled) semaphore.
+vk::Move<vk::VkSemaphore> createSemaphore (const vk::DeviceInterface& vkd,
+ vk::VkDevice device)
+{
+ const vk::VkSemaphoreCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+ DE_NULL,
+ 0u
+ };
+ return vk::createSemaphore(vkd, device, &createInfo);
+}
+
+// Fills each slot of the vector with a fresh semaphore. Ownership is released
+// from the Move<> wrapper (disown), so deinitSemaphores() must destroy them.
+void initSemaphores (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ std::vector<vk::VkSemaphore>& semaphores)
+{
+ const size_t count = semaphores.size();
+
+ for (size_t i = 0; i < count; ++i)
+ semaphores[i] = createSemaphore(vkd, device).disown();
+}
+
+// Destroys every valid semaphore handle in the vector, nulls the slots, then
+// empties the vector. Null handles (from partial init) are skipped.
+void deinitSemaphores (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ std::vector<vk::VkSemaphore>& semaphores)
+{
+ for (size_t i = 0; i < semaphores.size(); ++i)
+ {
+ const vk::VkSemaphore handle = semaphores[i];
+
+ if (handle != (vk::VkSemaphore)0)
+ vkd.destroySemaphore(device, handle, DE_NULL);
+
+ semaphores[i] = (vk::VkSemaphore)0;
+ }
+
+ semaphores.clear();
+}
+
+// Creates a fence in the unsignaled state (no create flags).
+vk::Move<vk::VkFence> createFence (const vk::DeviceInterface& vkd,
+ vk::VkDevice device)
+{
+ const vk::VkFenceCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+ DE_NULL,
+ 0u
+ };
+ return vk::createFence(vkd, device, &createInfo);
+}
+
+// Fills each slot of the vector with a fresh unsignaled fence; ownership is
+// transferred out of the Move<> wrapper, so deinitFences() must destroy them.
+void initFences (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ std::vector<vk::VkFence>& fences)
+{
+ for (size_t ndx = 0; ndx < fences.size(); ndx++)
+ fences[ndx] = createFence(vkd, device).disown();
+}
+
+void deinitFences (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ std::vector<vk::VkFence>& fences)
+{
+ for (size_t ndx = 0; ndx < fences.size(); ndx++)
+ {
+ if (fences[ndx] != (vk::VkFence)0)
+ vkd.destroyFence(device, fences[ndx], DE_NULL);
+
+ fences[ndx] = (vk::VkFence)0;
+ }
+
+ fences.clear();
+}
+
+// Computes the scissor rectangle rendered for a given frame. Frame 0 covers
+// the whole image; later frames render a small pseudo-random rectangle that
+// must be fully contained in an imageWidth x imageHeight image.
+vk::VkRect2D getRenderFrameRect (size_t frameNdx,
+ deUint32 imageWidth,
+ deUint32 imageHeight)
+{
+ const deUint32 x = frameNdx == 0
+ ? 0
+ : de::min(((deUint32)frameNdx) % imageWidth, imageWidth - 1u);
+ const deUint32 y = frameNdx == 0
+ ? 0
+ : de::min(((deUint32)frameNdx) % imageHeight, imageHeight - 1u);
+ // Guard the modulus against zero (imageWidth / 3 == 0 for images narrower
+ // than three pixels) and clamp so that x + width <= imageWidth; without the
+ // "- 1u" the rectangle could extend one pixel past the image edge.
+ const deUint32 width = frameNdx == 0
+ ? imageWidth
+ : 1 + de::min((deUint32)(frameNdx) % de::max(1u, de::min<deUint32>(100, imageWidth / 3)), imageWidth - x - 1u);
+ const deUint32 height = frameNdx == 0
+ ? imageHeight
+ : 1 + de::min((deUint32)(frameNdx) % de::max(1u, de::min<deUint32>(100, imageHeight / 3)), imageHeight - y - 1u);
+ const vk::VkRect2D rect =
+ {
+ { (deInt32)x, (deInt32)y },
+ { width, height }
+ };
+
+ DE_ASSERT(width > 0);
+ DE_ASSERT(height > 0);
+ DE_ASSERT(x + width <= imageWidth);
+ DE_ASSERT(y + height <= imageHeight);
+
+ return rect;
+}
+
+// Collects the render rectangles of frames [firstFrameNdx, lastFrameNdx]
+// (inclusive) as VkRectLayerKHR entries on layer 0, for use as the dirty
+// regions passed to VK_KHR_incremental_present.
+vector<vk::VkRectLayerKHR> getUpdatedRects (size_t firstFrameNdx,
+ size_t lastFrameNdx,
+ deUint32 width,
+ deUint32 height)
+{
+ vector<vk::VkRectLayerKHR> rects;
+
+ for (size_t frameNdx = firstFrameNdx; frameNdx <= lastFrameNdx; frameNdx++)
+ {
+ const vk::VkRect2D rect = getRenderFrameRect(frameNdx, width, height);
+ const vk::VkRectLayerKHR rectLayer =
+ {
+ rect.offset,
+ rect.extent,
+ 0
+ };
+
+ rects.push_back(rectLayer);
+ }
+
+ return rects;
+}
+
+// Records the draw commands for one logical frame into an already-begun render
+// pass. Frame 0 additionally clears the whole image; every frame then draws a
+// full-screen quad scissored to its per-frame rectangle, with the frame index
+// passed to the fragment shader as a 4-byte push-constant mask.
+void cmdRenderFrame (const vk::DeviceInterface& vkd,
+ vk::VkCommandBuffer commandBuffer,
+ vk::VkPipelineLayout pipelineLayout,
+ vk::VkPipeline pipeline,
+ size_t frameNdx,
+ deUint32 imageWidth,
+ deUint32 imageHeight)
+{
+ const deUint32 mask = (deUint32)frameNdx;
+
+ if (frameNdx == 0)
+ {
+ const vk::VkRect2D scissor =
+ {
+ { 0u, 0u },
+ { imageWidth, imageHeight }
+ };
+
+ vkd.cmdSetScissor(commandBuffer, 0u, 1u, &scissor);
+ // Same clear color as the render-pass begin in createCommandBuffer.
+ // Fixed: 0.50 was a double literal among float arguments; use 0.50f.
+ const vk::VkClearAttachment attachment =
+ {
+ vk::VK_IMAGE_ASPECT_COLOR_BIT,
+ 0u,
+ vk::makeClearValueColorF32(0.25f, 0.50f, 0.75f, 1.00f)
+ };
+ const vk::VkClearRect rect =
+ {
+ scissor,
+ 0u,
+ 1u
+ };
+
+ vkd.cmdClearAttachments(commandBuffer, 1u, &attachment, 1u, &rect);
+ }
+
+ {
+ // Scissor limits the full-screen quad to this frame's dirty rectangle.
+ const vk::VkRect2D scissor = getRenderFrameRect(frameNdx, imageWidth, imageHeight);
+ vkd.cmdSetScissor(commandBuffer, 0u, 1u, &scissor);
+
+ vkd.cmdPushConstants(commandBuffer, pipelineLayout, vk::VK_SHADER_STAGE_FRAGMENT_BIT, 0u, 4u, &mask);
+ vkd.cmdBindPipeline(commandBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+ vkd.cmdDraw(commandBuffer, 6u, 1u, 0u, 0u);
+ }
+}
+
+// Allocates and fully records a primary command buffer that re-renders every
+// frame the target swapchain image has missed: frames [imageNextFrame,
+// currentFrame] inclusive. This keeps each image's contents up to date even
+// though images are reused out of order.
+vk::Move<vk::VkCommandBuffer> createCommandBuffer (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ vk::VkCommandPool commandPool,
+ vk::VkPipelineLayout pipelineLayout,
+ vk::VkRenderPass renderPass,
+ vk::VkFramebuffer framebuffer,
+ vk::VkPipeline pipeline,
+ size_t imageNextFrame,
+ size_t currentFrame,
+ deUint32 imageWidth,
+ deUint32 imageHeight)
+{
+ const vk::VkCommandBufferAllocateInfo allocateInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ DE_NULL,
+
+ commandPool,
+ vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ 1
+ };
+ const vk::VkCommandBufferBeginInfo beginInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ DE_NULL,
+ 0u,
+ DE_NULL
+ };
+
+ vk::Move<vk::VkCommandBuffer> commandBuffer (vk::allocateCommandBuffer(vkd, device, &allocateInfo));
+ VK_CHECK(vkd.beginCommandBuffer(*commandBuffer, &beginInfo));
+
+ {
+ // Clear value is supplied for the single attachment; the render pass
+ // created in this file uses LOAD_OP_LOAD, and frame 0 clears explicitly
+ // via cmdClearAttachments in cmdRenderFrame.
+ const vk::VkClearValue clearValue = vk::makeClearValueColorF32(0.25f, 0.50f, 0.75f, 1.00f);
+ const vk::VkRenderPassBeginInfo renderPassBeginInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+ DE_NULL,
+
+ renderPass,
+ framebuffer,
+
+ {
+ { (deInt32)0, (deInt32)0 },
+ { imageWidth, imageHeight }
+ },
+ 1u,
+ &clearValue
+ };
+ vkd.cmdBeginRenderPass(*commandBuffer, &renderPassBeginInfo, vk::VK_SUBPASS_CONTENTS_INLINE);
+ }
+
+ // Replay every frame this image has not yet seen.
+ for (size_t frameNdx = imageNextFrame; frameNdx <= currentFrame; frameNdx++)
+ cmdRenderFrame(vkd, *commandBuffer, pipelineLayout, pipeline, frameNdx, imageWidth, imageHeight);
+
+ vkd.cmdEndRenderPass(*commandBuffer);
+
+ VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
+ return commandBuffer;
+}
+
+// Frees every non-null command buffer in the vector back to the pool and
+// clears the vector. Safe on partially populated vectors.
+void deinitCommandBuffers (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ vk::VkCommandPool commandPool,
+ std::vector<vk::VkCommandBuffer>& commandBuffers)
+{
+ for (size_t ndx = 0; ndx < commandBuffers.size(); ndx++)
+ {
+ if (commandBuffers[ndx] != (vk::VkCommandBuffer)0)
+ vkd.freeCommandBuffers(device, commandPool, 1u, &commandBuffers[ndx]);
+
+ commandBuffers[ndx] = (vk::VkCommandBuffer)0;
+ }
+
+ commandBuffers.clear();
+}
+
+// Creates a command pool for the given queue family (no reset/transient flags,
+// so buffers are freed individually rather than reset).
+vk::Move<vk::VkCommandPool> createCommandPool (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ deUint32 queueFamilyIndex)
+{
+ const vk::VkCommandPoolCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ queueFamilyIndex
+ };
+
+ return vk::createCommandPool(vkd, device, &createInfo);
+}
+
+// Creates a single-attachment, single-layer framebuffer for the given render
+// pass and image view.
+vk::Move<vk::VkFramebuffer> createFramebuffer (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ vk::VkRenderPass renderPass,
+ vk::VkImageView imageView,
+ deUint32 width,
+ deUint32 height)
+{
+ const vk::VkFramebufferCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ DE_NULL,
+
+ 0u,
+ renderPass,
+ 1u,
+ &imageView,
+ width,
+ height,
+ 1u
+ };
+
+ return vk::createFramebuffer(vkd, device, &createInfo);
+}
+
+// Fills an already-sized vector with one framebuffer per image view. Ownership
+// is transferred to the caller (handles are disown()ed); pair with
+// deinitFramebuffers. Note: imageViews is now taken by const reference to
+// avoid copying the vector, matching initImageViews.
+void initFramebuffers (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ vk::VkRenderPass renderPass,
+ const std::vector<vk::VkImageView>& imageViews,
+ deUint32 width,
+ deUint32 height,
+ std::vector<vk::VkFramebuffer>& framebuffers)
+{
+ DE_ASSERT(framebuffers.size() == imageViews.size());
+
+ for (size_t ndx = 0; ndx < framebuffers.size(); ndx++)
+ framebuffers[ndx] = createFramebuffer(vkd, device, renderPass, imageViews[ndx], width, height).disown();
+}
+
+// Destroys every non-null framebuffer in the vector and clears it. Safe on
+// partially initialized vectors.
+void deinitFramebuffers (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ std::vector<vk::VkFramebuffer>& framebuffers)
+{
+ for (size_t ndx = 0; ndx < framebuffers.size(); ndx++)
+ {
+ if (framebuffers[ndx] != (vk::VkFramebuffer)0)
+ vkd.destroyFramebuffer(device, framebuffers[ndx], DE_NULL);
+
+ framebuffers[ndx] = (vk::VkFramebuffer)0;
+ }
+
+ framebuffers.clear();
+}
+
+// Creates a 2D color image view covering mip 0 / layer 0 with identity
+// component mapping.
+vk::Move<vk::VkImageView> createImageView (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ vk::VkImage image,
+ vk::VkFormat format)
+{
+ const vk::VkImageViewCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ DE_NULL,
+
+ 0u,
+ image,
+ vk::VK_IMAGE_VIEW_TYPE_2D,
+ format,
+ vk::makeComponentMappingRGBA(),
+ {
+ vk::VK_IMAGE_ASPECT_COLOR_BIT,
+ 0u,
+ 1u,
+ 0u,
+ 1u
+ }
+ };
+
+ return vk::createImageView(vkd, device, &createInfo, DE_NULL);
+}
+
+// Fills an already-sized vector with one view per swapchain image. Ownership
+// is transferred to the caller (handles are disown()ed); pair with
+// deinitImageViews.
+void initImageViews (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ const std::vector<vk::VkImage>& images,
+ vk::VkFormat format,
+ std::vector<vk::VkImageView>& imageViews)
+{
+ DE_ASSERT(images.size() == imageViews.size());
+
+ for (size_t ndx = 0; ndx < imageViews.size(); ndx++)
+ imageViews[ndx] = createImageView(vkd, device, images[ndx], format).disown();
+}
+
+// Destroys every non-null image view in the vector and clears it. Safe on
+// partially initialized vectors.
+void deinitImageViews (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ std::vector<vk::VkImageView>& imageViews)
+{
+ for (size_t ndx = 0; ndx < imageViews.size(); ndx++)
+ {
+ if (imageViews[ndx] != (vk::VkImageView)0)
+ vkd.destroyImageView(device, imageViews[ndx], DE_NULL);
+
+ imageViews[ndx] = (vk::VkImageView)0;
+ }
+
+ imageViews.clear();
+}
+
+// Creates a single-subpass render pass over one color attachment. The
+// attachment uses LOAD_OP_LOAD so previously presented content is preserved
+// (required for incremental presentation) and transitions to PRESENT_SRC_KHR
+// at the end of the pass.
+vk::Move<vk::VkRenderPass> createRenderPass (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ vk::VkFormat format)
+{
+ const vk::VkAttachmentDescription attachments[] =
+ {
+ {
+ 0u,
+ format,
+ vk::VK_SAMPLE_COUNT_1_BIT,
+
+ vk::VK_ATTACHMENT_LOAD_OP_LOAD,
+ vk::VK_ATTACHMENT_STORE_OP_STORE,
+
+ vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,
+
+ vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ vk::VK_IMAGE_LAYOUT_PRESENT_SRC_KHR
+ }
+ };
+ const vk::VkAttachmentReference colorAttachmentRefs[] =
+ {
+ {
+ 0u,
+ vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
+ }
+ };
+ const vk::VkSubpassDescription subpasses[] =
+ {
+ {
+ 0u,
+ vk::VK_PIPELINE_BIND_POINT_GRAPHICS,
+ 0u,
+ DE_NULL,
+
+ DE_LENGTH_OF_ARRAY(colorAttachmentRefs),
+ colorAttachmentRefs,
+ DE_NULL,
+
+ DE_NULL,
+ 0u,
+ DE_NULL
+ }
+ };
+
+ const vk::VkRenderPassCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ DE_NULL,
+ 0u,
+
+ DE_LENGTH_OF_ARRAY(attachments),
+ attachments,
+
+ DE_LENGTH_OF_ARRAY(subpasses),
+ subpasses,
+
+ 0u,
+ DE_NULL
+ };
+
+ return vk::createRenderPass(vkd, device, &createInfo);
+}
+
+// Builds the graphics pipeline used to render the test quads: vertex + fragment
+// stage, no vertex inputs (positions generated from gl_VertexIndex), fixed
+// viewport, dynamic scissor, no blending, depth/stencil disabled.
+vk::Move<vk::VkPipeline> createPipeline (const vk::DeviceInterface& vkd,
+ vk::VkDevice device,
+ vk::VkRenderPass renderPass,
+ vk::VkPipelineLayout layout,
+ vk::VkShaderModule vertexShaderModule,
+ vk::VkShaderModule fragmentShaderModule,
+ deUint32 width,
+ deUint32 height)
+{
+ const vk::VkSpecializationInfo shaderSpecialization =
+ {
+ 0u,
+ DE_NULL,
+ 0,
+ DE_NULL
+ };
+ const vk::VkPipelineShaderStageCreateInfo stages[] =
+ {
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ vk::VK_SHADER_STAGE_VERTEX_BIT,
+ vertexShaderModule,
+ "main",
+ &shaderSpecialization
+ },
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ vk::VK_SHADER_STAGE_FRAGMENT_BIT,
+ fragmentShaderModule,
+ "main",
+ &shaderSpecialization
+ }
+ };
+ // No vertex attributes or bindings; the quad is synthesized in the shader.
+ const vk::VkPipelineVertexInputStateCreateInfo vertexInputState =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ 0u,
+ DE_NULL,
+ 0u,
+ DE_NULL
+ };
+ const vk::VkPipelineInputAssemblyStateCreateInfo inputAssemblyState =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
+ VK_FALSE
+ };
+ const vk::VkViewport viewports[] =
+ {
+ {
+ 0.0f, 0.0f,
+ (float)width, (float)height,
+ 0.0f, 1.0f
+ }
+ };
+ // Static scissor value is unused at draw time: VK_DYNAMIC_STATE_SCISSOR is
+ // enabled below and cmdRenderFrame sets the scissor per frame.
+ const vk::VkRect2D scissors[] =
+ {
+ {
+ { 0u, 0u },
+ { width, height }
+ }
+ };
+ const vk::VkPipelineViewportStateCreateInfo viewportState =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+
+ DE_LENGTH_OF_ARRAY(viewports),
+ viewports,
+ DE_LENGTH_OF_ARRAY(scissors),
+ scissors
+ };
+ // Fixed: sType was VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, which is
+ // invalid for VkPipelineRasterizationStateCreateInfo.
+ // NOTE(review): depthClampEnable is VK_TRUE here; this requires the
+ // depthClamp device feature -- confirm intent, VK_FALSE may be meant.
+ const vk::VkPipelineRasterizationStateCreateInfo rasterizationState =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ VK_TRUE,
+ VK_FALSE,
+ vk::VK_POLYGON_MODE_FILL,
+ vk::VK_CULL_MODE_NONE,
+ vk::VK_FRONT_FACE_CLOCKWISE,
+ VK_FALSE,
+ 0.0f,
+ 0.0f,
+ 0.0f,
+ 1.0f
+ };
+ const vk::VkSampleMask sampleMask = ~0u;
+ const vk::VkPipelineMultisampleStateCreateInfo multisampleState =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ vk::VK_SAMPLE_COUNT_1_BIT,
+ VK_FALSE,
+ 0.0f,
+ &sampleMask,
+ VK_FALSE,
+ VK_FALSE
+ };
+ // Depth and stencil tests fully disabled; swapchain images have no
+ // depth/stencil attachment.
+ const vk::VkPipelineDepthStencilStateCreateInfo depthStencilState =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ DE_FALSE,
+ DE_FALSE,
+ vk::VK_COMPARE_OP_ALWAYS,
+ DE_FALSE,
+ DE_FALSE,
+ {
+ vk::VK_STENCIL_OP_KEEP,
+ vk::VK_STENCIL_OP_KEEP,
+ vk::VK_STENCIL_OP_KEEP,
+ vk::VK_COMPARE_OP_ALWAYS,
+ 0u,
+ 0u,
+ 0u,
+ },
+ {
+ vk::VK_STENCIL_OP_KEEP,
+ vk::VK_STENCIL_OP_KEEP,
+ vk::VK_STENCIL_OP_KEEP,
+ vk::VK_COMPARE_OP_ALWAYS,
+ 0u,
+ 0u,
+ 0u,
+ },
+ 0.0f,
+ 1.0f
+ };
+ // Blending disabled; fragment output written directly to all RGBA channels.
+ const vk::VkPipelineColorBlendAttachmentState attachmentBlendState =
+ {
+ VK_FALSE,
+ vk::VK_BLEND_FACTOR_ONE,
+ vk::VK_BLEND_FACTOR_ZERO,
+ vk::VK_BLEND_OP_ADD,
+ vk::VK_BLEND_FACTOR_ONE,
+ vk::VK_BLEND_FACTOR_ZERO,
+ vk::VK_BLEND_OP_ADD,
+ (vk::VK_COLOR_COMPONENT_R_BIT|
+ vk::VK_COLOR_COMPONENT_G_BIT|
+ vk::VK_COLOR_COMPONENT_B_BIT|
+ vk::VK_COLOR_COMPONENT_A_BIT),
+ };
+ const vk::VkPipelineColorBlendStateCreateInfo blendState =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+ DE_FALSE,
+ vk::VK_LOGIC_OP_COPY,
+ 1u,
+ &attachmentBlendState,
+ { 0.0f, 0.0f, 0.0f, 0.0f }
+ };
+ // Scissor is the only dynamic state; it is set per frame in cmdRenderFrame.
+ const vk::VkDynamicState dynamicStates[] =
+ {
+ vk::VK_DYNAMIC_STATE_SCISSOR
+ };
+ const vk::VkPipelineDynamicStateCreateInfo dynamicState =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+
+ DE_LENGTH_OF_ARRAY(dynamicStates),
+ dynamicStates
+ };
+ const vk::VkGraphicsPipelineCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+ DE_NULL,
+ 0u,
+
+ DE_LENGTH_OF_ARRAY(stages),
+ stages,
+ &vertexInputState,
+ &inputAssemblyState,
+ DE_NULL,
+ &viewportState,
+ &rasterizationState,
+ &multisampleState,
+ &depthStencilState,
+ &blendState,
+ &dynamicState,
+ layout,
+ renderPass,
+ 0u,
+ DE_NULL,
+ 0u
+ };
+
+ return vk::createGraphicsPipeline(vkd, device, DE_NULL, &createInfo);
+}
+
+// Creates a pipeline layout with no descriptor sets and a single 4-byte
+// fragment-stage push-constant range (the per-frame color mask).
+vk::Move<vk::VkPipelineLayout> createPipelineLayout (const vk::DeviceInterface& vkd,
+ vk::VkDevice device)
+{
+ const vk::VkPushConstantRange pushConstants[] =
+ {
+ {
+ vk::VK_SHADER_STAGE_FRAGMENT_BIT,
+ 0u,
+ 4u
+ }
+ };
+ const vk::VkPipelineLayoutCreateInfo createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ DE_NULL,
+ 0u,
+
+ 0u,
+ DE_NULL,
+
+ DE_LENGTH_OF_ARRAY(pushConstants),
+ pushConstants
+ };
+
+ return vk::createPipelineLayout(vkd, device, &createInfo);
+}
+
+// Per-test-case configuration selected at test-tree construction time.
+struct TestConfig
+{
+ vk::wsi::Type wsiType;
+ Scaling scaling;
+ bool useIncrementalPresent; // false = reference path (plain vkQueuePresentKHR)
+ vk::VkPresentModeKHR presentMode;
+};
+
+// Test instance that renders a long frame sequence to a swapchain, optionally
+// presenting with VK_KHR_incremental_present dirty rectangles, iterating over
+// every generated swapchain configuration. Member order matters: it defines
+// construction/destruction order for the Vulkan objects.
+class IncrementalPresentTestInstance : public TestInstance
+{
+public:
+ IncrementalPresentTestInstance (Context& context, const TestConfig& testConfig);
+ ~IncrementalPresentTestInstance (void);
+
+ tcu::TestStatus iterate (void);
+
+private:
+ const TestConfig m_testConfig;
+ const bool m_useIncrementalPresent;
+ const vk::PlatformInterface& m_vkp;
+ const Extensions m_instanceExtensions;
+ const vk::Unique<vk::VkInstance> m_instance;
+ const vk::InstanceDriver m_vki;
+ const vk::VkPhysicalDevice m_physicalDevice;
+ const de::UniquePtr<vk::wsi::Display> m_nativeDisplay;
+ const de::UniquePtr<vk::wsi::Window> m_nativeWindow;
+ const vk::Unique<vk::VkSurfaceKHR> m_surface;
+
+ const deUint32 m_queueFamilyIndex;
+ const Extensions m_deviceExtensions;
+ const vk::Unique<vk::VkDevice> m_device;
+ const vk::DeviceDriver m_vkd;
+ const vk::VkQueue m_queue;
+
+ const vk::Unique<vk::VkCommandPool> m_commandPool;
+ const vk::Unique<vk::VkShaderModule> m_vertexShaderModule;
+ const vk::Unique<vk::VkShaderModule> m_fragmentShaderModule;
+ const vk::Unique<vk::VkPipelineLayout> m_pipelineLayout;
+
+ const vk::VkSurfaceCapabilitiesKHR m_surfaceProperties;
+ const vector<vk::VkSurfaceFormatKHR> m_surfaceFormats;
+ const vector<vk::VkPresentModeKHR> m_presentModes;
+
+ tcu::ResultCollector m_resultCollector;
+
+ // Per-swapchain state, recreated by initSwapchainResources().
+ vk::Move<vk::VkSwapchainKHR> m_swapchain;
+ std::vector<vk::VkImage> m_swapchainImages;
+ // For each swapchain image: index of the first frame it has not rendered yet.
+ std::vector<size_t> m_imageNextFrames;
+
+ vk::Move<vk::VkRenderPass> m_renderPass;
+ vk::Move<vk::VkPipeline> m_pipeline;
+
+ std::vector<vk::VkImageView> m_swapchainImageViews;
+ std::vector<vk::VkFramebuffer> m_framebuffers;
+ std::vector<vk::VkCommandBuffer> m_commandBuffers;
+ // Semaphores are indexed by swapchain image and rotated through the
+ // "free" slots below after each present (see render()).
+ std::vector<vk::VkSemaphore> m_acquireSemaphores;
+ std::vector<vk::VkSemaphore> m_renderSemaphores;
+ std::vector<vk::VkFence> m_fences;
+
+ vk::VkSemaphore m_freeAcquireSemaphore;
+ vk::VkSemaphore m_freeRenderSemaphore;
+
+ std::vector<vk::VkSwapchainCreateInfoKHR> m_swapchainConfigs;
+ size_t m_swapchainConfigNdx;
+
+ const size_t m_frameCount;
+ size_t m_frameNdx;
+
+ // Budget for tolerated VK_ERROR_OUT_OF_DATE_KHR retries per config.
+ const size_t m_maxOutOfDateCount;
+ size_t m_outOfDateCount;
+
+ void initSwapchainResources (void);
+ void deinitSwapchainResources (void);
+ void render (void);
+};
+
+// Generates one swapchain create-info per combination of (surface format,
+// supported pre-transform, supported composite alpha) for the requested
+// present mode. Image size depends on the scaling mode: native size for
+// SCALING_NONE, a small extent for SCALING_UP, the max extent for SCALING_DOWN.
+// Throws NotSupportedError if the present mode is not supported.
+std::vector<vk::VkSwapchainCreateInfoKHR> generateSwapchainConfigs (vk::VkSurfaceKHR surface,
+ deUint32 queueFamilyIndex,
+ Scaling scaling,
+ const vk::VkSurfaceCapabilitiesKHR& properties,
+ const vector<vk::VkSurfaceFormatKHR>& formats,
+ const vector<vk::VkPresentModeKHR>& presentModes,
+ vk::VkPresentModeKHR presentMode)
+{
+ const deUint32 imageLayers = 1u;
+ const vk::VkImageUsageFlags imageUsage = properties.supportedUsageFlags;
+ const vk::VkBool32 clipped = VK_FALSE;
+ vector<vk::VkSwapchainCreateInfoKHR> createInfos;
+
+ // 0xFFFFFFFF currentExtent means the surface size is determined by the
+ // swapchain; pick a mid-range extent capped at 1024 in that case.
+ const deUint32 imageWidth = scaling == SCALING_NONE
+ ? (properties.currentExtent.width != 0xFFFFFFFFu
+ ? properties.currentExtent.width
+ : de::min(1024u, properties.minImageExtent.width + ((properties.maxImageExtent.width - properties.minImageExtent.width) / 2)))
+ : (scaling == SCALING_UP
+ ? de::max(31u, properties.minImageExtent.width)
+ : properties.maxImageExtent.width);
+ const deUint32 imageHeight = scaling == SCALING_NONE
+ ? (properties.currentExtent.height != 0xFFFFFFFFu
+ ? properties.currentExtent.height
+ : de::min(1024u, properties.minImageExtent.height + ((properties.maxImageExtent.height - properties.minImageExtent.height) / 2)))
+ : (scaling == SCALING_UP
+ ? de::max(31u, properties.minImageExtent.height)
+ : properties.maxImageExtent.height);
+ const vk::VkExtent2D imageSize = { imageWidth, imageHeight };
+
+ // Reject early if the requested present mode is not advertised.
+ {
+ size_t presentModeNdx;
+
+ for (presentModeNdx = 0; presentModeNdx < presentModes.size(); presentModeNdx++)
+ {
+ if (presentModes[presentModeNdx] == presentMode)
+ break;
+ }
+
+ if (presentModeNdx == presentModes.size())
+ TCU_THROW(NotSupportedError, "Present mode not supported");
+ }
+
+ for (size_t formatNdx = 0; formatNdx < formats.size(); formatNdx++)
+ {
+ // Iterate set bits of the supported transform / composite alpha masks.
+ for (vk::VkSurfaceTransformFlagsKHR transform = 1u; transform <= properties.supportedTransforms; transform = transform << 1u)
+ {
+ if ((properties.supportedTransforms & transform) == 0)
+ continue;
+
+ for (vk::VkCompositeAlphaFlagsKHR alpha = 1u; alpha <= properties.supportedCompositeAlpha; alpha = alpha << 1u)
+ {
+ if ((alpha & properties.supportedCompositeAlpha) == 0)
+ continue;
+
+ const vk::VkSurfaceTransformFlagBitsKHR preTransform = (vk::VkSurfaceTransformFlagBitsKHR)transform;
+ const vk::VkCompositeAlphaFlagBitsKHR compositeAlpha = (vk::VkCompositeAlphaFlagBitsKHR)alpha;
+ const vk::VkFormat imageFormat = formats[formatNdx].format;
+ const vk::VkColorSpaceKHR imageColorSpace = formats[formatNdx].colorSpace;
+ const vk::VkSwapchainCreateInfoKHR createInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
+ DE_NULL,
+ 0u,
+ surface,
+ properties.minImageCount,
+ imageFormat,
+ imageColorSpace,
+ imageSize,
+ imageLayers,
+ imageUsage,
+ vk::VK_SHARING_MODE_EXCLUSIVE,
+ 1u,
+ &queueFamilyIndex,
+ preTransform,
+ compositeAlpha,
+ presentMode,
+ clipped,
+ (vk::VkSwapchainKHR)0
+ };
+
+ createInfos.push_back(createInfo);
+ }
+ }
+ }
+
+ return createInfos;
+}
+
+// Creates the instance/device/surface chain and all swapchain-independent
+// objects. Initializer order follows member declaration order, which defines
+// the required construction order of the Vulkan objects.
+IncrementalPresentTestInstance::IncrementalPresentTestInstance (Context& context, const TestConfig& testConfig)
+ : TestInstance (context)
+ , m_testConfig (testConfig)
+ , m_useIncrementalPresent (testConfig.useIncrementalPresent)
+ , m_vkp (context.getPlatformInterface())
+ , m_instanceExtensions (vk::enumerateInstanceExtensionProperties(m_vkp, DE_NULL))
+ , m_instance (createInstanceWithWsi(m_vkp, m_instanceExtensions, testConfig.wsiType))
+ , m_vki (m_vkp, *m_instance)
+ , m_physicalDevice (vk::chooseDevice(m_vki, *m_instance, context.getTestContext().getCommandLine()))
+ , m_nativeDisplay (createDisplay(context.getTestContext().getPlatform().getVulkanPlatform(), m_instanceExtensions, testConfig.wsiType))
+ , m_nativeWindow (createWindow(*m_nativeDisplay, tcu::nothing<UVec2>()))
+ , m_surface (vk::wsi::createSurface(m_vki, *m_instance, testConfig.wsiType, *m_nativeDisplay, *m_nativeWindow))
+
+ , m_queueFamilyIndex (chooseQueueFamilyIndex(m_vki, m_physicalDevice, *m_surface))
+ , m_deviceExtensions (vk::enumerateDeviceExtensionProperties(m_vki, m_physicalDevice, DE_NULL))
+ , m_device (createDeviceWithWsi(m_vki, m_physicalDevice, m_deviceExtensions, m_queueFamilyIndex, testConfig.useIncrementalPresent))
+ , m_vkd (m_vki, *m_device)
+ , m_queue (getDeviceQueue(m_vkd, *m_device, m_queueFamilyIndex, 0u))
+
+ , m_commandPool (createCommandPool(m_vkd, *m_device, m_queueFamilyIndex))
+ , m_vertexShaderModule (vk::createShaderModule(m_vkd, *m_device, context.getBinaryCollection().get("quad-vert"), 0u))
+ , m_fragmentShaderModule (vk::createShaderModule(m_vkd, *m_device, context.getBinaryCollection().get("quad-frag"), 0u))
+ , m_pipelineLayout (createPipelineLayout(m_vkd, *m_device))
+
+ , m_surfaceProperties (vk::wsi::getPhysicalDeviceSurfaceCapabilities(m_vki, m_physicalDevice, *m_surface))
+ , m_surfaceFormats (vk::wsi::getPhysicalDeviceSurfaceFormats(m_vki, m_physicalDevice, *m_surface))
+ , m_presentModes (vk::wsi::getPhysicalDeviceSurfacePresentModes(m_vki, m_physicalDevice, *m_surface))
+
+ , m_freeAcquireSemaphore ((vk::VkSemaphore)0)
+ , m_freeRenderSemaphore ((vk::VkSemaphore)0)
+
+ , m_swapchainConfigs (generateSwapchainConfigs(*m_surface, m_queueFamilyIndex, testConfig.scaling, m_surfaceProperties, m_surfaceFormats, m_presentModes, testConfig.presentMode))
+ , m_swapchainConfigNdx (0u)
+
+ // 5 seconds of frames at a nominal 60Hz per swapchain configuration.
+ , m_frameCount (60u * 5u)
+ , m_frameNdx (0u)
+
+ , m_maxOutOfDateCount (20u)
+ , m_outOfDateCount (0u)
+{
+ {
+ const tcu::ScopedLogSection surfaceInfo (m_context.getTestContext().getLog(), "SurfaceCapabilities", "SurfaceCapabilities");
+ m_context.getTestContext().getLog() << TestLog::Message << m_surfaceProperties << TestLog::EndMessage;
+ }
+}
+
+// Releases swapchain-dependent resources; remaining members are destroyed by
+// their Unique/Move wrappers in reverse declaration order.
+IncrementalPresentTestInstance::~IncrementalPresentTestInstance (void)
+{
+ deinitSwapchainResources();
+}
+
+// Creates the swapchain for the current configuration plus all per-swapchain
+// objects: render pass, pipeline, image views, framebuffers, semaphores and
+// fences. fenceCount bounds the number of frames in flight (see render()).
+void IncrementalPresentTestInstance::initSwapchainResources (void)
+{
+ const size_t fenceCount = 6;
+ const deUint32 imageWidth = m_swapchainConfigs[m_swapchainConfigNdx].imageExtent.width;
+ const deUint32 imageHeight = m_swapchainConfigs[m_swapchainConfigNdx].imageExtent.height;
+ const vk::VkFormat imageFormat = m_swapchainConfigs[m_swapchainConfigNdx].imageFormat;
+
+ m_swapchain = vk::createSwapchainKHR(m_vkd, *m_device, &m_swapchainConfigs[m_swapchainConfigNdx]);
+ m_swapchainImages = vk::wsi::getSwapchainImages(m_vkd, *m_device, *m_swapchain);
+
+ // Every image starts with frame 0 (full clear + redraw) pending.
+ m_imageNextFrames.resize(m_swapchainImages.size(), 0);
+
+ m_renderPass = createRenderPass(m_vkd, *m_device, imageFormat);
+ m_pipeline = createPipeline(m_vkd, *m_device, *m_renderPass, *m_pipelineLayout, *m_vertexShaderModule, *m_fragmentShaderModule, imageWidth, imageHeight);
+
+ m_swapchainImageViews = std::vector<vk::VkImageView>(m_swapchainImages.size(), (vk::VkImageView)0);
+ m_framebuffers = std::vector<vk::VkFramebuffer>(m_swapchainImages.size(), (vk::VkFramebuffer)0);
+ m_acquireSemaphores = std::vector<vk::VkSemaphore>(m_swapchainImages.size(), (vk::VkSemaphore)0);
+ m_renderSemaphores = std::vector<vk::VkSemaphore>(m_swapchainImages.size(), (vk::VkSemaphore)0);
+
+ // One command buffer slot per in-flight fence.
+ m_fences = std::vector<vk::VkFence>(fenceCount, (vk::VkFence)0);
+ m_commandBuffers = std::vector<vk::VkCommandBuffer>(m_fences.size(), (vk::VkCommandBuffer)0);
+
+ m_freeAcquireSemaphore = (vk::VkSemaphore)0;
+ m_freeRenderSemaphore = (vk::VkSemaphore)0;
+
+ m_freeAcquireSemaphore = createSemaphore(m_vkd, *m_device).disown();
+ m_freeRenderSemaphore = createSemaphore(m_vkd, *m_device).disown();
+
+ initImageViews(m_vkd, *m_device, m_swapchainImages, imageFormat, m_swapchainImageViews);
+ initFramebuffers(m_vkd, *m_device, *m_renderPass, m_swapchainImageViews, imageWidth, imageHeight, m_framebuffers);
+ initSemaphores(m_vkd, *m_device, m_acquireSemaphores);
+ initSemaphores(m_vkd, *m_device, m_renderSemaphores);
+
+ initFences(m_vkd, *m_device, m_fences);
+}
+
+// Waits for the queue to idle, then destroys all per-swapchain objects created
+// by initSwapchainResources(). Safe to call repeatedly (all deinit helpers
+// tolerate empty vectors and null handles).
+void IncrementalPresentTestInstance::deinitSwapchainResources (void)
+{
+ VK_CHECK(m_vkd.queueWaitIdle(m_queue));
+
+ if (m_freeAcquireSemaphore != (vk::VkSemaphore)0)
+ {
+ m_vkd.destroySemaphore(*m_device, m_freeAcquireSemaphore, DE_NULL);
+ m_freeAcquireSemaphore = (vk::VkSemaphore)0;
+ }
+
+ if (m_freeRenderSemaphore != (vk::VkSemaphore)0)
+ {
+ m_vkd.destroySemaphore(*m_device, m_freeRenderSemaphore, DE_NULL);
+ m_freeRenderSemaphore = (vk::VkSemaphore)0;
+ }
+
+ deinitSemaphores(m_vkd, *m_device, m_acquireSemaphores);
+ deinitSemaphores(m_vkd, *m_device, m_renderSemaphores);
+ deinitFences(m_vkd, *m_device, m_fences);
+ deinitCommandBuffers(m_vkd, *m_device, *m_commandPool, m_commandBuffers);
+ deinitFramebuffers(m_vkd, *m_device, m_framebuffers);
+ deinitImageViews(m_vkd, *m_device, m_swapchainImageViews);
+
+ m_swapchainImages.clear();
+ m_imageNextFrames.clear();
+
+ m_swapchain = vk::Move<vk::VkSwapchainKHR>();
+ m_renderPass = vk::Move<vk::VkRenderPass>();
+ m_pipeline = vk::Move<vk::VkPipeline>();
+
+}
+
+// Renders and presents one frame: throttle on the fence signaled by the
+// acquire m_fences.size() frames ago, acquire an image, record a command
+// buffer that replays the frames this image has missed, submit, then present
+// either with VK_KHR_incremental_present dirty rects or the plain path.
+// Fixed: restored "&current..." / "&region..." address-of expressions that had
+// been corrupted into "¤t..." / "®ion..." by HTML-entity mangling
+// (&curren; -> U+00A4, &reg; -> U+00AE); the block did not compile as written.
+void IncrementalPresentTestInstance::render (void)
+{
+ const deUint64 foreverNs = 0xFFFFFFFFFFFFFFFFul;
+ const vk::VkFence fence = m_fences[m_frameNdx % m_fences.size()];
+ const deUint32 width = m_swapchainConfigs[m_swapchainConfigNdx].imageExtent.width;
+ const deUint32 height = m_swapchainConfigs[m_swapchainConfigNdx].imageExtent.height;
+ size_t imageNextFrame;
+
+ // Throttle execution
+ if (m_frameNdx >= m_fences.size())
+ {
+ VK_CHECK(m_vkd.waitForFences(*m_device, 1u, &fence, VK_TRUE, foreverNs));
+ VK_CHECK(m_vkd.resetFences(*m_device, 1u, &fence));
+
+ // The command buffer that shared this fence slot is no longer in use.
+ m_vkd.freeCommandBuffers(*m_device, *m_commandPool, 1u, &m_commandBuffers[m_frameNdx % m_commandBuffers.size()]);
+ m_commandBuffers[m_frameNdx % m_commandBuffers.size()] = (vk::VkCommandBuffer)0;
+ }
+
+ vk::VkSemaphore currentAcquireSemaphore = m_freeAcquireSemaphore;
+ vk::VkSemaphore currentRenderSemaphore = m_freeRenderSemaphore;
+ deUint32 imageIndex;
+
+ // Acquire next image; the fence is signaled here and waited on
+ // m_fences.size() frames later (above).
+ VK_CHECK(m_vkd.acquireNextImageKHR(*m_device, *m_swapchain, foreverNs, currentAcquireSemaphore, fence, &imageIndex));
+
+ // Create command buffer
+ {
+ imageNextFrame = m_imageNextFrames[imageIndex];
+ m_commandBuffers[m_frameNdx % m_commandBuffers.size()] = createCommandBuffer(m_vkd, *m_device, *m_commandPool, *m_pipelineLayout, *m_renderPass, m_framebuffers[imageIndex], *m_pipeline, imageNextFrame, m_frameNdx, width, height).disown();
+ m_imageNextFrames[imageIndex] = m_frameNdx + 1;
+ }
+
+ // Submit command buffer
+ {
+ const vk::VkPipelineStageFlags dstStageMask = vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ const vk::VkSubmitInfo submitInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ DE_NULL,
+ 1u,
+ &currentAcquireSemaphore,
+ &dstStageMask,
+ 1u,
+ &m_commandBuffers[m_frameNdx % m_commandBuffers.size()],
+ 1u,
+ &currentRenderSemaphore
+ };
+
+ VK_CHECK(m_vkd.queueSubmit(m_queue, 1u, &submitInfo, (vk::VkFence)0));
+ }
+
+ // Present frame
+ if (m_useIncrementalPresent)
+ {
+ vk::VkResult result;
+ // Dirty rects cover exactly the frames rendered into this image.
+ const vector<vk::VkRectLayerKHR> rects = getUpdatedRects(imageNextFrame, m_frameNdx, width, height);
+ const vk::VkPresentRegionKHR region =
+ {
+ (deUint32)rects.size(),
+ rects.empty() ? DE_NULL : &rects[0]
+ };
+ const vk::VkPresentRegionsKHR regionInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR,
+ DE_NULL,
+ 1u,
+ &region
+ };
+ const vk::VkPresentInfoKHR presentInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
+ &regionInfo,
+ 1u,
+ &currentRenderSemaphore,
+ 1u,
+ &*m_swapchain,
+ &imageIndex,
+ &result
+ };
+
+ VK_CHECK(m_vkd.queuePresentKHR(m_queue, &presentInfo));
+ VK_CHECK(result);
+ }
+ else
+ {
+ vk::VkResult result;
+ const vk::VkPresentInfoKHR presentInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
+ DE_NULL,
+ 1u,
+ &currentRenderSemaphore,
+ 1u,
+ &*m_swapchain,
+ &imageIndex,
+ &result
+ };
+
+ VK_CHECK(m_vkd.queuePresentKHR(m_queue, &presentInfo));
+ VK_CHECK(result);
+ }
+
+ // Rotate the semaphores: the pair previously associated with this image
+ // becomes free; the pair just used is now owned by the image.
+ {
+ m_freeAcquireSemaphore = m_acquireSemaphores[imageIndex];
+ m_acquireSemaphores[imageIndex] = currentAcquireSemaphore;
+
+ m_freeRenderSemaphore = m_renderSemaphores[imageIndex];
+ m_renderSemaphores[imageIndex] = currentRenderSemaphore;
+ }
+}
+
+// Drives the test: renders m_frameCount frames per swapchain configuration,
+// advancing to the next configuration when done. VK_ERROR_OUT_OF_DATE_KHR is
+// tolerated up to m_maxOutOfDateCount times per configuration by recreating
+// the swapchain; any other Vulkan error fails the current configuration.
+tcu::TestStatus IncrementalPresentTestInstance::iterate (void)
+{
+ // Initialize swapchain specific resources
+ // Render test
+ try
+ {
+ if (m_frameNdx == 0)
+ {
+ if (m_outOfDateCount == 0)
+ m_context.getTestContext().getLog() << tcu::TestLog::Message << "Swapchain: " << m_swapchainConfigs[m_swapchainConfigNdx] << tcu::TestLog::EndMessage;
+
+ initSwapchainResources();
+ }
+
+ render();
+ }
+ catch (const vk::Error& error)
+ {
+ if (error.getError() == vk::VK_ERROR_OUT_OF_DATE_KHR)
+ {
+ // Surface may have changed size; regenerate configs before retrying.
+ m_swapchainConfigs = generateSwapchainConfigs(*m_surface, m_queueFamilyIndex, m_testConfig.scaling, m_surfaceProperties, m_surfaceFormats, m_presentModes, m_testConfig.presentMode);
+
+ if (m_outOfDateCount < m_maxOutOfDateCount)
+ {
+ m_context.getTestContext().getLog() << TestLog::Message << "Frame " << m_frameNdx << ": Swapchain out of date. Recreating resources." << TestLog::EndMessage;
+ deinitSwapchainResources();
+ m_frameNdx = 0;
+ m_outOfDateCount++;
+
+ return tcu::TestStatus::incomplete();
+ }
+ else
+ {
+ m_context.getTestContext().getLog() << TestLog::Message << "Frame " << m_frameNdx << ": Swapchain out of date." << TestLog::EndMessage;
+ m_resultCollector.fail("Received too many VK_ERROR_OUT_OF_DATE_KHR errors. Received " + de::toString(m_outOfDateCount) + ", max " + de::toString(m_maxOutOfDateCount));
+ }
+ }
+ else
+ {
+ m_resultCollector.fail(error.what());
+ }
+
+ // Failure path: give up on this configuration and move to the next one.
+ deinitSwapchainResources();
+
+ m_swapchainConfigNdx++;
+ m_frameNdx = 0;
+ m_outOfDateCount = 0;
+
+ if (m_swapchainConfigNdx >= m_swapchainConfigs.size())
+ return tcu::TestStatus(m_resultCollector.getResult(), m_resultCollector.getMessage());
+ else
+ return tcu::TestStatus::incomplete();
+ }
+
+ m_frameNdx++;
+
+ if (m_frameNdx >= m_frameCount)
+ {
+ // Configuration complete; advance or finish.
+ m_frameNdx = 0;
+ m_outOfDateCount = 0;
+ m_swapchainConfigNdx++;
+
+ deinitSwapchainResources();
+
+ if (m_swapchainConfigNdx >= m_swapchainConfigs.size())
+ return tcu::TestStatus(m_resultCollector.getResult(), m_resultCollector.getMessage());
+ else
+ return tcu::TestStatus::incomplete();
+ }
+ else
+ return tcu::TestStatus::incomplete();
+}
+
+// Shader sources for InstanceFactory1: a vertex shader that synthesizes a
+// full-screen quad from gl_VertexIndex, and a fragment shader that produces a
+// deterministic per-pixel color pattern from the push-constant frame mask.
+struct Programs
+{
+ static void init (vk::SourceCollections& dst, TestConfig)
+ {
+ // NOTE(review): the stray "highp float;" line below looks like it was
+ // meant to be "precision highp float;"; both reference and incremental
+ // paths compile the same source, so behavior is unaffected -- confirm.
+ dst.glslSources.add("quad-vert") << glu::VertexSource(
+ "#version 450\n"
+ "out gl_PerVertex {\n"
+ "\tvec4 gl_Position;\n"
+ "};\n"
+ "highp float;\n"
+ "void main (void) {\n"
+ "\tgl_Position = vec4(((gl_VertexIndex + 2) / 3) % 2 == 0 ? -1.0 : 1.0,\n"
+ "\t ((gl_VertexIndex + 1) / 3) % 2 == 0 ? -1.0 : 1.0, 0.0, 1.0);\n"
+ "}\n");
+ dst.glslSources.add("quad-frag") << glu::FragmentSource(
+ "#version 310 es\n"
+ "layout(location = 0) out highp vec4 o_color;\n"
+ "layout(push_constant) uniform PushConstant {\n"
+ "\thighp uint mask;\n"
+ "} pushConstants;\n"
+ "void main (void)\n"
+ "{\n"
+ "\thighp uint mask = pushConstants.mask;\n"
+ "\thighp uint x = mask ^ uint(gl_FragCoord.x);\n"
+ "\thighp uint y = mask ^ uint(gl_FragCoord.y);\n"
+ "\thighp uint r = 128u * bitfieldExtract(x, 0, 1)\n"
+ "\t + 64u * bitfieldExtract(y, 1, 1)\n"
+ "\t + 32u * bitfieldExtract(x, 3, 1);\n"
+ "\thighp uint g = 128u * bitfieldExtract(y, 0, 1)\n"
+ "\t + 64u * bitfieldExtract(x, 2, 1)\n"
+ "\t + 32u * bitfieldExtract(y, 3, 1);\n"
+ "\thighp uint b = 128u * bitfieldExtract(x, 1, 1)\n"
+ "\t + 64u * bitfieldExtract(y, 2, 1)\n"
+ "\t + 32u * bitfieldExtract(x, 4, 1);\n"
+ "\to_color = vec4(float(r) / 255.0, float(g) / 255.0, float(b) / 255.0, 1.0);\n"
+ "}\n");
+ }
+};
+
+} // anonymous
+
+// Builds the test tree: scaling mode x present mode x {reference,
+// incremental_present}. The "reference" variant presents without dirty rects
+// so results can be compared against the incremental path.
+void createIncrementalPresentTests (tcu::TestCaseGroup* testGroup, vk::wsi::Type wsiType)
+{
+ const struct
+ {
+ Scaling scaling;
+ const char* name;
+ } scaling [] =
+ {
+ { SCALING_NONE, "scale_none" },
+ { SCALING_UP, "scale_up" },
+ { SCALING_DOWN, "scale_down" }
+ };
+ const struct
+ {
+ vk::VkPresentModeKHR mode;
+ const char* name;
+ } presentModes[] =
+ {
+ { vk::VK_PRESENT_MODE_IMMEDIATE_KHR, "immediate" },
+ { vk::VK_PRESENT_MODE_MAILBOX_KHR, "mailbox" },
+ { vk::VK_PRESENT_MODE_FIFO_KHR, "fifo" },
+ { vk::VK_PRESENT_MODE_FIFO_RELAXED_KHR, "fifo_relaxed" }
+ };
+
+ for (size_t scalingNdx = 0; scalingNdx < DE_LENGTH_OF_ARRAY(scaling); scalingNdx++)
+ {
+ // Skip scaling variants on platforms that cannot scale the swapchain
+ // to the window size. NOTE(review): the explicit Wayland check looks
+ // redundant with the platform-properties check below -- confirm.
+ if (scaling[scalingNdx].scaling != SCALING_NONE && wsiType == vk::wsi::TYPE_WAYLAND)
+ continue;
+
+ if (scaling[scalingNdx].scaling != SCALING_NONE && vk::wsi::getPlatformProperties(wsiType).swapchainExtent != vk::wsi::PlatformProperties::SWAPCHAIN_EXTENT_SCALED_TO_WINDOW_SIZE)
+ continue;
+
+ {
+
+ de::MovePtr<tcu::TestCaseGroup> scaleGroup (new tcu::TestCaseGroup(testGroup->getTestContext(), scaling[scalingNdx].name, scaling[scalingNdx].name));
+
+ for (size_t presentModeNdx = 0; presentModeNdx < DE_LENGTH_OF_ARRAY(presentModes); presentModeNdx++)
+ {
+ de::MovePtr<tcu::TestCaseGroup> presentModeGroup (new tcu::TestCaseGroup(testGroup->getTestContext(), presentModes[presentModeNdx].name, presentModes[presentModeNdx].name));
+
+ for (size_t ref = 0; ref < 2; ref++)
+ {
+ const bool isReference = (ref == 0);
+ const char* const name = isReference ? "reference" : "incremental_present";
+ TestConfig config;
+
+ config.wsiType = wsiType;
+ config.scaling = scaling[scalingNdx].scaling;
+ config.useIncrementalPresent = !isReference;
+ config.presentMode = presentModes[presentModeNdx].mode;
+
+ presentModeGroup->addChild(new vkt::InstanceFactory1<IncrementalPresentTestInstance, TestConfig, Programs>(testGroup->getTestContext(), tcu::NODETYPE_SELF_VALIDATE, name, name, Programs(), config));
+ }
+
+ scaleGroup->addChild(presentModeGroup.release());
+ }
+
+ testGroup->addChild(scaleGroup.release());
+ }
+ }
+}
+
+} // wsi
+} // vkt
--- /dev/null
+#ifndef _VKTWSIINCREMENTALPRESENTTESTS_HPP
+#define _VKTWSIINCREMENTALPRESENTTESTS_HPP
+/*-------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2016 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Tests for incremental present extension
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "tcuTestCase.hpp"
+#include "vkDefs.hpp"
+
+namespace vkt
+{
+namespace wsi
+{
+
+// Registers VK_KHR_incremental_present test cases for the given WSI type.
+void createIncrementalPresentTests (tcu::TestCaseGroup* testGroup, vk::wsi::Type wsiType);
+
+} // wsi
+} // vkt
+
+#endif // _VKTWSIINCREMENTALPRESENTTESTS_HPP
extensions.push_back("VK_KHR_surface");
extensions.push_back(getExtensionName(wsiType));
+ // VK_EXT_swapchain_colorspace adds new surface formats. Driver can enumerate
+ // the formats regardless of whether VK_EXT_swapchain_colorspace was enabled,
+ // but using them without enabling the extension is not allowed. Thus we have
+ // two options:
+ //
+ // 1) Filter out non-core formats to stay within valid usage.
+ //
+ // 2) Enable VK_EXT_swapchain colorspace if advertised by the driver.
+ //
+ // We opt for (2) as it provides basic coverage for the extension as a bonus.
+ if (isExtensionSupported(supportedExtensions, RequiredExtension("VK_EXT_swapchain_colorspace")))
+ extensions.push_back("VK_EXT_swapchain_colorspace");
+
checkAllSupported(supportedExtensions, extensions);
return createDefaultInstance(vkp, vector<string>(), extensions, pAllocator);
#include "vktWsiSurfaceTests.hpp"
#include "vktWsiSwapchainTests.hpp"
+#include "vktWsiIncrementalPresentTests.hpp"
+#include "vktWsiDisplayTimingTests.hpp"
namespace vkt
{
void createTypeSpecificTests (tcu::TestCaseGroup* testGroup, vk::wsi::Type wsiType)
{
- addTestGroup(testGroup, "surface", "VkSurface Tests", createSurfaceTests, wsiType);
- addTestGroup(testGroup, "swapchain", "VkSwapchain Tests", createSwapchainTests, wsiType);
+ addTestGroup(testGroup, "surface", "VkSurface Tests", createSurfaceTests, wsiType);
+ addTestGroup(testGroup, "swapchain", "VkSwapchain Tests", createSwapchainTests, wsiType);
+ addTestGroup(testGroup, "incremental_present", "Incremental present tests", createIncrementalPresentTests, wsiType);
+ addTestGroup(testGroup, "display_timing", "Display Timing Tests", createDisplayTimingTests, wsiType);
}
void createWsiTests (tcu::TestCaseGroup* apiTests)
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_declare
dEQP-VK.glsl.linkage.varying.rules.vertex_use_declare_fragment
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_use
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_1
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_2
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_3
dEQP-VK.glsl.linkage.varying.rules.differing_interpolation_2
dEQP-VK.glsl.linkage.varying.basic_types.float
dEQP-VK.glsl.linkage.varying.basic_types.vec2
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_declare
dEQP-VK.glsl.linkage.varying.rules.vertex_use_declare_fragment
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_use
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_1
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_2
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_3
dEQP-VK.glsl.linkage.varying.rules.differing_interpolation_2
dEQP-VK.glsl.linkage.varying.basic_types.float
dEQP-VK.glsl.linkage.varying.basic_types.vec2
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_declare
dEQP-VK.glsl.linkage.varying.rules.vertex_use_declare_fragment
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_use
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_1
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_2
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_3
dEQP-VK.glsl.linkage.varying.rules.differing_interpolation_2
dEQP-VK.glsl.linkage.varying.rules.differing_name_1
dEQP-VK.glsl.linkage.varying.rules.differing_name_2
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_declare
dEQP-VK.glsl.linkage.varying.rules.vertex_use_declare_fragment
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_use
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_1
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_2
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_3
dEQP-VK.glsl.linkage.varying.rules.differing_interpolation_2
dEQP-VK.glsl.linkage.varying.rules.differing_name_1
dEQP-VK.glsl.linkage.varying.rules.differing_name_2
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_declare
dEQP-VK.glsl.linkage.varying.rules.vertex_use_declare_fragment
dEQP-VK.glsl.linkage.varying.rules.vertex_use_fragment_use
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_1
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_2
-dEQP-VK.glsl.linkage.varying.rules.differing_precision_3
dEQP-VK.glsl.linkage.varying.rules.differing_interpolation_2
dEQP-VK.glsl.linkage.varying.rules.differing_name_1
dEQP-VK.glsl.linkage.varying.rules.differing_name_2
dEQP-VK.wsi.xlib.swapchain.render.basic
dEQP-VK.wsi.xlib.swapchain.destroy.null_handle
dEQP-VK.wsi.xlib.swapchain.get_images.incomplete
+dEQP-VK.wsi.xlib.incremental_present.scale_none.immediate.reference
+dEQP-VK.wsi.xlib.incremental_present.scale_none.immediate.incremental_present
+dEQP-VK.wsi.xlib.incremental_present.scale_none.mailbox.reference
+dEQP-VK.wsi.xlib.incremental_present.scale_none.mailbox.incremental_present
+dEQP-VK.wsi.xlib.incremental_present.scale_none.fifo.reference
+dEQP-VK.wsi.xlib.incremental_present.scale_none.fifo.incremental_present
+dEQP-VK.wsi.xlib.incremental_present.scale_none.fifo_relaxed.reference
+dEQP-VK.wsi.xlib.incremental_present.scale_none.fifo_relaxed.incremental_present
+dEQP-VK.wsi.xlib.display_timing.fifo.reference
+dEQP-VK.wsi.xlib.display_timing.fifo.display_timing
+dEQP-VK.wsi.xlib.display_timing.fifo_relaxed.reference
+dEQP-VK.wsi.xlib.display_timing.fifo_relaxed.display_timing
+dEQP-VK.wsi.xlib.display_timing.immediate.reference
+dEQP-VK.wsi.xlib.display_timing.immediate.display_timing
+dEQP-VK.wsi.xlib.display_timing.mailbox.reference
+dEQP-VK.wsi.xlib.display_timing.mailbox.display_timing
dEQP-VK.wsi.xcb.surface.create
dEQP-VK.wsi.xcb.surface.create_custom_allocator
dEQP-VK.wsi.xcb.surface.create_simulate_oom
dEQP-VK.wsi.xcb.swapchain.render.basic
dEQP-VK.wsi.xcb.swapchain.destroy.null_handle
dEQP-VK.wsi.xcb.swapchain.get_images.incomplete
+dEQP-VK.wsi.xcb.incremental_present.scale_none.immediate.reference
+dEQP-VK.wsi.xcb.incremental_present.scale_none.immediate.incremental_present
+dEQP-VK.wsi.xcb.incremental_present.scale_none.mailbox.reference
+dEQP-VK.wsi.xcb.incremental_present.scale_none.mailbox.incremental_present
+dEQP-VK.wsi.xcb.incremental_present.scale_none.fifo.reference
+dEQP-VK.wsi.xcb.incremental_present.scale_none.fifo.incremental_present
+dEQP-VK.wsi.xcb.incremental_present.scale_none.fifo_relaxed.reference
+dEQP-VK.wsi.xcb.incremental_present.scale_none.fifo_relaxed.incremental_present
+dEQP-VK.wsi.xcb.display_timing.fifo.reference
+dEQP-VK.wsi.xcb.display_timing.fifo.display_timing
+dEQP-VK.wsi.xcb.display_timing.fifo_relaxed.reference
+dEQP-VK.wsi.xcb.display_timing.fifo_relaxed.display_timing
+dEQP-VK.wsi.xcb.display_timing.immediate.reference
+dEQP-VK.wsi.xcb.display_timing.immediate.display_timing
+dEQP-VK.wsi.xcb.display_timing.mailbox.reference
+dEQP-VK.wsi.xcb.display_timing.mailbox.display_timing
dEQP-VK.wsi.wayland.surface.create
dEQP-VK.wsi.wayland.surface.create_custom_allocator
dEQP-VK.wsi.wayland.surface.create_simulate_oom
dEQP-VK.wsi.wayland.swapchain.modify.resize
dEQP-VK.wsi.wayland.swapchain.destroy.null_handle
dEQP-VK.wsi.wayland.swapchain.get_images.incomplete
+dEQP-VK.wsi.wayland.incremental_present.scale_none.immediate.reference
+dEQP-VK.wsi.wayland.incremental_present.scale_none.immediate.incremental_present
+dEQP-VK.wsi.wayland.incremental_present.scale_none.mailbox.reference
+dEQP-VK.wsi.wayland.incremental_present.scale_none.mailbox.incremental_present
+dEQP-VK.wsi.wayland.incremental_present.scale_none.fifo.reference
+dEQP-VK.wsi.wayland.incremental_present.scale_none.fifo.incremental_present
+dEQP-VK.wsi.wayland.incremental_present.scale_none.fifo_relaxed.reference
+dEQP-VK.wsi.wayland.incremental_present.scale_none.fifo_relaxed.incremental_present
+dEQP-VK.wsi.wayland.display_timing.fifo.reference
+dEQP-VK.wsi.wayland.display_timing.fifo.display_timing
+dEQP-VK.wsi.wayland.display_timing.fifo_relaxed.reference
+dEQP-VK.wsi.wayland.display_timing.fifo_relaxed.display_timing
+dEQP-VK.wsi.wayland.display_timing.immediate.reference
+dEQP-VK.wsi.wayland.display_timing.immediate.display_timing
+dEQP-VK.wsi.wayland.display_timing.mailbox.reference
+dEQP-VK.wsi.wayland.display_timing.mailbox.display_timing
dEQP-VK.wsi.mir.surface.create
dEQP-VK.wsi.mir.surface.create_custom_allocator
dEQP-VK.wsi.mir.surface.create_simulate_oom
dEQP-VK.wsi.mir.swapchain.modify.resize
dEQP-VK.wsi.mir.swapchain.destroy.null_handle
dEQP-VK.wsi.mir.swapchain.get_images.incomplete
+dEQP-VK.wsi.mir.incremental_present.scale_none.immediate.reference
+dEQP-VK.wsi.mir.incremental_present.scale_none.immediate.incremental_present
+dEQP-VK.wsi.mir.incremental_present.scale_none.mailbox.reference
+dEQP-VK.wsi.mir.incremental_present.scale_none.mailbox.incremental_present
+dEQP-VK.wsi.mir.incremental_present.scale_none.fifo.reference
+dEQP-VK.wsi.mir.incremental_present.scale_none.fifo.incremental_present
+dEQP-VK.wsi.mir.incremental_present.scale_none.fifo_relaxed.reference
+dEQP-VK.wsi.mir.incremental_present.scale_none.fifo_relaxed.incremental_present
+dEQP-VK.wsi.mir.incremental_present.scale_up.immediate.reference
+dEQP-VK.wsi.mir.incremental_present.scale_up.immediate.incremental_present
+dEQP-VK.wsi.mir.incremental_present.scale_up.mailbox.reference
+dEQP-VK.wsi.mir.incremental_present.scale_up.mailbox.incremental_present
+dEQP-VK.wsi.mir.incremental_present.scale_up.fifo.reference
+dEQP-VK.wsi.mir.incremental_present.scale_up.fifo.incremental_present
+dEQP-VK.wsi.mir.incremental_present.scale_up.fifo_relaxed.reference
+dEQP-VK.wsi.mir.incremental_present.scale_up.fifo_relaxed.incremental_present
+dEQP-VK.wsi.mir.incremental_present.scale_down.immediate.reference
+dEQP-VK.wsi.mir.incremental_present.scale_down.immediate.incremental_present
+dEQP-VK.wsi.mir.incremental_present.scale_down.mailbox.reference
+dEQP-VK.wsi.mir.incremental_present.scale_down.mailbox.incremental_present
+dEQP-VK.wsi.mir.incremental_present.scale_down.fifo.reference
+dEQP-VK.wsi.mir.incremental_present.scale_down.fifo.incremental_present
+dEQP-VK.wsi.mir.incremental_present.scale_down.fifo_relaxed.reference
+dEQP-VK.wsi.mir.incremental_present.scale_down.fifo_relaxed.incremental_present
+dEQP-VK.wsi.mir.display_timing.fifo.reference
+dEQP-VK.wsi.mir.display_timing.fifo.display_timing
+dEQP-VK.wsi.mir.display_timing.fifo_relaxed.reference
+dEQP-VK.wsi.mir.display_timing.fifo_relaxed.display_timing
+dEQP-VK.wsi.mir.display_timing.immediate.reference
+dEQP-VK.wsi.mir.display_timing.immediate.display_timing
+dEQP-VK.wsi.mir.display_timing.mailbox.reference
+dEQP-VK.wsi.mir.display_timing.mailbox.display_timing
dEQP-VK.wsi.android.surface.create
dEQP-VK.wsi.android.surface.create_custom_allocator
dEQP-VK.wsi.android.surface.create_simulate_oom
dEQP-VK.wsi.android.swapchain.modify.resize
dEQP-VK.wsi.android.swapchain.destroy.null_handle
dEQP-VK.wsi.android.swapchain.get_images.incomplete
+dEQP-VK.wsi.android.incremental_present.scale_none.immediate.reference
+dEQP-VK.wsi.android.incremental_present.scale_none.immediate.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_none.mailbox.reference
+dEQP-VK.wsi.android.incremental_present.scale_none.mailbox.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_none.fifo.reference
+dEQP-VK.wsi.android.incremental_present.scale_none.fifo.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_none.fifo_relaxed.reference
+dEQP-VK.wsi.android.incremental_present.scale_none.fifo_relaxed.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_up.immediate.reference
+dEQP-VK.wsi.android.incremental_present.scale_up.immediate.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_up.mailbox.reference
+dEQP-VK.wsi.android.incremental_present.scale_up.mailbox.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_up.fifo.reference
+dEQP-VK.wsi.android.incremental_present.scale_up.fifo.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_up.fifo_relaxed.reference
+dEQP-VK.wsi.android.incremental_present.scale_up.fifo_relaxed.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_down.immediate.reference
+dEQP-VK.wsi.android.incremental_present.scale_down.immediate.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_down.mailbox.reference
+dEQP-VK.wsi.android.incremental_present.scale_down.mailbox.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_down.fifo.reference
+dEQP-VK.wsi.android.incremental_present.scale_down.fifo.incremental_present
+dEQP-VK.wsi.android.incremental_present.scale_down.fifo_relaxed.reference
+dEQP-VK.wsi.android.incremental_present.scale_down.fifo_relaxed.incremental_present
+dEQP-VK.wsi.android.display_timing.fifo.reference
+dEQP-VK.wsi.android.display_timing.fifo.display_timing
+dEQP-VK.wsi.android.display_timing.fifo_relaxed.reference
+dEQP-VK.wsi.android.display_timing.fifo_relaxed.display_timing
+dEQP-VK.wsi.android.display_timing.immediate.reference
+dEQP-VK.wsi.android.display_timing.immediate.display_timing
+dEQP-VK.wsi.android.display_timing.mailbox.reference
+dEQP-VK.wsi.android.display_timing.mailbox.display_timing
dEQP-VK.wsi.win32.surface.create
dEQP-VK.wsi.win32.surface.create_custom_allocator
dEQP-VK.wsi.win32.surface.create_simulate_oom
dEQP-VK.wsi.win32.swapchain.render.basic
dEQP-VK.wsi.win32.swapchain.destroy.null_handle
dEQP-VK.wsi.win32.swapchain.get_images.incomplete
+dEQP-VK.wsi.win32.incremental_present.scale_none.immediate.reference
+dEQP-VK.wsi.win32.incremental_present.scale_none.immediate.incremental_present
+dEQP-VK.wsi.win32.incremental_present.scale_none.mailbox.reference
+dEQP-VK.wsi.win32.incremental_present.scale_none.mailbox.incremental_present
+dEQP-VK.wsi.win32.incremental_present.scale_none.fifo.reference
+dEQP-VK.wsi.win32.incremental_present.scale_none.fifo.incremental_present
+dEQP-VK.wsi.win32.incremental_present.scale_none.fifo_relaxed.reference
+dEQP-VK.wsi.win32.incremental_present.scale_none.fifo_relaxed.incremental_present
+dEQP-VK.wsi.win32.display_timing.fifo.reference
+dEQP-VK.wsi.win32.display_timing.fifo.display_timing
+dEQP-VK.wsi.win32.display_timing.fifo_relaxed.reference
+dEQP-VK.wsi.win32.display_timing.fifo_relaxed.display_timing
+dEQP-VK.wsi.win32.display_timing.immediate.reference
+dEQP-VK.wsi.win32.display_timing.immediate.display_timing
+dEQP-VK.wsi.win32.display_timing.mailbox.reference
+dEQP-VK.wsi.win32.display_timing.mailbox.display_timing
dEQP-VK.synchronization.smoke.fences
dEQP-VK.synchronization.smoke.semaphores
dEQP-VK.synchronization.smoke.events
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR = 1000059008,
VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000,
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = 1000080000,
+ VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR = 1000084000,
VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR = 1000085000,
VK_STRUCTURE_TYPE_OBJECT_TABLE_CREATE_INFO_NVX = 1000086000,
VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX = 1000086001,
VK_STRUCTURE_TYPE_CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX = 1000086003,
VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_LIMITS_NVX = 1000086004,
VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_FEATURES_NVX = 1000086005,
+ VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE = 1000092000,
VK_STRUCTURE_TYPE_BEGIN_RANGE = VK_STRUCTURE_TYPE_APPLICATION_INFO,
VK_STRUCTURE_TYPE_END_RANGE = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO,
VK_STRUCTURE_TYPE_RANGE_SIZE = (VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO - VK_STRUCTURE_TYPE_APPLICATION_INFO + 1),
typedef enum VkColorSpaceKHR {
VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0,
+ VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1000104001,
+ VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1000104002,
+ VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = 1000104003,
+ VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1000104004,
+ VK_COLOR_SPACE_BT709_LINEAR_EXT = 1000104005,
+ VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104006,
+ VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104007,
+ VK_COLOR_SPACE_HDR10_ST2084_EXT = 1000104008,
+ VK_COLOR_SPACE_DOLBYVISION_EXT = 1000104009,
+ VK_COLOR_SPACE_HDR10_HLG_EXT = 1000104010,
+ VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011,
+ VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT = 1000104012,
+ VK_COLOR_SPACE_PASS_THROUGH_EXT = 1000104013,
VK_COLOR_SPACE_BEGIN_RANGE_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
VK_COLOR_SPACE_END_RANGE_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR,
VK_COLOR_SPACE_RANGE_SIZE_KHR = (VK_COLOR_SPACE_SRGB_NONLINEAR_KHR - VK_COLOR_SPACE_SRGB_NONLINEAR_KHR + 1),
VkCommandPoolTrimFlagsKHR flags);
#endif
+#define VK_KHR_incremental_present 1
+#define VK_KHR_INCREMENTAL_PRESENT_SPEC_VERSION 1
+#define VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME "VK_KHR_incremental_present"
+
+typedef struct VkRectLayerKHR {
+ VkOffset2D offset;
+ VkExtent2D extent;
+ uint32_t layer;
+} VkRectLayerKHR;
+
+typedef struct VkPresentRegionKHR {
+ uint32_t rectangleCount;
+ const VkRectLayerKHR* pRectangles;
+} VkPresentRegionKHR;
+
+typedef struct VkPresentRegionsKHR {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t swapchainCount;
+ const VkPresentRegionKHR* pRegions;
+} VkPresentRegionsKHR;
+
#define VK_EXT_debug_report 1
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT)
const void* pData);
#endif
+#define VK_GOOGLE_display_timing 1
+#define VK_GOOGLE_DISPLAY_TIMING_SPEC_VERSION 1
+#define VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME "VK_GOOGLE_display_timing"
+
+typedef struct VkRefreshCycleDurationGOOGLE {
+ uint64_t refreshDuration;
+} VkRefreshCycleDurationGOOGLE;
+
+typedef struct VkPastPresentationTimingGOOGLE {
+ uint32_t presentID;
+ uint64_t desiredPresentTime;
+ uint64_t actualPresentTime;
+ uint64_t earliestPresentTime;
+ uint64_t presentMargin;
+} VkPastPresentationTimingGOOGLE;
+
+typedef struct VkPresentTimeGOOGLE {
+ uint32_t presentID;
+ uint64_t desiredPresentTime;
+} VkPresentTimeGOOGLE;
+
+typedef struct VkPresentTimesInfoGOOGLE {
+ VkStructureType sType;
+ const void* pNext;
+ uint32_t swapchainCount;
+ const VkPresentTimeGOOGLE* pTimes;
+} VkPresentTimesInfoGOOGLE;
+
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetRefreshCycleDurationGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPastPresentationTimingGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkGetRefreshCycleDurationGOOGLE(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingGOOGLE(
+ VkDevice device,
+ VkSwapchainKHR swapchain,
+ uint32_t* pPresentationTimingCount,
+ VkPastPresentationTimingGOOGLE* pPresentationTimings);
+#endif
+
+#define VK_EXT_swapchain_colorspace 1
+#define VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION 2
+#define VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME "VK_EXT_swapchain_colorspace"
+
#ifdef __cplusplus
}
#endif