dEQP-VK.glsl.atomic_operations.xor_unsigned64bit_compute_reference
dEQP-VK.glsl.combined_operations.notxor
dEQP-VK.glsl.combined_operations.negintdivand
+dEQP-VK.glsl.crash_test.divbyzero_frag
+dEQP-VK.glsl.crash_test.divbyzero_comp
dEQP-VK.glsl.shader_clock.compute.clockRealtime2x32EXT
dEQP-VK.glsl.combined_operations.notxor
dEQP-VK.glsl.combined_operations.negintdivand
+dEQP-VK.glsl.crash_test.divbyzero_frag
+dEQP-VK.glsl.crash_test.divbyzero_comp
--- /dev/null
+#!amber
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Division-by-zero compute variant. The Vulkan spec requires that division by
+# zero (including the implied divisions inside normalize, mod, smoothstep and
+# atan) produce an unspecified result without interrupting or terminating
+# Vulkan. The test therefore only checks that the dispatch completes and that
+# the final store of 42 into buf0 landed.
+SHADER compute compute_shader GLSL
+#version 450
+layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
+
+layout(binding = 0) buffer block0
+{
+ int data;
+} ssbo_array[20];
+
+void main()
+{
+ // Zero divisors: buf0 holds 0, but reading the value through the SSBO
+ // keeps the compiler from folding the divisions below at compile time.
+ int ival = ssbo_array[0].data;
+ float val = float(ival);
+
+ // int div
+ ssbo_array[1].data = 7 / ival;
+ // float div
+ ssbo_array[2].data = int(7.0 / val);
+ // normalize float
+ ssbo_array[3].data = int(normalize(val));
+ // normalize vec2
+ ssbo_array[4].data = int(normalize(vec2(val))[ival]);
+ // normalize vec3
+ ssbo_array[5].data = int(normalize(vec3(val))[ival]);
+ // normalize vec4
+ ssbo_array[6].data = int(normalize(vec4(val))[ival]);
+ // integer mod
+ ssbo_array[7].data = 7 % ival;
+ // float mod
+ ssbo_array[8].data = int(mod(7.0, val));
+ // vec2 mod
+ ssbo_array[9].data = int(mod(vec2(7.0), vec2(val))[ival]);
+ // vec3 mod
+ ssbo_array[10].data = int(mod(vec3(7.0), vec3(val))[ival]);
+ // vec4 mod
+ ssbo_array[11].data = int(mod(vec4(7.0), vec4(val))[ival]);
+ // float smoothstep (divides by edge1 - edge0, which is zero here)
+ ssbo_array[12].data = int(smoothstep(val, val, 0.3));
+ // vec2 smoothstep
+ ssbo_array[13].data = int(smoothstep(vec2(val), vec2(val), vec2(0.3))[ival]);
+ // vec3 smoothstep
+ ssbo_array[14].data = int(smoothstep(vec3(val), vec3(val), vec3(0.3))[ival]);
+ // vec4 smoothstep
+ ssbo_array[15].data = int(smoothstep(vec4(val), vec4(val), vec4(0.3))[ival]);
+ // float atan2 (atan(y, x) with x == 0)
+ ssbo_array[16].data = int(atan(7.0, val));
+ // vec2 atan2
+ ssbo_array[17].data = int(atan(vec2(7.0), vec2(val))[ival]);
+ // vec3 atan2
+ ssbo_array[18].data = int(atan(vec3(7.0), vec3(val))[ival]);
+ // vec4 atan2
+ ssbo_array[19].data = int(atan(vec4(7.0), vec4(val))[ival]);
+
+ // Known good value: reaching this store means none of the divisions above
+ // terminated the invocation. Checked by the EXPECT at the end of the file.
+ ssbo_array[0].data = 42;
+}
+END
+
+# Twenty single-int32 storage buffers, all zero-initialized. buf0 supplies
+# the zero divisor read by the shader and later receives the sentinel 42;
+# buf1..buf19 only absorb the unspecified division results.
+BUFFER buf0 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf1 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf2 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf3 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf4 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf5 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf6 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf7 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf8 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf9 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf10 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf11 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf12 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf13 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf14 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf15 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf16 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf17 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf18 DATA_TYPE int32 DATA
+0
+END
+BUFFER buf19 DATA_TYPE int32 DATA
+0
+END
+
+# The buffers are bound as a descriptor array matching "ssbo_array[20]" at
+# set 0, binding 0 in the shader.
+PIPELINE compute pipeline
+ ATTACH compute_shader
+
+ BIND BUFFER_ARRAY buf0 buf1 buf2 buf3 buf4 buf5 buf6 buf7 buf8 buf9 buf10 buf11 buf12 buf13 buf14 buf15 buf16 buf17 buf18 buf19 AS storage DESCRIPTOR_SET 0 BINDING 0
+END
+
+RUN pipeline 1 1 1
+
+# Only buf0 is checked: the division results are unspecified by the spec, so
+# the test passes as long as the dispatch completed and 42 was stored.
+EXPECT buf0 IDX 0 EQ 42
--- /dev/null
+#!amber
+
+# Copyright 2021 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Division by zero test. Specification states:
+#
+# "Some calculations require division. In such cases (including implied divisions performed by
+# vector normalization), division by zero produces an unspecified result but must not lead to
+# Vulkan interruption or termination."
+#
+# This test performs various divisions (both implicit and explicit) and succeeds if it doesn't crash.
+
+SHADER vertex vert_shader PASSTHROUGH
+SHADER fragment frag_shader GLSL
+#version 430
+layout(location = 0) out vec4 color_out;
+void main() {
+ // Truncate gl_FragCoord to integer pixel coordinates: x selects the test
+ // case below, y sweeps the divisor through zero.
+ ivec4 ifragcoord = ivec4(gl_FragCoord);
+ vec4 fragcoord = vec4(ifragcoord);
+ // Generate one pixel we can be certain about
+ if (ifragcoord.x == 0 && ifragcoord.y == 0)
+ {
+ color_out = vec4(1.0, 0.0, 0.0, 1.0);
+ }
+ else
+ {
+ // Generate sweep of values which hit 0 as divisor.
+ // When ifragcoord.y hits 8, the integer case becomes "7 / (8 - 8)"
+ switch(ifragcoord.x % 32)
+ {
+ case 0:
+ // int div
+ color_out = vec4(7 / (ifragcoord.y - 8), 1.0, 0.0, 1.0);
+ break;
+ case 1:
+ // float div
+ color_out = vec4(7 / (fragcoord.y - 8.0), 1.0, 0.0, 1.0);
+ break;
+ case 2:
+ // normalize float
+ color_out = vec4(normalize(fragcoord.y - 8.0), 1.0, 0.0, 1.0);
+ break;
+ case 3:
+ // normalize vec2
+ color_out = vec4(normalize(fragcoord.yy - vec2(8.0)), 0.0, 1.0);
+ break;
+ case 4:
+ // normalize vec3
+ color_out = vec4(normalize(fragcoord.yyy - vec3(8.0)), 1.0);
+ break;
+ case 5:
+ // normalize vec4
+ color_out = normalize(fragcoord.yyyy - vec4(8.0));
+ break;
+ case 6:
+ // integer mod
+ color_out = vec4((7 % (ifragcoord.y - 8)), 1.0, 0.0, 1.0);
+ break;
+ case 7:
+ // float mod
+ color_out = vec4(mod(7.0, (fragcoord.y - 8.0)), 1.0, 0.0, 1.0);
+ break;
+ case 8:
+ // vec2 mod
+ color_out = vec4(mod(vec2(7.0), (fragcoord.yy - vec2(8.0))), 0.0, 1.0);
+ break;
+ case 9:
+ // vec3 mod
+ color_out = vec4(mod(vec3(7.0), (fragcoord.yyy - vec3(8.0))), 1.0);
+ break;
+ case 10:
+ // vec4 mod
+ color_out = mod(vec4(7.0), (fragcoord.yyyy - vec4(8.0)));
+ break;
+ case 11:
+ // float smoothstep (divides by edge1 - edge0, zero when y == 15)
+ color_out = vec4(smoothstep(7.0, (fragcoord.y - 8.0), 0.3), 1.0, 0.0, 1.0);
+ break;
+ case 12:
+ // vec2 smoothstep
+ color_out = vec4(smoothstep(vec2(7.0), vec2(fragcoord.y - 8.0), vec2(0.3)), 0.0, 1.0);
+ break;
+ case 13:
+ // vec3 smoothstep
+ color_out = vec4(smoothstep(vec3(7.0), vec3(fragcoord.y - 8.0), vec3(0.3)), 1.0);
+ break;
+ case 14:
+ // vec4 smoothstep
+ color_out = smoothstep(vec4(7.0), vec4(fragcoord.y - 8.0), vec4(0.3));
+ break;
+ case 15:
+ // float atan2 (atan(y, x) with x == 0 when y == 8)
+ color_out = vec4(atan(7.0, (fragcoord.y - 8.0)), 1.0, 0.0, 1.0);
+ break;
+ case 16:
+ // vec2 atan2
+ color_out = vec4(atan(vec2(7.0), (fragcoord.yy - vec2(8.0))), 0.0, 1.0);
+ break;
+ case 17:
+ // vec3 atan2
+ color_out = vec4(atan(vec3(7.0), (fragcoord.yyy - vec3(8.0))), 1.0);
+ break;
+ case 18:
+ // vec4 atan2
+ color_out = atan(vec4(7.0), (fragcoord.yyyy - vec4(8.0)));
+ break;
+ default:
+ // Cases 19..31: no division, just a recognizable filler color.
+ color_out = vec4(0.0, 0.0, 1.0, 1.0);
+ }
+ }
+}
+END
+
+BUFFER framebuffer FORMAT B8G8R8A8_UNORM
+
+PIPELINE graphics my_pipeline
+ ATTACH vert_shader
+ ATTACH frag_shader
+ BIND BUFFER framebuffer AS color LOCATION 0
+END
+
+# A 32x32 draw covers every switch case (selected by x % 32) and includes the
+# y == 8 row where each case's divisor becomes zero.
+RUN my_pipeline DRAW_RECT POS 0 0 SIZE 32 32
+# Check that the pixel we expect is fine
+# Only pixel (0,0) has a specified color (opaque red, written unconditionally
+# by the shader); every other pixel's value is unspecified by the spec.
+EXPECT framebuffer IDX 0 0 SIZE 1 1 EQ_RGBA 255 0 0 255
return group.release();
}
+// Creates the "crash_test" group and registers one Amber test case per entry
+// in crashTests. Each pair is (test name, description); the test name doubles
+// as the basename of the Amber script ("<name>.amber"), and kGroupName is
+// passed as the category — presumably the data subdirectory the script is
+// loaded from; confirm against createAmberTestCase. The caller takes
+// ownership of the returned group.
+tcu::TestCaseGroup* createCrashTestGroup (tcu::TestContext& testCtx)
+{
+ static const std::string kGroupName = "crash_test";
+ static const std::vector<std::pair<std::string, std::string>> crashTests =
+ {
+ { "divbyzero_frag", "Fragment shader division by zero tests" },
+ { "divbyzero_comp", "Compute shader division by zero tests" },
+ };
+
+ de::MovePtr<tcu::TestCaseGroup> group{new tcu::TestCaseGroup{testCtx, kGroupName.c_str(), "Crash test group"}};
+ for (const auto& test : crashTests)
+ {
+ group->addChild(createAmberTestCase(testCtx, test.first.c_str(), test.second.c_str(), kGroupName.c_str(), test.first + ".amber"));
+ }
+ // Release raw pointer; ownership transfers to the caller (addChild'ed
+ // into the parent group by the registration code).
+ return group.release();
+}
+
} // cts_amber
} // vkt
{
tcu::TestCaseGroup* createCombinedOperationsGroup (tcu::TestContext&);
+tcu::TestCaseGroup* createCrashTestGroup (tcu::TestContext&);
} // cts_amber
} // vkt
// Amber GLSL tests.
glslTests->addChild(cts_amber::createCombinedOperationsGroup (testCtx));
+ glslTests->addChild(cts_amber::createCrashTestGroup (testCtx));
}
// TestPackage
dEQP-VK.glsl.shader_clock.compute.clockRealtime2x32EXT
dEQP-VK.glsl.combined_operations.notxor
dEQP-VK.glsl.combined_operations.negintdivand
+dEQP-VK.glsl.crash_test.divbyzero_frag
+dEQP-VK.glsl.crash_test.divbyzero_comp