--- /dev/null
+// Copyright (c) 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Validates correctness of barrier SPIR-V instructions.
+
+#include "validate.h"
+
+#include <tuple>
+
+#include "diagnostic.h"
+#include "opcode.h"
+#include "spirv_target_env.h"
+#include "util/bitutils.h"
+#include "val/instruction.h"
+#include "val/validation_state.h"
+
+namespace libspirv {
+
+namespace {
+
+// Tries to evaluate a 32-bit signed or unsigned scalar integer constant.
+// Returns tuple <is_int32, is_const_int32, value>:
+//   is_int32       - |id|'s type is a 32-bit scalar integer.
+//   is_const_int32 - |id| is additionally an OpConstant/OpSpecConstant, so
+//                    |value| holds its literal; otherwise |value| is 0.
+std::tuple<bool, bool, uint32_t> EvalInt32IfConst(ValidationState_t& _,
+                                                  uint32_t id) {
+  const Instruction* const inst = _.FindDef(id);
+  assert(inst);
+  const uint32_t type = inst->type_id();
+
+  // Wrong type: neither of the remaining checks applies.
+  if (!_.IsIntScalarType(type) || _.GetBitWidth(type) != 32) {
+    return std::make_tuple(false, false, 0);
+  }
+
+  // Correct type, but the value is not known at validation time
+  // (e.g. the id is produced by OpLoad or OpSpecConstantOp).
+  if (inst->opcode() != SpvOpConstant && inst->opcode() != SpvOpSpecConstant) {
+    return std::make_tuple(true, false, 0);
+  }
+
+  // A 32-bit OpConstant/OpSpecConstant has exactly one literal word:
+  // [wordcount|opcode, result type, result id, value].
+  assert(inst->words().size() == 4);
+  return std::make_tuple(true, true, inst->word(3));
+}
+
+// Validates the Execution Scope operand |id| of instruction |inst|.
+// Requires a 32-bit scalar integer; when the value is a compile-time
+// constant, additionally enforces per-environment restrictions.
+spv_result_t ValidateExecutionScope(ValidationState_t& _,
+                                    const spv_parsed_instruction_t* inst,
+                                    uint32_t id) {
+  const SpvOp opcode = static_cast<SpvOp>(inst->opcode);
+  bool is_int32 = false, is_const_int32 = false;
+  uint32_t value = 0;
+  std::tie(is_int32, is_const_int32, value) = EvalInt32IfConst(_, id);
+
+  if (!is_int32) {
+    return _.diag(SPV_ERROR_INVALID_DATA)
+           << spvOpcodeString(opcode)
+           << ": expected Execution Scope to be a 32-bit int";
+  }
+
+  // A non-constant scope cannot be checked further at validation time.
+  if (!is_const_int32) {
+    return SPV_SUCCESS;
+  }
+
+  if (spvIsVulkanEnv(_.context()->target_env)) {
+    if (value != SpvScopeWorkgroup && value != SpvScopeSubgroup) {
+      return _.diag(SPV_ERROR_INVALID_DATA)
+             << spvOpcodeString(opcode)
+             << ": in Vulkan environment Execution Scope is limited to "
+                "Workgroup and Subgroup";
+    }
+  }
+
+  // TODO(atgoo@github.com) Add checks for OpenCL and OpenGL environments.
+
+  return SPV_SUCCESS;
+}
+
+// Validates the Memory Scope operand |id| of instruction |inst|.
+// Requires a 32-bit scalar integer; when the value is a compile-time
+// constant, additionally enforces per-environment restrictions.
+spv_result_t ValidateMemoryScope(ValidationState_t& _,
+                                 const spv_parsed_instruction_t* inst,
+                                 uint32_t id) {
+  const SpvOp opcode = static_cast<SpvOp>(inst->opcode);
+  bool is_int32 = false, is_const_int32 = false;
+  uint32_t value = 0;
+  std::tie(is_int32, is_const_int32, value) = EvalInt32IfConst(_, id);
+
+  if (!is_int32) {
+    return _.diag(SPV_ERROR_INVALID_DATA)
+           << spvOpcodeString(opcode)
+           << ": expected Memory Scope to be a 32-bit int";
+  }
+
+  // A non-constant scope cannot be checked further at validation time.
+  if (!is_const_int32) {
+    return SPV_SUCCESS;
+  }
+
+  if (spvIsVulkanEnv(_.context()->target_env)) {
+    if (value != SpvScopeDevice && value != SpvScopeWorkgroup &&
+        value != SpvScopeInvocation) {
+      return _.diag(SPV_ERROR_INVALID_DATA)
+             << spvOpcodeString(opcode)
+             << ": in Vulkan environment Memory Scope is limited to Device, "
+                "Workgroup and Invocation";
+    }
+  }
+
+  // TODO(atgoo@github.com) Add checks for OpenCL and OpenGL environments.
+
+  return SPV_SUCCESS;
+}
+
+// Validates the Memory Semantics operand |id| of instruction |inst|.
+// Requires a 32-bit scalar integer; when the value is a compile-time
+// constant, checks the combination of semantics bits is legal for |opcode|
+// in the current target environment.
+spv_result_t ValidateMemorySemantics(ValidationState_t& _,
+                                     const spv_parsed_instruction_t* inst,
+                                     uint32_t id) {
+  const SpvOp opcode = static_cast<SpvOp>(inst->opcode);
+  bool is_int32 = false, is_const_int32 = false;
+  uint32_t value = 0;
+  std::tie(is_int32, is_const_int32, value) = EvalInt32IfConst(_, id);
+
+  if (!is_int32) {
+    return _.diag(SPV_ERROR_INVALID_DATA)
+           << spvOpcodeString(opcode)
+           << ": expected Memory Semantics to be a 32-bit int";
+  }
+
+  // A non-constant semantics value cannot be checked further.
+  if (!is_const_int32) {
+    return SPV_SUCCESS;
+  }
+
+  // The four memory-order bits are mutually exclusive.
+  const size_t num_memory_order_set_bits = spvutils::CountSetBits(
+      value & (SpvMemorySemanticsAcquireMask | SpvMemorySemanticsReleaseMask |
+               SpvMemorySemanticsAcquireReleaseMask |
+               SpvMemorySemanticsSequentiallyConsistentMask));
+
+  if (num_memory_order_set_bits > 1) {
+    return _.diag(SPV_ERROR_INVALID_DATA)
+           << spvOpcodeString(opcode)
+           << ": Memory Semantics can have at most one of the following bits "
+              "set: Acquire, Release, AcquireRelease or SequentiallyConsistent";
+  }
+
+  if (spvIsVulkanEnv(_.context()->target_env)) {
+    // True if any Vulkan-supported storage-class semantics bit is set.
+    const bool includes_storage_class =
+        value & (SpvMemorySemanticsUniformMemoryMask |
+                 SpvMemorySemanticsWorkgroupMemoryMask |
+                 SpvMemorySemanticsImageMemoryMask);
+
+    // Vulkan requires OpMemoryBarrier to carry a memory-order bit...
+    if (opcode == SpvOpMemoryBarrier && !num_memory_order_set_bits) {
+      return _.diag(SPV_ERROR_INVALID_DATA)
+             << spvOpcodeString(opcode)
+             << ": Vulkan specification requires Memory Semantics to have one "
+                "of the following bits set: Acquire, Release, AcquireRelease "
+                "or SequentiallyConsistent";
+    }
+
+    // ...and a storage-class bit.
+    if (opcode == SpvOpMemoryBarrier && !includes_storage_class) {
+      return _.diag(SPV_ERROR_INVALID_DATA)
+             << spvOpcodeString(opcode)
+             << ": expected Memory Semantics to include a Vulkan-supported "
+                "storage class";
+    }
+
+#if 0
+    // TODO(atgoo@github.com): this check fails Vulkan CTS, reenable once fixed.
+    if (opcode == SpvOpControlBarrier && value && !includes_storage_class) {
+      return _.diag(SPV_ERROR_INVALID_DATA)
+             << spvOpcodeString(opcode)
+             << ": expected Memory Semantics to include a Vulkan-supported "
+                "storage class if Memory Semantics is not None";
+    }
+#endif
+  }
+
+  // TODO(atgoo@github.com) Add checks for OpenCL and OpenGL environments.
+
+  return SPV_SUCCESS;
+}
+
+} // anonymous namespace
+
+// Validates correctness of barrier instructions.
+// Dispatches on the opcode of |inst| and checks each operand; unrelated
+// opcodes pass through unchanged. Note that inst->words[0] holds the
+// wordcount/opcode pair, so operand ids start at words[1].
+spv_result_t BarriersPass(ValidationState_t& _,
+                          const spv_parsed_instruction_t* inst) {
+  const SpvOp opcode = static_cast<SpvOp>(inst->opcode);
+  const uint32_t result_type = inst->type_id;
+
+  switch (opcode) {
+    case SpvOpControlBarrier: {
+      // The execution model is not known until entry points are resolved,
+      // so register a limitation to be checked later instead of checking
+      // the model here.
+      _.current_function().RegisterExecutionModelLimitation(
+          [](SpvExecutionModel model, std::string* message) {
+            if (model != SpvExecutionModelTessellationControl &&
+                model != SpvExecutionModelGLCompute &&
+                model != SpvExecutionModelKernel) {
+              if (message) {
+                *message =
+                    "OpControlBarrier requires one of the following Execution "
+                    "Models: TessellationControl, GLCompute or Kernel";
+              }
+              return false;
+            }
+            return true;
+          });
+
+      // Operands: Execution Scope, Memory Scope, Memory Semantics.
+      const uint32_t execution_scope = inst->words[1];
+      const uint32_t memory_scope = inst->words[2];
+      const uint32_t memory_semantics = inst->words[3];
+
+      if (auto error = ValidateExecutionScope(_, inst, execution_scope)) {
+        return error;
+      }
+
+      if (auto error = ValidateMemoryScope(_, inst, memory_scope)) {
+        return error;
+      }
+
+      if (auto error = ValidateMemorySemantics(_, inst, memory_semantics)) {
+        return error;
+      }
+      break;
+    }
+
+    case SpvOpMemoryBarrier: {
+      // Operands: Memory Scope, Memory Semantics.
+      const uint32_t memory_scope = inst->words[1];
+      const uint32_t memory_semantics = inst->words[2];
+
+      if (auto error = ValidateMemoryScope(_, inst, memory_scope)) {
+        return error;
+      }
+
+      if (auto error = ValidateMemorySemantics(_, inst, memory_semantics)) {
+        return error;
+      }
+      break;
+    }
+
+    case SpvOpNamedBarrierInitialize: {
+      if (_.GetIdOpcode(result_type) != SpvOpTypeNamedBarrier) {
+        return _.diag(SPV_ERROR_INVALID_DATA)
+               << spvOpcodeString(opcode)
+               << ": expected Result Type to be OpTypeNamedBarrier";
+      }
+
+      // Operand 2 is Subgroup Count (operands 0-1 are type and result ids).
+      const uint32_t subgroup_count_type = _.GetOperandTypeId(inst, 2);
+      if (!_.IsIntScalarType(subgroup_count_type) ||
+          _.GetBitWidth(subgroup_count_type) != 32) {
+        return _.diag(SPV_ERROR_INVALID_DATA)
+               << spvOpcodeString(opcode)
+               << ": expected Subgroup Count to be a 32-bit int";
+      }
+      break;
+    }
+
+    case SpvOpMemoryNamedBarrier: {
+      const uint32_t named_barrier_type = _.GetOperandTypeId(inst, 0);
+      if (_.GetIdOpcode(named_barrier_type) != SpvOpTypeNamedBarrier) {
+        return _.diag(SPV_ERROR_INVALID_DATA)
+               << spvOpcodeString(opcode)
+               << ": expected Named Barrier to be of type OpTypeNamedBarrier";
+      }
+
+      // Operands: Named Barrier, Memory Scope, Memory Semantics.
+      const uint32_t memory_scope = inst->words[2];
+      const uint32_t memory_semantics = inst->words[3];
+
+      if (auto error = ValidateMemoryScope(_, inst, memory_scope)) {
+        return error;
+      }
+
+      if (auto error = ValidateMemorySemantics(_, inst, memory_semantics)) {
+        return error;
+      }
+      break;
+    }
+
+    default:
+      break;
+  }
+
+  return SPV_SUCCESS;
+}
+
+} // namespace libspirv
--- /dev/null
+// Copyright (c) 2018 Google LLC.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <sstream>
+#include <string>
+
+#include "gmock/gmock.h"
+#include "unit_spirv.h"
+#include "val_fixtures.h"
+
+namespace {
+
+using ::testing::HasSubstr;
+using ::testing::Not;
+
+using ValidateBarriers = spvtest::ValidateBase<bool>;
+
+// Assembles a complete Shader-capability SPIR-V module around |body|.
+// |capabilities_and_extensions| is spliced in after the fixed capabilities,
+// and |execution_model| selects the OpEntryPoint model (default GLCompute).
+// The preamble declares scope constants with values 0..4 (%cross_device ..
+// %invocation) and memory-semantics constants named after the SPIR-V mask
+// bits they combine, e.g. %acquire_release_uniform_workgroup = 328 = 0x148 =
+// AcquireRelease(0x8) | UniformMemory(0x40) | WorkgroupMemory(0x100).
+std::string GenerateShaderCode(
+    const std::string& body,
+    const std::string& capabilities_and_extensions = "",
+    const std::string& execution_model = "GLCompute") {
+  std::ostringstream ss;
+  ss << R"(
+OpCapability Shader
+OpCapability Int64
+)";
+
+  ss << capabilities_and_extensions;
+  ss << "OpMemoryModel Logical GLSL450\n";
+  ss << "OpEntryPoint " << execution_model << " %main \"main\"\n";
+
+  ss << R"(
+%void = OpTypeVoid
+%func = OpTypeFunction %void
+%bool = OpTypeBool
+%f32 = OpTypeFloat 32
+%u32 = OpTypeInt 32 0
+%u64 = OpTypeInt 64 0
+
+%f32_0 = OpConstant %f32 0
+%f32_1 = OpConstant %f32 1
+%u32_0 = OpConstant %u32 0
+%u32_1 = OpConstant %u32 1
+%u32_4 = OpConstant %u32 4
+%u64_0 = OpConstant %u64 0
+%u64_1 = OpConstant %u64 1
+
+%cross_device = OpConstant %u32 0
+%device = OpConstant %u32 1
+%workgroup = OpConstant %u32 2
+%subgroup = OpConstant %u32 3
+%invocation = OpConstant %u32 4
+
+%none = OpConstant %u32 0
+%acquire = OpConstant %u32 2
+%release = OpConstant %u32 4
+%acquire_release = OpConstant %u32 8
+%acquire_and_release = OpConstant %u32 6
+%sequentially_consistent = OpConstant %u32 16
+%acquire_release_uniform_workgroup = OpConstant %u32 328
+%acquire_and_release_uniform = OpConstant %u32 70
+%acquire_release_subgroup = OpConstant %u32 136
+%uniform = OpConstant %u32 64
+
+%main = OpFunction %void None %func
+%main_entry = OpLabel
+)";
+
+  ss << body;
+
+  ss << R"(
+OpReturn
+OpFunctionEnd)";
+
+  return ss.str();
+}
+
+// Assembles a complete Kernel-capability (OpenCL-style) SPIR-V module
+// around |body|. Unlike GenerateShaderCode, it declares NamedBarrier
+// capability and the %named_barrier type so the OpNamedBarrier* tests can
+// run. Scope and memory-semantics constants mirror GenerateShaderCode.
+std::string GenerateKernelCode(
+    const std::string& body,
+    const std::string& capabilities_and_extensions = "") {
+  std::ostringstream ss;
+  ss << R"(
+OpCapability Addresses
+OpCapability Kernel
+OpCapability Linkage
+OpCapability Int64
+OpCapability NamedBarrier
+)";
+
+  ss << capabilities_and_extensions;
+  ss << R"(
+OpMemoryModel Physical32 OpenCL
+%void = OpTypeVoid
+%func = OpTypeFunction %void
+%bool = OpTypeBool
+%f32 = OpTypeFloat 32
+%u32 = OpTypeInt 32 0
+%u64 = OpTypeInt 64 0
+
+%f32_0 = OpConstant %f32 0
+%f32_1 = OpConstant %f32 1
+%f32_4 = OpConstant %f32 4
+%u32_0 = OpConstant %u32 0
+%u32_1 = OpConstant %u32 1
+%u32_4 = OpConstant %u32 4
+%u64_0 = OpConstant %u64 0
+%u64_1 = OpConstant %u64 1
+%u64_4 = OpConstant %u64 4
+
+%cross_device = OpConstant %u32 0
+%device = OpConstant %u32 1
+%workgroup = OpConstant %u32 2
+%subgroup = OpConstant %u32 3
+%invocation = OpConstant %u32 4
+
+%none = OpConstant %u32 0
+%acquire = OpConstant %u32 2
+%release = OpConstant %u32 4
+%acquire_release = OpConstant %u32 8
+%acquire_and_release = OpConstant %u32 6
+%sequentially_consistent = OpConstant %u32 16
+%acquire_release_uniform_workgroup = OpConstant %u32 328
+%acquire_and_release_uniform = OpConstant %u32 70
+%uniform = OpConstant %u32 64
+
+%named_barrier = OpTypeNamedBarrier
+
+%main = OpFunction %void None %func
+%main_entry = OpLabel
+)";
+
+  ss << body;
+
+  ss << R"(
+OpReturn
+OpFunctionEnd)";
+
+  return ss.str();
+}
+
+// Valid OpControlBarrier usage in a GLCompute shader (universal env).
+TEST_F(ValidateBarriers, OpControlBarrierGLComputeSuccess) {
+  const std::string body = R"(
+OpControlBarrier %device %device %none
+OpControlBarrier %workgroup %workgroup %acquire
+OpControlBarrier %workgroup %device %release
+OpControlBarrier %cross_device %cross_device %acquire_release
+OpControlBarrier %cross_device %cross_device %sequentially_consistent
+OpControlBarrier %cross_device %cross_device %acquire_release_uniform_workgroup
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body));
+  ASSERT_EQ(SPV_SUCCESS, ValidateInstructions());
+}
+
+// Same barriers validate under the Kernel execution model.
+TEST_F(ValidateBarriers, OpControlBarrierKernelSuccess) {
+  const std::string body = R"(
+OpControlBarrier %device %device %none
+OpControlBarrier %workgroup %workgroup %acquire
+OpControlBarrier %workgroup %device %release
+OpControlBarrier %cross_device %cross_device %acquire_release
+OpControlBarrier %cross_device %cross_device %sequentially_consistent
+OpControlBarrier %cross_device %cross_device %acquire_release_uniform_workgroup
+)";
+
+  CompileSuccessfully(GenerateKernelCode(body), SPV_ENV_UNIVERSAL_1_1);
+  ASSERT_EQ(SPV_SUCCESS, ValidateInstructions(SPV_ENV_UNIVERSAL_1_1));
+}
+
+// Same barriers validate under TessellationControl.
+// NOTE(review): "Tesselation" in the test name is a typo for "Tessellation";
+// left as-is to avoid breaking --gtest_filter patterns.
+TEST_F(ValidateBarriers, OpControlBarrierTesselationControlSuccess) {
+  const std::string body = R"(
+OpControlBarrier %device %device %none
+OpControlBarrier %workgroup %workgroup %acquire
+OpControlBarrier %workgroup %device %release
+OpControlBarrier %cross_device %cross_device %acquire_release
+OpControlBarrier %cross_device %cross_device %sequentially_consistent
+OpControlBarrier %cross_device %cross_device %acquire_release_uniform_workgroup
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body, "OpCapability Tessellation\n",
+                                         "TessellationControl"));
+  ASSERT_EQ(SPV_SUCCESS, ValidateInstructions());
+}
+
+// Vulkan allows Workgroup execution scope with Device/Workgroup memory scope.
+TEST_F(ValidateBarriers, OpControlBarrierVulkanSuccess) {
+  const std::string body = R"(
+OpControlBarrier %workgroup %device %none
+OpControlBarrier %workgroup %workgroup %acquire_release_uniform_workgroup
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body), SPV_ENV_VULKAN_1_0);
+  ASSERT_EQ(SPV_SUCCESS, ValidateInstructions(SPV_ENV_VULKAN_1_0));
+}
+
+// Fragment is not an allowed execution model for OpControlBarrier.
+TEST_F(ValidateBarriers, OpControlBarrierExecutionModelFragment) {
+  const std::string body = R"(
+OpControlBarrier %device %device %none
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body, "", "Fragment"));
+  ASSERT_EQ(SPV_ERROR_INVALID_ID, ValidateInstructions());
+  EXPECT_THAT(
+      getDiagnosticString(),
+      HasSubstr("OpControlBarrier requires one of the following Execution "
+                "Models: TessellationControl, GLCompute or Kernel"));
+}
+
+// Execution Scope must be an integer, not a float.
+TEST_F(ValidateBarriers, OpControlBarrierFloatExecutionScope) {
+  const std::string body = R"(
+OpControlBarrier %f32_1 %device %none
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body));
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions());
+  EXPECT_THAT(
+      getDiagnosticString(),
+      HasSubstr("ControlBarrier: expected Execution Scope to be a 32-bit int"));
+}
+
+// Execution Scope must be exactly 32 bits wide.
+TEST_F(ValidateBarriers, OpControlBarrierU64ExecutionScope) {
+  const std::string body = R"(
+OpControlBarrier %u64_1 %device %none
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body));
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions());
+  EXPECT_THAT(
+      getDiagnosticString(),
+      HasSubstr("ControlBarrier: expected Execution Scope to be a 32-bit int"));
+}
+
+// Memory Scope must be an integer, not a float.
+TEST_F(ValidateBarriers, OpControlBarrierFloatMemoryScope) {
+  const std::string body = R"(
+OpControlBarrier %device %f32_1 %none
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body));
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions());
+  EXPECT_THAT(
+      getDiagnosticString(),
+      HasSubstr("ControlBarrier: expected Memory Scope to be a 32-bit int"));
+}
+
+// Memory Scope must be exactly 32 bits wide.
+TEST_F(ValidateBarriers, OpControlBarrierU64MemoryScope) {
+  const std::string body = R"(
+OpControlBarrier %device %u64_1 %none
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body));
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions());
+  EXPECT_THAT(
+      getDiagnosticString(),
+      HasSubstr("ControlBarrier: expected Memory Scope to be a 32-bit int"));
+}
+
+// Memory Semantics must be an integer, not a float.
+TEST_F(ValidateBarriers, OpControlBarrierFloatMemorySemantics) {
+  const std::string body = R"(
+OpControlBarrier %device %device %f32_0
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body));
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions());
+  EXPECT_THAT(
+      getDiagnosticString(),
+      HasSubstr(
+          "ControlBarrier: expected Memory Semantics to be a 32-bit int"));
+}
+
+// Memory Semantics must be exactly 32 bits wide.
+TEST_F(ValidateBarriers, OpControlBarrierU64MemorySemantics) {
+  const std::string body = R"(
+OpControlBarrier %device %device %u64_0
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body));
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions());
+  EXPECT_THAT(
+      getDiagnosticString(),
+      HasSubstr(
+          "ControlBarrier: expected Memory Semantics to be a 32-bit int"));
+}
+
+// Vulkan restricts Execution Scope to Workgroup/Subgroup; Device is rejected.
+TEST_F(ValidateBarriers, OpControlBarrierVulkanExecutionScopeDevice) {
+  const std::string body = R"(
+OpControlBarrier %device %workgroup %none
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body), SPV_ENV_VULKAN_1_0);
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions(SPV_ENV_VULKAN_1_0));
+  EXPECT_THAT(getDiagnosticString(),
+              HasSubstr("ControlBarrier: in Vulkan environment Execution Scope "
+                        "is limited to Workgroup and Subgroup"));
+}
+
+// Vulkan restricts Memory Scope to Device/Workgroup/Invocation; Subgroup
+// is rejected.
+TEST_F(ValidateBarriers, OpControlBarrierVulkanMemoryScopeSubgroup) {
+  const std::string body = R"(
+OpControlBarrier %subgroup %subgroup %none
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body), SPV_ENV_VULKAN_1_0);
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions(SPV_ENV_VULKAN_1_0));
+  EXPECT_THAT(getDiagnosticString(),
+              HasSubstr("ControlBarrier: in Vulkan environment Memory Scope is "
+                        "limited to Device, Workgroup and Invocation"));
+}
+
+// Acquire (0x2) and Release (0x4) set together (%..._uniform = 70 = 0x46)
+// violates the one-memory-order-bit rule.
+TEST_F(ValidateBarriers, OpControlBarrierAcquireAndRelease) {
+  const std::string body = R"(
+OpControlBarrier %device %device %acquire_and_release_uniform
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body));
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions());
+  EXPECT_THAT(getDiagnosticString(),
+              HasSubstr("ControlBarrier: Memory Semantics can have at most one "
+                        "of the following bits set: Acquire, Release, "
+                        "AcquireRelease or SequentiallyConsistent"));
+}
+
+// TODO(atgoo@github.com): the corresponding check fails Vulkan CTS,
+// reenable once fixed.
+// Disabled together with the #if 0 block in ValidateMemorySemantics.
+TEST_F(ValidateBarriers, DISABLED_OpControlBarrierVulkanSubgroupStorageClass) {
+  const std::string body = R"(
+OpControlBarrier %workgroup %device %acquire_release_subgroup
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body), SPV_ENV_VULKAN_1_0);
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions(SPV_ENV_VULKAN_1_0));
+  EXPECT_THAT(
+      getDiagnosticString(),
+      HasSubstr(
+          "ControlBarrier: expected Memory Semantics to include a "
+          "Vulkan-supported storage class if Memory Semantics is not None"));
+}
+
+// Valid OpMemoryBarrier usage in the universal environment.
+TEST_F(ValidateBarriers, OpMemoryBarrierSuccess) {
+  const std::string body = R"(
+OpMemoryBarrier %cross_device %acquire_release_uniform_workgroup
+OpMemoryBarrier %device %uniform
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body));
+  ASSERT_EQ(SPV_SUCCESS, ValidateInstructions());
+}
+
+// Same barriers validate under the Kernel execution model.
+TEST_F(ValidateBarriers, OpMemoryBarrierKernelSuccess) {
+  const std::string body = R"(
+OpMemoryBarrier %cross_device %acquire_release_uniform_workgroup
+OpMemoryBarrier %device %uniform
+)";
+
+  CompileSuccessfully(GenerateKernelCode(body), SPV_ENV_UNIVERSAL_1_1);
+  ASSERT_EQ(SPV_SUCCESS, ValidateInstructions(SPV_ENV_UNIVERSAL_1_1));
+}
+
+// Vulkan accepts a memory-order bit plus storage-class bits.
+TEST_F(ValidateBarriers, OpMemoryBarrierVulkanSuccess) {
+  const std::string body = R"(
+OpMemoryBarrier %workgroup %acquire_release_uniform_workgroup
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body), SPV_ENV_VULKAN_1_0);
+  ASSERT_EQ(SPV_SUCCESS, ValidateInstructions(SPV_ENV_VULKAN_1_0));
+}
+
+// Memory Scope must be an integer, not a float.
+TEST_F(ValidateBarriers, OpMemoryBarrierFloatMemoryScope) {
+  const std::string body = R"(
+OpMemoryBarrier %f32_1 %acquire_release_uniform_workgroup
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body));
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions());
+  EXPECT_THAT(
+      getDiagnosticString(),
+      HasSubstr("MemoryBarrier: expected Memory Scope to be a 32-bit int"));
+}
+
+// Memory Scope must be exactly 32 bits wide.
+TEST_F(ValidateBarriers, OpMemoryBarrierU64MemoryScope) {
+  const std::string body = R"(
+OpMemoryBarrier %u64_1 %acquire_release_uniform_workgroup
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body));
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions());
+  EXPECT_THAT(
+      getDiagnosticString(),
+      HasSubstr("MemoryBarrier: expected Memory Scope to be a 32-bit int"));
+}
+
+// Memory Semantics must be an integer, not a float.
+TEST_F(ValidateBarriers, OpMemoryBarrierFloatMemorySemantics) {
+  const std::string body = R"(
+OpMemoryBarrier %device %f32_0
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body));
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions());
+  EXPECT_THAT(
+      getDiagnosticString(),
+      HasSubstr("MemoryBarrier: expected Memory Semantics to be a 32-bit int"));
+}
+
+// Memory Semantics must be exactly 32 bits wide.
+TEST_F(ValidateBarriers, OpMemoryBarrierU64MemorySemantics) {
+  const std::string body = R"(
+OpMemoryBarrier %device %u64_0
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body));
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions());
+  EXPECT_THAT(
+      getDiagnosticString(),
+      HasSubstr("MemoryBarrier: expected Memory Semantics to be a 32-bit int"));
+}
+
+// Vulkan rejects Subgroup memory scope for OpMemoryBarrier.
+TEST_F(ValidateBarriers, OpMemoryBarrierVulkanMemoryScopeSubgroup) {
+  const std::string body = R"(
+OpMemoryBarrier %subgroup %acquire_release_uniform_workgroup
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body), SPV_ENV_VULKAN_1_0);
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions(SPV_ENV_VULKAN_1_0));
+  EXPECT_THAT(getDiagnosticString(),
+              HasSubstr("MemoryBarrier: in Vulkan environment Memory Scope is "
+                        "limited to Device, Workgroup and Invocation"));
+}
+
+// Acquire and Release set together violates the one-memory-order-bit rule.
+TEST_F(ValidateBarriers, OpMemoryBarrierAcquireAndRelease) {
+  const std::string body = R"(
+OpMemoryBarrier %device %acquire_and_release_uniform
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body));
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions());
+  EXPECT_THAT(getDiagnosticString(),
+              HasSubstr("MemoryBarrier: Memory Semantics can have at most one "
+                        "of the following bits set: Acquire, Release, "
+                        "AcquireRelease or SequentiallyConsistent"));
+}
+
+// Vulkan requires OpMemoryBarrier to carry a memory-order bit; None fails.
+TEST_F(ValidateBarriers, OpMemoryBarrierVulkanMemorySemanticsNone) {
+  const std::string body = R"(
+OpMemoryBarrier %device %none
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body), SPV_ENV_VULKAN_1_0);
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions(SPV_ENV_VULKAN_1_0));
+  EXPECT_THAT(
+      getDiagnosticString(),
+      HasSubstr("MemoryBarrier: Vulkan specification requires Memory Semantics "
+                "to have one of the following bits set: Acquire, Release, "
+                "AcquireRelease or SequentiallyConsistent"));
+}
+
+// Vulkan also requires a supported storage-class bit; Acquire alone fails.
+TEST_F(ValidateBarriers, OpMemoryBarrierVulkanMemorySemanticsAcquire) {
+  const std::string body = R"(
+OpMemoryBarrier %device %acquire
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body), SPV_ENV_VULKAN_1_0);
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions(SPV_ENV_VULKAN_1_0));
+  EXPECT_THAT(getDiagnosticString(),
+              HasSubstr("MemoryBarrier: expected Memory Semantics to include a "
+                        "Vulkan-supported storage class"));
+}
+
+// SubgroupMemory (0x80) is not a Vulkan-supported storage-class bit.
+TEST_F(ValidateBarriers, OpMemoryBarrierVulkanSubgroupStorageClass) {
+  const std::string body = R"(
+OpMemoryBarrier %device %acquire_release_subgroup
+)";
+
+  CompileSuccessfully(GenerateShaderCode(body), SPV_ENV_VULKAN_1_0);
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA, ValidateInstructions(SPV_ENV_VULKAN_1_0));
+  EXPECT_THAT(getDiagnosticString(),
+              HasSubstr("MemoryBarrier: expected Memory Semantics to include a "
+                        "Vulkan-supported storage class"));
+}
+
+// Valid OpNamedBarrierInitialize: named-barrier result type, u32 count.
+TEST_F(ValidateBarriers, OpNamedBarrierInitializeSuccess) {
+  const std::string body = R"(
+%barrier = OpNamedBarrierInitialize %named_barrier %u32_4
+)";
+
+  CompileSuccessfully(GenerateKernelCode(body), SPV_ENV_UNIVERSAL_1_1);
+  ASSERT_EQ(SPV_SUCCESS, ValidateInstructions(SPV_ENV_UNIVERSAL_1_1));
+}
+
+// Result Type must be OpTypeNamedBarrier, not a plain integer type.
+TEST_F(ValidateBarriers, OpNamedBarrierInitializeWrongResultType) {
+  const std::string body = R"(
+%barrier = OpNamedBarrierInitialize %u32 %u32_4
+)";
+
+  CompileSuccessfully(GenerateKernelCode(body), SPV_ENV_UNIVERSAL_1_1);
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA,
+            ValidateInstructions(SPV_ENV_UNIVERSAL_1_1));
+  EXPECT_THAT(getDiagnosticString(),
+              HasSubstr("NamedBarrierInitialize: expected Result Type to be "
+                        "OpTypeNamedBarrier"));
+}
+
+// Subgroup Count must be an integer, not a float.
+TEST_F(ValidateBarriers, OpNamedBarrierInitializeFloatSubgroupCount) {
+  const std::string body = R"(
+%barrier = OpNamedBarrierInitialize %named_barrier %f32_4
+)";
+
+  CompileSuccessfully(GenerateKernelCode(body), SPV_ENV_UNIVERSAL_1_1);
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA,
+            ValidateInstructions(SPV_ENV_UNIVERSAL_1_1));
+  EXPECT_THAT(getDiagnosticString(),
+              HasSubstr("NamedBarrierInitialize: expected Subgroup Count to be "
+                        "a 32-bit int"));
+}
+
+// Subgroup Count must be exactly 32 bits wide.
+TEST_F(ValidateBarriers, OpNamedBarrierInitializeU64SubgroupCount) {
+  const std::string body = R"(
+%barrier = OpNamedBarrierInitialize %named_barrier %u64_4
+)";
+
+  CompileSuccessfully(GenerateKernelCode(body), SPV_ENV_UNIVERSAL_1_1);
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA,
+            ValidateInstructions(SPV_ENV_UNIVERSAL_1_1));
+  EXPECT_THAT(getDiagnosticString(),
+              HasSubstr("NamedBarrierInitialize: expected Subgroup Count to be "
+                        "a 32-bit int"));
+}
+
+// Valid OpMemoryNamedBarrier on a freshly initialized named barrier.
+TEST_F(ValidateBarriers, OpMemoryNamedBarrierSuccess) {
+  const std::string body = R"(
+%barrier = OpNamedBarrierInitialize %named_barrier %u32_4
+OpMemoryNamedBarrier %barrier %workgroup %acquire_release_uniform_workgroup
+)";
+
+  CompileSuccessfully(GenerateKernelCode(body), SPV_ENV_UNIVERSAL_1_1);
+  ASSERT_EQ(SPV_SUCCESS, ValidateInstructions(SPV_ENV_UNIVERSAL_1_1));
+}
+
+// First operand must be of type OpTypeNamedBarrier.
+TEST_F(ValidateBarriers, OpMemoryNamedBarrierNotNamedBarrier) {
+  const std::string body = R"(
+OpMemoryNamedBarrier %u32_1 %workgroup %acquire_release_uniform_workgroup
+)";
+
+  CompileSuccessfully(GenerateKernelCode(body), SPV_ENV_UNIVERSAL_1_1);
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA,
+            ValidateInstructions(SPV_ENV_UNIVERSAL_1_1));
+  EXPECT_THAT(getDiagnosticString(),
+              HasSubstr("MemoryNamedBarrier: expected Named Barrier to be of "
+                        "type OpTypeNamedBarrier"));
+}
+
+// Memory Scope must be an integer, not a float.
+TEST_F(ValidateBarriers, OpMemoryNamedBarrierFloatMemoryScope) {
+  const std::string body = R"(
+%barrier = OpNamedBarrierInitialize %named_barrier %u32_4
+OpMemoryNamedBarrier %barrier %f32_1 %acquire_release_uniform_workgroup
+)";
+
+  CompileSuccessfully(GenerateKernelCode(body), SPV_ENV_UNIVERSAL_1_1);
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA,
+            ValidateInstructions(SPV_ENV_UNIVERSAL_1_1));
+  EXPECT_THAT(
+      getDiagnosticString(),
+      HasSubstr(
+          "MemoryNamedBarrier: expected Memory Scope to be a 32-bit int"));
+}
+
+// Memory Semantics must be an integer, not a float.
+TEST_F(ValidateBarriers, OpMemoryNamedBarrierFloatMemorySemantics) {
+  const std::string body = R"(
+%barrier = OpNamedBarrierInitialize %named_barrier %u32_4
+OpMemoryNamedBarrier %barrier %workgroup %f32_0
+)";
+
+  CompileSuccessfully(GenerateKernelCode(body), SPV_ENV_UNIVERSAL_1_1);
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA,
+            ValidateInstructions(SPV_ENV_UNIVERSAL_1_1));
+  EXPECT_THAT(
+      getDiagnosticString(),
+      HasSubstr(
+          "MemoryNamedBarrier: expected Memory Semantics to be a 32-bit int"));
+}
+
+// Acquire and Release set together violates the one-memory-order-bit rule.
+TEST_F(ValidateBarriers, OpMemoryNamedBarrierAcquireAndRelease) {
+  const std::string body = R"(
+%barrier = OpNamedBarrierInitialize %named_barrier %u32_4
+OpMemoryNamedBarrier %barrier %workgroup %acquire_and_release_uniform
+)";
+
+  CompileSuccessfully(GenerateKernelCode(body), SPV_ENV_UNIVERSAL_1_1);
+  ASSERT_EQ(SPV_ERROR_INVALID_DATA,
+            ValidateInstructions(SPV_ENV_UNIVERSAL_1_1));
+  EXPECT_THAT(getDiagnosticString(),
+              HasSubstr("MemoryNamedBarrier: Memory Semantics can have at most "
+                        "one of the following bits set: Acquire, Release, "
+                        "AcquireRelease or SequentiallyConsistent"));
+}
+
+} // anonymous namespace