return ValueMI;
}
+// Return the integer constant carried by the given register, which must be
+// defined by the spv_track_constant intrinsic tracking a G_CONSTANT.
+// TODO: maybe unify with prelegalizer pass.
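+// For example (a sketch, with arbitrary virtual register numbers), given MIR
+// such as:
+//   %0:_(s32) = G_CONSTANT i32 4
+//   %1:_(s32) = G_INTRINSIC intrinsic(@llvm.spv.track.constant), %0(s32), ...
+// getConstFromIntrinsic(%1, MRI) returns 4.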
+static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI) {
+ MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
+ assert(isSpvIntrinsic(*DefMI, Intrinsic::spv_track_constant) &&
+ DefMI->getOperand(2).isReg());
+ MachineInstr *DefMI2 = MRI->getUniqueVRegDef(DefMI->getOperand(2).getReg());
+ assert(DefMI2->getOpcode() == TargetOpcode::G_CONSTANT &&
+ DefMI2->getOperand(1).isCImm());
+ return DefMI2->getOperand(1).getCImm()->getValue().getZExtValue();
+}
+
// Return type of the instruction result from spv_assign_type intrinsic.
// TODO: maybe unify with prelegalizer pass.
static const Type *getMachineInstrType(MachineInstr *MI) {
return true;
}
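+// Generate OpLoad/OpStore from the SPIR-V friendly builtin calls, which look
+// like (a sketch; argument names are illustrative):
+//   __spirv_Store(ptr, value[, MemoryOperands[, Alignment]])
+//   result = __spirv_Load(ptr[, MemoryOperands[, Alignment]])
+// MemoryOperands and Alignment must be constants tracked by
+// spv_track_constant.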
+static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
+ MachineIRBuilder &MIRBuilder,
+ SPIRVGlobalRegistry *GR) {
+ // Look up the instruction opcode in the TableGen records.
+ const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
+ unsigned Opcode =
+ SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
+ bool IsLoad = Opcode == SPIRV::OpLoad;
+ // Build the instruction.
+ auto MIB = MIRBuilder.buildInstr(Opcode);
+ if (IsLoad) {
+ MIB.addDef(Call->ReturnRegister);
+ MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
+ }
+ // Add a pointer to the value to load/store.
+ MIB.addUse(Call->Arguments[0]);
+ // Add a value to store.
+ if (!IsLoad)
+ MIB.addUse(Call->Arguments[1]);
+ // Add the optional memory operands mask and alignment literal, if present.
+ MachineRegisterInfo *MRI = MIRBuilder.getMRI();
+ unsigned NumArgs = Call->Arguments.size();
+ if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
+ MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
+ if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
+ MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
+ return true;
+}
+
/// Lowers a builtin function call using the provided \p DemangledCall skeleton
/// and external instruction \p Set.
namespace SPIRV {
return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
case SPIRV::VectorLoadStore:
return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
+ case SPIRV::LoadStore:
+ return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
}
return false;
}
def Enqueue : BuiltinGroup;
def AsyncCopy : BuiltinGroup;
def VectorLoadStore : BuiltinGroup;
+def LoadStore : BuiltinGroup;
//===----------------------------------------------------------------------===//
// Class defining a demangled builtin record. The information in the record
defm : DemangledNativeBuiltin<"ndrange_2D", OpenCL_std, Enqueue, 1, 3, OpBuildNDRange>;
defm : DemangledNativeBuiltin<"ndrange_3D", OpenCL_std, Enqueue, 1, 3, OpBuildNDRange>;
-// Spec constant builtin record:
+// Spec constant builtin records:
defm : DemangledNativeBuiltin<"__spirv_SpecConstant", OpenCL_std, SpecConstant, 2, 2, OpSpecConstant>;
defm : DemangledNativeBuiltin<"__spirv_SpecConstantComposite", OpenCL_std, SpecConstant, 1, 0, OpSpecConstantComposite>;
defm : DemangledNativeBuiltin<"async_work_group_copy", OpenCL_std, AsyncCopy, 4, 4, OpGroupAsyncCopy>;
defm : DemangledNativeBuiltin<"wait_group_events", OpenCL_std, AsyncCopy, 2, 2, OpGroupWaitEvents>;
+// Load and store builtin records:
+defm : DemangledNativeBuiltin<"__spirv_Load", OpenCL_std, LoadStore, 1, 3, OpLoad>;
+defm : DemangledNativeBuiltin<"__spirv_Store", OpenCL_std, LoadStore, 2, 4, OpStore>;
+
//===----------------------------------------------------------------------===//
// Class defining a work/sub group builtin that should be translated into a
// SPIR-V instruction using the defined properties.
--- /dev/null
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+;; Translate SPIR-V friendly OpLoad and OpStore calls
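+;; _Z12__spirv_LoadPi demangles to __spirv_Load(int*) and
+;; _Z13__spirv_StorePiiii to __spirv_Store(int*, int, int, int).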
+
+; CHECK: %[[#CONST:]] = OpConstant %[[#]] 42
+; CHECK: OpStore %[[#PTR:]] %[[#CONST]] Volatile|Aligned 4
+; CHECK: %[[#]] = OpLoad %[[#]] %[[#PTR]]
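+;; In the store call below, i32 3 is the MemoryOperands mask, i.e.
+;; Volatile (0x1) | Aligned (0x2), and i32 4 is the alignment literal,
+;; matching the "Volatile|Aligned 4" checked above.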
+
+define weak_odr dso_local spir_kernel void @foo(i32 addrspace(1)* %var) {
+entry:
+ tail call spir_func void @_Z13__spirv_StorePiiii(i32 addrspace(1)* %var, i32 42, i32 3, i32 4)
+ %value = tail call spir_func double @_Z12__spirv_LoadPi(i32 addrspace(1)* %var)
+ ret void
+}
+
+declare dso_local spir_func double @_Z12__spirv_LoadPi(i32 addrspace(1)*) local_unnamed_addr
+declare dso_local spir_func void @_Z13__spirv_StorePiiii(i32 addrspace(1)*, i32, i32, i32) local_unnamed_addr