{{S32, FlatPtr, 8, 8}, {S32, FlatPtr, 16, 16}});
}
+ // Loads from the 32-bit constant address space are handled by
+ // addrspacecasting the 32-bit pointer to 64 bits.
+ //
+ // TODO: Should generalize bitcast action into coerce, which will also cover
+ // inserting addrspacecasts.
+ ExtLoads.customIf(typeIs(1, Constant32Ptr));
+
ExtLoads.clampScalar(0, S32, S32)
.widenScalarToNextPow2(0)
.unsupportedIfMemSizeNotPow2()
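A minimal sketch of the names the new rule relies on; Constant32Ptr and typeIs are assumed to already be defined in AMDGPULegalizerInfo.cpp and are not added by this patch, so the exact definitions here are a hedged reconstruction:

// Assumed, not part of the patch: the p6 pointer type for the 32-bit
// constant address space.
const LLT Constant32Ptr = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS_32BIT, 32);

// LegalityPredicates::typeIs(1, Constant32Ptr) matches instructions whose
// type index 1 (the pointer operand of a load) is exactly p6, so only
// ext-loads from the 32-bit constant address space take the custom path
// and get dispatched through legalizeCustom to legalizeLoad.
ExtLoads.customIf(typeIs(1, Constant32Ptr));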
case TargetOpcode::G_GLOBAL_VALUE:
return legalizeGlobalValue(MI, MRI, B);
case TargetOpcode::G_LOAD:
+ case TargetOpcode::G_SEXTLOAD:
+ case TargetOpcode::G_ZEXTLOAD:
return legalizeLoad(Helper, MI);
case TargetOpcode::G_FMAD:
return legalizeFMad(MI, MRI, B);
return true;
}
+ if (MI.getOpcode() != AMDGPU::G_LOAD)
+ return false;
+
Register ValReg = MI.getOperand(0).getReg();
LLT ValTy = MRI.getType(ValReg);
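For orientation, a sketch of where the new early-out sits in legalizeLoad; only the G_LOAD guard is taken from the patch, and the surrounding function body is a hedged reconstruction of the structure the hunk context implies:

bool AMDGPULegalizerInfo::legalizeLoad(LegalizerHelper &Helper,
                                       MachineInstr &MI) const {
  MachineIRBuilder &B = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *B.getMRI();
  GISelChangeObserver &Observer = Helper.Observer;

  Register PtrReg = MI.getOperand(1).getReg();
  LLT PtrTy = MRI.getType(PtrReg);

  if (PtrTy.getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
    // Cast the p6 pointer to p4; the cast itself later legalizes into the
    // G_MERGE_VALUES with a constant high half seen in the MIR tests below.
    // With this patch, the path also runs for G_SEXTLOAD/G_ZEXTLOAD.
    LLT ConstPtr = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    auto Cast = B.buildAddrSpaceCast(ConstPtr, PtrReg);
    Observer.changingInstr(MI);
    MI.getOperand(1).setReg(Cast.getReg(0));
    Observer.changedInstr(MI);
    return true;
  }

  // Everything past this point widens the loaded value and assumes a plain
  // G_LOAD, hence the new early-out for the ext-load opcodes.
  if (MI.getOpcode() != AMDGPU::G_LOAD)
    return false;

  Register ValReg = MI.getOperand(0).getReg();
  LLT ValTy = MRI.getType(ValReg);
  // ... result-widening logic, unchanged by this patch ...
}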
; CI: [[COPY:%[0-9]+]]:_(p6) = COPY $sgpr0
; CI: [[C:%[0-9]+]]:_(p6) = G_CONSTANT i32 0
; CI: [[MV:%[0-9]+]]:_(p4) = G_MERGE_VALUES [[COPY]](p6), [[C]](p6)
- ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p4) :: (load 1, addrspace 6)
- ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32)
- ; CI: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 8
- ; CI: $vgpr0 = COPY [[SEXT_INREG]](s32)
+ ; CI: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[MV]](p4) :: (load 1, addrspace 6)
+ ; CI: $vgpr0 = COPY [[SEXTLOAD]](s32)
%0:_(p6) = COPY $sgpr0
%1:_(s32) = G_SEXTLOAD %0 :: (load 1, align 1, addrspace 6)
$vgpr0 = COPY %1
; CI: [[COPY:%[0-9]+]]:_(p6) = COPY $sgpr0
; CI: [[C:%[0-9]+]]:_(p6) = G_CONSTANT i32 0
; CI: [[MV:%[0-9]+]]:_(p4) = G_MERGE_VALUES [[COPY]](p6), [[C]](p6)
- ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p4) :: (load 2, addrspace 6)
- ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32)
- ; CI: [[SEXT_INREG:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 16
- ; CI: $vgpr0 = COPY [[SEXT_INREG]](s32)
+ ; CI: [[SEXTLOAD:%[0-9]+]]:_(s32) = G_SEXTLOAD [[MV]](p4) :: (load 2, addrspace 6)
+ ; CI: $vgpr0 = COPY [[SEXTLOAD]](s32)
%0:_(p6) = COPY $sgpr0
%1:_(s32) = G_SEXTLOAD %0 :: (load 2, align 2, addrspace 6)
$vgpr0 = COPY %1
; CI: [[COPY:%[0-9]+]]:_(p6) = COPY $sgpr0
; CI: [[C:%[0-9]+]]:_(p6) = G_CONSTANT i32 0
; CI: [[MV:%[0-9]+]]:_(p4) = G_MERGE_VALUES [[COPY]](p6), [[C]](p6)
- ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p4) :: (load 1, addrspace 6)
- ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32)
- ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
- ; CI: $vgpr0 = COPY [[AND]](s32)
+ ; CI: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[MV]](p4) :: (load 1, addrspace 6)
+ ; CI: $vgpr0 = COPY [[ZEXTLOAD]](s32)
%0:_(p6) = COPY $sgpr0
%1:_(s32) = G_ZEXTLOAD %0 :: (load 1, align 1, addrspace 6)
$vgpr0 = COPY %1
; CI: [[COPY:%[0-9]+]]:_(p6) = COPY $sgpr0
; CI: [[C:%[0-9]+]]:_(p6) = G_CONSTANT i32 0
; CI: [[MV:%[0-9]+]]:_(p4) = G_MERGE_VALUES [[COPY]](p6), [[C]](p6)
- ; CI: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[MV]](p4) :: (load 2, addrspace 6)
- ; CI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; CI: [[COPY1:%[0-9]+]]:_(s32) = COPY [[LOAD]](s32)
- ; CI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C1]]
- ; CI: $vgpr0 = COPY [[AND]](s32)
+ ; CI: [[ZEXTLOAD:%[0-9]+]]:_(s32) = G_ZEXTLOAD [[MV]](p4) :: (load 2, addrspace 6)
+ ; CI: $vgpr0 = COPY [[ZEXTLOAD]](s32)
%0:_(p6) = COPY $sgpr0
%1:_(s32) = G_ZEXTLOAD %0 :: (load 2, align 2, addrspace 6)
$vgpr0 = COPY %1