let IsNonExtLoad = 1;
}
-def extloadi8_#as : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+def extloadi8_#as : PatFrag<(ops node:$ptr), (extloadi8 node:$ptr)> {
let IsLoad = 1;
- let MemoryVT = i8;
}
-def extloadi16_#as : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+def extloadi16_#as : PatFrag<(ops node:$ptr), (extloadi16 node:$ptr)> {
let IsLoad = 1;
- let MemoryVT = i16;
}
-def sextloadi8_#as : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
+def sextloadi8_#as : PatFrag<(ops node:$ptr), (sextloadi8 node:$ptr)> {
let IsLoad = 1;
- let MemoryVT = i8;
}
-def sextloadi16_#as : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
+def sextloadi16_#as : PatFrag<(ops node:$ptr), (sextloadi16 node:$ptr)> {
let IsLoad = 1;
- let MemoryVT = i16;
}
-def zextloadi8_#as : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
+def zextloadi8_#as : PatFrag<(ops node:$ptr), (zextloadi8 node:$ptr)> {
let IsLoad = 1;
- let MemoryVT = i8;
}
-def zextloadi16_#as : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
+def zextloadi16_#as : PatFrag<(ops node:$ptr), (zextloadi16 node:$ptr)> {
let IsLoad = 1;
- let MemoryVT = i16;
}
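+// The fragments above wrap the generic typed load fragments from
+// TargetSelectionDAG.td, which already carry the memory type, roughly:
+//   def extloadi8 : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
+//     let IsLoad = 1;
+//     let MemoryVT = i8;
+//   }
+// so no explicit MemoryVT override is needed here.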
def atomic_load_8_#as : PatFrag<(ops node:$ptr), (atomic_load_8 node:$ptr)> {
foreach as = [ "global", "flat", "local", "private", "region" ] in {
-let AddressSpaces = !cast<AddressSpaceList>("StoreAddress_"#as).AddrSpaces in {
+let IsStore = 1, AddressSpaces = !cast<AddressSpaceList>("StoreAddress_"#as).AddrSpaces in {
def store_#as : PatFrag<(ops node:$val, node:$ptr),
(unindexedstore node:$val, node:$ptr)> {
- let IsStore = 1;
let IsTruncStore = 0;
}
// truncstore fragments.
def truncstore_#as : PatFrag<(ops node:$val, node:$ptr),
(unindexedstore node:$val, node:$ptr)> {
- let IsStore = 1;
let IsTruncStore = 1;
}
// TODO: We don't really need the truncstore here. We can use
// unindexedstore with MemoryVT directly, which will save an
// unnecessary check that the memory size is less than the value type
// in the generated matcher table.
def truncstorei8_#as : PatFrag<(ops node:$val, node:$ptr),
- (truncstore node:$val, node:$ptr)> {
- let IsStore = 1;
- let MemoryVT = i8;
-}
-
+ (truncstorei8 node:$val, node:$ptr)>;
def truncstorei16_#as : PatFrag<(ops node:$val, node:$ptr),
- (truncstore node:$val, node:$ptr)> {
- let IsStore = 1;
- let MemoryVT = i16;
-}
+ (truncstorei16 node:$val, node:$ptr)>;
def store_hi16_#as : StoreHi16 <truncstorei16, i16>;
def truncstorei8_hi16_#as : StoreHi16<truncstorei8, i8>;
def truncstorei16_hi16_#as : StoreHi16<truncstorei16, i16>;
-defm atomic_store_#as : binary_atomic_op<atomic_store>;
-} // End let AddressSpaces
+} // End let IsStore = 1, AddressSpaces = ...
+let IsAtomic = 1, AddressSpaces = !cast<AddressSpaceList>("StoreAddress_"#as).AddrSpaces in {
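+// Unlike ordinary stores, DAG atomic stores take their operands as
+// (ptr, val), so these fragments list the pointer first.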
+def atomic_store_8_#as : PatFrag<(ops node:$ptr, node:$val),
+ (atomic_store_8 node:$ptr, node:$val)>;
+def atomic_store_16_#as : PatFrag<(ops node:$ptr, node:$val),
+ (atomic_store_16 node:$ptr, node:$val)>;
+def atomic_store_32_#as : PatFrag<(ops node:$ptr, node:$val),
+ (atomic_store_32 node:$ptr, node:$val)>;
+def atomic_store_64_#as : PatFrag<(ops node:$ptr, node:$val),
+ (atomic_store_64 node:$ptr, node:$val)>;
+}
} // End foreach as
// TODO: Add GISelPredicateCode for the ret and noret PatFrags once
// GlobalISelEmitter allows pattern matches where src and dst def count
// mismatch.
defm AMDGPUatomic_cmp_swap : binary_atomic_op_all_as<AMDGPUatomic_cmp_swap>;
def load_align8_local : PatFrag<(ops node:$ptr), (load_local node:$ptr)>,
- Aligned<8> {
+ Aligned<8> {
let IsLoad = 1;
- let IsNonExtLoad = 1;
}
def load_align16_local : PatFrag<(ops node:$ptr), (load_local node:$ptr)>,
Aligned<16> {
let IsLoad = 1;
- let IsNonExtLoad = 1;
}
def store_align8_local: PatFrag<(ops node:$val, node:$ptr),
(store_local node:$val, node:$ptr)>, Aligned<8> {
let IsStore = 1;
- let IsTruncStore = 0;
}
def store_align16_local: PatFrag<(ops node:$val, node:$ptr),
(store_local node:$val, node:$ptr)>, Aligned<16> {
let IsStore = 1;
- let IsTruncStore = 0;
}
let AddressSpaces = StoreAddress_local.AddrSpaces in {
>;
}
let SubtargetPredicate = isGFX6GFX7 in {
-defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_BYTE_ADDR64, BUFFER_STORE_BYTE_OFFSET, i32, atomic_store_global_8>;
-defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_BYTE_ADDR64, BUFFER_STORE_BYTE_OFFSET, i16, atomic_store_global_8>;
-defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_SHORT_ADDR64, BUFFER_STORE_SHORT_OFFSET, i32, atomic_store_global_16>;
-defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_SHORT_ADDR64, BUFFER_STORE_SHORT_OFFSET, i16, atomic_store_global_16>;
-defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_DWORD_ADDR64, BUFFER_STORE_DWORD_OFFSET, i32, atomic_store_global_32>;
-defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_DWORDX2_ADDR64, BUFFER_STORE_DWORDX2_OFFSET, i64, atomic_store_global_64>;
+defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_BYTE_ADDR64, BUFFER_STORE_BYTE_OFFSET, i32, atomic_store_8_global>;
+defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_BYTE_ADDR64, BUFFER_STORE_BYTE_OFFSET, i16, atomic_store_8_global>;
+defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_SHORT_ADDR64, BUFFER_STORE_SHORT_OFFSET, i32, atomic_store_16_global>;
+defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_SHORT_ADDR64, BUFFER_STORE_SHORT_OFFSET, i16, atomic_store_16_global>;
+defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_DWORD_ADDR64, BUFFER_STORE_DWORD_OFFSET, i32, atomic_store_32_global>;
+defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_DWORDX2_ADDR64, BUFFER_STORE_DWORDX2_OFFSET, i64, atomic_store_64_global>;
} // End Predicates = isGFX6GFX7
defm : DSWritePat_mc <DS_WRITE_B32, vt, "store_local">;
}
-defm : DSAtomicWritePat_mc <DS_WRITE_B8, i16, "atomic_store_local_8">;
-defm : DSAtomicWritePat_mc <DS_WRITE_B8, i32, "atomic_store_local_8">;
-defm : DSAtomicWritePat_mc <DS_WRITE_B16, i16, "atomic_store_local_16">;
-defm : DSAtomicWritePat_mc <DS_WRITE_B16, i32, "atomic_store_local_16">;
-defm : DSAtomicWritePat_mc <DS_WRITE_B32, i32, "atomic_store_local_32">;
-defm : DSAtomicWritePat_mc <DS_WRITE_B64, i64, "atomic_store_local_64">;
+defm : DSAtomicWritePat_mc <DS_WRITE_B8, i16, "atomic_store_8_local">;
+defm : DSAtomicWritePat_mc <DS_WRITE_B8, i32, "atomic_store_8_local">;
+defm : DSAtomicWritePat_mc <DS_WRITE_B16, i16, "atomic_store_16_local">;
+defm : DSAtomicWritePat_mc <DS_WRITE_B16, i32, "atomic_store_16_local">;
+defm : DSAtomicWritePat_mc <DS_WRITE_B32, i32, "atomic_store_32_local">;
+defm : DSAtomicWritePat_mc <DS_WRITE_B64, i64, "atomic_store_64_local">;
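+// DS_WRITE_B8/B16 above also serve truncating atomic stores, so the
+// sub-dword sizes are instantiated for both the i16 and i32 value types.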
let OtherPredicates = [HasD16LoadStore] in {
def : DSWritePat <DS_WRITE_B16_D16_HI, i32, store_hi16_local>;
def : FlatStorePat <FLAT_STORE_DWORDX4, store_flat, vt>;
}
-def : FlatStoreAtomicPat <FLAT_STORE_DWORD, atomic_store_flat_32, i32>;
-def : FlatStoreAtomicPat <FLAT_STORE_DWORDX2, atomic_store_flat_64, i64>;
-def : FlatStoreAtomicPat <FLAT_STORE_BYTE, atomic_store_flat_8, i32>;
-def : FlatStoreAtomicPat <FLAT_STORE_BYTE, atomic_store_flat_8, i16>;
-def : FlatStoreAtomicPat <FLAT_STORE_SHORT, atomic_store_flat_16, i32>;
-def : FlatStoreAtomicPat <FLAT_STORE_SHORT, atomic_store_flat_16, i16>;
-
+def : FlatStoreAtomicPat <FLAT_STORE_DWORD, atomic_store_32_flat, i32>;
+def : FlatStoreAtomicPat <FLAT_STORE_DWORDX2, atomic_store_64_flat, i64>;
+def : FlatStoreAtomicPat <FLAT_STORE_BYTE, atomic_store_8_flat, i32>;
+def : FlatStoreAtomicPat <FLAT_STORE_BYTE, atomic_store_8_flat, i16>;
+def : FlatStoreAtomicPat <FLAT_STORE_SHORT, atomic_store_16_flat, i32>;
+def : FlatStoreAtomicPat <FLAT_STORE_SHORT, atomic_store_16_flat, i16>;
foreach as = [ "flat", "global" ] in {
defm : FlatAtomicPat <"FLAT_ATOMIC_ADD", "atomic_load_add_"#as, i32>;
defm : GlobalFLATLoadPats_D16 <GLOBAL_LOAD_SHORT_D16, load_d16_lo_global, v2f16>;
}
-defm : GlobalFLATAtomicStorePats <GLOBAL_STORE_BYTE, atomic_store_global_8, i32>;
-defm : GlobalFLATAtomicStorePats <GLOBAL_STORE_BYTE, atomic_store_global_8, i16>;
-defm : GlobalFLATAtomicStorePats <GLOBAL_STORE_SHORT, atomic_store_global_16, i32>;
-defm : GlobalFLATAtomicStorePats <GLOBAL_STORE_SHORT, atomic_store_global_16, i16>;
-defm : GlobalFLATAtomicStorePats <GLOBAL_STORE_DWORD, atomic_store_global_32, i32>;
-defm : GlobalFLATAtomicStorePats <GLOBAL_STORE_DWORDX2, atomic_store_global_64, i64>;
+defm : GlobalFLATAtomicStorePats <GLOBAL_STORE_BYTE, atomic_store_8_global, i32>;
+defm : GlobalFLATAtomicStorePats <GLOBAL_STORE_BYTE, atomic_store_8_global, i16>;
+defm : GlobalFLATAtomicStorePats <GLOBAL_STORE_SHORT, atomic_store_16_global, i32>;
+defm : GlobalFLATAtomicStorePats <GLOBAL_STORE_SHORT, atomic_store_16_global, i16>;
+defm : GlobalFLATAtomicStorePats <GLOBAL_STORE_DWORD, atomic_store_32_global, i32>;
+defm : GlobalFLATAtomicStorePats <GLOBAL_STORE_DWORDX2, atomic_store_64_global, i64>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_ADD", "atomic_load_add_global", i32>;
defm : GlobalFLATAtomicPats <"GLOBAL_ATOMIC_SUB", "atomic_load_sub_global", i32>;
let IsNonExtLoad = 1;
}
-let MemoryVT = i8 in {
def extloadi8_local_m0 : PatFrag<(ops node:$ptr), (extloadi8_glue node:$ptr)>;
def sextloadi8_local_m0 : PatFrag<(ops node:$ptr), (sextloadi8_glue node:$ptr)>;
def zextloadi8_local_m0 : PatFrag<(ops node:$ptr), (zextloadi8_glue node:$ptr)>;
-}
-let MemoryVT = i16 in {
def extloadi16_local_m0 : PatFrag<(ops node:$ptr), (extloadi16_glue node:$ptr)>;
def sextloadi16_local_m0 : PatFrag<(ops node:$ptr), (sextloadi16_glue node:$ptr)>;
def zextloadi16_local_m0 : PatFrag<(ops node:$ptr), (zextloadi16_glue node:$ptr)>;
-}
+} // End IsLoad = 1, AddressSpaces = LoadAddress_local.AddrSpaces
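+// Setting the MinAlignment field directly is equivalent to mixing in the
+// Aligned<N> helper class, which only declares this same field.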
def load_align8_local_m0 : PatFrag<(ops node:$ptr),
- (load_local_m0 node:$ptr)>, Aligned<8> {
+ (load_local_m0 node:$ptr)> {
let IsLoad = 1;
- let IsNonExtLoad = 1;
+ int MinAlignment = 8;
}
def load_align16_local_m0 : PatFrag<(ops node:$ptr),
- (load_local_m0 node:$ptr)>, Aligned<16> {
+ (load_local_m0 node:$ptr)> {
let IsLoad = 1;
- let IsNonExtLoad = 1;
+ int MinAlignment = 16;
}
-} // End IsLoad = 1
-
let IsAtomic = 1, AddressSpaces = LoadAddress_local.AddrSpaces in {
def atomic_load_8_local_m0 : PatFrag<(ops node:$ptr),
- (atomic_load_8_glue node:$ptr)> {
- let MemoryVT = i8;
-}
+ (atomic_load_8_glue node:$ptr)>;
def atomic_load_16_local_m0 : PatFrag<(ops node:$ptr),
- (atomic_load_16_glue node:$ptr)> {
- let MemoryVT = i16;
-}
+ (atomic_load_16_glue node:$ptr)>;
def atomic_load_32_local_m0 : PatFrag<(ops node:$ptr),
- (atomic_load_32_glue node:$ptr)> {
- let MemoryVT = i32;
-}
+ (atomic_load_32_glue node:$ptr)>;
def atomic_load_64_local_m0 : PatFrag<(ops node:$ptr),
- (atomic_load_64_glue node:$ptr)> {
- let MemoryVT = i64;
-}
-
+ (atomic_load_64_glue node:$ptr)>;
} // End let AddressSpaces = LoadAddress_local.AddrSpaces
(truncstore_glue node:$val, node:$ptr)> {
let IsStore = 1;
let MemoryVT = i8;
+ let IsTruncStore = 1;
}
def truncstorei16_glue : PatFrag<(ops node:$val, node:$ptr),
(truncstore_glue node:$val, node:$ptr)> {
let IsStore = 1;
let MemoryVT = i16;
+ let IsTruncStore = 1;
}
let IsStore = 1, AddressSpaces = StoreAddress_local.AddrSpaces in {
def store_local_m0 : PatFrag<(ops node:$val, node:$ptr),
- (store_glue node:$val, node:$ptr)> {
- let IsStore = 1;
- let IsTruncStore = 0;
-}
-
+ (store_glue node:$val, node:$ptr)>;
def truncstorei8_local_m0 : PatFrag<(ops node:$val, node:$ptr),
- (unindexedstore_glue node:$val, node:$ptr)> {
- let IsStore = 1;
- let MemoryVT = i8;
-}
-
+ (truncstorei8_glue node:$val, node:$ptr)>;
def truncstorei16_local_m0 : PatFrag<(ops node:$val, node:$ptr),
- (unindexedstore_glue node:$val, node:$ptr)> {
- let IsStore = 1;
- let MemoryVT = i16;
-}
+ (truncstorei16_glue node:$val, node:$ptr)>;
}
def store_align8_local_m0 : PatFrag <(ops node:$value, node:$ptr),
(store_local_m0 node:$value, node:$ptr)>,
Aligned<8> {
let IsStore = 1;
- let IsTruncStore = 0;
}
def store_align16_local_m0 : PatFrag <(ops node:$value, node:$ptr),
(store_local_m0 node:$value, node:$ptr)>,
Aligned<16> {
let IsStore = 1;
- let IsTruncStore = 0;
}
let PredicateCode = [{return cast<MemSDNode>(N)->getAlignment() < 4;}],
}
}
-let AddressSpaces = StoreAddress_local.AddrSpaces in {
-
-def atomic_store_local_8_m0 : PatFrag <
- (ops node:$value, node:$ptr),
- (AMDGPUatomic_st_glue node:$value, node:$ptr)> {
+def atomic_store_8_glue : PatFrag <
+ (ops node:$ptr, node:$value),
+ (AMDGPUatomic_st_glue node:$ptr, node:$value)> {
let IsAtomic = 1;
let MemoryVT = i8;
}
-def atomic_store_local_16_m0 : PatFrag <
- (ops node:$value, node:$ptr),
- (AMDGPUatomic_st_glue node:$value, node:$ptr)> {
+
+def atomic_store_16_glue : PatFrag <
+ (ops node:$ptr, node:$value),
+ (AMDGPUatomic_st_glue node:$ptr, node:$value)> {
let IsAtomic = 1;
let MemoryVT = i16;
}
-def atomic_store_local_32_m0 : PatFrag <
- (ops node:$value, node:$ptr),
- (AMDGPUatomic_st_glue node:$value, node:$ptr)> {
+
+def atomic_store_32_glue : PatFrag <
+ (ops node:$ptr, node:$value),
+ (AMDGPUatomic_st_glue node:$ptr, node:$value)> {
let IsAtomic = 1;
let MemoryVT = i32;
}
-def atomic_store_local_64_m0 : PatFrag <
- (ops node:$value, node:$ptr),
- (AMDGPUatomic_st_glue node:$value, node:$ptr)> {
+
+def atomic_store_64_glue : PatFrag <
+ (ops node:$ptr, node:$value),
+ (AMDGPUatomic_st_glue node:$ptr, node:$value)> {
let IsAtomic = 1;
let MemoryVT = i64;
}
-} // End let AddressSpaces = StoreAddress_local.AddrSpaces
+
+let IsAtomic = 1, AddressSpaces = StoreAddress_local.AddrSpaces in {
+def atomic_store_8_local_m0 : PatFrag<(ops node:$ptr, node:$val),
+ (atomic_store_8_glue node:$ptr, node:$val)>;
+def atomic_store_16_local_m0 : PatFrag<(ops node:$ptr, node:$val),
+ (atomic_store_16_glue node:$ptr, node:$val)>;
+def atomic_store_32_local_m0 : PatFrag<(ops node:$ptr, node:$val),
+ (atomic_store_32_glue node:$ptr, node:$val)>;
+def atomic_store_64_local_m0 : PatFrag<(ops node:$ptr, node:$val),
+ (atomic_store_64_glue node:$ptr, node:$val)>;
+} // End let IsAtomic = 1, AddressSpaces = StoreAddress_local.AddrSpaces
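+// These _m0 fragments mirror the atomic_store_<size>_<as> fragments above,
+// but are built on the glued store node used when M0 is initialized for
+// LDS access.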
def si_setcc_uniform : PatFrag <
-// With one address space
+// With only a minimum alignment
def pat_frag_a : PatFrag <(ops node:$ptr), (load node:$ptr), [{}]> {
- let AddressSpaces = [ 999 ];
let IsLoad = 1; // FIXME: Can this be inferred?
- let MemoryVT = i32;
let MinAlignment = 2;
}
def pat_frag_b : PatFrag <(ops node:$ptr), (load node:$ptr), [{}]> {
let AddressSpaces = [ 123, 455 ];
let IsLoad = 1; // FIXME: Can this be inferred?
- let MemoryVT = i32;
}
def inst_a : Instruction {
// SDAG: case 2: {
// SDAG-NEXT: // Predicate_pat_frag_b
+// SDAG-NEXT: // Predicate_truncstorei16_addrspace
// SDAG-NEXT: SDNode *N = Node;
// SDAG-NEXT: (void)N;
// SDAG-NEXT: unsigned AddrSpace = cast<MemSDNode>(N)->getAddressSpace();
// SDAG-NEXT: if (AddrSpace != 123 && AddrSpace != 455)
// SDAG-NEXT: return false;
-// SDAG-NEXT: if (cast<MemSDNode>(N)->getMemoryVT() != MVT::i32) return false;
// SDAG-NEXT: return true;
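+// (pat_frag_b and truncstorei16_addrspace now produce identical predicate
+// bodies, so the emitter folds them into the single shared entry above.)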
// GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_LOAD,
// GISEL-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0,
// GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/2, /*AddrSpace*/123, /*AddrSpace*/455,
-// GISEL-NEXT: GIM_CheckMemorySizeEqualTo, /*MI*/0, /*MMO*/0, /*Size*/4,
// GISEL-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
def : Pat <
(pat_frag_b GPR32:$src),
// SDAG: // Predicate_pat_frag_a
// SDAG-NEXT: SDNode *N = Node;
// SDAG-NEXT: (void)N;
-// SDAG-NEXT: unsigned AddrSpace = cast<MemSDNode>(N)->getAddressSpace();
-
-// SDAG-NEXT: if (AddrSpace != 999)
-// SDAG-NEXT: return false;
// SDAG-NEXT: if (cast<MemSDNode>(N)->getAlign() < Align(2))
// SDAG-NEXT: return false;
-// SDAG-NEXT: if (cast<MemSDNode>(N)->getMemoryVT() != MVT::i32) return false;
// SDAG-NEXT: return true;
// GISEL: GIM_Try, /*On fail goto*//*Label 1*/ {{[0-9]+}}, // Rule ID 1 //
// GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
// GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_LOAD,
// GISEL-NEXT: GIM_CheckMemorySizeEqualToLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0,
-// GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/1, /*AddrSpace*/999,
// GISEL-NEXT: GIM_CheckMemoryAlignment, /*MI*/0, /*MMO*/0, /*MinAlign*/2,
-// GISEL-NEXT: GIM_CheckMemorySizeEqualTo, /*MI*/0, /*MMO*/0, /*Size*/4,
// GISEL-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
def : Pat <
(pat_frag_a GPR32:$src),
def truncstorei16_addrspace : PatFrag<(ops node:$val, node:$ptr),
- (truncstore node:$val, node:$ptr)> {
+ (truncstorei16 node:$val, node:$ptr)> {
let IsStore = 1;
- let MemoryVT = i16;
let AddressSpaces = [ 123, 455 ];
}
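+// The i16 memory size check now comes from the nested truncstorei16
+// fragment, so it is emitted ahead of this fragment's address space check
+// in the GISel checks below.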
// GISEL-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
// GISEL-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_STORE,
// GISEL-NEXT: GIM_CheckMemorySizeLessThanLLT, /*MI*/0, /*MMO*/0, /*OpIdx*/0,
-// GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/2, /*AddrSpace*/123, /*AddrSpace*/455,
// GISEL-NEXT: GIM_CheckMemorySizeEqualTo, /*MI*/0, /*MMO*/0, /*Size*/2,
+// GISEL-NEXT: GIM_CheckMemoryAddressSpace, /*MI*/0, /*MMO*/0, /*NumAddrSpace*/2, /*AddrSpace*/123, /*AddrSpace*/455,
def : Pat <
(truncstorei16_addrspace GPR32:$src0, GPR32:$src1),
(inst_c GPR32:$src0, GPR32:$src1)
if (!ParsedAddrSpaces.empty()) {
InsnMatcher.addPredicate<MemoryAddressSpacePredicateMatcher>(
0, ParsedAddrSpaces);
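+      // Stop once the address space predicate has been emitted; don't fall
+      // through to the builtin checks below for this fragment.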
+ return InsnMatcher;
}
}
int64_t MinAlign = Predicate.getMinAlignment();
- if (MinAlign > 0)
+ if (MinAlign > 0) {
InsnMatcher.addPredicate<MemoryAlignmentPredicateMatcher>(0, MinAlign);
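+    // Likewise, stop once the minimum-alignment predicate has been emitted.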
+ return InsnMatcher;
+ }
}
// G_LOAD is used for both non-extending and any-extending loads.