/// Conversion pattern that converts a 1-D vector transfer read/write op into a
/// sequence of:
-/// 1. Bitcast to vector form.
+/// 1. Bitcast or addrspacecast to vector form.
/// 2. Create an offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
/// 3. Create a mask where offsetVector is compared against memref upper bound.
/// 4. Rewrite op as a masked read or write.
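+///
+/// For example, for a memref in the default address space this produces,
+/// roughly (sketch only; value names are illustrative, index arithmetic and
+/// types elided):
+///   %vecPtr = llvm.bitcast %elemPtr ...                  // step 1
+///   %mask   = llvm.icmp "slt" %offsetVector, %bound ...  // steps 2-3
+///   %result = llvm.intr.masked.load %vecPtr, %mask, ...  // step 4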
MemRefType memRefType = xferOp.getMemRefType();
// 1. Get the source/dst address as an LLVM vector pointer.
+ // The vector pointer is always in address space 0, so an addrspacecast
+ // is required when the source/dst memref is not in address space 0.
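+ // E.g. an element pointer of type "float addrspace(3)*" is addrspacecast
+ // to "<N x float>*", which lives in address space 0.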
// TODO: support alignment when possible.
Value dataPtr = getDataPtr(loc, memRefType, adaptor.memref(),
adaptor.indices(), rewriter, getModule());
auto vecTy =
toLLVMTy(xferOp.getVectorType()).template cast<LLVM::LLVMType>();
- auto vectorDataPtr =
- rewriter.create<LLVM::BitcastOp>(loc, vecTy.getPointerTo(), dataPtr);
+ Value vectorDataPtr;
+ if (memRefType.getMemorySpace() == 0)
+ vectorDataPtr =
+ rewriter.create<LLVM::BitcastOp>(loc, vecTy.getPointerTo(), dataPtr);
+ else
+ vectorDataPtr = rewriter.create<LLVM::AddrSpaceCastOp>(
+ loc, vecTy.getPointerTo(), dataPtr);
// 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
unsigned vecWidth = vecTy.getVectorNumElements();
// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32, 0 : i32,
// CHECK-SAME: 0 : i32, 0 : i32, 0 : i32] :
// CHECK-SAME: !llvm<"<17 x i64>">, !llvm<"<17 x i64>">
+
+func @transfer_read_1d_non_zero_addrspace(%A : memref<?xf32, 3>, %base: index) -> vector<17xf32> {
+ %f7 = constant 7.0: f32
+ %f = vector.transfer_read %A[%base], %f7
+ {permutation_map = affine_map<(d0) -> (d0)>} :
+ memref<?xf32, 3>, vector<17xf32>
+ vector.transfer_write %f, %A[%base]
+ {permutation_map = affine_map<(d0) -> (d0)>} :
+ vector<17xf32>, memref<?xf32, 3>
+ return %f: vector<17xf32>
+}
+// CHECK-LABEL: func @transfer_read_1d_non_zero_addrspace
+// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: !llvm.i64) -> !llvm<"<17 x float>">
+//
+// 1. Check address space for GEP is correct.
+// CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
+// CHECK-SAME: (!llvm<"float addrspace(3)*">, !llvm.i64) -> !llvm<"float addrspace(3)*">
+// CHECK: %[[vecPtr:.*]] = llvm.addrspacecast %[[gep]] :
+// CHECK-SAME: !llvm<"float addrspace(3)*"> to !llvm<"<17 x float>*">
+//
+// 2. Check address space of the memref is correct.
+// CHECK: %[[DIM:.*]] = llvm.extractvalue %{{.*}}[3, 0] :
+// CHECK-SAME: !llvm<"{ float addrspace(3)*, float addrspace(3)*, i64, [1 x i64], [1 x i64] }">
+//
+// 3. Check address space for GEP is correct.
+// CHECK: %[[gep_b:.*]] = llvm.getelementptr {{.*}} :
+// CHECK-SAME: (!llvm<"float addrspace(3)*">, !llvm.i64) -> !llvm<"float addrspace(3)*">
+// CHECK: %[[vecPtr_b:.*]] = llvm.addrspacecast %[[gep_b]] :
+// CHECK-SAME: !llvm<"float addrspace(3)*"> to !llvm<"<17 x float>*">