});
}
+/// Emits and returns the standard load and store ops from the view indexings.
+/// If the indexing is of index type, use it as an index to the load/store.
+/// If the indexing is a range, use range.min + indexing as an index to the
+/// load/store.
+template <typename LoadOrStoreOp>
+static SmallVector<Value *, 8>
+emitAndReturnLoadStoreOperands(LoadOrStoreOp loadOrStoreOp, ViewOp viewOp) {
+ // storeDim counts only the range indexings: each range consumes one of
+ // loadOrStoreOp's indices, while index-typed indexings consume none.
+ unsigned storeDim = 0;
+ SmallVector<Value *, 8> operands;
+ for (auto *indexing : viewOp.getIndexings()) {
+ // An indexing of index type is used verbatim as the memory index.
+ if (indexing->getType().isa<IndexType>()) {
+ operands.push_back(indexing);
+ continue;
+ }
+ // Otherwise the indexing is defined by a RangeOp: shift the matching
+ // load/store index by the range's lower bound, emitting the add through
+ // the EDSC ValueHandle operator+.
+ RangeOp range = indexing->getDefiningOp()->cast<RangeOp>();
+ ValueHandle min(range.getMin());
+ Value *storeIndex = *(loadOrStoreOp.getIndices().begin() + storeDim++);
+ using edsc::op::operator+;
+ operands.push_back(min + ValueHandle(storeIndex));
+ }
+ return operands;
+}
+
namespace {
/// Rewriting linalg::LoadOp and linalg::StoreOp to mlir::LoadOp and
applyPatternsGreedily(getFunction(), std::move(patterns));
}
};
-} // namespace
-
-/// Emits and returns the standard load and store ops from the view indexings.
-/// If the indexing is of index type, use it as an index to the load/store.
-/// If the indexing is a range, use range.min + indexing as an index to the
-/// load/store.
-template <typename LoadOrStoreOp>
-static SmallVector<Value *, 8>
-emitAndReturnLoadStoreOperands(LoadOrStoreOp loadOrStoreOp, ViewOp viewOp) {
- unsigned storeDim = 0;
- SmallVector<Value *, 8> operands;
- for (auto *indexing : viewOp.getIndexings()) {
- if (indexing->getType().isa<IndexType>()) {
- operands.push_back(indexing);
- continue;
- }
- RangeOp range = indexing->getDefiningOp()->cast<RangeOp>();
- ValueHandle min(range.getMin());
- Value *storeIndex = *(loadOrStoreOp.getIndices().begin() + storeDim++);
- using edsc::op::operator+;
- operands.push_back(min + ValueHandle(storeIndex));
- }
- return operands;
-}
template <>
PatternMatchResult
operands);
return matchSuccess();
}
+} // namespace
FunctionPassBase *linalg::createLowerLinalgLoadStorePass() {
return new LowerLinalgLoadStorePass();
// OpOperand
//===----------------------------------------------------------------------===//
+// TODO: This namespace is only required because of a bug in GCC<7.0.
+namespace mlir {
/// Return which operand this is in the operand list.
template <> unsigned OpOperand::getOperandNumber() {
return this - &getOwner()->getOpOperands()[0];
}
+} // end namespace mlir
//===----------------------------------------------------------------------===//
// BlockOperand
//===----------------------------------------------------------------------===//
+// TODO: This namespace is only required because of a bug in GCC<7.0.
+namespace mlir {
/// Return which operand this is in the operand list.
template <> unsigned BlockOperand::getOperandNumber() {
return this - &getOwner()->getBlockOperands()[0];
}
+} // end namespace mlir
//===----------------------------------------------------------------------===//
// Operation
PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override;
};
-} // end anonymous namespace
/// Analyzes the `transfer` to find an access dimension along the fastest remote
/// MemRef dimension. If such a dimension with coalescing properties is found,
/// Emits remote memory accesses that are clipped to the boundaries of the
/// MemRef.
template <typename VectorTransferOpTy>
-static llvm::SmallVector<edsc::ValueHandle, 8>
-clip(VectorTransferOpTy transfer, edsc::MemRefView &view,
- ArrayRef<edsc::IndexHandle> ivs) {
+llvm::SmallVector<edsc::ValueHandle, 8> clip(VectorTransferOpTy transfer,
+ edsc::MemRefView &view,
+ ArrayRef<edsc::IndexHandle> ivs) {
using namespace mlir::edsc;
using namespace edsc::op;
using edsc::intrinsics::select;
return matchSuccess();
}
-namespace {
struct LowerVectorTransfersPass
: public FunctionPass<LowerVectorTransfersPass> {
void runOnFunction() {