From: Fraser Cormack
Date: Fri, 18 Dec 2020 12:51:48 +0000 (+0000)
Subject: [RISCV] Assume no-op addrspacecasts by default
X-Git-Tag: llvmorg-13-init~2926
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=d4ed253d0b8487d9e9fd95a3895f83c437e5e7bb;p=platform%2Fupstream%2Fllvm.git

[RISCV] Assume no-op addrspacecasts by default

To support OpenCL, which typically uses SPIR as an IR, non-zero address
spaces must be accounted for. This patch makes the RISC-V target assume
no-op address space casts across the board, which effectively removes the
need to support addrspacecast instructions in the backend.

For a RISC-V implementation with different configurations or specialized
address spaces where casts aren't no-ops, the function can be adjusted as
required.

Reviewed By: jrtc27

Differential Revision: https://reviews.llvm.org/D93536
---

diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 5851f56..5e1b623 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -113,6 +113,15 @@ RISCVTargetMachine::getTargetTransformInfo(const Function &F) {
   return TargetTransformInfo(RISCVTTIImpl(this, F));
 }
 
+// A RISC-V hart has a single byte-addressable address space of 2^XLEN bytes
+// for all memory accesses, so it is reasonable to assume that an
+// implementation has no-op address space casts. If an implementation makes a
+// change to this, they can override it here.
+bool RISCVTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
+                                             unsigned DstAS) const {
+  return true;
+}
+
 namespace {
 class RISCVPassConfig : public TargetPassConfig {
 public:
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.h b/llvm/lib/Target/RISCV/RISCVTargetMachine.h
index a4476fa..9d1e04a 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.h
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.h
@@ -43,6 +43,9 @@ public:
   }
 
   TargetTransformInfo getTargetTransformInfo(const Function &F) override;
+
+  virtual bool isNoopAddrSpaceCast(unsigned SrcAS,
+                                   unsigned DstAS) const override;
 };
 }
diff --git a/llvm/test/CodeGen/RISCV/addrspacecast.ll b/llvm/test/CodeGen/RISCV/addrspacecast.ll
new file mode 100644
index 0000000..1519e1e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/addrspacecast.ll
@@ -0,0 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=RV32I
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=RV64I
+
+define void @cast0(i32 addrspace(1)* %ptr) {
+; RV32I-LABEL: cast0:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    sw zero, 0(a0)
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: cast0:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    sw zero, 0(a0)
+; RV64I-NEXT:    ret
+  %ptr0 = addrspacecast i32 addrspace(1)* %ptr to i32 addrspace(0)*
+  store i32 0, i32* %ptr0
+  ret void
+}
+
+define void @cast1(i32* %ptr) {
+; RV32I-LABEL: cast1:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    addi sp, sp, -16
+; RV32I-NEXT:    .cfi_def_cfa_offset 16
+; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT:    .cfi_offset ra, -4
+; RV32I-NEXT:    call foo@plt
+; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT:    addi sp, sp, 16
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: cast1:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    addi sp, sp, -16
+; RV64I-NEXT:    .cfi_def_cfa_offset 16
+; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT:    .cfi_offset ra, -8
+; RV64I-NEXT:    call foo@plt
+; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT:    addi sp, sp, 16
+; RV64I-NEXT:    ret
+  %castptr = addrspacecast i32* %ptr to i32 addrspace(10)*
+  call void @foo(i32 addrspace(10)* %castptr)
+  ret void
+}
+
+declare void @foo(i32 addrspace(10)*)
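
Note (illustration only, not part of the commit above): the message says that a RISC-V
implementation with specialized address spaces where casts aren't no-ops can adjust the
function as required. The standalone C++ sketch below shows one possible shape for such a
tightened policy. The address-space numbering (a "scratchpad" space 7) is purely
hypothetical and is not defined by this patch or by upstream LLVM; the real hook is a
member of RISCVTargetMachine, while this sketch only mirrors its decision logic.

// Illustrative sketch, not the patch's implementation. Assumes a hypothetical
// downstream RISC-V target where address space 7 is a separate scratchpad that
// cannot alias the generic space, so casts touching it are not no-ops.
#include <cassert>

namespace {
constexpr unsigned GenericAS = 0;    // default LLVM address space
constexpr unsigned ScratchpadAS = 7; // hypothetical; not defined upstream

bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DstAS) {
  // Casts within a single address space are always no-ops.
  if (SrcAS == DstAS)
    return true;
  // Casts into or out of the scratchpad space would need real lowering.
  return SrcAS != ScratchpadAS && DstAS != ScratchpadAS;
}
} // namespace

int main() {
  assert(isNoopAddrSpaceCast(GenericAS, 1));             // e.g. generic <-> global
  assert(!isNoopAddrSpaceCast(GenericAS, ScratchpadAS)); // needs a real cast
  return 0;
}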