From f5d5f17d3ad455de2fbb9448acea66cbc09561c5 Mon Sep 17 00:00:00 2001
From: Anshil Gandhi
Date: Wed, 18 Aug 2021 21:37:53 -0600
Subject: [PATCH] Revert "[HIP] Allow target addr space in target builtins"

This reverts commit a35008955fa606487f79a050f5cc80fc7ee84dda.
---
 clang/include/clang/AST/Type.h            |  7 +---
 clang/lib/Basic/Targets/AMDGPU.h          | 11 +----
 clang/lib/Sema/SemaExpr.cpp               | 47 ---------------------
 clang/test/CodeGenCUDA/builtins-amdgcn.cu | 68 +++----------------------
 4 files changed, 7 insertions(+), 126 deletions(-)

diff --git a/clang/include/clang/AST/Type.h b/clang/include/clang/AST/Type.h
index fc83c89..09e9705 100644
--- a/clang/include/clang/AST/Type.h
+++ b/clang/include/clang/AST/Type.h
@@ -495,12 +495,7 @@ public:
            (A == LangAS::Default &&
             (B == LangAS::sycl_private || B == LangAS::sycl_local ||
              B == LangAS::sycl_global || B == LangAS::sycl_global_device ||
-             B == LangAS::sycl_global_host)) ||
-           // In HIP device compilation, any cuda address space is allowed
-           // to implicitly cast into the default address space.
-           (A == LangAS::Default &&
-            (B == LangAS::cuda_constant || B == LangAS::cuda_device ||
-             B == LangAS::cuda_shared));
+             B == LangAS::sycl_global_host));
   }
 
   /// Returns true if the address space in these qualifiers is equal to or
diff --git a/clang/lib/Basic/Targets/AMDGPU.h b/clang/lib/Basic/Targets/AMDGPU.h
index f8772cb..2e580ec 100644
--- a/clang/lib/Basic/Targets/AMDGPU.h
+++ b/clang/lib/Basic/Targets/AMDGPU.h
@@ -352,16 +352,7 @@ public:
   }
 
   LangAS getCUDABuiltinAddressSpace(unsigned AS) const override {
-    switch (AS) {
-    case 1:
-      return LangAS::cuda_device;
-    case 3:
-      return LangAS::cuda_shared;
-    case 4:
-      return LangAS::cuda_constant;
-    default:
-      return getLangASFromTargetAS(AS);
-    }
+    return LangAS::Default;
   }
 
   llvm::Optional<LangAS> getConstantAddressSpace() const override {
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 5bde87d..8ef4a9d 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -6572,53 +6572,6 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
       return ExprError();
 
     checkDirectCallValidity(*this, Fn, FD, ArgExprs);
-
-    // If this expression is a call to a builtin function in HIP device
-    // compilation, allow a pointer-type argument to default address space to be
-    // passed as a pointer-type parameter to a non-default address space.
-    // If Arg is declared in the default address space and Param is declared
-    // in a non-default address space, perform an implicit address space cast to
-    // the parameter type.
-    if (getLangOpts().HIP && getLangOpts().CUDAIsDevice && FD &&
-        FD->getBuiltinID()) {
-      for (unsigned Idx = 0; Idx < FD->param_size(); ++Idx) {
-        ParmVarDecl *Param = FD->getParamDecl(Idx);
-        if (!ArgExprs[Idx] || !Param || !Param->getType()->isPointerType() ||
-            !ArgExprs[Idx]->getType()->isPointerType())
-          continue;
-
-        auto ParamAS = Param->getType()->getPointeeType().getAddressSpace();
-        auto ArgTy = ArgExprs[Idx]->getType();
-        auto ArgPtTy = ArgTy->getPointeeType();
-        auto ArgAS = ArgPtTy.getAddressSpace();
-
-        // Only allow implicit casting from a non-default address space pointee
-        // type to a default address space pointee type
-        if (ArgAS != LangAS::Default || ParamAS == LangAS::Default)
-          continue;
-
-        // First, ensure that the Arg is an RValue.
-        if (ArgExprs[Idx]->isGLValue()) {
-          ArgExprs[Idx] = ImplicitCastExpr::Create(
-              Context, ArgExprs[Idx]->getType(), CK_NoOp, ArgExprs[Idx],
-              nullptr, VK_PRValue, FPOptionsOverride());
-        }
-
-        // Construct a new arg type with address space of Param
-        Qualifiers ArgPtQuals = ArgPtTy.getQualifiers();
-        ArgPtQuals.setAddressSpace(ParamAS);
-        auto NewArgPtTy =
-            Context.getQualifiedType(ArgPtTy.getUnqualifiedType(), ArgPtQuals);
-        auto NewArgTy =
-            Context.getQualifiedType(Context.getPointerType(NewArgPtTy),
-                                     ArgTy.getQualifiers());
-
-        // Finally perform an implicit address space cast
-        ArgExprs[Idx] = ImpCastExprToType(ArgExprs[Idx], NewArgTy,
-                                          CK_AddressSpaceConversion)
-                            .get();
-      }
-    }
   }
 
   if (Context.isDependenceAllowed() &&
diff --git a/clang/test/CodeGenCUDA/builtins-amdgcn.cu b/clang/test/CodeGenCUDA/builtins-amdgcn.cu
index 6b0dc75..1283bf5 100644
--- a/clang/test/CodeGenCUDA/builtins-amdgcn.cu
+++ b/clang/test/CodeGenCUDA/builtins-amdgcn.cu
@@ -1,8 +1,8 @@
-// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 \
 // RUN: -aux-triple x86_64-unknown-linux-gnu -fcuda-is-device -emit-llvm %s \
 // RUN: -o - | FileCheck %s
 
-// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
+// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 \
 // RUN: -aux-triple x86_64-pc-windows-msvc -fcuda-is-device -emit-llvm %s \
 // RUN: -o - | FileCheck %s
 
@@ -10,7 +10,7 @@
 
 // CHECK-LABEL: @_Z16use_dispatch_ptrPi(
 // CHECK: %[[PTR:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-// CHECK: %{{.*}} = addrspacecast i8 addrspace(4)* %[[PTR]] to i32*
+// CHECK: %{{.*}} = addrspacecast i8 addrspace(4)* %[[PTR]] to i8*
 __global__ void use_dispatch_ptr(int* out) {
   const int* dispatch_ptr = (const int*)__builtin_amdgcn_dispatch_ptr();
   *out = *dispatch_ptr;
@@ -24,39 +24,6 @@ void test_ds_fmax(float src) {
   volatile float x = __builtin_amdgcn_ds_fmaxf(&shared, src, 0, 0, false);
 }
 
-// CHECK-LABEL: @_Z12test_ds_faddf(
-// CHECK: call contract float @llvm.amdgcn.ds.fadd.f32(float addrspace(3)* @_ZZ12test_ds_faddfE6shared, float %{{[^,]*}}, i32 0, i32 0, i1 false)
-__global__ void test_ds_fadd(float src) {
-  __shared__ float shared;
-  volatile float x = __builtin_amdgcn_ds_faddf(&shared, src, 0, 0, false);
-}
-
-// CHECK-LABEL: @_Z12test_ds_fminfPf(float %src, float addrspace(1)* %shared.coerce
-// CHECK: %shared = alloca float*, align 8, addrspace(5)
-// CHECK: %shared.ascast = addrspacecast float* addrspace(5)* %shared to float**
-// CHECK: %shared.addr = alloca float*, align 8, addrspace(5)
-// CHECK: %shared.addr.ascast = addrspacecast float* addrspace(5)* %shared.addr to float**
-// CHECK: %[[S0:.*]] = addrspacecast float addrspace(1)* %shared.coerce to float*
-// CHECK: store float* %[[S0]], float** %shared.ascast, align 8
-// CHECK: %shared1 = load float*, float** %shared.ascast, align 8
-// CHECK: store float* %shared1, float** %shared.addr.ascast, align 8
-// CHECK: %[[S1:.*]] = load float*, float** %shared.addr.ascast, align 8
-// CHECK: %[[S2:.*]] = addrspacecast float* %[[S1]] to float addrspace(3)*
-// CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[S2]]
-__global__ void test_ds_fmin(float src, float *shared) {
-  volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
-}
-
-// CHECK: @_Z33test_ret_builtin_nondef_addrspace
-// CHECK: %[[X:.*]] = alloca i8*, align 8, addrspace(5)
-// CHECK: %[[XC:.*]] = addrspacecast i8* addrspace(5)* %[[X]] to i8**
-// CHECK: %[[Y:.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
-// CHECK: %[[YASCAST:.*]] = addrspacecast i8 addrspace(4)* %[[Y]] to i8*
-// CHECK: store i8* %[[YASCAST]], i8** %[[XC]], align 8
-__device__ void test_ret_builtin_nondef_addrspace() {
-  void *x = __builtin_amdgcn_dispatch_ptr();
-}
-
 // CHECK-LABEL: @_Z6endpgmv(
 // CHECK: call void @llvm.amdgcn.endpgm()
 __global__ void endpgm() {
@@ -66,12 +33,12 @@ __global__ void endpgm() {
 
 // Check the 64 bit argument is correctly passed to the intrinsic without truncation or assertion.
 // CHECK-LABEL: @_Z14test_uicmp_i64
-// CHECK: store i64* %out1, i64** %out.addr.ascast
+// CHECK: store i64* %out, i64** %out.addr.ascast
 // CHECK-NEXT: store i64 %a, i64* %a.addr.ascast
 // CHECK-NEXT: store i64 %b, i64* %b.addr.ascast
 // CHECK-NEXT: %[[V0:.*]] = load i64, i64* %a.addr.ascast
 // CHECK-NEXT: %[[V1:.*]] = load i64, i64* %b.addr.ascast
-// CHECK-NEXT: %[[V2:.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 %[[V0]], i64 %[[V1]], i32 35)
+// CHECK-NEXT: %[[V2:.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 %0, i64 %1, i32 35)
 // CHECK-NEXT: %[[V3:.*]] = load i64*, i64** %out.addr.ascast
 // CHECK-NEXT: store i64 %[[V2]], i64* %[[V3]]
 // CHECK-NEXT: ret void
@@ -91,28 +58,3 @@
 __global__ void test_s_memtime(unsigned long long* out) {
   *out = __builtin_amdgcn_s_memtime();
 }
-
-// Check a generic pointer can be passed as a shared pointer and a generic pointer.
-__device__ void func(float *x);
-
-// CHECK: @_Z17test_ds_fmin_funcfPf
-// CHECK: %[[SHARED:.*]] = alloca float*, align 8, addrspace(5)
-// CHECK: %[[SHARED_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED]] to float**
-// CHECK: %[[SRC_ADDR:.*]] = alloca float, align 4, addrspace(5)
-// CHECK: %[[SRC_ADDR_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[SRC_ADDR]] to float*
-// CHECK: %[[SHARED_ADDR:.*]] = alloca float*, align 8, addrspace(5)
-// CHECK: %[[SHARED_ADDR_ASCAST:.*]] = addrspacecast float* addrspace(5)* %[[SHARED_ADDR]] to float**
-// CHECK: %[[X:.*]] = alloca float, align 4, addrspace(5)
-// CHECK: %[[X_ASCAST:.*]] = addrspacecast float addrspace(5)* %[[X]] to float*
-// CHECK: %[[SHARED1:.*]] = load float*, float** %[[SHARED_ASCAST]], align 8
-// CHECK: store float %src, float* %[[SRC_ADDR_ASCAST]], align 4
-// CHECK: store float* %[[SHARED1]], float** %[[SHARED_ADDR_ASCAST]], align 8
-// CHECK: %[[ARG0_PTR:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
-// CHECK: %[[ARG0:.*]] = addrspacecast float* %[[ARG0_PTR]] to float addrspace(3)*
-// CHECK: call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* %[[ARG0]]
-// CHECK: %[[ARG0:.*]] = load float*, float** %[[SHARED_ADDR_ASCAST]], align 8
-// CHECK: call void @_Z4funcPf(float* %[[ARG0]]) #8
-__global__ void test_ds_fmin_func(float src, float *__restrict shared) {
-  volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
-  func(shared);
-}
-- 
2.7.4