From b57b4b098cb5f87d30473f0ae5ad6b665d19e0db Mon Sep 17 00:00:00 2001
From: Warren Hunt
Date: Fri, 6 Jun 2014 22:12:37 +0000
Subject: [PATCH] [MS-ABI] Implement dynamic_cast

This patch implements call lowering from dynamic_cast to __RTDynamicCast
and __RTCastToVoid.  Test cases are included.  A feature of note is that
the helper function getPolymorphicOffset is placed so that it can also be
used by EmitTypeid (to be implemented in a later patch) without being
moved.  Details are included as comments directly in the code.

llvm-svn: 210377
---
 clang/lib/CodeGen/CGExprCXX.cpp                    | 139 +++++++++++++++++-
 .../test/CodeGenCXX/microsoft-abi-dynamic-cast.cpp | 157 +++++++++++++++++++++
 2 files changed, 292 insertions(+), 4 deletions(-)
 create mode 100644 clang/test/CodeGenCXX/microsoft-abi-dynamic-cast.cpp

diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index f2332bd..268f4aa 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -1628,6 +1628,20 @@ static void EmitBadTypeidCall(CodeGenFunction &CGF) {
   CGF.Builder.CreateUnreachable();
 }
 
+/// \brief Gets the offset to the virtual base that contains the vfptr for
+/// MS-ABI polymorphic types.
+static llvm::Value *getPolymorphicOffset(CodeGenFunction &CGF,
+                                         const CXXRecordDecl *RD,
+                                         llvm::Value *Value) {
+  const ASTContext &Context = RD->getASTContext();
+  for (const CXXBaseSpecifier &Base : RD->vbases())
+    if (Context.getASTRecordLayout(Base.getType()->getAsCXXRecordDecl())
+            .hasExtendableVFPtr())
+      return CGF.CGM.getCXXABI().GetVirtualBaseClassOffset(
+          CGF, Value, RD, Base.getType()->getAsCXXRecordDecl());
+  llvm_unreachable("One of our vbases should be polymorphic.");
+}
+
 static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
                                          llvm::Type *StdTypeInfoPtrTy) {
@@ -1686,7 +1700,7 @@ llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
                               StdTypeInfoPtrTy);
 }
 
-static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
+static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
   // void *__dynamic_cast(const void *sub,
   //                      const abi::__class_type_info *src,
   //                      const abi::__class_type_info *dst,
@@ -1774,7 +1788,7 @@ static CharUnits computeOffsetHint(ASTContext &Context,
 }
 
 static llvm::Value *
-EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
+EmitItaniumDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
                     QualType SrcTy, QualType DestTy,
                     llvm::BasicBlock *CastEnd) {
   llvm::Type *PtrDiffLTy =
@@ -1834,7 +1848,7 @@ EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
 
   Value = CGF.EmitCastToVoidPtr(Value);
   llvm::Value *args[] = { Value, SrcRTTI, DestRTTI, OffsetHint };
-  Value = CGF.EmitNounwindRuntimeCall(getDynamicCastFn(CGF), args);
+  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
   Value = CGF.Builder.CreateBitCast(Value, DestLTy);
 
   /// C++ [expr.dynamic.cast]p9:
@@ -1867,8 +1881,125 @@ static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
   return llvm::UndefValue::get(DestLTy);
 }
 
+namespace {
+struct MSDynamicCastBuilder {
+  MSDynamicCastBuilder(CodeGenFunction &CGF, const CXXDynamicCastExpr *DCE);
+  llvm::Value *emitDynamicCastCall(llvm::Value *Value);
+  llvm::Value *emitDynamicCast(llvm::Value *Value);
+
+  CodeGenFunction &CGF;
+  CGBuilderTy &Builder;
+  llvm::PointerType *Int8PtrTy;
+  QualType SrcTy, DstTy;
+  const CXXRecordDecl *SrcDecl;
+  bool IsPtrCast, IsCastToVoid, IsCastOfNull;
+};
+} // namespace
+
+MSDynamicCastBuilder::MSDynamicCastBuilder(CodeGenFunction &CGF,
+                                           const CXXDynamicCastExpr *DCE)
+    : CGF(CGF), Builder(CGF.Builder), Int8PtrTy(CGF.Int8PtrTy),
+      SrcDecl(nullptr) {
+  DstTy = DCE->getTypeAsWritten();
+  IsPtrCast = DstTy->isPointerType();
+  // Get the pointee types.  After this point the original types are not used.
+  DstTy = IsPtrCast ? DstTy->castAs<PointerType>()->getPointeeType()
+                    : DstTy->castAs<ReferenceType>()->getPointeeType();
+  IsCastToVoid = DstTy->isVoidType();
+  IsCastOfNull = DCE->isAlwaysNull();
+  if (IsCastOfNull)
+    return;
+  SrcTy = DCE->getSubExpr()->getType();
+  SrcTy = IsPtrCast ? SrcTy->castAs<PointerType>()->getPointeeType() : SrcTy;
+  SrcDecl = SrcTy->getAsCXXRecordDecl();
+  // If we don't need a base adjustment, we don't need a SrcDecl, so clear it
+  // here.  Later we use the existence of the SrcDecl to determine whether a
+  // base adjustment is needed.
+  if (CGF.getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr())
+    SrcDecl = nullptr;
+}
+
+llvm::Value *MSDynamicCastBuilder::emitDynamicCastCall(llvm::Value *Value) {
+  llvm::IntegerType *Int32Ty = CGF.Int32Ty;
+  llvm::Value *Offset = llvm::ConstantInt::get(Int32Ty, 0);
+  Value = Builder.CreateBitCast(Value, Int8PtrTy);
+  // If we need to perform a base adjustment, do it here.
+  if (SrcDecl) {
+    Offset = getPolymorphicOffset(CGF, SrcDecl, Value);
+    Value = Builder.CreateInBoundsGEP(Value, Offset);
+    Offset = Builder.CreateTrunc(Offset, Int32Ty);
+  }
+  if (IsCastToVoid) {
+    // PVOID __RTCastToVoid(
+    //   PVOID inptr)
+    llvm::Type *ArgTypes[] = {Int8PtrTy};
+    llvm::Constant *Function = CGF.CGM.CreateRuntimeFunction(
+        llvm::FunctionType::get(Int8PtrTy, ArgTypes, false), "__RTCastToVoid");
+    llvm::Value *Args[] = {Value};
+    return CGF.EmitRuntimeCall(Function, Args);
+  }
+  // PVOID __RTDynamicCast(
+  //   PVOID inptr,
+  //   LONG VfDelta,
+  //   PVOID SrcType,
+  //   PVOID TargetType,
+  //   BOOL isReference)
+  llvm::Type *ArgTypes[] = {Int8PtrTy, Int32Ty, Int8PtrTy, Int8PtrTy, Int32Ty};
+  llvm::Constant *Function = CGF.CGM.CreateRuntimeFunction(
+      llvm::FunctionType::get(Int8PtrTy, ArgTypes, false), "__RTDynamicCast");
+  llvm::Value *Args[] = {
+      Value, Offset,
+      CGF.CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType()),
+      CGF.CGM.GetAddrOfRTTIDescriptor(DstTy.getUnqualifiedType()),
+      llvm::ConstantInt::get(Int32Ty, IsPtrCast ? 0 : 1)};
+  return CGF.EmitRuntimeCall(Function, Args);
+}
+
+llvm::Value *MSDynamicCastBuilder::emitDynamicCast(llvm::Value *Value) {
+  // Note about undefined behavior: if the dynamic_cast is casting to a
+  // reference type and the input is null, we hit a grey area in the standard.
+  // Here we're interpreting the behavior as undefined.  The effects are the
+  // following: if the compiler determines that the argument is statically
+  // null, or if the argument is dynamically null but does not require a base
+  // adjustment, __RTDynamicCast will be called with a null argument and the
+  // isReference bit set.  In this case __RTDynamicCast will throw
+  // std::bad_cast.  If the argument is dynamically null and a base adjustment
+  // is required, the resulting code will produce an out-of-bounds memory
+  // reference when trying to read VBTblPtr.  In Itanium mode clang also emits
+  // a vtable load that fails at run time.
+  llvm::PointerType *DstLTy = CGF.ConvertType(DstTy)->getPointerTo();
+  if (IsCastOfNull && IsPtrCast)
+    return Builder.CreateBitCast(Value, DstLTy);
+  if (IsCastOfNull || !IsPtrCast || !SrcDecl)
+    return Builder.CreateBitCast(emitDynamicCastCall(Value), DstLTy);
+  // !IsCastOfNull && IsPtrCast && SrcDecl
+  // In this case we have a pointer that requires a base adjustment.  An
+  // adjustment is only required if the pointer is actually valid, so here we
+  // perform a null check before doing the base adjustment and calling
+  // __RTDynamicCast.  If the argument is null we simply return null without
+  // calling __RTDynamicCast.
+  llvm::BasicBlock *EntryBlock = Builder.GetInsertBlock();
+  llvm::BasicBlock *CallBlock = CGF.createBasicBlock("dynamic_cast.valid");
+  llvm::BasicBlock *ExitBlock = CGF.createBasicBlock("dynamic_cast.call");
+  Builder.CreateCondBr(Builder.CreateIsNull(Value), ExitBlock, CallBlock);
+  // Emit the call block and the code for it.
+  CGF.EmitBlock(CallBlock);
+  Value = emitDynamicCastCall(Value);
+  // Emit the exit block and the phi nodes for it.
+  CGF.EmitBlock(ExitBlock);
+  llvm::PHINode *ValuePHI = Builder.CreatePHI(Int8PtrTy, 2);
+  ValuePHI->addIncoming(Value, CallBlock);
+  ValuePHI->addIncoming(llvm::Constant::getNullValue(Int8PtrTy), EntryBlock);
+  return Builder.CreateBitCast(ValuePHI, DstLTy);
+}
+
 llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
                                               const CXXDynamicCastExpr *DCE) {
+  if (getTarget().getCXXABI().isMicrosoft()) {
+    MSDynamicCastBuilder Builder(*this, DCE);
+    return Builder.emitDynamicCast(Value);
+  }
+
   QualType DestTy = DCE->getTypeAsWritten();
 
   if (DCE->isAlwaysNull())
@@ -1894,7 +2025,7 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
     EmitBlock(CastNotNull);
   }
 
-  Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);
+  Value = EmitItaniumDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);
 
   if (ShouldNullCheckSrcValue) {
     EmitBranch(CastEnd);
diff --git a/clang/test/CodeGenCXX/microsoft-abi-dynamic-cast.cpp b/clang/test/CodeGenCXX/microsoft-abi-dynamic-cast.cpp
new file mode 100644
index 0000000..9fceaae
--- /dev/null
+++ b/clang/test/CodeGenCXX/microsoft-abi-dynamic-cast.cpp
@@ -0,0 +1,157 @@
+// RUN: %clang_cc1 -emit-llvm -O2 -optzns -o - -triple=i386-pc-win32 2>/dev/null %s | FileCheck %s
+
+struct S { char a; };
+struct V { virtual void f(){} };
+struct A : virtual V {};
+struct B : S, virtual V {};
+struct T {};
+
+T* test0() { return dynamic_cast<T*>((B*)0); }
+// CHECK: define noalias %struct.T* @"\01?test0@@YAPAUT@@XZ"() #0 {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: ret %struct.T* null
+// CHECK-NEXT: }
+
+T* test1(V* x) { return &dynamic_cast<T&>(*x); }
+// CHECK: define %struct.T* @"\01?test1@@YAPAUT@@PAUV@@@Z"(%struct.V* %x) #1 {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: %0 = bitcast %struct.V* %x to i8*
+// CHECK-NEXT: %1 = tail call i8* @__RTDynamicCast(i8* %0, i32 0, i8* bitcast (%"MSRTTITypeDescriptor\07"* @"\01??_R0?AUV@@@8" to i8*), i8* bitcast (%"MSRTTITypeDescriptor\07"* @"\01??_R0?AUT@@@8" to i8*), i32 1) #2
+// CHECK-NEXT: %2 = bitcast i8* %1 to %struct.T*
+// CHECK-NEXT: ret %struct.T* %2
+// CHECK-NEXT: }
+
+T* test2(A* x) { return &dynamic_cast<T&>(*x); }
+// CHECK: define %struct.T* @"\01?test2@@YAPAUT@@PAUA@@@Z"(%struct.A* %x) #1 {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: %0 = bitcast %struct.A* %x to i8*
+// CHECK-NEXT: %1 = bitcast %struct.A* %x to i8**
+// CHECK-NEXT: %vbtable = load i8** %1, align 4
+// CHECK-NEXT: %2 = getelementptr inbounds i8* %vbtable, i32 4
+// CHECK-NEXT: %3 = bitcast i8* %2 to i32*
+// CHECK-NEXT: %vbase_offs = load i32* %3, align 4
+// CHECK-NEXT: %4 = getelementptr inbounds i8* %0, i32 %vbase_offs
+// CHECK-NEXT: %5 = tail call i8* @__RTDynamicCast(i8* %4, i32 %vbase_offs, i8* bitcast (%"MSRTTITypeDescriptor\07"* @"\01??_R0?AUA@@@8" to i8*), i8* bitcast (%"MSRTTITypeDescriptor\07"* @"\01??_R0?AUT@@@8" to i8*), i32 1) #2
+// CHECK-NEXT: %6 = bitcast i8* %5 to %struct.T*
+// CHECK-NEXT: ret %struct.T* %6
+// CHECK-NEXT: }
+
+T* test3(B* x) { return &dynamic_cast<T&>(*x); }
+// CHECK: define %struct.T* @"\01?test3@@YAPAUT@@PAUB@@@Z"(%struct.B* %x) #1 {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: %0 = getelementptr inbounds %struct.B* %x, i32 0, i32 0, i32 0
+// CHECK-NEXT: %vbptr = getelementptr inbounds i8* %0, i32 4
+// CHECK-NEXT: %1 = bitcast i8* %vbptr to i8**
+// CHECK-NEXT: %vbtable = load i8** %1, align 4
+// CHECK-NEXT: %2 = getelementptr inbounds i8* %vbtable, i32 4
+// CHECK-NEXT: %3 = bitcast i8* %2 to i32*
+// CHECK-NEXT: %vbase_offs = load i32* %3, align 4
+// CHECK-NEXT: %4 = add nsw i32 %vbase_offs, 4
+// CHECK-NEXT: %5 = getelementptr inbounds i8* %0, i32 %4
+// CHECK-NEXT: %6 = tail call i8* @__RTDynamicCast(i8* %5, i32 %4, i8* bitcast (%"MSRTTITypeDescriptor\07"* @"\01??_R0?AUB@@@8" to i8*), i8* bitcast (%"MSRTTITypeDescriptor\07"* @"\01??_R0?AUT@@@8" to i8*), i32 1) #2
+// CHECK-NEXT: %7 = bitcast i8* %6 to %struct.T*
+// CHECK-NEXT: ret %struct.T* %7
+// CHECK-NEXT: }
+
+T* test4(V* x) { return dynamic_cast<T*>(x); }
+// CHECK: define %struct.T* @"\01?test4@@YAPAUT@@PAUV@@@Z"(%struct.V* %x) #1 {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: %0 = bitcast %struct.V* %x to i8*
+// CHECK-NEXT: %1 = tail call i8* @__RTDynamicCast(i8* %0, i32 0, i8* bitcast (%"MSRTTITypeDescriptor\07"* @"\01??_R0?AUV@@@8" to i8*), i8* bitcast (%"MSRTTITypeDescriptor\07"* @"\01??_R0?AUT@@@8" to i8*), i32 0) #2
+// CHECK-NEXT: %2 = bitcast i8* %1 to %struct.T*
+// CHECK-NEXT: ret %struct.T* %2
+// CHECK-NEXT: }
+
+T* test5(A* x) { return dynamic_cast<T*>(x); }
+// CHECK: define %struct.T* @"\01?test5@@YAPAUT@@PAUA@@@Z"(%struct.A* %x) #1 {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: %0 = icmp eq %struct.A* %x, null
+// CHECK-NEXT: br i1 %0, label %dynamic_cast.call, label %dynamic_cast.valid
+// CHECK: dynamic_cast.valid: ; preds = %entry
+// CHECK-NEXT: %1 = bitcast %struct.A* %x to i8*
+// CHECK-NEXT: %2 = bitcast %struct.A* %x to i8**
+// CHECK-NEXT: %vbtable = load i8** %2, align 4
+// CHECK-NEXT: %3 = getelementptr inbounds i8* %vbtable, i32 4
+// CHECK-NEXT: %4 = bitcast i8* %3 to i32*
+// CHECK-NEXT: %vbase_offs = load i32* %4, align 4
+// CHECK-NEXT: %5 = getelementptr inbounds i8* %1, i32 %vbase_offs
+// CHECK-NEXT: %6 = tail call i8* @__RTDynamicCast(i8* %5, i32 %vbase_offs, i8* bitcast (%"MSRTTITypeDescriptor\07"* @"\01??_R0?AUA@@@8" to i8*), i8* bitcast (%"MSRTTITypeDescriptor\07"* @"\01??_R0?AUT@@@8" to i8*), i32 0) #2
+// CHECK-NEXT: %phitmp = bitcast i8* %6 to %struct.T*
+// CHECK-NEXT: br label %dynamic_cast.call
+// CHECK: dynamic_cast.call: ; preds = %dynamic_cast.valid, %entry
+// CHECK-NEXT: %7 = phi %struct.T* [ %phitmp, %dynamic_cast.valid ], [ null, %entry ]
+// CHECK-NEXT: ret %struct.T* %7
+// CHECK-NEXT: }
+
+T* test6(B* x) { return dynamic_cast<T*>(x); }
+// CHECK: define %struct.T* @"\01?test6@@YAPAUT@@PAUB@@@Z"(%struct.B* %x) #1 {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: %0 = icmp eq %struct.B* %x, null
+// CHECK-NEXT: br i1 %0, label %dynamic_cast.call, label %dynamic_cast.valid
+// CHECK: dynamic_cast.valid: ; preds = %entry
+// CHECK-NEXT: %1 = getelementptr inbounds %struct.B* %x, i32 0, i32 0, i32 0
+// CHECK-NEXT: %vbptr = getelementptr inbounds i8* %1, i32 4
+// CHECK-NEXT: %2 = bitcast i8* %vbptr to i8**
+// CHECK-NEXT: %vbtable = load i8** %2, align 4
+// CHECK-NEXT: %3 = getelementptr inbounds i8* %vbtable, i32 4
+// CHECK-NEXT: %4 = bitcast i8* %3 to i32*
+// CHECK-NEXT: %vbase_offs = load i32* %4, align 4
+// CHECK-NEXT: %5 = add nsw i32 %vbase_offs, 4
+// CHECK-NEXT: %6 = getelementptr inbounds i8* %1, i32 %5
+// CHECK-NEXT: %7 = tail call i8* @__RTDynamicCast(i8* %6, i32 %5, i8* bitcast (%"MSRTTITypeDescriptor\07"* @"\01??_R0?AUB@@@8" to i8*), i8* bitcast (%"MSRTTITypeDescriptor\07"* @"\01??_R0?AUT@@@8" to i8*), i32 0) #2
+// CHECK-NEXT: %phitmp = bitcast i8* %7 to %struct.T*
+// CHECK-NEXT: br label %dynamic_cast.call
+// CHECK: dynamic_cast.call: ; preds = %dynamic_cast.valid, %entry
+// CHECK-NEXT: %8 = phi %struct.T* [ %phitmp, %dynamic_cast.valid ], [ null, %entry ]
+// CHECK-NEXT: ret %struct.T* %8
+// CHECK-NEXT: }
+
+void* test7(V* x) { return dynamic_cast<void*>(x); }
+// CHECK: define i8* @"\01?test7@@YAPAXPAUV@@@Z"(%struct.V* %x) #1 {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: %0 = bitcast %struct.V* %x to i8*
+// CHECK-NEXT: %1 = tail call i8* @__RTCastToVoid(i8* %0) #2
+// CHECK-NEXT: ret i8* %1
+// CHECK-NEXT: }
+
+void* test8(A* x) { return dynamic_cast<void*>(x); }
+// CHECK: define i8* @"\01?test8@@YAPAXPAUA@@@Z"(%struct.A* %x) #1 {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: %0 = icmp eq %struct.A* %x, null
+// CHECK-NEXT: br i1 %0, label %dynamic_cast.call, label %dynamic_cast.valid
+// CHECK: dynamic_cast.valid: ; preds = %entry
+// CHECK-NEXT: %1 = bitcast %struct.A* %x to i8*
+// CHECK-NEXT: %2 = bitcast %struct.A* %x to i8**
+// CHECK-NEXT: %vbtable = load i8** %2, align 4
+// CHECK-NEXT: %3 = getelementptr inbounds i8* %vbtable, i32 4
+// CHECK-NEXT: %4 = bitcast i8* %3 to i32*
+// CHECK-NEXT: %vbase_offs = load i32* %4, align 4
+// CHECK-NEXT: %5 = getelementptr inbounds i8* %1, i32 %vbase_offs
+// CHECK-NEXT: %6 = tail call i8* @__RTCastToVoid(i8* %5) #2
+// CHECK-NEXT: br label %dynamic_cast.call
+// CHECK: dynamic_cast.call: ; preds = %dynamic_cast.valid, %entry
+// CHECK-NEXT: %7 = phi i8* [ %6, %dynamic_cast.valid ], [ null, %entry ]
+// CHECK-NEXT: ret i8* %7
+// CHECK-NEXT: }
+
+void* test9(B* x) { return dynamic_cast<void*>(x); }
+// CHECK: define i8* @"\01?test9@@YAPAXPAUB@@@Z"(%struct.B* %x) #1 {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: %0 = icmp eq %struct.B* %x, null
+// CHECK-NEXT: br i1 %0, label %dynamic_cast.call, label %dynamic_cast.valid
+// CHECK: dynamic_cast.valid: ; preds = %entry
+// CHECK-NEXT: %1 = getelementptr inbounds %struct.B* %x, i32 0, i32 0, i32 0
+// CHECK-NEXT: %vbptr = getelementptr inbounds i8* %1, i32 4
+// CHECK-NEXT: %2 = bitcast i8* %vbptr to i8**
+// CHECK-NEXT: %vbtable = load i8** %2, align 4
+// CHECK-NEXT: %3 = getelementptr inbounds i8* %vbtable, i32 4
+// CHECK-NEXT: %4 = bitcast i8* %3 to i32*
+// CHECK-NEXT: %vbase_offs = load i32* %4, align 4
+// CHECK-NEXT: %5 = add nsw i32 %vbase_offs, 4
+// CHECK-NEXT: %6 = getelementptr inbounds i8* %1, i32 %5
+// CHECK-NEXT: %7 = tail call i8* @__RTCastToVoid(i8* %6) #2
+// CHECK-NEXT: br label %dynamic_cast.call
+// CHECK: dynamic_cast.call: ; preds = %dynamic_cast.valid, %entry
+// CHECK-NEXT: %8 = phi i8* [ %7, %dynamic_cast.valid ], [ null, %entry ]
+// CHECK-NEXT: ret i8* %8
+// CHECK-NEXT: }
-- 
2.7.4
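
For reference, a minimal sketch of the Microsoft runtime entry points this lowering targets, assuming the prototypes documented in the emitDynamicCastCall comments above (PVOID taken as void*, LONG as a 32-bit long, and BOOL as int for the i386-pc-win32 triple); these declarations are illustrative only, not part of the patch:

    // Hypothetical C++ declarations matching the prototypes documented above.
    extern "C" void *__RTDynamicCast(void *InPtr,      // base-adjusted object pointer (see getPolymorphicOffset)
                                     long VfDelta,     // offset that was applied to reach the vfptr
                                     void *SrcType,    // TypeDescriptor of the static source type
                                     void *TargetType, // TypeDescriptor of the destination type
                                     int IsReference); // nonzero for reference casts
    extern "C" void *__RTCastToVoid(void *InPtr);      // implements dynamic_cast<void*>

The IsReference flag corresponds to the final i32 0/1 argument in the CHECK lines above: pointer casts pass 0 and yield a null result on failure, while reference casts pass 1 so that the runtime throws std::bad_cast instead, as noted in the undefined-behavior comment in emitDynamicCast.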