return StackRealignable;
}
- /// Return the skew that has to be applied to stack alignment under
- /// certain conditions (e.g. stack was adjusted before function \p MF
- /// was called).
- virtual unsigned getStackAlignmentSkew(const MachineFunction &MF) const;
-
/// This method returns whether or not it is safe for an object with the
/// given stack id to be bundled into the local area.
virtual bool isStackIdSafeForLocalArea(unsigned StackId) const {
/// registers.
X86_VectorCall = 80,
- /// Used by HipHop Virtual Machine (HHVM) to perform calls to and from
- /// translation cache, and for calling PHP functions. HHVM calling
- /// convention supports tail/sibling call elimination.
- HHVM = 81,
-
- /// HHVM calling convention for invoking C/C++ helpers.
- HHVM_C = 82,
+ /// Placeholders for HHVM calling conventions (deprecated, removed).
+ DUMMY_HHVM = 81,
+ DUMMY_HHVM_C = 82,
/// x86 hardware interrupt context. Callee may take one or two parameters,
/// where the 1st represents a pointer to hardware context frame and the 2nd
case lltok::kw_swiftcc: CC = CallingConv::Swift; break;
case lltok::kw_swifttailcc: CC = CallingConv::SwiftTail; break;
case lltok::kw_x86_intrcc: CC = CallingConv::X86_INTR; break;
- case lltok::kw_hhvmcc: CC = CallingConv::HHVM; break;
- case lltok::kw_hhvm_ccc: CC = CallingConv::HHVM_C; break;
+ case lltok::kw_hhvmcc:
+ CC = CallingConv::DUMMY_HHVM;
+ break;
+ case lltok::kw_hhvm_ccc:
+ CC = CallingConv::DUMMY_HHVM_C;
+ break;
case lltok::kw_cxx_fast_tlscc: CC = CallingConv::CXX_FAST_TLS; break;
case lltok::kw_amdgpu_vs: CC = CallingConv::AMDGPU_VS; break;
case lltok::kw_amdgpu_gfx: CC = CallingConv::AMDGPU_Gfx; break;
/// AdjustStackOffset - Helper function used to adjust the stack frame offset.
static inline void AdjustStackOffset(MachineFrameInfo &MFI, int FrameIdx,
bool StackGrowsDown, int64_t &Offset,
- Align &MaxAlign, unsigned Skew) {
+ Align &MaxAlign) {
// If the stack grows down, add the object size to find the lowest address.
if (StackGrowsDown)
Offset += MFI.getObjectSize(FrameIdx);
MaxAlign = std::max(MaxAlign, Alignment);
// Adjust to alignment boundary.
- Offset = alignTo(Offset, Alignment, Skew);
+ Offset = alignTo(Offset, Alignment);
if (StackGrowsDown) {
LLVM_DEBUG(dbgs() << "alloc FI(" << FrameIdx << ") at SP[" << -Offset
static void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
SmallSet<int, 16> &ProtectedObjs,
MachineFrameInfo &MFI, bool StackGrowsDown,
- int64_t &Offset, Align &MaxAlign,
- unsigned Skew) {
+ int64_t &Offset, Align &MaxAlign) {
for (int i : UnassignedObjs) {
- AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign, Skew);
+ AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
ProtectedObjs.insert(i);
}
}
&& "Local area offset should be in direction of stack growth");
int64_t Offset = LocalAreaOffset;
- // Skew to be applied to alignment.
- unsigned Skew = TFI.getStackAlignmentSkew(MF);
-
#ifdef EXPENSIVE_CHECKS
for (unsigned i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i)
if (!MFI.isDeadObjectIndex(i) &&
if (!StackGrowsDown && MFI.isDeadObjectIndex(FrameIndex))
continue;
- AdjustStackOffset(MFI, FrameIndex, StackGrowsDown, Offset, MaxAlign,
- Skew);
+ AdjustStackOffset(MFI, FrameIndex, StackGrowsDown, Offset, MaxAlign);
}
}
SmallVector<int, 2> SFIs;
RS->getScavengingFrameIndices(SFIs);
for (int SFI : SFIs)
- AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
+ AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
}
// FIXME: Once this is working, then enable flag will change to a target
Align Alignment = MFI.getLocalFrameMaxAlign();
// Adjust to alignment boundary.
- Offset = alignTo(Offset, Alignment, Skew);
+ Offset = alignTo(Offset, Alignment);
LLVM_DEBUG(dbgs() << "Local frame base offset: " << Offset << "\n");
"Stack protector on non-default stack expected to not be "
"pre-allocated by LocalStackSlotPass.");
} else if (!MFI.getUseLocalStackAllocationBlock()) {
- AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset, MaxAlign,
- Skew);
+ AdjustStackOffset(MFI, StackProtectorFI, StackGrowsDown, Offset,
+ MaxAlign);
} else if (!MFI.isObjectPreAllocated(MFI.getStackProtectorIndex())) {
llvm_unreachable(
"Stack protector not pre-allocated by LocalStackSlotPass.");
"LocalStackSlotPass.");
AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
- Offset, MaxAlign, Skew);
+ Offset, MaxAlign);
AssignProtectedObjSet(SmallArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
- Offset, MaxAlign, Skew);
+ Offset, MaxAlign);
AssignProtectedObjSet(AddrOfObjs, ProtectedObjs, MFI, StackGrowsDown,
- Offset, MaxAlign, Skew);
+ Offset, MaxAlign);
}
SmallVector<int, 8> ObjectsToAllocate;
// Allocate the EH registration node first if one is present.
if (EHRegNodeFrameIndex != std::numeric_limits<int>::max())
AdjustStackOffset(MFI, EHRegNodeFrameIndex, StackGrowsDown, Offset,
- MaxAlign, Skew);
+ MaxAlign);
// Give the targets a chance to order the objects the way they like it.
if (MF.getTarget().getOptLevel() != CodeGenOpt::None &&
for (auto &Object : ObjectsToAllocate)
if (!scavengeStackSlot(MFI, Object, StackGrowsDown, MaxAlign,
StackBytesFree))
- AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign, Skew);
+ AdjustStackOffset(MFI, Object, StackGrowsDown, Offset, MaxAlign);
// Make sure the special register scavenging spill slot is closest to the
// stack pointer.
SmallVector<int, 2> SFIs;
RS->getScavengingFrameIndices(SFIs);
for (int SFI : SFIs)
- AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign, Skew);
+ AdjustStackOffset(MFI, SFI, StackGrowsDown, Offset, MaxAlign);
}
if (!TFI.targetHandlesStackFrameRounding()) {
// SP not FP. Align to MaxAlign so this works.
StackAlign = std::max(StackAlign, MaxAlign);
int64_t OffsetBeforeAlignment = Offset;
- Offset = alignTo(Offset, StackAlign, Skew);
+ Offset = alignTo(Offset, StackAlign);
// If we have increased the offset to fulfill the alignment constrants,
// then the scavenging spill slots may become harder to reach from the
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
-#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/MC/MCAsmInfo.h"
}
}
-unsigned TargetFrameLowering::getStackAlignmentSkew(
- const MachineFunction &MF) const {
- // When HHVM function is called, the stack is skewed as the return address
- // is removed from the stack before we enter the function.
- if (LLVM_UNLIKELY(MF.getFunction().getCallingConv() == CallingConv::HHVM))
- return MF.getTarget().getAllocaPointerSize();
-
- return 0;
-}
-
bool TargetFrameLowering::allocateScavengingFrameIndexesNearIncomingSP(
const MachineFunction &MF) const {
if (!hasFP(MF))
case CallingConv::Swift: Out << "swiftcc"; break;
case CallingConv::SwiftTail: Out << "swifttailcc"; break;
case CallingConv::X86_INTR: Out << "x86_intrcc"; break;
- case CallingConv::HHVM: Out << "hhvmcc"; break;
- case CallingConv::HHVM_C: Out << "hhvm_ccc"; break;
+ case CallingConv::DUMMY_HHVM:
+ Out << "hhvmcc";
+ break;
+ case CallingConv::DUMMY_HHVM_C:
+ Out << "hhvm_ccc";
+ break;
case CallingConv::AMDGPU_VS: Out << "amdgpu_vs"; break;
case CallingConv::AMDGPU_LS: Out << "amdgpu_ls"; break;
case CallingConv::AMDGPU_HS: Out << "amdgpu_hs"; break;
CCCustom<"CC_X86_AnyReg_Error">
]>;
-// X86-64 HHVM return-value convention.
-def RetCC_X86_64_HHVM: CallingConv<[
- // Promote all types to i64
- CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
-
- // Return: could return in any GP register save RSP and R12.
- CCIfType<[i64], CCAssignToReg<[RBX, RBP, RDI, RSI, RDX, RCX, R8, R9,
- RAX, R10, R11, R13, R14, R15]>>
-]>;
-
defm X86_32_RegCall :
X86_RegCall_base<RC_X86_32_RegCall>;
// Handle Vectorcall CC
CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<RetCC_X86_64_Vectorcall>>,
- // Handle HHVM calls.
- CCIfCC<"CallingConv::HHVM", CCDelegateTo<RetCC_X86_64_HHVM>>,
-
CCIfCC<"CallingConv::X86_RegCall",
CCIfSubtarget<"isTargetWin64()",
CCDelegateTo<RetCC_X86_Win64_RegCall>>>,
CCAssignToStack<64, 64>>
]>;
-// Calling convention for X86-64 HHVM.
-def CC_X86_64_HHVM : CallingConv<[
- // Use all/any GP registers for args, except RSP.
- CCIfType<[i64], CCAssignToReg<[RBX, R12, RBP, R15,
- RDI, RSI, RDX, RCX, R8, R9,
- RAX, R10, R11, R13, R14]>>
-]>;
-
-// Calling convention for helper functions in HHVM.
-def CC_X86_64_HHVM_C : CallingConv<[
- // Pass the first argument in RBP.
- CCIfType<[i64], CCAssignToReg<[RBP]>>,
-
- // Otherwise it's the same as the regular C calling convention.
- CCDelegateTo<CC_X86_64_C>
-]>;
-
// Calling convention used on Win64
def CC_X86_Win64_C : CallingConv<[
// FIXME: Handle varargs.
CCIfCC<"CallingConv::Win64", CCDelegateTo<CC_X86_Win64_C>>,
CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,
CCIfCC<"CallingConv::X86_VectorCall", CCDelegateTo<CC_X86_Win64_VectorCall>>,
- CCIfCC<"CallingConv::HHVM", CCDelegateTo<CC_X86_64_HHVM>>,
- CCIfCC<"CallingConv::HHVM_C", CCDelegateTo<CC_X86_64_HHVM_C>>,
CCIfCC<"CallingConv::X86_RegCall",
CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_RegCall>>>,
CCIfCC<"CallingConv::X86_RegCall", CCDelegateTo<CC_X86_SysV64_RegCall>>,
(sequence "ZMM%u", 16, 31),
K4, K5, K6, K7)>;
-// Only R12 is preserved for PHP calls in HHVM.
-def CSR_64_HHVM : CalleeSavedRegs<(add R12)>;
-
// Register calling convention preserves few GPR and XMM8-15
def CSR_32_RegCall_NoSSE : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
def CSR_32_RegCall : CalleeSavedRegs<(add CSR_32_RegCall_NoSSE,
static bool canGuaranteeTCO(CallingConv::ID CC) {
return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
- CC == CallingConv::HHVM || CC == CallingConv::Tail ||
- CC == CallingConv::SwiftTail);
+ CC == CallingConv::Tail || CC == CallingConv::SwiftTail);
}
/// Return true if we might ever do TCO for calls with this calling convention.
return CSR_64_Intel_OCL_BI_SaveList;
break;
}
- case CallingConv::HHVM:
- return CSR_64_HHVM_SaveList;
case CallingConv::X86_RegCall:
if (Is64Bit) {
if (IsWin64) {
return CSR_64_Intel_OCL_BI_RegMask;
break;
}
- case CallingConv::HHVM:
- return CSR_64_HHVM_RegMask;
case CallingConv::X86_RegCall:
if (Is64Bit) {
if (IsWin64) {
+++ /dev/null
-; RUN: llc < %s | FileCheck %s
-
-target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-declare hhvmcc i64 @bar(i64, i64, i64) nounwind
-
-; Simply check we can modify %rbx and %rbp before returning via call to bar.
-define hhvmcc i64 @foo(i64 %a, i64 %b, i64 %c) nounwind {
-entry:
-; CHECK-LABEL: foo:
-; CHECK-DAG: movl $1, %ebx
-; CHECK-DAG: movl $3, %ebp
-; CHECK: jmp bar
- %ret = musttail call hhvmcc i64 @bar(i64 1, i64 %b, i64 3)
- ret i64 %ret
-}
-
-; Check that we can read and modify %rbx returned from PHP function.
-define hhvmcc i64 @mod_return(i64 %a, i64 %b, i64 %c) nounwind {
-entry:
-; CHECK-LABEL: mod_return:
-; CHECK-NEXT: {{^#.*}}
-; CHECK-NEXT: callq bar
-; CHECK-NEXT: incq %rbx
- %tmp = call hhvmcc i64 @bar(i64 %a, i64 %b, i64 %c)
- %retval = add i64 %tmp, 1
- ret i64 %retval
-}
-
-%rettype = type { i64, i64, i64, i64, i64, i64, i64,
- i64, i64, i64, i64, i64, i64, i64
-}
-
-; Check that we can return up to 14 64-bit args in registers.
-define hhvmcc %rettype @return_all(i64 %a, i64 %b, i64 %c) nounwind {
-entry:
-; CHECK-LABEL: return_all:
-; CHECK-DAG: movl $1, %ebx
-; CHECK-DAG: movl $2, %ebp
-; CHECK-DAG: movl $3, %edi
-; CHECK-DAG: movl $4, %esi
-; CHECK-DAG: movl $5, %edx
-; CHECK-DAG: movl $6, %ecx
-; CHECK-DAG: movl $7, %r8
-; CHECK-DAG: movl $8, %r9
-; CHECK-DAG: movl $9, %eax
-; CHECK-DAG: movl $10, %r10
-; CHECK-DAG: movl $11, %r11
-; CHECK-DAG: movl $12, %r13
-; CHECK-DAG: movl $13, %r14
-; CHECK-DAG: movl $14, %r15
-; CHECK: retq
- %r1 = insertvalue %rettype zeroinitializer, i64 1, 0
- %r2 = insertvalue %rettype %r1, i64 2, 1
- %r3 = insertvalue %rettype %r2, i64 3, 2
- %r4 = insertvalue %rettype %r3, i64 4, 3
- %r5 = insertvalue %rettype %r4, i64 5, 4
- %r6 = insertvalue %rettype %r5, i64 6, 5
- %r7 = insertvalue %rettype %r6, i64 7, 6
- %r8 = insertvalue %rettype %r7, i64 8, 7
- %r9 = insertvalue %rettype %r8, i64 9, 8
- %r10 = insertvalue %rettype %r9, i64 10, 9
- %r11 = insertvalue %rettype %r10, i64 11, 10
- %r12 = insertvalue %rettype %r11, i64 12, 11
- %r13 = insertvalue %rettype %r12, i64 13, 12
- %r14 = insertvalue %rettype %r13, i64 14, 13
- ret %rettype %r14
-}
-
-declare hhvmcc void @return_all_tc(i64, i64, i64, i64, i64, i64, i64, i64,
- i64, i64, i64, i64, i64, i64, i64)
-
-; Check that we can return up to 14 64-bit args in registers via tail call.
-define hhvmcc void @test_return_all_tc(i64 %a, i64 %b, i64 %c) nounwind {
-entry:
-; CHECK-LABEL: test_return_all_tc:
-; CHECK-NEXT: {{^#.*}}
-; CHECK-DAG: movl $1, %ebx
-; CHECK-DAG: movl $3, %ebp
-; CHECK-DAG: movl $4, %r15
-; CHECK-DAG: movl $5, %edi
-; CHECK-DAG: movl $6, %esi
-; CHECK-DAG: movl $7, %edx
-; CHECK-DAG: movl $8, %ecx
-; CHECK-DAG: movl $9, %r8
-; CHECK-DAG: movl $10, %r9
-; CHECK-DAG: movl $11, %eax
-; CHECK-DAG: movl $12, %r10
-; CHECK-DAG: movl $13, %r11
-; CHECK-DAG: movl $14, %r13
-; CHECK-DAG: movl $15, %r14
-; CHECK: jmp return_all_tc
- tail call hhvmcc void @return_all_tc(
- i64 1, i64 %b, i64 3, i64 4, i64 5, i64 6, i64 7,
- i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15)
- ret void
-}
-
-declare hhvmcc {i64, i64} @php_short(i64, i64, i64, i64)
-
-define hhvmcc i64 @test_php_short(i64 %a, i64 %b, i64 %c) nounwind {
-entry:
-; CHECK-LABEL: test_php_short:
-; CHECK-NEXT: {{^#.*}}
-; CHECK-NEXT: movl $42, %r15
-; CHECK-NEXT: callq php_short
-; CHECK-NEXT: leaq (%rbp,%r12), %rbx
-; CHECK-NEXT: retq
- %pair = call hhvmcc {i64, i64} @php_short(i64 %a, i64 %b, i64 %c, i64 42)
- %fp = extractvalue {i64, i64} %pair, 1
- %rv = add i64 %fp, %b
- ret i64 %rv
-}
-
-declare hhvmcc %rettype @php_all(i64, i64, i64, i64, i64, i64, i64,
- i64, i64, i64, i64, i64, i64, i64, i64)
-
-; Check that we can pass 15 arguments in registers.
-; Also check that %r12 (2nd arg) is not spilled.
-define hhvmcc i64 @test_php_all(i64 %a, i64 %b, i64 %c) nounwind {
-entry:
-; CHECK-LABEL: test_php_all:
-; CHECK-NEXT: {{^#.*}}
-; CHECK-NOT: sub
-; CHECK-NOT: sub
-; CHECK-DAG: movl $1, %ebx
-; CHECK-DAG: movl $3, %ebp
-; CHECK-DAG: movl $4, %r15
-; CHECK-DAG: movl $5, %edi
-; CHECK-DAG: movl $6, %esi
-; CHECK-DAG: movl $7, %edx
-; CHECK-DAG: movl $8, %ecx
-; CHECK-DAG: movl $9, %r8
-; CHECK-DAG: movl $10, %r9
-; CHECK-DAG: movl $11, %eax
-; CHECK-DAG: movl $12, %r10
-; CHECK-DAG: movl $13, %r11
-; CHECK-DAG: movl $14, %r13
-; CHECK-DAG: movl $15, %r14
-; CHECK: callq php_all
- %pair = call hhvmcc %rettype @php_all(
- i64 1, i64 %b, i64 3, i64 4, i64 5, i64 6, i64 7,
- i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15)
- %fp = extractvalue %rettype %pair, 1
- %rv = add i64 %fp, %b
- ret i64 %rv
-}
-
-declare hhvmcc void @svcreq(i64, i64, i64, i64, i64, i64, i64, i64, i64, i64,
- i64, i64)
-
-define hhvmcc void @test_svcreq(i64 %a, i64 %b, i64 %c) nounwind {
-entry:
-; CHECK-LABEL: test_svcreq:
-; CHECK-DAG: movl $42, %r10
-; CHECK-DAG: movl $1, %edi
-; CHECK-DAG: movl $2, %esi
-; CHECK-DAG: movl $3, %edx
-; CHECK-DAG: movl $4, %ecx
-; CHECK-DAG: movl $5, %r8
-; CHECK-DAG: movl $6, %r9
-; CHECK: jmp svcreq
- tail call hhvmcc void @svcreq(i64 %a, i64 %b, i64 %c, i64 undef, i64 1,
- i64 2, i64 3, i64 4, i64 5, i64 6, i64 undef,
- i64 42)
- ret void
-}
-
-declare hhvm_ccc void @helper_short(i64, i64, i64, i64, i64, i64, i64)
-
-; Pass all arguments in registers and check that we don't adjust stack
-; for the call.
-define hhvmcc void @test_helper_short(i64 %a, i64 %b, i64 %c) nounwind {
-entry:
-; CHECK-LABEL: test_helper_short:
-; CHECK-NOT: push
-; CHECK-NOT: sub
-; CHECK-DAG: movl $1, %edi
-; CHECK-DAG: movl $2, %esi
-; CHECK-DAG: movl $3, %edx
-; CHECK-DAG: movl $4, %ecx
-; CHECK-DAG: movl $5, %r8
-; CHECK-DAG: movl $6, %r9
-; CHECK: callq helper_short
- call hhvm_ccc void @helper_short(i64 %c, i64 1, i64 2, i64 3, i64 4,
- i64 5, i64 6)
- ret void
-}
-
-declare hhvm_ccc void @helper(i64, i64, i64, i64, i64, i64, i64, i64, i64, i64)
-
-define hhvmcc void @test_helper(i64 %a, i64 %b, i64 %c) nounwind {
-entry:
-; CHECK-LABEL: test_helper:
-; CHECK-DAG: movl $1, %edi
-; CHECK-DAG: movl $2, %esi
-; CHECK-DAG: movl $3, %edx
-; CHECK-DAG: movl $4, %ecx
-; CHECK-DAG: movl $5, %r8
-; CHECK-DAG: movl $6, %r9
-; CHECK: callq helper
- call hhvm_ccc void @helper(i64 %c, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6,
- i64 7, i64 8, i64 9)
- ret void
-}
-
-; When we enter function with HHVM calling convention, the stack is aligned
-; at 16 bytes. This means we align objects on the stack differently and
-; adjust the stack differently for calls.
-declare hhvm_ccc void @stack_helper(i64, i64, i64)
-declare hhvm_ccc void @stack_helper2(<2 x double>, i64)
-
-define hhvmcc void @test_stack_helper(i64 %a, i64 %b, i64 %c) nounwind {
-entry:
-; CHECK-LABEL: test_stack_helper:
-; CHECK-NOT: push
-; CHECK: subq $32, %rsp
-; CHECK: movaps 16(%rsp), %xmm0
-; CHECK: callq stack_helper2
- %t1 = alloca <2 x double>, align 16
- %t2 = alloca i64, align 8
- %t3 = alloca i64, align 8
- %load3 = load i64, ptr%t3
- call hhvm_ccc void @stack_helper(i64 %c, i64 %load3, i64 42)
- %load = load <2 x double>, ptr%t1
- %load2 = load i64, ptr%t2
- call hhvm_ccc void @stack_helper2(<2 x double> %load, i64 %load2)
- ret void
-}
-
-; Check that we are not adjusting the stack before calling the helper.
-define hhvmcc void @test_stack_helper2(i64 %a, i64 %b, i64 %c) nounwind {
-entry:
-; CHECK-LABEL: test_stack_helper2:
-; CHECK-NOT: push
-; CHECK-NOT: subq
- call hhvm_ccc void @stack_helper(i64 %c, i64 7, i64 42)
- ret void
-}
-
...
# Same as above but with multiple RegMask operands per instruction.
-# These regmasks have no real meaning and chosen to allow only single register to be assignable ($r12)
+# These regmasks have no real meaning and are chosen to allow only a single register to be assignable ($rbp)
---
name: test_relocate_multi_regmasks
tracksRegLiveness: true
bb.0.entry:
liveins: $rdi
- ; CHECK: renamable $r12 = STATEPOINT 0, 0, 0, target-flags(x86-plt) 0, 2, 0, 2, 0, 2, 0, 2, 1, renamable $r12(tied-def 0)
+ ; CHECK: renamable $rbp = STATEPOINT 0, 0, 0, target-flags(x86-plt) 0, 2, 0, 2, 0, 2, 0, 2, 1, renamable $rbp(tied-def 0)
%1:gr64 = COPY $rdi
ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
- %1:gr64 = STATEPOINT 0, 0, 0, target-flags(x86-plt) 0, 2, 0, 2, 0, 2, 0, 2, 1, %1(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64_rt_allregs, csr_64_hhvm, implicit-def $rsp, implicit-def $ssp
+ %1:gr64 = STATEPOINT 0, 0, 0, target-flags(x86-plt) 0, 2, 0, 2, 0, 2, 0, 2, 1, %1(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64_rt_allregs, csr_64_cxx_tls_darwin_pe, implicit-def $rsp, implicit-def $ssp
ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
$rax = COPY %1
RET 0, killed $rax
ret void
}
-declare hhvm_ccc void @hhvm_c_callee()
-
-define hhvmcc void @hhvm_caller() {
- call hhvm_ccc void @hhvm_c_callee()
- ret void
-}
-
declare i32 @__gxx_personality_v0(...)
def CConvX86_VectorCall : LLVM_EnumAttrCase<"X86_VectorCall",
"x86_vectorcallcc",
"X86_VectorCall", 80>;
-def CConvHHVM : LLVM_EnumAttrCase<"HHVM", "hhvmcc", "HHVM", 81>;
-def CConvHHVM_C : LLVM_EnumAttrCase<"HHVM_C", "hhvm_ccc", "HHVM_C", 82>;
+def CConvHHVM : LLVM_EnumAttrCase<"DUMMY_HHVM", "hhvmcc", "DUMMY_HHVM", 81>;
+def CConvHHVM_C
+ : LLVM_EnumAttrCase<"DUMMY_HHVM_C", "hhvm_ccc", "DUMMY_HHVM_C", 82>;
def CConvX86_INTR : LLVM_EnumAttrCase<"X86_INTR", "x86_intrcc", "X86_INTR", 83>;
def CConvAVR_INTR : LLVM_EnumAttrCase<"AVR_INTR", "avr_intrcc", "AVR_INTR", 84>;
def CConvAVR_SIGNAL : LLVM_EnumAttrCase<"AVR_SIGNAL", "avr_signalcc",