namespace compiler {
// Classes of immediate operand accepted by ARM64 instructions; used by the
// instruction selector to decide whether a constant node can be encoded
// directly into an instruction instead of being materialized in a register.
enum ImmediateMode {
  kArithmeticImm,  // 12 bit unsigned immediate shifted left 0 or 12 bits
  kShift32Imm,     // 0 - 31
  kShift64Imm,     // 0 - 63
  kLogical32Imm,
  kLogical64Imm,
  kLoadStoreImm8,  // signed 8 bit or 12 bit unsigned scaled by access size
  kLoadStoreImm16,
  kLoadStoreImm32,
  kLoadStoreImm64,
  kNoImmediate
};
// Adds Arm64-specific methods for generating operands.
-class Arm64OperandGenerator V8_FINAL : public OperandGenerator {
+class Arm64OperandGenerator FINAL : public OperandGenerator {
public:
explicit Arm64OperandGenerator(InstructionSelector* selector)
: OperandGenerator(selector) {}
bool CanBeImmediate(Node* node, ImmediateMode mode) {
int64_t value;
- switch (node->opcode()) {
- // TODO(turbofan): SMI number constants as immediates.
- case IrOpcode::kInt32Constant:
- value = ValueOf<int32_t>(node->op());
- break;
- default:
- return false;
- }
+ if (node->opcode() == IrOpcode::kInt32Constant)
+ value = OpParameter<int32_t>(node);
+ else if (node->opcode() == IrOpcode::kInt64Constant)
+ value = OpParameter<int64_t>(node);
+ else
+ return false;
unsigned ignored;
switch (mode) {
case kLogical32Imm:
case kLogical64Imm:
return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
&ignored, &ignored, &ignored);
- case kArithimeticImm:
+ case kArithmeticImm:
// TODO(dcarney): -values can be handled by instruction swapping
return Assembler::IsImmAddSub(value);
case kShift32Imm:
- return 0 <= value && value < 31;
+ return 0 <= value && value < 32;
case kShift64Imm:
- return 0 <= value && value < 63;
- case kLoadStoreImm:
- return (0 <= value && value < (1 << 9)) ||
- (-(1 << 6) <= value && value < (1 << 6));
+ return 0 <= value && value < 64;
+ case kLoadStoreImm8:
+ return IsLoadStoreImmediate(value, LSByte);
+ case kLoadStoreImm16:
+ return IsLoadStoreImmediate(value, LSHalfword);
+ case kLoadStoreImm32:
+ return IsLoadStoreImmediate(value, LSWord);
+ case kLoadStoreImm64:
+ return IsLoadStoreImmediate(value, LSDoubleWord);
case kNoImmediate:
return false;
}
return false;
}
-};
-
-static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
- Arm64OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
-}
+ private:
+ bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
+ return Assembler::IsImmLSScaled(value, size) ||
+ Assembler::IsImmLSUnscaled(value);
+ }
+};
static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
// Shared routine: emits |opcode| with one register-defined output and two
// register inputs. Used for float64 binary operations (FP values are handled
// through the same virtual-register operand interface here).
static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                            Node* node) {
  Arm64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}
// Shared routine for multiple binary operations.
// NOTE(review): diff fragment — the code that fills |inputs|/|outputs| and
// computes input_count/output_count is elided between the array declarations
// and the DCHECKs below; this span is not the complete function body.
+template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, ImmediateMode operand_mode,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
// The matcher type (Int32BinopMatcher/Int64BinopMatcher) becomes a template
// parameter so one routine serves both 32- and 64-bit binops.
- Int32BinopMatcher m(node);
+ Matcher m(node);
InstructionOperand* inputs[4];
size_t input_count = 0;
InstructionOperand* outputs[2];
DCHECK_NE(0, input_count);
DCHECK_NE(0, output_count);
// arraysize replaces the old ARRAY_SIZE macro.
- DCHECK_GE(ARRAY_SIZE(inputs), input_count);
- DCHECK_GE(ARRAY_SIZE(outputs), output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
outputs, input_count, inputs);
// Shared routine for multiple binary operations.
+template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
ArchOpcode opcode, ImmediateMode operand_mode) {
FlagsContinuation cont;
- VisitBinop(selector, node, opcode, operand_mode, &cont);
+ VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
}
// Selects the ARM64 load instruction for |node| from its load representation
// and type, preferring an immediate-offset addressing mode (MRI) when the
// index is a constant encodable for the access size.
void InstructionSelector::VisitLoad(Node* node) {
  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
  Arm64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  ArchOpcode opcode;
  ImmediateMode immediate_mode = kNoImmediate;
  switch (rep) {
    case kRepFloat32:
      opcode = kArm64LdrS;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepFloat64:
      opcode = kArm64LdrD;
      immediate_mode = kLoadStoreImm64;
      break;
    case kRepBit:  // Fall through.
    case kRepWord8:
      // Sign-extending load for signed types, zero-extending otherwise.
      opcode = typ == kTypeInt32 ? kArm64Ldrsb : kArm64Ldrb;
      immediate_mode = kLoadStoreImm8;
      break;
    case kRepWord16:
      opcode = typ == kTypeInt32 ? kArm64Ldrsh : kArm64Ldrh;
      immediate_mode = kLoadStoreImm16;
      break;
    case kRepWord32:
      opcode = kArm64LdrW;
      immediate_mode = kLoadStoreImm32;
      break;
    case kRepTagged:  // Fall through.
    case kRepWord64:
      opcode = kArm64Ldr;
      immediate_mode = kLoadStoreImm64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(index, immediate_mode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    Emit(opcode | AddressingModeField::encode(kMode_MRR),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
  }
}
// NOTE(review): diff fragment — this is the tail of
// InstructionSelector::VisitStore(Node* node); the function header and the
// base/index operand setup are elided above this span.
Node* value = node->InputAt(2);
StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
- MachineType rep = store_rep.rep;
- if (store_rep.write_barrier_kind == kFullWriteBarrier) {
- DCHECK(rep == kMachineTagged);
+ MachineType rep = RepresentationOf(store_rep.machine_type());
+ if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+ DCHECK(rep == kRepTagged);
// TODO(dcarney): refactor RecordWrite function to take temp registers
// and pass them here instead of using fixed regs
// TODO(dcarney): handle immediate indices.
InstructionOperand* temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
// Stores that need a full write barrier go through a dedicated pseudo-op
// using fixed registers x10-x12.
Emit(kArm64StoreWriteBarrier, NULL, g.UseFixed(base, x10),
- g.UseFixed(index, x11), g.UseFixed(value, x12), ARRAY_SIZE(temps),
+ g.UseFixed(index, x11), g.UseFixed(value, x12), arraysize(temps),
temps);
return;
}
- DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
- InstructionOperand* val;
- if (rep == kMachineFloat64) {
- val = g.UseDoubleRegister(value);
- } else {
- val = g.UseRegister(value);
- }
+ DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
ArchOpcode opcode;
+ ImmediateMode immediate_mode = kNoImmediate;
// Pick the store opcode and the immediate-offset mode that matches the
// representation's access size.
switch (rep) {
- case kMachineFloat64:
- opcode = kArm64Float64Store;
+ case kRepFloat32:
+ opcode = kArm64StrS;
+ immediate_mode = kLoadStoreImm32;
+ break;
+ case kRepFloat64:
+ opcode = kArm64StrD;
+ immediate_mode = kLoadStoreImm64;
break;
- case kMachineWord8:
- opcode = kArm64StoreWord8;
+ case kRepBit: // Fall through.
+ case kRepWord8:
+ opcode = kArm64Strb;
+ immediate_mode = kLoadStoreImm8;
break;
- case kMachineWord16:
- opcode = kArm64StoreWord16;
+ case kRepWord16:
+ opcode = kArm64Strh;
+ immediate_mode = kLoadStoreImm16;
break;
- case kMachineWord32:
- opcode = kArm64StoreWord32;
+ case kRepWord32:
+ opcode = kArm64StrW;
+ immediate_mode = kLoadStoreImm32;
break;
- case kMachineTagged: // Fall through.
- case kMachineWord64:
- opcode = kArm64StoreWord64;
+ case kRepTagged: // Fall through.
+ case kRepWord64:
+ opcode = kArm64Str;
+ immediate_mode = kLoadStoreImm64;
break;
default:
UNREACHABLE();
return;
}
// Prefer an immediate offset (MRI) when the index constant is encodable.
- if (g.CanBeImmediate(index, kLoadStoreImm)) {
+ if (g.CanBeImmediate(index, immediate_mode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
- g.UseRegister(base), g.UseImmediate(index), val);
- } else if (g.CanBeImmediate(base, kLoadStoreImm)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
- g.UseRegister(index), g.UseImmediate(base), val);
+ g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
} else {
Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
- g.UseRegister(base), g.UseRegister(index), val);
+ g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
}
}
// 32-bit bitwise AND; constants must encode as 32-bit logical immediates.
void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop<Int32BinopMatcher>(this, node, kArm64And32, kLogical32Imm);
}
// 64-bit bitwise AND; constants must encode as 64-bit logical immediates.
void InstructionSelector::VisitWord64And(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kArm64And, kLogical64Imm);
}
// 32-bit bitwise OR; constants must encode as 32-bit logical immediates.
void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Or32, kLogical32Imm);
}
// 64-bit bitwise OR; constants must encode as 64-bit logical immediates.
void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Or, kLogical64Imm);
}
-template <typename T>
-static void VisitXor(InstructionSelector* selector, Node* node,
- ArchOpcode xor_opcode, ArchOpcode not_opcode) {
- Arm64OperandGenerator g(selector);
- BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ Arm64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
if (m.right().Is(-1)) {
- selector->Emit(not_opcode, g.DefineAsRegister(node),
- g.UseRegister(m.left().node()));
+ Emit(kArm64Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
} else {
- VisitBinop(selector, node, xor_opcode, kLogical32Imm);
+ VisitBinop<Int32BinopMatcher>(this, node, kArm64Xor32, kLogical32Imm);
}
}
-void InstructionSelector::VisitWord32Xor(Node* node) {
- VisitXor<int32_t>(this, node, kArm64Xor32, kArm64Not32);
-}
-
-
// 64-bit XOR. x ^ -1 is bitwise NOT, so emit mvn (kArm64Not) for that
// pattern instead of an eor with an all-ones immediate.
void InstructionSelector::VisitWord64Xor(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.right().Is(-1)) {
    Emit(kArm64Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
  } else {
    // Use the 64-bit logical-immediate class: kLogical32Imm (as in the diff)
    // would validate constants against the wrong register width for a
    // 64-bit eor.
    VisitBinop<Int64BinopMatcher>(this, node, kArm64Xor, kLogical64Imm);
  }
}
}
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitRRO(this, kArm64Ror32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Ror(Node* node) {
+ VisitRRO(this, kArm64Ror, node, kShift64Imm);
+}
+
+
// 32-bit integer add; constants may encode as add/sub arithmetic immediates.
void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm);
}
// 64-bit integer add; constants may encode as add/sub arithmetic immediates.
void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm);
}
-template <typename T>
-static void VisitSub(InstructionSelector* selector, Node* node,
- ArchOpcode sub_opcode, ArchOpcode neg_opcode) {
- Arm64OperandGenerator g(selector);
- BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ Arm64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
if (m.left().Is(0)) {
- selector->Emit(neg_opcode, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()));
+ Emit(kArm64Neg32, g.DefineAsRegister(node),
+ g.UseRegister(m.right().node()));
} else {
- VisitBinop(selector, node, sub_opcode, kArithimeticImm);
+ VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm);
}
}
-void InstructionSelector::VisitInt32Sub(Node* node) {
- VisitSub<int32_t>(this, node, kArm64Sub32, kArm64Neg32);
-}
-
-
// 64-bit integer subtract. 0 - x is negation, so emit neg (kArm64Neg)
// for that pattern.
void InstructionSelector::VisitInt64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().Is(0)) {
    Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
  } else {
    VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm);
  }
}
}
-void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
- VisitRR(this, kArm64Int32ToInt64, node);
-}
-
-
-void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
- VisitRR(this, kArm64Int64ToInt32, node);
-}
-
-
// Signed int32 -> float64 conversion (scvtf).
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Int32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
// Unsigned int32 -> float64 conversion (ucvtf).
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Uint32ToFloat64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
// Float64 -> signed int32 conversion.
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
// Float64 -> unsigned int32 conversion.
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64ToUint32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}
+
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Sxtw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
// Float64 modulus: ARM64 has no fmod instruction, so this lowers to a call
// into the C library with operands pinned to d0/d1 per the calling
// convention; the instruction is marked as a call.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
       g.UseFixed(node->InputAt(0), d0),
       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Float64Sqrt, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
}
// 32-bit add that also produces an overflow flag via the continuation.
void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, cont);
}
// 32-bit subtract that also produces an overflow flag via the continuation.
void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, cont);
}
// NOTE(review): diff fragment — presumably the middle of a shared
// VisitWordCompare helper; its signature and the |left| setup are elided
// above, and the register/register fallback is elided below. TODO confirm.
Node* right = node->InputAt(1);
// Match immediates on left or right side of comparison.
- if (g.CanBeImmediate(right, kArithimeticImm)) {
+ if (g.CanBeImmediate(right, kArithmeticImm)) {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
cont);
- } else if (g.CanBeImmediate(left, kArithimeticImm)) {
+ } else if (g.CanBeImmediate(left, kArithmeticImm)) {
// Swapping operands requires commuting the continuation's condition when
// the comparison itself is not commutative.
if (!commutative) cont->Commute();
VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
cont);
// NOTE(review): diff fragment — the default branch and the end of this
// function are elided below this span.
void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
switch (node->opcode()) {
// Testing an add/sub/and against zero maps onto the flag-setting compare
// forms (cmn/cmp/tst) so no separate result register is needed.
+ case IrOpcode::kInt32Add:
+ return VisitWordCompare(this, node, kArm64Cmn32, cont, true);
+ case IrOpcode::kInt32Sub:
+ return VisitWordCompare(this, node, kArm64Cmp32, cont, false);
case IrOpcode::kWord32And:
return VisitWordCompare(this, node, kArm64Tst32, cont, true);
default:
// NOTE(review): diff fragment — presumably the body of the float64
// comparison visitor; the function header is elided above. TODO confirm.
Arm64OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
// FP operands now go through the unified register-operand interface.
- VisitCompare(this, kArm64Float64Cmp, g.UseDoubleRegister(left),
- g.UseDoubleRegister(right), cont);
+ VisitCompare(this, kArm64Float64Cmp, g.UseRegister(left),
+ g.UseRegister(right), cont);
}
// NOTE(review): diff fragment — tail of InstructionSelector::VisitCall(call,
// continuation, deoptimization); the signature's first parameters and parts
// of the argument-pushing loop are elided.
BasicBlock* deoptimization) {
Arm64OperandGenerator g(this);
CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
- CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here?
+
// A frame state descriptor is only built when the call can deoptimize.
+ FrameStateDescriptor* frame_state_descriptor = NULL;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
// TODO(turbofan): on ARM64 it's probably better to use the code object in a
// register if there are multiple uses of it. Improve constant pool and the
// heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(call, &buffer, true, false, continuation,
- deoptimization);
+ InitializeCallBuffer(call, &buffer, true, false);
// Push the arguments to the stack.
// C-address calls were removed here; only code-object and JS-function
// calls remain, so the C-frame alignment bookkeeping is gone.
- bool is_c_frame = descriptor->kind() == CallDescriptor::kCallAddress;
- bool pushed_count_uneven = buffer.pushed_count & 1;
- int aligned_push_count = buffer.pushed_count;
- if (is_c_frame && pushed_count_uneven) {
- aligned_push_count++;
- }
+ bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
+ int aligned_push_count = buffer.pushed_nodes.size();
// TODO(dcarney): claim and poke probably take small immediates,
// loop here or whatever.
// Bump the stack pointer(s).
}
// Move arguments to the stack.
{
- int slot = buffer.pushed_count - 1;
+ int slot = buffer.pushed_nodes.size() - 1;
// Emit the uneven pushes.
if (pushed_count_uneven) {
Node* input = buffer.pushed_nodes[slot];
- ArchOpcode opcode = is_c_frame ? kArm64PokePairZero : kArm64Poke;
- Emit(opcode | MiscField::encode(slot), NULL, g.UseRegister(input));
+ Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input));
slot--;
}
// Now all pushes can be done in pairs.
// Select the architecture-independent call opcode for the descriptor kind.
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
- bool lazy_deopt = descriptor->CanLazilyDeoptimize();
- opcode = kArm64CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
+ opcode = kArchCallCodeObject;
break;
}
- case CallDescriptor::kCallAddress:
- opcode = kArm64CallAddress;
- break;
case CallDescriptor::kCallJSFunction:
- opcode = kArm64CallJSFunction;
+ opcode = kArchCallJSFunction;
break;
default:
UNREACHABLE();
return;
}
// Descriptor flags (e.g. deopt support) travel in the MiscField now.
+ opcode |= MiscField::encode(descriptor->flags());
// Emit the call instruction.
Instruction* call_instr =
- Emit(opcode, buffer.output_count, buffer.outputs,
- buffer.fixed_and_control_count(), buffer.fixed_and_control_args);
+ Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+ buffer.instruction_args.size(), &buffer.instruction_args.front());
call_instr->MarkAsCall();
if (deoptimization != NULL) {
DCHECK(continuation != NULL);
call_instr->MarkAsControl();
}
-
- // Caller clean up of stack for C-style calls.
- if (is_c_frame && aligned_push_count > 0) {
- DCHECK(deoptimization == NULL && continuation == NULL);
- Emit(kArm64Drop | MiscField::encode(aligned_push_count), NULL);
- }
}
} // namespace compiler