DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, kScratchReg);
+ break;
+ }
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ break;
+ }
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
__ Push(i.InputRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArmPoke: {
+ int const slot = MiscField::decode(instr->opcode());
+ __ str(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
case kArmStoreWriteBarrier: {
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
V(ArmLdr) \
V(ArmStr) \
V(ArmPush) \
+ V(ArmPoke) \
V(ArmStoreWriteBarrier)
// TODO(turbofan): on ARM it's probably better to use the code object in a
// register if there are multiple uses of it. Improve constant pool and the
// heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
-
- // Push any stack arguments.
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- Emit(kArmPush, g.NoOutput(), g.UseRegister(node));
+ InitializeCallBuffer(node, &buffer, true, true);
+
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
+ if (Node* node = buffer.pushed_nodes[n]) {
+ int const slot = static_cast<int>(n);
+ InstructionOperand value = g.UseRegister(node);
+ Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(), value);
+ }
+ }
+ } else {
+ // Push any stack arguments.
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ Emit(kArmPush, g.NoOutput(), g.UseRegister(node));
+ }
}
// Pass label of exception handler block.
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
break;
- }
case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
default:
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(flags);
// Emit the call instruction.
size_t const output_count = buffer.outputs.size();
return register_parameters[i];
}
static int CRegisterParametersLength() { return 4; }
+ static int CStackBackingStoreLength() { return 0; }
};
__ Jump(x10);
break;
}
+ case kArchPrepareCallCFunction:
+ // We don't need kArchPrepareCallCFunction on arm64 as the instruction
+ // selector already performs a Claim to reserve space on the stack and
+ // guarantees correct alignment of the stack pointer.
+ UNREACHABLE();
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters, 0);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters, 0);
+ }
+ break;
+ }
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
// TODO(turbofan): on ARM64 it's probably better to use the code object in a
// register if there are multiple uses of it. Improve constant pool and the
// heuristics in the register allocator for where to emit constants.
- InitializeCallBuffer(node, &buffer, true, false);
+ InitializeCallBuffer(node, &buffer, true, true);
// Push the arguments to the stack.
int aligned_push_count = static_cast<int>(buffer.pushed_nodes.size());
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
break;
- }
case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
default:
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(flags);
// Emit the call instruction.
size_t const output_count = buffer.outputs.size();
return register_parameters[i];
}
static int CRegisterParametersLength() { return 8; }
+ static int CStackBackingStoreLength() { return 0; }
};
return static_cast<uint8_t>(InputInt32(index) & 0x3F);
}
+ ExternalReference InputExternalReference(size_t index) {
+ return ToExternalReference(instr_->InputAt(index));
+ }
+
Handle<HeapObject> InputHeapObject(size_t index) {
return ToHeapObject(instr_->InputAt(index));
}
double ToDouble(InstructionOperand* op) { return ToConstant(op).ToFloat64(); }
+ ExternalReference ToExternalReference(InstructionOperand* op) {
+ return ToConstant(op).ToExternalReference();
+ }
+
Handle<HeapObject> ToHeapObject(InstructionOperand* op) {
return ToConstant(op).ToHeapObject();
}
Handle<Code> CodeGenerator::GenerateCode() {
CompilationInfo* info = this->info();
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done in AssemblePrologue).
+ FrameScope frame_scope(masm(), StackFrame::MANUAL);
+
// Emit a code line info recording start event.
PositionsRecorder* recorder = masm()->positions_recorder();
LOG_CODE_EVENT(isolate(), CodeStartLinePosInfoRecordEvent(recorder));
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
break;
}
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, i.TempRegister(0));
+ break;
+ }
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ break;
+ }
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
__ push(i.InputOperand(0));
}
break;
+ case kIA32Poke: {
+ int const slot = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ __ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
+ } else {
+ __ mov(Operand(esp, slot * kPointerSize), i.InputRegister(0));
+ }
+ break;
+ }
case kIA32StoreWriteBarrier: {
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
V(IA32Movsd) \
V(IA32Lea) \
V(IA32Push) \
+ V(IA32Poke) \
V(IA32StoreWriteBarrier) \
V(IA32StackCheck)
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(node, &buffer, true, true);
- // Push any stack arguments.
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- // TODO(titzer): handle pushing double parameters.
- InstructionOperand value =
- g.CanBeImmediate(node)
- ? g.UseImmediate(node)
- : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
- Emit(kIA32Push, g.NoOutput(), value);
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ InstructionOperand temps[] = {g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr, temp_count, temps);
+
+ // Poke any stack arguments.
+ for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
+ if (Node* node = buffer.pushed_nodes[n]) {
+ int const slot = static_cast<int>(n);
+ InstructionOperand value =
+ g.CanBeImmediate(node) ? g.UseImmediate(node) : g.UseRegister(node);
+ Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value);
+ }
+ }
+ } else {
+ // Push any stack arguments.
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ // TODO(titzer): handle pushing double parameters.
+ InstructionOperand value =
+ g.CanBeImmediate(node)
+ ? g.UseImmediate(node)
+ : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
+ Emit(kIA32Push, g.NoOutput(), value);
+ }
}
// Pass label of exception handler block.
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
break;
- }
case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
default:
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(flags);
// Emit the call instruction.
size_t const output_count = buffer.outputs.size();
}
static Register CRegisterParameter(int i) { return no_reg; }
static int CRegisterParametersLength() { return 0; }
+ static int CStackBackingStoreLength() { return 0; }
};
typedef LinkageHelper<IA32LinkageHelperTraits> LH;
// Target-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define ARCH_OPCODE_LIST(V) \
- V(ArchCallCodeObject) \
- V(ArchTailCallCodeObject) \
- V(ArchCallJSFunction) \
- V(ArchTailCallJSFunction) \
- V(ArchJmp) \
- V(ArchLookupSwitch) \
- V(ArchTableSwitch) \
- V(ArchNop) \
- V(ArchDeoptimize) \
- V(ArchRet) \
- V(ArchStackPointer) \
- V(ArchFramePointer) \
- V(ArchTruncateDoubleToI) \
- V(CheckedLoadInt8) \
- V(CheckedLoadUint8) \
- V(CheckedLoadInt16) \
- V(CheckedLoadUint16) \
- V(CheckedLoadWord32) \
- V(CheckedLoadFloat32) \
- V(CheckedLoadFloat64) \
- V(CheckedStoreWord8) \
- V(CheckedStoreWord16) \
- V(CheckedStoreWord32) \
- V(CheckedStoreFloat32) \
- V(CheckedStoreFloat64) \
+#define ARCH_OPCODE_LIST(V) \
+ V(ArchCallCodeObject) \
+ V(ArchTailCallCodeObject) \
+ V(ArchCallJSFunction) \
+ V(ArchTailCallJSFunction) \
+ V(ArchPrepareCallCFunction) \
+ V(ArchCallCFunction) \
+ V(ArchJmp) \
+ V(ArchLookupSwitch) \
+ V(ArchTableSwitch) \
+ V(ArchNop) \
+ V(ArchDeoptimize) \
+ V(ArchRet) \
+ V(ArchStackPointer) \
+ V(ArchFramePointer) \
+ V(ArchTruncateDoubleToI) \
+ V(CheckedLoadInt8) \
+ V(CheckedLoadUint8) \
+ V(CheckedLoadInt16) \
+ V(CheckedLoadUint16) \
+ V(CheckedLoadWord32) \
+ V(CheckedLoadFloat32) \
+ V(CheckedLoadFloat64) \
+ V(CheckedStoreWord8) \
+ V(CheckedStoreWord16) \
+ V(CheckedStoreWord32) \
+ V(CheckedStoreFloat32) \
+ V(CheckedStoreFloat64) \
TARGET_ARCH_OPCODE_LIST(V)
enum ArchOpcode {
case CallDescriptor::kCallAddress:
buffer->instruction_args.push_back(
(call_address_immediate &&
- (callee->opcode() == IrOpcode::kInt32Constant ||
- callee->opcode() == IrOpcode::kInt64Constant))
+ callee->opcode() == IrOpcode::kExternalConstant)
? g.UseImmediate(callee)
: g.UseRegister(callee));
break;
// not appear as arguments to the call. Everything else ends up
// as an InstructionOperand argument to the call.
auto iter(call->inputs().begin());
- int pushed_count = 0;
+ size_t pushed_count = 0;
for (size_t index = 0; index < input_count; ++iter, ++index) {
DCHECK(iter != call->inputs().end());
DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
buffer->instruction_args.push_back(op);
}
}
- CHECK_EQ(pushed_count, static_cast<int>(buffer->pushed_nodes.size()));
- DCHECK(static_cast<size_t>(input_count) ==
- (buffer->instruction_args.size() + buffer->pushed_nodes.size() -
- buffer->frame_state_value_count()));
+ DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
+ buffer->frame_state_value_count());
}
// Add register and/or stack parameter(s).
const int parameter_count = static_cast<int>(msig->parameter_count());
+ int stack_offset = LinkageTraits::CStackBackingStoreLength();
for (int i = 0; i < parameter_count; i++) {
if (i < LinkageTraits::CRegisterParametersLength()) {
locations.AddParam(regloc(LinkageTraits::CRegisterParameter(i)));
} else {
- locations.AddParam(stackloc(-1 - i));
+ locations.AddParam(stackloc(-1 - stack_offset));
+ stack_offset++;
}
}
// Returns the kind of this call.
Kind kind() const { return kind_; }
+ // Returns {true} if this descriptor is a call to a C function.
+ bool IsCFunctionCall() const { return kind_ == kCallAddress; }
+
// Returns {true} if this descriptor is a call to a JSFunction.
bool IsJSFunctionCall() const { return kind_ == kCallJSFunction; }
// The number of return values from this call.
size_t ReturnCount() const { return machine_sig_->return_count(); }
+ // The number of C parameters to this call.
+ size_t CParameterCount() const { return machine_sig_->parameter_count(); }
+
// The number of JavaScript parameters to this call, including the receiver
// object.
size_t JSParameterCount() const { return js_param_count_; }
__ Jump(at);
break;
}
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, kScratchReg);
+ break;
+ }
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ break;
+ }
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
- // Possibly align stack here for functions.
- int push_count = buffer.pushed_nodes.size();
- if (push_count > 0) {
- Emit(kMipsStackClaim, g.NoOutput(),
- g.TempImmediate(push_count << kPointerSizeLog2));
- }
- int slot = buffer.pushed_nodes.size() - 1;
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(node),
- g.TempImmediate(slot << kPointerSizeLog2));
- slot--;
+ InitializeCallBuffer(node, &buffer, true, true);
+
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = kCArgSlotCount;
+ for (Node* node : buffer.pushed_nodes) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ g.TempImmediate(slot << kPointerSizeLog2));
+ ++slot;
+ }
+ } else {
+ // Possibly align stack here for functions.
+ int push_count = buffer.pushed_nodes.size();
+ if (push_count > 0) {
+ Emit(kMipsStackClaim, g.NoOutput(),
+ g.TempImmediate(push_count << kPointerSizeLog2));
+ }
+ int slot = buffer.pushed_nodes.size() - 1;
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ g.TempImmediate(slot << kPointerSizeLog2));
+ slot--;
+ }
}
// Pass label of exception handler block.
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
break;
- }
case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
default:
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(flags);
// Emit the call instruction.
size_t const output_count = buffer.outputs.size();
return register_parameters[i];
}
static int CRegisterParametersLength() { return 4; }
+ static int CStackBackingStoreLength() { return 0; }
};
__ Jump(at);
break;
}
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, kScratchReg);
+ break;
+ }
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ break;
+ }
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
- InitializeCallBuffer(node, &buffer, true, false);
+ InitializeCallBuffer(node, &buffer, true, true);
- const int32_t push_count = static_cast<int32_t>(buffer.pushed_nodes.size());
- if (push_count > 0) {
- Emit(kMips64StackClaim, g.NoOutput(),
- g.TempImmediate(push_count << kPointerSizeLog2));
- }
- int32_t slot = push_count - 1;
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
- g.TempImmediate(slot << kPointerSizeLog2));
- slot--;
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = kCArgSlotCount;
+ for (Node* node : buffer.pushed_nodes) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ g.TempImmediate(slot << kPointerSizeLog2));
+ ++slot;
+ }
+ } else {
+ const int32_t push_count = static_cast<int32_t>(buffer.pushed_nodes.size());
+ if (push_count > 0) {
+ Emit(kMips64StackClaim, g.NoOutput(),
+ g.TempImmediate(push_count << kPointerSizeLog2));
+ }
+ int32_t slot = push_count - 1;
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ g.TempImmediate(slot << kPointerSizeLog2));
+ slot--;
+ }
}
// Pass label of exception handler block.
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = kArchCallCodeObject;
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
break;
- }
case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
default:
UNREACHABLE();
return register_parameters[i];
}
static int CRegisterParametersLength() { return 8; }
+ static int CStackBackingStoreLength() { return 0; }
};
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
break;
}
+ case kArchPrepareCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters);
+ break;
+ }
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+ break;
+ }
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
}
}
break;
+ case kX64Poke: {
+ int const slot = MiscField::decode(instr->opcode());
+ if (HasImmediateInput(instr, 0)) {
+ __ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0));
+ } else {
+ __ movq(Operand(rsp, slot * kPointerSize), i.InputRegister(0));
+ }
+ break;
+ }
case kX64StoreWriteBarrier: {
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
V(X64Dec32) \
V(X64Inc32) \
V(X64Push) \
+ V(X64Poke) \
V(X64StoreWriteBarrier) \
V(X64StackCheck)
// Compute InstructionOperands for inputs and outputs.
InitializeCallBuffer(node, &buffer, true, true);
- // Push any stack arguments.
- for (Node* node : base::Reversed(buffer.pushed_nodes)) {
- // TODO(titzer): handle pushing double parameters.
- InstructionOperand value =
- g.CanBeImmediate(node)
- ? g.UseImmediate(node)
- : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
- Emit(kX64Push, g.NoOutput(), value);
+ // Prepare for C function call.
+ if (descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ for (size_t n = 0; n < buffer.pushed_nodes.size(); ++n) {
+ if (Node* node = buffer.pushed_nodes[n]) {
+ int const slot = static_cast<int>(n);
+ InstructionOperand value =
+ g.CanBeImmediate(node) ? g.UseImmediate(node) : g.UseRegister(node);
+ Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
+ }
+ }
+ } else {
+ // Push any stack arguments.
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ // TODO(titzer): handle pushing double parameters.
+ InstructionOperand value =
+ g.CanBeImmediate(node)
+ ? g.UseImmediate(node)
+ : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
+ Emit(kX64Push, g.NoOutput(), value);
+ }
}
// Pass label of exception handler block.
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
+ case CallDescriptor::kCallAddress:
+ opcode =
+ kArchCallCFunction |
+ MiscField::encode(static_cast<int>(descriptor->CParameterCount()));
+ break;
case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject;
+ opcode = kArchCallCodeObject | MiscField::encode(flags);
break;
case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
+ opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
default:
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(flags);
// Emit the call instruction.
size_t const output_count = buffer.outputs.size();
}
}
static int CRegisterParametersLength() { return kWin64 ? 4 : 6; }
+ static int CStackBackingStoreLength() { return kWin64 ? 4 : 0; }
};
typedef LinkageHelper<X64LinkageHelperTraits> LH;
}
}
+
+#if !USE_SIMULATOR
+
+namespace {
+
+// Sentinel returned by foo0 so the test can tell that the C call really ran.
+// NOTE(review): 0xdeadbeef does not fit in int32_t, so this initialization is
+// an implementation-defined conversion -- consider
+// static_cast<int32_t>(0xdeadbeef) to make the intent explicit.
+int32_t const kMagicFoo0 = 0xdeadbeef;
+
+
+// Zero-argument C callee: exercises kArchCallCFunction with no parameters.
+int32_t foo0() { return kMagicFoo0; }
+
+
+// Identity: exercises passing a single argument in a register.
+int32_t foo1(int32_t x) { return x; }
+
+
+// Deliberately non-commutative so swapped argument order would be detected.
+int32_t foo2(int32_t x, int32_t y) { return x - y; }
+
+
+// Eight parameters force some arguments onto the stack on every target,
+// exercising the Poke/StoreToStackSlot paths added in this change.
+int32_t foo8(int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f,
+ int32_t g, int32_t h) {
+ return a + b + c + d + e + f + g + h;
+}
+
+} // namespace
+
+
+// Calls a zero-argument C function through a function pointer loaded from
+// memory and checks that its magic return value round-trips.
+TEST(RunCallCFunction0) {
+ auto* foo0_ptr = &foo0;
+ RawMachineAssemblerTester<int32_t> m;
+ Node* function = m.LoadFromPointer(&foo0_ptr, kMachPtr);
+ m.Return(m.CallCFunction0(kMachInt32, function));
+ CHECK_EQ(kMagicFoo0, m.Call());
+}
+
+
+// Passes one int32 parameter through a C call and checks it comes back
+// unchanged for every value in FOR_INT32_INPUTS.
+TEST(RunCallCFunction1) {
+ auto* foo1_ptr = &foo1;
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ Node* function = m.LoadFromPointer(&foo1_ptr, kMachPtr);
+ m.Return(m.CallCFunction1(kMachInt32, kMachInt32, function, m.Parameter(0)));
+ FOR_INT32_INPUTS(i) {
+ int32_t const expected = *i;
+ CHECK_EQ(expected, m.Call(expected));
+ }
+}
+
+
+// Passes two int32 parameters; foo2 returns x - y, so argument order matters.
+TEST(RunCallCFunction2) {
+ auto* foo2_ptr = &foo2;
+ RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+ Node* function = m.LoadFromPointer(&foo2_ptr, kMachPtr);
+ m.Return(m.CallCFunction2(kMachInt32, kMachInt32, kMachInt32, function,
+ m.Parameter(0), m.Parameter(1)));
+ FOR_INT32_INPUTS(i) {
+ int32_t const x = *i;
+ FOR_INT32_INPUTS(j) {
+ int32_t const y = *j;
+ // NOTE(review): x - y overflows int32_t (UB) for extreme input pairs;
+ // both sides compute it the same way, but wraparound arithmetic would
+ // be strictly well-defined -- confirm against the test harness policy.
+ CHECK_EQ(x - y, m.Call(x, y));
+ }
+ }
+}
+
+
+// Passes the same int32 value as all eight parameters; foo8 sums them, so the
+// expected result is x * 8. Eight arguments force stack-passed parameters on
+// every target.
+TEST(RunCallCFunction8) {
+ auto* foo8_ptr = &foo8;
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ Node* function = m.LoadFromPointer(&foo8_ptr, kMachPtr);
+ Node* param = m.Parameter(0);
+ m.Return(m.CallCFunction8(kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+ kMachInt32, kMachInt32, kMachInt32, kMachInt32,
+ kMachInt32, function, param, param, param, param,
+ param, param, param, param));
+ FOR_INT32_INPUTS(i) {
+ int32_t const x = *i;
+ // NOTE(review): x * 8 (and foo8's sum) overflows int32_t for large |x|,
+ // which is UB for signed integers -- consider wraparound arithmetic.
+ CHECK_EQ(x * 8, m.Call(x));
+ }
+}
+
+#endif // USE_SIMULATOR
+
#endif // V8_TURBOFAN_TARGET
}
+// Builds and schedules a call node for a C function taking no arguments.
+// |return_type| describes the C return value; |function| is a node holding
+// the callee address. Returns the scheduled call node.
+Node* RawMachineAssembler::CallCFunction0(MachineType return_type,
+ Node* function) {
+ MachineSignature::Builder builder(zone(), 1, 0);
+ builder.AddReturn(return_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ Node* call = graph()->NewNode(common()->Call(descriptor), function);
+ schedule()->AddNode(CurrentBlock(), call);
+ return call;
+}
+
+
+// Builds and schedules a call node for a one-argument C function. The
+// machine types describe the C signature used to build the simplified C
+// call descriptor. Returns the scheduled call node.
+Node* RawMachineAssembler::CallCFunction1(MachineType return_type,
+ MachineType arg0_type, Node* function,
+ Node* arg0) {
+ MachineSignature::Builder builder(zone(), 1, 1);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ Node* call = graph()->NewNode(common()->Call(descriptor), function, arg0);
+ schedule()->AddNode(CurrentBlock(), call);
+ return call;
+}
+
+
+// Builds and schedules a call node for a two-argument C function. Arguments
+// are added to the signature in positional order. Returns the scheduled
+// call node.
+Node* RawMachineAssembler::CallCFunction2(MachineType return_type,
+ MachineType arg0_type,
+ MachineType arg1_type, Node* function,
+ Node* arg0, Node* arg1) {
+ MachineSignature::Builder builder(zone(), 1, 2);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ builder.AddParam(arg1_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ Node* call =
+ graph()->NewNode(common()->Call(descriptor), function, arg0, arg1);
+ schedule()->AddNode(CurrentBlock(), call);
+ return call;
+}
+
+
+// Builds and schedules a call node for an eight-argument C function. Eight
+// parameters exceed the register-parameter count on all supported targets,
+// so this exercises stack-passed C arguments. Returns the scheduled call
+// node.
+Node* RawMachineAssembler::CallCFunction8(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, MachineType arg6_type, MachineType arg7_type,
+ Node* function, Node* arg0, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5, Node* arg6, Node* arg7) {
+ MachineSignature::Builder builder(zone(), 1, 8);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ builder.AddParam(arg1_type);
+ builder.AddParam(arg2_type);
+ builder.AddParam(arg3_type);
+ builder.AddParam(arg4_type);
+ builder.AddParam(arg5_type);
+ builder.AddParam(arg6_type);
+ builder.AddParam(arg7_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ Node* call = graph()->NewNode(common()->Call(descriptor), function, arg0,
+ arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ schedule()->AddNode(CurrentBlock(), call);
+ return call;
+}
+
+
void RawMachineAssembler::Bind(Label* label) {
DCHECK(current_block_ == NULL);
DCHECK(!label->bound_);
// Call to a runtime function with zero parameters.
Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context,
Node* frame_state);
+ // Call to a C function with zero parameters.
+ Node* CallCFunction0(MachineType return_type, Node* function);
+ // Call to a C function with one parameter.
+ Node* CallCFunction1(MachineType return_type, MachineType arg0_type,
+ Node* function, Node* arg0);
+ // Call to a C function with two parameters.
+ Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, Node* function, Node* arg0,
+ Node* arg1);
+ // Call to a C function with eight parameters.
+ Node* CallCFunction8(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, MachineType arg6_type,
+ MachineType arg7_type, Node* function, Node* arg0,
+ Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5, Node* arg6, Node* arg7);
void Return(Node* value);
void Bind(Label* label);
void Deoptimize(Node* state);