"src/compiler/source-position.h",
"src/compiler/state-values-utils.cc",
"src/compiler/state-values-utils.h",
+ "src/compiler/tail-call-optimization.cc",
+ "src/compiler/tail-call-optimization.h",
"src/compiler/typer.cc",
"src/compiler/typer.h",
"src/compiler/value-numbering-reducer.cc",
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler,
- CallMode call_mode) {
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
ArmOperandGenerator g(this);
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
- FrameStateDescriptor* frame_state_descriptor = NULL;
+ FrameStateDescriptor* frame_state_descriptor = nullptr;
if (descriptor->NeedsFrameState()) {
frame_state_descriptor =
GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
// Compute InstructionOperands for inputs and outputs.
- // TODO(turbofan): on ARM64 it's probably better to use the code object in a
+ // TODO(turbofan): on ARM it's probably better to use the code object in a
// register if there are multiple uses of it. Improve constant pool and the
// heuristics in the register allocator for where to emit constants.
InitializeCallBuffer(node, &buffer, true, false);
- // TODO(dcarney): might be possible to use claim/poke instead
// Push any stack arguments.
for (Node* node : base::Reversed(buffer.pushed_nodes)) {
Emit(kArmPush, g.NoOutput(), g.UseRegister(node));
// Pass label of exception handler block.
CallDescriptor::Flags flags = descriptor->flags();
- if (handler != nullptr) {
+ if (handler) {
flags |= CallDescriptor::kHasExceptionHandler;
buffer.instruction_args.push_back(g.Label(handler));
}
// Select the appropriate opcode based on the call type.
- bool is_tail_call = call_mode == TAIL_CALL;
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
- opcode = is_tail_call ? kArchTailCallCodeObject : kArchCallCodeObject;
+ opcode = kArchCallCodeObject;
break;
}
case CallDescriptor::kCallJSFunction:
- opcode = is_tail_call ? kArchTailCallJSFunction : kArchCallJSFunction;
+ opcode = kArchCallJSFunction;
break;
default:
UNREACHABLE();
opcode |= MiscField::encode(flags);
// Emit the call instruction.
- size_t size = is_tail_call ? 0 : buffer.outputs.size();
- InstructionOperand* first_output =
- size > 0 ? &buffer.outputs.front() : nullptr;
- Instruction* call_instr =
- Emit(opcode, size, first_output, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- call_instr->MarkAsCall();
+ size_t const output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitTailCall(Node* node) {
+ ArmOperandGenerator g(this);
+ CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
+ DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
+
+ // TODO(turbofan): Relax restriction for stack parameters.
+ if (descriptor->UsesOnlyRegisters() &&
+ descriptor->HasSameReturnLocationsAs(
+ linkage()->GetIncomingDescriptor())) {
+ CallBuffer buffer(zone(), descriptor, nullptr);
+
+ // Compute InstructionOperands for inputs and outputs.
+ // TODO(turbofan): on ARM it's probably better to use the code object in a
+ // register if there are multiple uses of it. Improve constant pool and the
+ // heuristics in the register allocator for where to emit constants.
+ InitializeCallBuffer(node, &buffer, true, false);
+
+ DCHECK_EQ(0u, buffer.pushed_nodes.size());
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchTailCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the tailcall instruction.
+ Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
+ &buffer.instruction_args.front());
+ } else {
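+    // The call cannot be emitted as a proper tail call here; lower it to a
+    // normal call followed by a return of the call's results instead.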
+ FrameStateDescriptor* frame_state_descriptor =
+ descriptor->NeedsFrameState()
+ ? GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())))
+ : nullptr;
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ // TODO(turbofan): on ARM it's probably better to use the code object in a
+ // register if there are multiple uses of it. Improve constant pool and the
+ // heuristics in the register allocator for where to emit constants.
+ InitializeCallBuffer(node, &buffer, true, false);
+
+ // Push any stack arguments.
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ Emit(kArmPush, g.NoOutput(), g.UseRegister(node));
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ opcode = kArchCallCodeObject;
+ break;
+ }
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ size_t const output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())->MarkAsCall();
+ Emit(kArchRet, 0, nullptr, output_count, outputs);
+ }
}
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler,
- CallMode call_mode) {
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
Arm64OperandGenerator g(this);
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
- FrameStateDescriptor* frame_state_descriptor = NULL;
+ FrameStateDescriptor* frame_state_descriptor = nullptr;
if (descriptor->NeedsFrameState()) {
frame_state_descriptor =
GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
}
// Select the appropriate opcode based on the call type.
- bool is_tail_call = call_mode == TAIL_CALL;
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
- opcode = is_tail_call ? kArchTailCallCodeObject : kArchCallCodeObject;
+ opcode = kArchCallCodeObject;
break;
}
case CallDescriptor::kCallJSFunction:
- opcode = is_tail_call ? kArchTailCallJSFunction : kArchCallJSFunction;
+ opcode = kArchCallJSFunction;
break;
default:
UNREACHABLE();
opcode |= MiscField::encode(flags);
// Emit the call instruction.
- size_t size = is_tail_call ? 0 : buffer.outputs.size();
- InstructionOperand* first_output =
- size > 0 ? &buffer.outputs.front() : nullptr;
- Instruction* call_instr =
- Emit(opcode, size, first_output, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- call_instr->MarkAsCall();
+ size_t const output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitTailCall(Node* node) {
+ Arm64OperandGenerator g(this);
+ const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+ DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
+
+ // TODO(turbofan): Relax restriction for stack parameters.
+ if (descriptor->UsesOnlyRegisters() &&
+ descriptor->HasSameReturnLocationsAs(
+ linkage()->GetIncomingDescriptor())) {
+ CallBuffer buffer(zone(), descriptor, nullptr);
+
+ // Compute InstructionOperands for inputs and outputs.
+ // TODO(turbofan): on ARM64 it's probably better to use the code object in a
+ // register if there are multiple uses of it. Improve constant pool and the
+ // heuristics in the register allocator for where to emit constants.
+ InitializeCallBuffer(node, &buffer, true, false);
+
+ DCHECK_EQ(0u, buffer.pushed_nodes.size());
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchTailCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the tailcall instruction.
+ Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
+ &buffer.instruction_args.front());
+ } else {
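+    // The call cannot be emitted as a proper tail call here; lower it to a
+    // normal call followed by a return of the call's results instead.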
+ FrameStateDescriptor* frame_state_descriptor = nullptr;
+ if (descriptor->NeedsFrameState()) {
+ frame_state_descriptor =
+ GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
+ }
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ // TODO(turbofan): on ARM64 it's probably better to use the code object in a
+ // register if there are multiple uses of it. Improve constant pool and the
+ // heuristics in the register allocator for where to emit constants.
+ InitializeCallBuffer(node, &buffer, true, false);
+
+ // Push the arguments to the stack.
+ bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
+ int aligned_push_count = buffer.pushed_nodes.size();
+ // TODO(dcarney): claim and poke probably take small immediates,
+ // loop here or whatever.
+ // Bump the stack pointer(s).
+ if (aligned_push_count > 0) {
+ // TODO(dcarney): it would be better to bump the csp here only
+ // and emit paired stores with increment for non c frames.
+ Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(aligned_push_count));
+ }
+ // Move arguments to the stack.
+ {
+ int slot = buffer.pushed_nodes.size() - 1;
+ // Emit the uneven pushes.
+ if (pushed_count_uneven) {
+ Node* input = buffer.pushed_nodes[slot];
+ Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input),
+ g.TempImmediate(slot));
+ slot--;
+ }
+ // Now all pushes can be done in pairs.
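+      // E.g. with five pushed nodes, the node in slot 4 was poked alone
+      // above, and slots (3, 2) and (1, 0) are now poked as pairs.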
+ for (; slot >= 0; slot -= 2) {
+ Emit(kArm64PokePair, g.NoOutput(),
+ g.UseRegister(buffer.pushed_nodes[slot]),
+ g.UseRegister(buffer.pushed_nodes[slot - 1]),
+ g.TempImmediate(slot));
+ }
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ opcode = kArchCallCodeObject;
+ break;
+ }
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ size_t const output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())->MarkAsCall();
+ Emit(kArchRet, 0, nullptr, output_count, outputs);
+ }
}
const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
class CallOperator final : public Operator1<const CallDescriptor*> {
public:
- CallOperator(const CallDescriptor* descriptor, const char* mnemonic)
+ explicit CallOperator(const CallDescriptor* descriptor)
: Operator1<const CallDescriptor*>(
- IrOpcode::kCall, descriptor->properties(), mnemonic,
+ IrOpcode::kCall, descriptor->properties(), "Call",
descriptor->InputCount() + descriptor->FrameStateCount(),
Operator::ZeroIfPure(descriptor->properties()),
Operator::ZeroIfEliminatable(descriptor->properties()),
os << "[" << *parameter() << "]";
}
};
- return new (zone()) CallOperator(descriptor, "Call");
+ return new (zone()) CallOperator(descriptor);
+}
+
+
+const Operator* CommonOperatorBuilder::TailCall(
+ const CallDescriptor* descriptor) {
+ class TailCallOperator final : public Operator1<const CallDescriptor*> {
+ public:
+ explicit TailCallOperator(const CallDescriptor* descriptor)
+ : Operator1<const CallDescriptor*>(
+ IrOpcode::kTailCall, descriptor->properties(), "TailCall",
+ descriptor->InputCount() + descriptor->FrameStateCount(), 1, 1, 0,
+ 0, 1, descriptor) {}
+
+ void PrintParameter(std::ostream& os) const override {
+ os << "[" << *parameter() << "]";
+ }
+ };
+ return new (zone()) TailCallOperator(descriptor);
}
OutputFrameStateCombine state_combine,
MaybeHandle<JSFunction> jsfunction = MaybeHandle<JSFunction>());
const Operator* Call(const CallDescriptor* descriptor);
+ const Operator* TailCall(const CallDescriptor* descriptor);
const Operator* Projection(size_t index);
// Constructs a new merge or phi operator with the same opcode as {op}, but
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler,
- CallMode call_mode) {
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
IA32OperandGenerator g(this);
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
- FrameStateDescriptor* frame_state_descriptor = NULL;
-
+ FrameStateDescriptor* frame_state_descriptor = nullptr;
if (descriptor->NeedsFrameState()) {
frame_state_descriptor =
GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
// Pass label of exception handler block.
CallDescriptor::Flags flags = descriptor->flags();
- if (handler != nullptr) {
+ if (handler) {
flags |= CallDescriptor::kHasExceptionHandler;
buffer.instruction_args.push_back(g.Label(handler));
}
// Select the appropriate opcode based on the call type.
- bool is_tail_call = call_mode == TAIL_CALL;
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
- opcode = is_tail_call ? kArchTailCallCodeObject : kArchCallCodeObject;
+ opcode = kArchCallCodeObject;
break;
}
case CallDescriptor::kCallJSFunction:
- opcode = is_tail_call ? kArchTailCallJSFunction : kArchCallJSFunction;
+ opcode = kArchCallJSFunction;
break;
default:
UNREACHABLE();
opcode |= MiscField::encode(flags);
// Emit the call instruction.
- size_t size = is_tail_call ? 0 : buffer.outputs.size();
- InstructionOperand* first_output =
- size > 0 ? &buffer.outputs.front() : nullptr;
- Instruction* call_instr =
- Emit(opcode, size, first_output, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- call_instr->MarkAsCall();
+ size_t const output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitTailCall(Node* node) {
+ IA32OperandGenerator g(this);
+ CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
+ DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
+
+ // TODO(turbofan): Relax restriction for stack parameters.
+ if (descriptor->UsesOnlyRegisters() &&
+ descriptor->HasSameReturnLocationsAs(
+ linkage()->GetIncomingDescriptor())) {
+ CallBuffer buffer(zone(), descriptor, nullptr);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, true);
+
+ DCHECK_EQ(0u, buffer.pushed_nodes.size());
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchTailCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the tailcall instruction.
+ Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
+ &buffer.instruction_args.front());
+ } else {
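+    // The call cannot be emitted as a proper tail call here; lower it to a
+    // normal call followed by a return of the call's results instead.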
+ FrameStateDescriptor* frame_state_descriptor =
+ descriptor->NeedsFrameState()
+ ? GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())))
+ : nullptr;
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, true);
+
+ // Push any stack arguments.
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ // TODO(titzer): Handle pushing double parameters.
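+      // Prefer an immediate when possible; on Atom require a register,
+      // otherwise any operand (including a memory slot) may be pushed.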
+ InstructionOperand value =
+ g.CanBeImmediate(node)
+ ? g.UseImmediate(node)
+ : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
+ Emit(kIA32Push, g.NoOutput(), value);
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+    size_t const output_count = buffer.outputs.size();
+    auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())->MarkAsCall();
+ Emit(kArchRet, 0, nullptr, output_count, outputs);
+ }
}
#include "src/base/adapters.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/schedule.h"
#include "src/compiler/state-values-utils.h"
bool call_code_immediate,
bool call_address_immediate) {
OperandGenerator g(this);
- DCHECK_EQ(call->op()->ValueOutputCount(),
+ DCHECK_LE(call->op()->ValueOutputCount(),
static_cast<int>(buffer->descriptor->ReturnCount()));
DCHECK_EQ(
call->op()->ValueInputCount(),
DCHECK_EQ(IrOpcode::kCall, input->opcode());
BasicBlock* success = block->SuccessorAt(0);
BasicBlock* exception = block->SuccessorAt(1);
- return VisitCall(input, exception, NORMAL_CALL), VisitGoto(success);
+ return VisitCall(input, exception), VisitGoto(success);
+ }
+ case BasicBlock::kTailCall: {
+ DCHECK_EQ(IrOpcode::kTailCall, input->opcode());
+ return VisitTailCall(input);
}
case BasicBlock::kBranch: {
DCHECK_EQ(IrOpcode::kBranch, input->opcode());
}
case BasicBlock::kReturn: {
DCHECK_EQ(IrOpcode::kReturn, input->opcode());
- return VisitReturn(input);
+ return VisitReturn(input->InputAt(0));
}
case BasicBlock::kDeoptimize: {
// If the result itself is a return, return its input.
return VisitConstant(node);
}
case IrOpcode::kCall:
- return VisitCall(node, nullptr, NORMAL_CALL);
+ return VisitCall(node);
case IrOpcode::kFrameState:
case IrOpcode::kStateValues:
return;
}
-namespace {
-
-// Returns the call node if the given return node is part of a tail call,
-// nullptr otherwise.
-Node* TryMatchTailCall(Node* ret) {
- // The value which is returned must be the result of a potential tail call,
- // there must be no try/catch/finally around the call, and there must be no
- // effects between the call and the return.
- Node* call = NodeProperties::GetValueInput(ret, 0);
- if (call->opcode() != IrOpcode::kCall ||
- !OpParameter<const CallDescriptor*>(call)->SupportsTailCalls() ||
- NodeProperties::IsExceptionalCall(call) ||
- NodeProperties::GetEffectInput(ret, 0) != call) {
- return nullptr;
- }
- // Furthermore, control has to flow via an IfSuccess from the call (for calls
- // which can throw), or the return and the call have to use the same control
- // input (for calls which can't throw).
- Node* control = NodeProperties::GetControlInput(ret, 0);
- bool found = (control->opcode() == IrOpcode::kIfSuccess)
- ? (NodeProperties::GetControlInput(control, 0) == call)
- : (control == NodeProperties::GetControlInput(call, 0));
- return found ? call : nullptr;
-}
-
-} // namespace
-
-
-void InstructionSelector::VisitReturn(Node* node) {
- if (FLAG_turbo_tail_calls) {
- Node* call = TryMatchTailCall(node);
- if (call != nullptr) {
- const CallDescriptor* desc = OpParameter<const CallDescriptor*>(call);
- if (desc->UsesOnlyRegisters() &&
- desc->HasSameReturnLocationsAs(linkage()->GetIncomingDescriptor())) {
- return VisitCall(call, nullptr, TAIL_CALL);
- }
- }
- }
- Node* value = NodeProperties::GetValueInput(node, 0);
+void InstructionSelector::VisitReturn(Node* value) {
DCHECK_NOT_NULL(value);
OperandGenerator g(this);
Emit(kArchRet, g.NoOutput(),
#undef DECLARE_UNIMPLEMENTED_SELECTOR
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler,
- CallMode call_mode) {
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
UNIMPLEMENTED();
}
+void InstructionSelector::VisitTailCall(Node* node) { UNIMPLEMENTED(); }
+
+
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
UNIMPLEMENTED();
void VisitPhi(Node* node);
void VisitProjection(Node* node);
void VisitConstant(Node* node);
- void VisitCall(Node* call, BasicBlock* handler, CallMode call_mode);
+ void VisitCall(Node* call, BasicBlock* handler = nullptr);
+ void VisitTailCall(Node* call);
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, const SwitchInfo& sw);
}
typedef Signature<MachineType> MachineSignature;
+
} // namespace compiler
} // namespace internal
} // namespace v8
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler,
- CallMode call_mode) {
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
MipsOperandGenerator g(this);
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
- FrameStateDescriptor* frame_state_descriptor = NULL;
+ FrameStateDescriptor* frame_state_descriptor = nullptr;
if (descriptor->NeedsFrameState()) {
frame_state_descriptor =
GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
// Pass label of exception handler block.
CallDescriptor::Flags flags = descriptor->flags();
- if (handler != nullptr) {
+ if (handler) {
flags |= CallDescriptor::kHasExceptionHandler;
buffer.instruction_args.push_back(g.Label(handler));
}
// Select the appropriate opcode based on the call type.
- bool is_tail_call = call_mode == TAIL_CALL;
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
- opcode = is_tail_call ? kArchTailCallCodeObject : kArchCallCodeObject;
+ opcode = kArchCallCodeObject;
break;
}
case CallDescriptor::kCallJSFunction:
- opcode = is_tail_call ? kArchTailCallJSFunction : kArchCallJSFunction;
+ opcode = kArchCallJSFunction;
break;
default:
UNREACHABLE();
opcode |= MiscField::encode(flags);
// Emit the call instruction.
- size_t size = is_tail_call ? 0 : buffer.outputs.size();
- InstructionOperand* first_output =
- size > 0 ? &buffer.outputs.front() : nullptr;
- Instruction* call_instr =
- Emit(opcode, size, first_output, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- call_instr->MarkAsCall();
+ size_t const output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitTailCall(Node* node) {
+ MipsOperandGenerator g(this);
+ const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+ DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
+
+ // TODO(turbofan): Relax restriction for stack parameters.
+ if (descriptor->UsesOnlyRegisters() &&
+ descriptor->HasSameReturnLocationsAs(
+ linkage()->GetIncomingDescriptor())) {
+ CallBuffer buffer(zone(), descriptor, nullptr);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, false);
+
+ DCHECK_EQ(0u, buffer.pushed_nodes.size());
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchTailCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the tailcall instruction.
+ Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
+ &buffer.instruction_args.front());
+ } else {
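+    // The call cannot be emitted as a proper tail call here; lower it to a
+    // normal call followed by a return of the call's results instead.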
+ FrameStateDescriptor* frame_state_descriptor =
+ descriptor->NeedsFrameState()
+ ? GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())))
+ : nullptr;
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, false);
+ // Possibly align stack here for functions.
+ int push_count = buffer.pushed_nodes.size();
+ if (push_count > 0) {
+ Emit(kMipsStackClaim, g.NoOutput(),
+ g.TempImmediate(push_count << kPointerSizeLog2));
+ }
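+    // Store the pushed nodes from the highest slot downwards, so that
+    // pushed_nodes[i] ends up in stack slot i of the claimed area.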
+ int slot = buffer.pushed_nodes.size() - 1;
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ g.TempImmediate(slot << kPointerSizeLog2));
+ slot--;
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ opcode = kArchCallCodeObject;
+ break;
+ }
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ size_t const output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())->MarkAsCall();
+ Emit(kArchRet, 0, nullptr, output_count, outputs);
+ }
}
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler,
- CallMode call_mode) {
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
Mips64OperandGenerator g(this);
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
- FrameStateDescriptor* frame_state_descriptor = NULL;
+ FrameStateDescriptor* frame_state_descriptor = nullptr;
if (descriptor->NeedsFrameState()) {
frame_state_descriptor =
GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
// Pass label of exception handler block.
CallDescriptor::Flags flags = descriptor->flags();
- if (handler != nullptr) {
+ if (handler) {
flags |= CallDescriptor::kHasExceptionHandler;
buffer.instruction_args.push_back(g.Label(handler));
}
// Select the appropriate opcode based on the call type.
- bool is_tail_call = call_mode == TAIL_CALL;
InstructionCode opcode;
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject: {
- opcode = is_tail_call ? kArchTailCallCodeObject : kArchCallCodeObject;
+ opcode = kArchCallCodeObject;
break;
}
case CallDescriptor::kCallJSFunction:
- opcode = is_tail_call ? kArchTailCallJSFunction : kArchCallJSFunction;
+ opcode = kArchCallJSFunction;
break;
default:
UNREACHABLE();
opcode |= MiscField::encode(flags);
// Emit the call instruction.
- size_t size = is_tail_call ? 0 : buffer.outputs.size();
- InstructionOperand* first_output =
- size > 0 ? &buffer.outputs.front() : nullptr;
- Instruction* call_instr =
- Emit(opcode, size, first_output, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- call_instr->MarkAsCall();
+ size_t const output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitTailCall(Node* node) {
+ Mips64OperandGenerator g(this);
+ const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+ DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
+
+ // TODO(turbofan): Relax restriction for stack parameters.
+ if (descriptor->UsesOnlyRegisters() &&
+ descriptor->HasSameReturnLocationsAs(
+ linkage()->GetIncomingDescriptor())) {
+ CallBuffer buffer(zone(), descriptor, nullptr);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, false);
+
+ DCHECK_EQ(0u, buffer.pushed_nodes.size());
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchTailCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the tailcall instruction.
+ Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
+ &buffer.instruction_args.front());
+ } else {
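+    // The call cannot be emitted as a proper tail call here; lower it to a
+    // normal call followed by a return of the call's results instead.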
+ FrameStateDescriptor* frame_state_descriptor =
+ descriptor->NeedsFrameState()
+ ? GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())))
+ : nullptr;
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, false);
+
+ int push_count = buffer.pushed_nodes.size();
+ if (push_count > 0) {
+ Emit(kMips64StackClaim, g.NoOutput(),
+ g.TempImmediate(push_count << kPointerSizeLog2));
+ }
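+    // Store the pushed nodes from the highest slot downwards, so that
+    // pushed_nodes[i] ends up in stack slot i of the claimed area.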
+ int slot = buffer.pushed_nodes.size() - 1;
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(node),
+ g.TempImmediate(slot << kPointerSizeLog2));
+ slot--;
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject: {
+ opcode = kArchCallCodeObject;
+ break;
+ }
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+ size_t const output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())->MarkAsCall();
+ Emit(kArchRet, 0, nullptr, output_count, outputs);
+ }
}
}
+bool Node::OwnedBy(Node const* owner1, Node const* owner2) const {
+ unsigned mask = 0;
+ for (Use* use = first_use_; use; use = use->next) {
+ if (use->from == owner1) {
+ mask |= 1;
+ } else if (use->from == owner2) {
+ mask |= 2;
+ } else {
+ return false;
+ }
+ }
+ return mask == 3;
+}
+
+
void Node::Input::Update(Node* new_to) {
Node* old_to = this->to;
if (new_to == old_to) return; // Nothing to do.
return first_use_ && first_use_->from == owner && !first_use_->next;
}
+ // Returns true if {owner1} and {owner2} are the only users of {this} node.
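+  // For example, if a node {n} is used exactly by nodes {a} and {b}, then
+  // n->OwnedBy(a, b) is true, while n->OwnedBy(a) and n->OwnedBy(b) are not.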
+ bool OwnedBy(Node const* owner1, Node const* owner2) const;
+
private:
struct Use final : public ZoneObject {
Node* from;
V(Merge) \
V(Deoptimize) \
V(Return) \
+ V(TailCall) \
V(OsrNormalEntry) \
V(OsrLoopEntry) \
V(Throw) \
#include "src/compiler/select-lowering.h"
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/simplified-operator-reducer.h"
+#include "src/compiler/tail-call-optimization.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
JSGenericLowering generic(data->info()->is_typing_enabled(),
data->jsgraph());
SelectLowering select(data->jsgraph()->graph(), data->jsgraph()->common());
+ TailCallOptimization tco(data->common(), data->graph());
GraphReducer graph_reducer(data->graph(), temp_zone);
AddReducer(data, &graph_reducer, &generic);
AddReducer(data, &graph_reducer, &select);
+ // TODO(turbofan): TCO is currently limited to stubs.
+ if (data->info()->IsStub()) AddReducer(data, &graph_reducer, &tco);
graph_reducer.ReduceGraph();
}
};
return os << "switch";
case BasicBlock::kDeoptimize:
return os << "deoptimize";
+ case BasicBlock::kTailCall:
+ return os << "tailcall";
case BasicBlock::kReturn:
return os << "return";
case BasicBlock::kThrow:
}
+void Schedule::AddTailCall(BasicBlock* block, Node* input) {
+ DCHECK_EQ(BasicBlock::kNone, block->control());
+ block->set_control(BasicBlock::kTailCall);
+ SetControlInput(block, input);
+ if (block != end()) AddSuccessor(block, end());
+}
+
+
void Schedule::AddReturn(BasicBlock* block, Node* input) {
DCHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kReturn);
kBranch, // Branch if true to first successor, otherwise second.
kSwitch, // Table dispatch to one of the successor blocks.
  kDeoptimize, // Deoptimize and bail out of this method.
+ kTailCall, // Tail call another method from this method.
kReturn, // Return a value from this method.
kThrow // Throw an exception.
};
// BasicBlock building: add a deoptimize at the end of {block}.
void AddDeoptimize(BasicBlock* block, Node* input);
+ // BasicBlock building: add a tailcall at the end of {block}.
+ void AddTailCall(BasicBlock* block, Node* input);
+
// BasicBlock building: add a return at the end of {block}.
void AddReturn(BasicBlock* block, Node* input);
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectDeoptimize(node);
break;
+ case IrOpcode::kTailCall:
+ scheduler_->UpdatePlacement(node, Scheduler::kFixed);
+ ConnectTailCall(node);
+ break;
case IrOpcode::kReturn:
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectReturn(node);
}
}
+ void ConnectTailCall(Node* call) {
+ Node* call_control = NodeProperties::GetControlInput(call);
+ BasicBlock* call_block = FindPredecessorBlock(call_control);
+    TraceConnect(call, call_block, nullptr);
+ schedule_->AddTailCall(call_block, call);
+ }
+
void ConnectReturn(Node* ret) {
Node* return_control = NodeProperties::GetControlInput(ret);
BasicBlock* return_block = FindPredecessorBlock(return_control);
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/tail-call-optimization.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction TailCallOptimization::Reduce(Node* node) {
+ if (node->opcode() != IrOpcode::kReturn) return NoChange();
+ // The value which is returned must be the result of a potential tail call,
+ // there must be no try/catch/finally around the Call, and there must be no
+ // other effect between the Call and the Return nodes.
+ Node* const call = NodeProperties::GetValueInput(node, 0);
+ if (call->opcode() == IrOpcode::kCall &&
+ OpParameter<CallDescriptor const*>(call)->SupportsTailCalls() &&
+ NodeProperties::GetEffectInput(node) == call &&
+ !NodeProperties::IsExceptionalCall(call)) {
+ Node* const control = NodeProperties::GetControlInput(node);
+ if (control->opcode() == IrOpcode::kIfSuccess &&
+ call->OwnedBy(node, control) && control->OwnedBy(node)) {
+      // Furthermore, control has to flow via an IfSuccess from the Call, so
+      // the Return node's value and effect depend directly on the Call node,
+      // and its control depends on the Call indirectly via the IfSuccess.
+
+ // Value1 ... ValueN Effect Control
+ // ^ ^ ^ ^
+ // | | | |
+ // | +--+ +-+ |
+ // +----------+ | | +------+
+ // \ | | /
+ // Call[Descriptor]
+ // ^ ^ ^
+ // | | |
+ // +-+ | |
+ // | | |
+ // | +-+ |
+ // | | IfSuccess
+ // | | ^
+ // | | |
+ // Return
+ // ^
+ // |
+
+ // The resulting graph looks like this:
+
+ // Value1 ... ValueN Effect Control
+ // ^ ^ ^ ^
+ // | | | |
+ // | +--+ +-+ |
+ // +----------+ | | +------+
+ // \ | | /
+ // TailCall[Descriptor]
+ // ^
+ // |
+
+ DCHECK_EQ(call, NodeProperties::GetControlInput(control, 0));
+ DCHECK_EQ(3, node->InputCount());
+ node->set_op(
+ common()->TailCall(OpParameter<CallDescriptor const*>(call)));
+ node->ReplaceInput(0, NodeProperties::GetEffectInput(call));
+ node->ReplaceInput(1, NodeProperties::GetControlInput(call));
+ node->RemoveInput(2);
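+      // {node} now has the Call's effect and control as inputs 0 and 1;
+      // prepending the Call's value inputs below yields the input order
+      // (value0, ..., valueN, effect, control) expected by TailCall.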
+ for (int index = 0; index < call->op()->ValueInputCount(); ++index) {
+ node->InsertInput(graph()->zone(), index,
+ NodeProperties::GetValueInput(call, index));
+ }
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TAIL_CALL_OPTIMIZATION_H_
+#define V8_COMPILER_TAIL_CALL_OPTIMIZATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class Graph;
+
+
+// Performs tail call optimization by replacing certain combinations of Return
+// and Call nodes with a single TailCall.
+class TailCallOptimization final : public Reducer {
+ public:
+ TailCallOptimization(CommonOperatorBuilder* common, Graph* graph)
+ : common_(common), graph_(graph) {}
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ CommonOperatorBuilder* common() const { return common_; }
+ Graph* graph() const { return graph_; }
+
+ CommonOperatorBuilder* const common_;
+ Graph* const graph_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_TAIL_CALL_OPTIMIZATION_H_
DECLARE_CASE(Merge)
DECLARE_CASE(Deoptimize)
DECLARE_CASE(Return)
+ DECLARE_CASE(TailCall)
DECLARE_CASE(OsrNormalEntry)
DECLARE_CASE(OsrLoopEntry)
DECLARE_CASE(Throw)
DECLARE_CASE(Merge)
DECLARE_CASE(Deoptimize)
DECLARE_CASE(Return)
+ DECLARE_CASE(TailCall)
DECLARE_CASE(OsrNormalEntry)
DECLARE_CASE(OsrLoopEntry)
DECLARE_CASE(Throw)
case IrOpcode::kCall:
// TODO(rossberg): what are the constraints on these?
break;
+ case IrOpcode::kTailCall:
+ // TODO(bmeurer): what are the constraints on these?
+ break;
// JavaScript operators
// --------------------
}
-void InstructionSelector::VisitCall(Node* node, BasicBlock* handler,
- CallMode call_mode) {
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
X64OperandGenerator g(this);
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
- FrameStateDescriptor* frame_state_descriptor = NULL;
+ FrameStateDescriptor* frame_state_descriptor = nullptr;
if (descriptor->NeedsFrameState()) {
frame_state_descriptor = GetFrameStateDescriptor(
node->InputAt(static_cast<int>(descriptor->InputCount())));
// Pass label of exception handler block.
CallDescriptor::Flags flags = descriptor->flags();
- if (handler != nullptr) {
+ if (handler) {
flags |= CallDescriptor::kHasExceptionHandler;
buffer.instruction_args.push_back(g.Label(handler));
}
// Select the appropriate opcode based on the call type.
- bool is_tail_call = call_mode == TAIL_CALL;
InstructionCode opcode;
switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject: {
- opcode = is_tail_call ? kArchTailCallCodeObject : kArchCallCodeObject;
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject;
break;
- }
case CallDescriptor::kCallJSFunction:
- opcode = is_tail_call ? kArchTailCallJSFunction : kArchCallJSFunction;
+ opcode = kArchCallJSFunction;
break;
default:
UNREACHABLE();
opcode |= MiscField::encode(flags);
// Emit the call instruction.
- size_t size = is_tail_call ? 0 : buffer.outputs.size();
- InstructionOperand* first_output =
- size > 0 ? &buffer.outputs.front() : nullptr;
- Instruction* call_instr =
- Emit(opcode, size, first_output, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- call_instr->MarkAsCall();
+ size_t const output_count = buffer.outputs.size();
+ auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitTailCall(Node* node) {
+ X64OperandGenerator g(this);
+ CallDescriptor const* descriptor = OpParameter<CallDescriptor const*>(node);
+ DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kPatchableCallSite);
+ DCHECK_EQ(0, descriptor->flags() & CallDescriptor::kNeedsNopAfterCall);
+
+ // TODO(turbofan): Relax restriction for stack parameters.
+ if (descriptor->UsesOnlyRegisters() &&
+ descriptor->HasSameReturnLocationsAs(
+ linkage()->GetIncomingDescriptor())) {
+ CallBuffer buffer(zone(), descriptor, nullptr);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, true);
+
+ DCHECK_EQ(0u, buffer.pushed_nodes.size());
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchTailCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the tailcall instruction.
+ Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
+ &buffer.instruction_args.front());
+ } else {
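+    // The call cannot be emitted as a proper tail call here; lower it to a
+    // normal call followed by a return of the call's results instead.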
+ FrameStateDescriptor* frame_state_descriptor =
+ descriptor->NeedsFrameState()
+ ? GetFrameStateDescriptor(
+ node->InputAt(static_cast<int>(descriptor->InputCount())))
+ : nullptr;
+
+ CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+ // Compute InstructionOperands for inputs and outputs.
+ InitializeCallBuffer(node, &buffer, true, true);
+
+ // Push any stack arguments.
+ for (Node* node : base::Reversed(buffer.pushed_nodes)) {
+ // TODO(titzer): Handle pushing double parameters.
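+      // Prefer an immediate when possible; on Atom require a register,
+      // otherwise any operand (including a memory slot) may be pushed.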
+ InstructionOperand value =
+ g.CanBeImmediate(node)
+ ? g.UseImmediate(node)
+ : IsSupported(ATOM) ? g.UseRegister(node) : g.Use(node);
+ Emit(kX64Push, g.NoOutput(), value);
+ }
+
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchCallCodeObject;
+ break;
+ case CallDescriptor::kCallJSFunction:
+ opcode = kArchCallJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ opcode |= MiscField::encode(descriptor->flags());
+
+ // Emit the call instruction.
+    size_t const output_count = buffer.outputs.size();
+    auto* outputs = output_count ? &buffer.outputs.front() : nullptr;
+ Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
+ &buffer.instruction_args.front())->MarkAsCall();
+ Emit(kArchRet, 0, nullptr, output_count, outputs);
+ }
}
"stress loop peeling optimization")
DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
-DEFINE_BOOL(turbo_tail_calls, false,
- "enable tail call optimization in TurboFan")
DEFINE_INT(typed_array_max_size_in_heap, 64,
"threshold for in-heap typed array")
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
-enum CallMode { NORMAL_CALL, TAIL_CALL };
-
enum VisitMode {
VISIT_ALL,
VISIT_ALL_IN_SCAVENGE,
};
+enum CallMode { NORMAL_CALL, TAIL_CALL };
+
+
class HCallWithDescriptor final : public HInstruction {
public:
static HCallWithDescriptor* New(Isolate* isolate, Zone* zone, HValue* context,
#include "test/unittests/compiler/instruction-selector-unittest.h"
-#include "src/code-stubs.h"
#include "src/compiler/graph.h"
#include "src/compiler/schedule.h"
#include "src/flags.h"
EXPECT_EQ(index, s.size());
}
-
-// -----------------------------------------------------------------------------
-// Tail calls.
-
-TARGET_TEST_F(InstructionSelectorTest, TailCall) {
- for (int mode = 0; mode < 2; ++mode) {
- bool supports_tail_calls = FLAG_turbo_tail_calls && (mode == 0);
-
- StreamBuilder m(this, kMachAnyTagged);
- Node* start = m.graph()->start();
- Node* undefined = m.UndefinedConstant();
-
- StringLengthStub stub(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), stub.GetCallInterfaceDescriptor(), 0,
- supports_tail_calls ? CallDescriptor::kSupportsTailCalls
- : CallDescriptor::kNoFlags,
- Operator::kNoProperties);
- Node* stub_node = m.NewNode(m.common()->HeapConstant(
- Unique<Code>::CreateUninitialized(stub.GetCode())));
-
- Node* call = m.NewNode(m.common()->Call(desc), stub_node, undefined,
- undefined, undefined, undefined, undefined);
- call->AppendInput(zone(), start); // effect
- call->AppendInput(zone(), start); // control
-
- m.Return(call);
- Node* ret = *call->uses().begin();
- ret->AppendInput(zone(), call); // effect
- ret->AppendInput(zone(), start); // control
-
- Stream s = m.Build(kAllInstructions);
- if (supports_tail_calls) {
- ASSERT_EQ(3U, s.size());
- EXPECT_EQ(kArchNop, s[0]->arch_opcode());
- EXPECT_EQ(kArchTailCallCodeObject, s[1]->arch_opcode());
- EXPECT_EQ(kArchNop, s[2]->arch_opcode());
- } else {
- ASSERT_EQ(4U, s.size());
- EXPECT_EQ(kArchNop, s[0]->arch_opcode());
- EXPECT_EQ(kArchCallCodeObject, s[1]->arch_opcode());
- EXPECT_EQ(kArchRet, s[2]->arch_opcode());
- EXPECT_EQ(kArchNop, s[3]->arch_opcode());
- }
- }
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
};
+class IsTailCallMatcher final : public NodeMatcher {
+ public:
+ IsTailCallMatcher(const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const std::vector<Matcher<Node*>>& value_matchers,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kTailCall),
+ descriptor_matcher_(descriptor_matcher),
+ value_matchers_(value_matchers),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ NodeMatcher::DescribeTo(os);
+ for (size_t i = 0; i < value_matchers_.size(); ++i) {
+ if (i == 0) {
+ *os << " whose value0 (";
+ } else {
+ *os << "), value" << i << " (";
+ }
+ value_matchers_[i].DescribeTo(os);
+ }
+ *os << "), effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ if (!NodeMatcher::MatchAndExplain(node, listener) ||
+ !PrintMatchAndExplain(OpParameter<CallDescriptor const*>(node),
+ "descriptor", descriptor_matcher_, listener)) {
+ return false;
+ }
+ for (size_t i = 0; i < value_matchers_.size(); ++i) {
+ std::ostringstream ost;
+ ost << "value" << i;
+ if (!PrintMatchAndExplain(
+ NodeProperties::GetValueInput(node, static_cast<int>(i)),
+ ost.str(), value_matchers_[i], listener)) {
+ return false;
+ }
+ }
+ return (PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<CallDescriptor const*> descriptor_matcher_;
+ const std::vector<Matcher<Node*>> value_matchers_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
class IsAllocateMatcher final : public NodeMatcher {
public:
IsAllocateMatcher(const Matcher<Node*>& size_matcher,
}
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
Matcher<Node*> IsAllocate(const Matcher<Node*>& size_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher) {
const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
const Matcher<Node*>& value6_matcher, const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsBooleanNot(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsNumberEqual(const Matcher<Node*>& lhs_matcher,
EXPECT_FALSE(n0->OwnedBy(n1));
EXPECT_FALSE(n0->OwnedBy(n2));
EXPECT_TRUE(n1->OwnedBy(n2));
+ EXPECT_TRUE(n0->OwnedBy(n1, n2));
n2->ReplaceInput(0, n2);
EXPECT_TRUE(n0->OwnedBy(n1));
EXPECT_TRUE(n1->OwnedBy(n2));
0, 1, 0, 0);
const Operator kMockCall(IrOpcode::kCall, Operator::kNoProperties, "MockCall",
0, 0, 1, 1, 0, 2);
+const Operator kMockTailCall(IrOpcode::kTailCall, Operator::kNoProperties,
+ "MockTailCall", 1, 1, 1, 0, 0, 1);
} // namespace
}
+TARGET_TEST_F(SchedulerTest, TailCall) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* call = graph()->NewNode(&kMockTailCall, p0, start, start);
+ Node* end = graph()->NewNode(common()->End(), call);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(4);
+}
+
+
TARGET_TEST_F(SchedulerTest, Switch) {
Node* start = graph()->NewNode(common()->Start(1));
graph()->SetStart(start);
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/linkage.h"
+#include "src/compiler/tail-call-optimization.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class TailCallOptimizationTest : public GraphTest {
+ public:
+ explicit TailCallOptimizationTest(int num_parameters = 1)
+ : GraphTest(num_parameters) {}
+ ~TailCallOptimizationTest() override {}
+
+ protected:
+ Reduction Reduce(Node* node) {
+ TailCallOptimization tco(common(), graph());
+ return tco.Reduce(node);
+ }
+};
+
+
+TEST_F(TailCallOptimizationTest, CallCodeObject0) {
+ MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
+ LinkageLocation kLocationSignature[] = {LinkageLocation(0),
+ LinkageLocation(1)};
+ const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
+ CallDescriptor::kCallCodeObject, kMachAnyTagged, LinkageLocation(0),
+ new (zone()) MachineSignature(1, 1, kMachineSignature),
+ new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
+ Operator::kNoProperties, 0, CallDescriptor::kNoFlags);
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
+ graph()->start(), graph()->start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
+ Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Reduction r = Reduce(ret);
+ ASSERT_FALSE(r.Changed());
+}
+
+
+TEST_F(TailCallOptimizationTest, CallCodeObject1) {
+ MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
+ LinkageLocation kLocationSignature[] = {LinkageLocation(0),
+ LinkageLocation(1)};
+ const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
+ CallDescriptor::kCallCodeObject, kMachAnyTagged, LinkageLocation(0),
+ new (zone()) MachineSignature(1, 1, kMachineSignature),
+ new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
+ Operator::kNoProperties, 0, CallDescriptor::kSupportsTailCalls);
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
+ graph()->start(), graph()->start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
+ Node* if_exception = graph()->NewNode(common()->IfException(), call);
+ Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Node* end = graph()->NewNode(common()->End(), if_exception);
+ graph()->SetEnd(end);
+ Reduction r = Reduce(ret);
+ ASSERT_FALSE(r.Changed());
+}
+
+
+TEST_F(TailCallOptimizationTest, CallCodeObject2) {
+ MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
+ LinkageLocation kLocationSignature[] = {LinkageLocation(0),
+ LinkageLocation(1)};
+ const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
+ CallDescriptor::kCallCodeObject, kMachAnyTagged, LinkageLocation(0),
+ new (zone()) MachineSignature(1, 1, kMachineSignature),
+ new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
+ Operator::kNoProperties, 0, CallDescriptor::kSupportsTailCalls);
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
+ graph()->start(), graph()->start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
+ Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Reduction r = Reduce(ret);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTailCall(kCallDescriptor, p0, p1,
+ graph()->start(), graph()->start()));
+}
+
+
+TEST_F(TailCallOptimizationTest, CallJSFunction0) {
+ MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
+ LinkageLocation kLocationSignature[] = {LinkageLocation(0),
+ LinkageLocation(1)};
+ const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
+ CallDescriptor::kCallJSFunction, kMachAnyTagged, LinkageLocation(0),
+ new (zone()) MachineSignature(1, 1, kMachineSignature),
+ new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
+ Operator::kNoProperties, 0, CallDescriptor::kNoFlags);
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
+ graph()->start(), graph()->start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
+ Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Reduction r = Reduce(ret);
+ ASSERT_FALSE(r.Changed());
+}
+
+
+TEST_F(TailCallOptimizationTest, CallJSFunction1) {
+ MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
+ LinkageLocation kLocationSignature[] = {LinkageLocation(0),
+ LinkageLocation(1)};
+ const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
+ CallDescriptor::kCallJSFunction, kMachAnyTagged, LinkageLocation(0),
+ new (zone()) MachineSignature(1, 1, kMachineSignature),
+ new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
+ Operator::kNoProperties, 0, CallDescriptor::kSupportsTailCalls);
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
+ graph()->start(), graph()->start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
+ Node* if_exception = graph()->NewNode(common()->IfException(), call);
+ Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Node* end = graph()->NewNode(common()->End(), if_exception);
+ graph()->SetEnd(end);
+ Reduction r = Reduce(ret);
+ ASSERT_FALSE(r.Changed());
+}
+
+
+TEST_F(TailCallOptimizationTest, CallJSFunction2) {
+ MachineType kMachineSignature[] = {kMachAnyTagged, kMachAnyTagged};
+ LinkageLocation kLocationSignature[] = {LinkageLocation(0),
+ LinkageLocation(1)};
+ const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
+ CallDescriptor::kCallJSFunction, kMachAnyTagged, LinkageLocation(0),
+ new (zone()) MachineSignature(1, 1, kMachineSignature),
+ new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
+ Operator::kNoProperties, 0, CallDescriptor::kSupportsTailCalls);
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
+ graph()->start(), graph()->start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
+ Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Reduction r = Reduce(ret);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTailCall(kCallDescriptor, p0, p1,
+ graph()->start(), graph()->start()));
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
'compiler/simplified-operator-reducer-unittest.cc',
'compiler/simplified-operator-unittest.cc',
'compiler/state-values-utils-unittest.cc',
+ 'compiler/tail-call-optimization-unittest.cc',
'compiler/typer-unittest.cc',
'compiler/value-numbering-reducer-unittest.cc',
'compiler/zone-pool-unittest.cc',
'../../src/compiler/source-position.h',
'../../src/compiler/state-values-utils.cc',
'../../src/compiler/state-values-utils.h',
+ '../../src/compiler/tail-call-optimization.cc',
+ '../../src/compiler/tail-call-optimization.h',
'../../src/compiler/typer.cc',
'../../src/compiler/typer.h',
'../../src/compiler/value-numbering-reducer.cc',