#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
namespace v8 {
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// o r1: the JS function object being called.
+// o cp: our context
+// o pp: the caller's constant pool pointer (if enabled)
+// o fp: the caller's frame pointer
+// o sp: stack pointer
+// o lr: return address
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-arm.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
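+  // PushFixedFrame pushes lr, fp, the constant pool pointer (when enabled),
+  // cp and r1, after which fp is pointed at its standard frame position.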
+ __ PushFixedFrame(r1);
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+  // Get the bytecode array from the function object and load it into
+  // kInterpreterBytecodeArrayRegister.
+ __ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, r0, no_reg,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size from the BytecodeArray object.
+ __ ldr(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ __ sub(r9, sp, Operand(r4));
+ __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+ __ cmp(r9, Operand(r2));
+ __ b(hs, &ok);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ // Note: there should always be at least one stack slot for the return
+ // register in the register file.
+ Label loop_header;
+ __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ push(r9);
+ // Continue loop if not done.
+ __ sub(r4, r4, Operand(kPointerSize), SetCC);
+ __ b(&loop_header, ne);
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ bind(&ok);
+ }
+
+ // Load bytecode offset and dispatch table into registers.
+ __ mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
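+  // The dispatch table is a FixedArray of handler code objects indexed by
+  // bytecode value; the adjustment above leaves the register pointing at its
+  // first element.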
+
+ // Dispatch to the first bytecode handler for the function.
+ __ ldrb(r0, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ ldr(ip, MemOperand(kInterpreterDispatchTableRegister, r0, LSL,
+ kPointerSizeLog2));
+  // TODO(rmcilroy): Make dispatch table point to code entries to avoid
+  // untagging and header removal.
+ __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+ // Load return value into r0.
+ __ ldr(r0, MemOperand(fp, -kPointerSize -
+ StandardFrameConstants::kFixedFrameSizeFromFp));
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Drop receiver + arguments.
+ __ Drop(1); // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ Jump(lr);
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
const Register cp = { kRegister_r7_Code }; // JavaScript context pointer.
const Register pp = { kRegister_r8_Code }; // Constant pool pointer.
const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
+const Register kInterpreterBytecodeOffsetRegister = {
+ kRegister_r5_Code // Interpreter bytecode offset.
+};
+const Register kInterpreterBytecodeArrayRegister = {
+ kRegister_r6_Code // Interpreter bytecode array pointer.
+};
+const Register kInterpreterDispatchTableRegister = {
+ kRegister_r8_Code // Interpreter dispatch table.
+};
// Flags used for AllocateHeapNumber
enum TaggingMode {
ALIAS_REGISTER(Register, lr, x30);
ALIAS_REGISTER(Register, xzr, x31);
ALIAS_REGISTER(Register, wzr, w31);
+ALIAS_REGISTER(Register, kInterpreterBytecodeOffsetRegister, x19);
+ALIAS_REGISTER(Register, kInterpreterBytecodeArrayRegister, x20);
+ALIAS_REGISTER(Register, kInterpreterDispatchTableRegister, x21);
// Keeps the 0 double value.
ALIAS_REGISTER(FPRegister, fp_zero, d15);
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// - x1: the JS function object being called.
+// - cp: our context.
+// - fp: our caller's frame pointer.
+// - jssp: stack pointer.
+// - lr: return address.
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-arm64.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ Push(lr, fp, cp, x1);
+ __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+
+  // Get the bytecode array from the function object and load it into
+  // kInterpreterBytecodeArrayRegister.
+ __ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(kInterpreterBytecodeArrayRegister,
+ kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
+ BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size from the BytecodeArray object.
+ __ Ldr(w11, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ DCHECK(jssp.Is(__ StackPointer()));
+ __ Sub(x10, jssp, Operand(x11));
+ __ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
+ __ B(hs, &ok);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ Bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ // Note: there should always be at least one stack slot for the return
+ // register in the register file.
+    __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+    // TODO(rmcilroy): Ensure we always have an even number of registers to
+    // allow stack to be 16-byte aligned (and remove need for jssp).
+    __ Lsr(x11, x11, kPointerSizeLog2);
+    __ PushMultipleTimes(x10, x11);
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
+ __ B(hs, &ok);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ Bind(&ok);
+ }
+
+ // Load bytecode offset and dispatch table into registers.
+ __ Mov(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ Add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Dispatch to the first bytecode handler for the function.
+ __ Ldrb(x0, MemOperand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister));
+ __ Mov(x0, Operand(x0, LSL, kPointerSizeLog2));
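+  // Scale the bytecode value by the pointer size to index the dispatch table.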
+ __ Ldr(ip0, MemOperand(kInterpreterDispatchTableRegister, x0));
+  // TODO(rmcilroy): Make dispatch table point to code entries to avoid
+  // untagging and header removal.
+ __ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip0);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+ // Load return value into x0.
+  __ Ldr(x0, MemOperand(fp, -kPointerSize -
+                            StandardFrameConstants::kFixedFrameSizeFromFp));
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Drop receiver + arguments.
+ // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ Drop(1, kXRegSize);
+ __ Ret();
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
V(kFunctionBeingDebugged, "Function is being debugged") \
V(kFunctionCallsEval, "Function calls eval") \
V(kFunctionWithIllegalRedeclaration, "Function with illegal redeclaration") \
+ V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
+ "The function_data field should be a BytecodeArray on interpreter entry") \
V(kGeneratedCodeIsTooLarge, "Generated code is too large") \
V(kGeneratorFailedToResume, "Generator failed to resume") \
V(kGenerator, "Generator") \
V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterExitTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState) \
static void Generate_JSConstructStubApi(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
+ static void Generate_InterpreterEntryTrampoline(MacroAssembler* masm);
+ static void Generate_InterpreterExitTrampoline(MacroAssembler* masm);
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
static Register ReturnValue2Reg() { return r1; }
static Register JSCallFunctionReg() { return r1; }
static Register ContextReg() { return cp; }
- static Register InterpreterBytecodeOffsetReg() { return r5; }
- static Register InterpreterBytecodeArrayReg() { return r6; }
- static Register InterpreterDispatchTableReg() { return r8; }
+ static Register InterpreterBytecodeOffsetReg() {
+ return kInterpreterBytecodeOffsetRegister;
+ }
+ static Register InterpreterBytecodeArrayReg() {
+ return kInterpreterBytecodeArrayRegister;
+ }
+ static Register InterpreterDispatchTableReg() {
+ return kInterpreterDispatchTableRegister;
+ }
static Register RuntimeCallFunctionReg() { return r1; }
static Register RuntimeCallArgCountReg() { return r0; }
static RegList CCalleeSaveRegisters() {
static Register ReturnValue2Reg() { return x1; }
static Register JSCallFunctionReg() { return x1; }
static Register ContextReg() { return cp; }
- static Register InterpreterBytecodeOffsetReg() { return x19; }
- static Register InterpreterBytecodeArrayReg() { return x20; }
- static Register InterpreterDispatchTableReg() { return x21; }
+ static Register InterpreterBytecodeOffsetReg() {
+ return kInterpreterBytecodeOffsetRegister;
+ }
+ static Register InterpreterBytecodeArrayReg() {
+ return kInterpreterBytecodeArrayRegister;
+ }
+ static Register InterpreterDispatchTableReg() {
+ return kInterpreterDispatchTableRegister;
+ }
static Register RuntimeCallFunctionReg() { return x1; }
static Register RuntimeCallArgCountReg() { return x0; }
static RegList CCalleeSaveRegisters() {
Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
- return raw_assembler_->Int32Sub(
+ return raw_assembler_->IntPtrSub(
Int32Constant(kFirstRegisterOffsetFromFp),
- raw_assembler_->Word32Shl(index, Int32Constant(kPointerSizeLog2)));
+ raw_assembler_->WordShl(index, Int32Constant(kPointerSizeLog2)));
}
}
+void InterpreterAssembler::Return() {
+ Node* exit_trampoline_code_object =
+ HeapConstant(Unique<HeapObject>::CreateImmovable(
+ isolate()->builtins()->InterpreterExitTrampoline()));
+  // If the order of the parameters changes, the call signature below also
+  // needs to change.
+ STATIC_ASSERT(0 == Linkage::kInterpreterBytecodeOffsetParameter);
+ STATIC_ASSERT(1 == Linkage::kInterpreterBytecodeArrayParameter);
+ STATIC_ASSERT(2 == Linkage::kInterpreterDispatchTableParameter);
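+  // Tail call the exit trampoline, forwarding the bytecode offset, bytecode
+  // array and dispatch table so they remain in their dedicated registers.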
+ Node* tail_call = graph()->NewNode(
+ common()->TailCall(call_descriptor()), exit_trampoline_code_object,
+ BytecodeOffset(), BytecodeArrayPointer(), DispatchTablePointer(),
+ graph()->start(), graph()->start());
+ schedule()->AddTailCall(raw_assembler_->CurrentBlock(), tail_call);
+ // This should always be the end node.
+ SetEndInput(tail_call);
+}
+
+
Node* InterpreterAssembler::Advance(int delta) {
return raw_assembler_->IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
}
new_bytecode_offset, BytecodeArrayPointer(), DispatchTablePointer(),
graph()->start(), graph()->start());
schedule()->AddTailCall(raw_assembler_->CurrentBlock(), tail_call);
-
// This should always be the end node.
SetEndInput(tail_call);
}
}
+Node* InterpreterAssembler::HeapConstant(Unique<HeapObject> object) {
+ return raw_assembler_->HeapConstant(object);
+}
+
+
} // namespace interpreter
} // namespace internal
} // namespace v8
#include "src/base/smart-pointers.h"
#include "src/frames.h"
#include "src/interpreter/bytecodes.h"
+#include "src/unique.h"
namespace v8 {
namespace internal {
// Constants.
Node* Int32Constant(int value);
Node* NumberConstant(double value);
+ Node* HeapConstant(Unique<HeapObject> object);
// Returns the bytecode argument |index| for the current bytecode.
Node* BytecodeArg(int index);
Node* StoreRegister(Node* value, int index);
Node* StoreRegister(Node* value, Node* index);
+ // Returns from the function.
+ void Return();
+
// Dispatch to the bytecode.
void Dispatch();
static Register ReturnValue2Reg() { return v1; }
static Register JSCallFunctionReg() { return a1; }
static Register ContextReg() { return cp; }
- static Register InterpreterBytecodeOffsetReg() { return t4; }
- static Register InterpreterBytecodeArrayReg() { return t5; }
- static Register InterpreterDispatchTableReg() { return t6; }
+ static Register InterpreterBytecodeOffsetReg() {
+ return kInterpreterBytecodeOffsetRegister;
+ }
+ static Register InterpreterBytecodeArrayReg() {
+ return kInterpreterBytecodeArrayRegister;
+ }
+ static Register InterpreterDispatchTableReg() {
+ return kInterpreterDispatchTableRegister;
+ }
static Register RuntimeCallFunctionReg() { return a1; }
static Register RuntimeCallArgCountReg() { return a0; }
static RegList CCalleeSaveRegisters() {
static Register ReturnValue2Reg() { return v1; }
static Register JSCallFunctionReg() { return a1; }
static Register ContextReg() { return cp; }
- static Register InterpreterBytecodeOffsetReg() { return t1; }
- static Register InterpreterBytecodeArrayReg() { return t2; }
- static Register InterpreterDispatchTableReg() { return t3; }
+ static Register InterpreterBytecodeOffsetReg() {
+ return kInterpreterBytecodeOffsetRegister;
+ }
+ static Register InterpreterBytecodeArrayReg() {
+ return kInterpreterBytecodeArrayRegister;
+ }
+ static Register InterpreterDispatchTableReg() {
+ return kInterpreterDispatchTableRegister;
+ }
static Register RuntimeCallFunctionReg() { return a1; }
static Register RuntimeCallArgCountReg() { return a0; }
static RegList CCalleeSaveRegisters() {
Unique<HeapObject> val = Unique<HeapObject>::CreateUninitialized(object);
return NewNode(common()->HeapConstant(val));
}
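+  // Overload for callers that already hold a Unique<HeapObject>, e.g. the
+  // interpreter assembler embedding an immovable code object.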
+ Node* HeapConstant(Unique<HeapObject> object) {
+ return NewNode(common()->HeapConstant(object));
+ }
Node* ExternalConstant(ExternalReference address) {
return NewNode(common()->ExternalConstant(address));
}
static Register JSCallFunctionReg() { return rdi; }
static Register ContextReg() { return rsi; }
static Register InterpreterBytecodeOffsetReg() { return r12; }
- static Register InterpreterBytecodeArrayReg() { return rbx; }
- static Register InterpreterDispatchTableReg() { return rdi; }
+ static Register InterpreterBytecodeArrayReg() { return r14; }
+ static Register InterpreterDispatchTableReg() { return r15; }
static Register RuntimeCallFunctionReg() { return rbx; }
static Register RuntimeCallArgCountReg() { return rax; }
static RegList CCalleeSaveRegisters() {
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
- // The stub will rewrite receiever and parameter count if the previous
+ // The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
if (is_strict(language_mode()) || !is_simple_parameter_list()) {
case kWeakObjectToCodeTableRootIndex:
case kRetainedMapsRootIndex:
case kWeakStackTraceListRootIndex:
+ case kInterpreterTableRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
SMI_ROOT_LIST(SMI_ENTRY)
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// o edi: the JS function object being called
+// o esi: our context
+// o ebp: the caller's frame pointer
+// o esp: stack pointer (pointing to return address)
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-ia32.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ __ push(edi); // Callee's JS function.
+
+  // Get the bytecode array from the function object and load it into edi.
+ __ mov(edi, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(edi);
+ __ CmpObjectType(edi, BYTECODE_ARRAY_TYPE, eax);
+ __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size from the BytecodeArray object.
+ __ mov(ebx, FieldOperand(edi, BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ __ mov(ecx, esp);
+ __ sub(ecx, ebx);
+ ExternalReference stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ cmp(ecx, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ // Note: there should always be at least one stack slot for the return
+ // register in the register file.
+ Label loop_header;
+ __ mov(eax, Immediate(masm->isolate()->factory()->undefined_value()));
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ push(eax);
+ // Continue loop if not done.
+ __ sub(ebx, Immediate(kPointerSize));
+ __ j(not_equal, &loop_header, Label::kNear);
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(masm->isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ bind(&ok);
+ }
+
+ // Load bytecode offset and dispatch table into registers.
+ __ mov(ecx, Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ // Since the dispatch table root might be set after builtins are generated,
+ // load directly from the roots table.
+ __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
+ __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Dispatch to the first bytecode handler for the function.
+ __ movzx_b(eax, Operand(edi, ecx, times_1, 0));
+ __ mov(eax, Operand(ebx, eax, times_pointer_size, 0));
+  // TODO(rmcilroy): Make dispatch table point to code entries to avoid
+  // untagging and header removal.
+ __ add(eax, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(eax);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+  // Load return value into eax.
+ __ mov(eax, Operand(ebp, -kPointerSize -
+ StandardFrameConstants::kFixedFrameSizeFromFp));
+ // Leave the frame (also dropping the register file).
+ __ leave();
+  // Return, dropping receiver + arguments.
+ // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ Ret(1 * kPointerSize, ecx);
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
// Return the value in register 0.
void Interpreter::DoReturn(compiler::InterpreterAssembler* assembler) {
- // TODO(rmcilroy) Jump to exit trampoline.
+ __ Return();
}
#define cp s7
#define kLithiumScratchReg s3
#define kLithiumScratchReg2 s4
+#define kInterpreterBytecodeOffsetRegister t4
+#define kInterpreterBytecodeArrayRegister t5
+#define kInterpreterDispatchTableRegister t6
#define kLithiumScratchDouble f30
#define kDoubleRegZero f28
// Used on mips32r6 for compare operations.
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// o a1: the JS function object being called.
+// o cp: our context
+// o fp: the caller's frame pointer
+// o sp: stack pointer
+// o ra: return address
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-mips.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+
+ __ Push(ra, fp, cp, a1);
+ __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+  // Get the bytecode array from the function object and load it into
+  // kInterpreterBytecodeArrayRegister.
+ __ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister, t0);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, t0,
+ Operand(zero_reg));
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, t0, t0);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, t0,
+ Operand(BYTECODE_ARRAY_TYPE));
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size from the BytecodeArray object.
+ __ lw(t0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ __ Subu(t1, sp, Operand(t0));
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ __ Branch(&ok, hs, t1, Operand(a2));
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ // Note: there should always be at least one stack slot for the return
+ // register in the register file.
+ Label loop_header;
+ __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ push(t1);
+ // Continue loop if not done.
+ __ Subu(t0, t0, Operand(kPointerSize));
+    __ Branch(&loop_header, gt, t0, Operand(zero_reg));
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(at));
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ bind(&ok);
+ }
+
+ // Load bytecode offset and dispatch table into registers.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ Addu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Dispatch to the first bytecode handler for the function.
+ __ Addu(a0, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ lbu(a0, MemOperand(a0));
+ __ sll(at, a0, kPointerSizeLog2);
+ __ Addu(at, kInterpreterDispatchTableRegister, at);
+ __ lw(at, MemOperand(at));
+  // TODO(rmcilroy): Make dispatch table point to code entries to avoid
+  // untagging and header removal.
+ __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+ // Load return value into v0.
+ __ lw(v0, MemOperand(fp, -kPointerSize -
+ StandardFrameConstants::kFixedFrameSizeFromFp));
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Drop receiver + arguments.
+ __ Drop(1); // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ Jump(ra);
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
#define cp s7
#define kLithiumScratchReg s3
#define kLithiumScratchReg2 s4
+#define kInterpreterBytecodeOffsetRegister t0
+#define kInterpreterBytecodeArrayRegister t1
+#define kInterpreterDispatchTableRegister t2
#define kLithiumScratchDouble f30
#define kDoubleRegZero f28
// Used on mips64r6 for compare operations.
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// o a1: the JS function object being called.
+// o cp: our context
+// o fp: the caller's frame pointer
+// o sp: stack pointer
+// o ra: return address
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-mips64.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+
+ __ Push(ra, fp, cp, a1);
+ __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+  // Get the bytecode array from the function object and load it into
+  // kInterpreterBytecodeArrayRegister.
+ __ ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister, a4);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a4,
+ Operand(zero_reg));
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, a4, a4);
+ __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, a4,
+ Operand(BYTECODE_ARRAY_TYPE));
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size from the BytecodeArray object.
+ __ ld(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ __ Dsubu(a5, sp, Operand(a4));
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ __ Branch(&ok, hs, a5, Operand(a2));
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ // Note: there should always be at least one stack slot for the return
+ // register in the register file.
+ Label loop_header;
+ __ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ push(a5);
+ // Continue loop if not done.
+ __ Dsubu(a4, a4, Operand(kPointerSize));
+    __ Branch(&loop_header, gt, a4, Operand(zero_reg));
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ __ LoadRoot(at, Heap::kStackLimitRootIndex);
+ __ Branch(&ok, hs, sp, Operand(at));
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ bind(&ok);
+ }
+
+ // Load bytecode offset and dispatch table into registers.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterDispatchTableRegister,
+ Heap::kInterpreterTableRootIndex);
+ __ Daddu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+
+ // Dispatch to the first bytecode handler for the function.
+ __ Daddu(a0, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ lbu(a0, MemOperand(a0));
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ Daddu(at, kInterpreterDispatchTableRegister, at);
+ __ ld(at, MemOperand(at));
+  // TODO(rmcilroy): Make dispatch table point to code entries to avoid
+  // untagging and header removal.
+ __ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+ // Load return value into v0.
+ __ ld(v0, MemOperand(fp, -kPointerSize -
+ StandardFrameConstants::kFixedFrameSizeFromFp));
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ // Drop receiver + arguments.
+ __ Drop(1); // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ Jump(ra);
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
}
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right. The actual argument count matches the formal parameter
+// count expected by the function.
+//
+// The live registers are:
+// o rdi: the JS function object being called
+// o rsi: our context
+// o rbp: the caller's frame pointer
+// o rsp: stack pointer (pointing to return address)
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-x64.h for its layout.
+// TODO(rmcilroy): We will need to include the current bytecode pointer in the
+// frame.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ pushq(rbp); // Caller's frame pointer.
+ __ movp(rbp, rsp);
+ __ Push(rsi); // Callee's context.
+ __ Push(rdi); // Callee's JS function.
+
+  // Get the bytecode array from the function object and load it into r14.
+ __ movp(r14, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(r14, FieldOperand(r14, SharedFunctionInfo::kFunctionDataOffset));
+
+ if (FLAG_debug_code) {
+ // Check function data field is actually a BytecodeArray object.
+ __ AssertNotSmi(r14);
+ __ CmpObjectType(r14, BYTECODE_ARRAY_TYPE, rax);
+ __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ }
+
+ // Allocate the local and temporary register file on the stack.
+ {
+ // Load frame size from the BytecodeArray object.
+ __ movl(rcx, FieldOperand(r14, BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ Label ok;
+ __ movp(rdx, rsp);
+ __ subp(rdx, rcx);
+ __ CompareRoot(rdx, Heap::kRealStackLimitRootIndex);
+ __ j(above_equal, &ok, Label::kNear);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ bind(&ok);
+
+ // If ok, push undefined as the initial value for all register file entries.
+ // Note: there should always be at least one stack slot for the return
+ // register in the register file.
+ Label loop_header;
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ Push(rdx);
+ // Continue loop if not done.
+ __ subp(rcx, Immediate(kPointerSize));
+ __ j(not_equal, &loop_header, Label::kNear);
+ }
+
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's prologue:
+ // - Support profiler (specifically profiling_counter).
+ // - Call ProfileEntryHookStub when isolate has a function_entry_hook.
+ // - Allow simulator stop operations if FLAG_stop_at is set.
+ // - Deal with sloppy mode functions which need to replace the
+ // receiver with the global proxy when called as functions (without an
+ // explicit receiver object).
+ // - Code aging of the BytecodeArray object.
+ // - Supporting FLAG_trace.
+ //
+ // The following items are also not done here, and will probably be done using
+ // explicit bytecodes instead:
+ // - Allocating a new local context if applicable.
+ // - Setting up a local binding to the this function, which is used in
+ // derived constructors with super calls.
+ // - Setting new.target if required.
+ // - Dealing with REST parameters (only if
+ // https://codereview.chromium.org/1235153006 doesn't land by then).
+ // - Dealing with argument objects.
+
+ // Perform stack guard check.
+ {
+ Label ok;
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok, Label::kNear);
+ __ CallRuntime(Runtime::kStackGuard, 0);
+ __ bind(&ok);
+ }
+
+ // Load bytecode offset and dispatch table into registers.
+ __ movp(r12, Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(r15, Heap::kInterpreterTableRootIndex);
+ __ addp(r15, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
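+  // r12, r14 and r15 correspond to InterpreterBytecodeOffsetReg(),
+  // InterpreterBytecodeArrayReg() and InterpreterDispatchTableReg() in the
+  // x64 Linkage definitions.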
+
+ // Dispatch to the first bytecode handler for the function.
+ __ movzxbp(rax, Operand(r14, r12, times_1, 0));
+ __ movp(rax, Operand(r15, rax, times_pointer_size, 0));
+  // TODO(rmcilroy): Make dispatch table point to code entries to avoid
+  // untagging and header removal.
+ __ addp(rax, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(rax);
+}
+
+
+void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
+ // TODO(rmcilroy): List of things not currently dealt with here but done in
+ // fullcodegen's EmitReturnSequence.
+ // - Supporting FLAG_trace for Runtime::TraceExit.
+ // - Support profiler (specifically decrementing profiling_counter
+ // appropriately and calling out to HandleInterrupts if necessary).
+
+  // Load return value into rax.
+ __ movp(rax, Operand(rbp, -kPointerSize -
+ StandardFrameConstants::kFixedFrameSizeFromFp));
+ // Leave the frame (also dropping the register file).
+ __ leave();
+  // Return, dropping receiver + arguments.
+ // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+ __ Ret(1 * kPointerSize, rcx);
+}
+
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm);
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
+#include "src/unique.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/node-test-utils.h"
}
+Matcher<Node*> IsIntPtrSub(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsInt64Sub(lhs_matcher, rhs_matcher)
+ : IsInt32Sub(lhs_matcher, rhs_matcher);
+}
+
+
+Matcher<Node*> IsWordShl(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsWord64Shl(lhs_matcher, rhs_matcher)
+ : IsWord32Shl(lhs_matcher, rhs_matcher);
+}
+
+
TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
}
+TARGET_TEST_F(InterpreterAssemblerTest, Return) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ m.Return();
+ Graph* graph = m.GetCompletedGraph();
+
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* tail_call_node = end->InputAt(0);
+
+ EXPECT_EQ(CallDescriptor::kInterpreterDispatch,
+ m.call_descriptor()->kind());
+ Matcher<Unique<HeapObject>> exit_trampoline(
+ Unique<HeapObject>::CreateImmovable(
+ isolate()->builtins()->InterpreterExitTrampoline()));
+ EXPECT_THAT(
+ tail_call_node,
+ IsTailCall(m.call_descriptor(), IsHeapConstant(exit_trampoline),
+ IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ graph->start(), graph->start()));
+ }
+}
+
+
TARGET_TEST_F(InterpreterAssemblerTest, BytecodeArg) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
EXPECT_THAT(
load_reg_node,
m.IsLoad(kMachPtr, IsLoadFramePointer(),
- IsInt32Sub(IsInt32Constant(m.kFirstRegisterOffsetFromFp),
- IsWord32Shl(reg_index_node,
- IsInt32Constant(kPointerSizeLog2)))));
+ IsIntPtrSub(IsInt32Constant(m.kFirstRegisterOffsetFromFp),
+ IsWordShl(reg_index_node,
+ IsInt32Constant(kPointerSizeLog2)))));
}
}
store_reg_node,
m.IsStore(StoreRepresentation(kMachPtr, kNoWriteBarrier),
IsLoadFramePointer(),
- IsInt32Sub(IsInt32Constant(m.kFirstRegisterOffsetFromFp),
- IsWord32Shl(reg_index_node,
- IsInt32Constant(kPointerSizeLog2))),
+ IsIntPtrSub(IsInt32Constant(m.kFirstRegisterOffsetFromFp),
+ IsWordShl(reg_index_node,
+ IsInt32Constant(kPointerSizeLog2))),
store_value));
}
}
IS_BINOP_MATCHER(Uint32LessThan)
IS_BINOP_MATCHER(Uint32LessThanOrEqual)
IS_BINOP_MATCHER(Int64Add)
+IS_BINOP_MATCHER(Int64Sub)
IS_BINOP_MATCHER(Float32Max)
IS_BINOP_MATCHER(Float32Min)
IS_BINOP_MATCHER(Float32Equal)
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64Add(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt64Sub(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeFloat64ToUint32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeInt32ToFloat64(const Matcher<Node*>& input_matcher);