"src/compiler/instruction-selector.h",
"src/compiler/instruction.cc",
"src/compiler/instruction.h",
+ "src/compiler/interpreter-assembler.cc",
+ "src/compiler/interpreter-assembler.h",
"src/compiler/js-builtin-reducer.cc",
"src/compiler/js-builtin-reducer.h",
"src/compiler/js-context-relaxation.cc",
"src/interface-descriptors.h",
"src/interpreter-irregexp.cc",
"src/interpreter-irregexp.h",
+ "src/interpreter/bytecodes.cc",
+ "src/interpreter/bytecodes.h",
+ "src/interpreter/interpreter.cc",
+ "src/interpreter/interpreter.h",
"src/isolate.cc",
"src/isolate.h",
"src/json-parser.h",
"+src",
"-src/compiler",
"+src/compiler/pipeline.h",
+ "-src/interpreter",
+ "+src/interpreter/bytecodes.h",
+ "+src/interpreter/interpreter.h",
"-src/libplatform",
"-include/libplatform"
]
static Register ReturnValue2Reg() { return r1; }
static Register JSCallFunctionReg() { return r1; }
static Register ContextReg() { return cp; }
+ static Register InterpreterBytecodePointerReg() { return r6; }
+ static Register InterpreterDispatchTableReg() { return r8; }
static Register RuntimeCallFunctionReg() { return r1; }
static Register RuntimeCallArgCountReg() { return r0; }
static RegList CCalleeSaveRegisters() {
}
-CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(
- Zone* zone, const MachineSignature* sig) {
- return LH::GetInterpreterDispatchDescriptor(zone, sig);
+CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
+ return LH::GetInterpreterDispatchDescriptor(zone);
}
} // namespace compiler
static Register ReturnValue2Reg() { return x1; }
static Register JSCallFunctionReg() { return x1; }
static Register ContextReg() { return cp; }
+ static Register InterpreterBytecodePointerReg() { return x19; }
+ static Register InterpreterDispatchTableReg() { return x20; }
static Register RuntimeCallFunctionReg() { return x1; }
static Register RuntimeCallArgCountReg() { return x0; }
static RegList CCalleeSaveRegisters() {
}
-CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(
- Zone* zone, const MachineSignature* sig) {
- return LH::GetInterpreterDispatchDescriptor(zone, sig);
+CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
+ return LH::GetInterpreterDispatchDescriptor(zone);
}
} // namespace compiler
static Register ReturnValue2Reg() { return edx; }
static Register JSCallFunctionReg() { return edi; }
static Register ContextReg() { return esi; }
+ static Register InterpreterBytecodePointerReg() { return edi; }
+ static Register InterpreterDispatchTableReg() { return ebx; }
static Register RuntimeCallFunctionReg() { return ebx; }
static Register RuntimeCallArgCountReg() { return eax; }
static RegList CCalleeSaveRegisters() {
}
-CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(
- Zone* zone, const MachineSignature* sig) {
- return LH::GetInterpreterDispatchDescriptor(zone, sig);
+CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
+ return LH::GetInterpreterDispatchDescriptor(zone);
}
} // namespace compiler
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/interpreter-assembler.h"
+
+#include <ostream>
+
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-type.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/schedule.h"
+#include "src/frames.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/macro-assembler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
+ interpreter::Bytecode bytecode)
+ : bytecode_(bytecode),
+ raw_assembler_(new RawMachineAssembler(
+ isolate, new (zone) Graph(zone),
+ Linkage::GetInterpreterDispatchDescriptor(zone), kMachPtr,
+ InstructionSelector::SupportedMachineOperatorFlags())),
+ end_node_(nullptr),
+ code_generated_(false) {}
+
+
+InterpreterAssembler::~InterpreterAssembler() {}
+
+
+Handle<Code> InterpreterAssembler::GenerateCode() {
+ DCHECK(!code_generated_);
+
+ End();
+
+ Schedule* schedule = raw_assembler_->Export();
+ // TODO(rmcilroy): use a non-testing code generator.
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(
+ isolate(), raw_assembler_->call_descriptor(), graph(), schedule);
+
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_trace_ignition_codegen) {
+ OFStream os(stdout);
+ code->Disassemble(interpreter::Bytecodes::ToString(bytecode_), os);
+ os << std::flush;
+ }
+#endif
+
+ code_generated_ = true;
+ return code;
+}
+
+
+Node* InterpreterAssembler::BytecodePointer() {
+ return raw_assembler_->Parameter(Linkage::kInterpreterBytecodeParameter);
+}
+
+
+Node* InterpreterAssembler::DispatchTablePointer() {
+ return raw_assembler_->Parameter(Linkage::kInterpreterDispatchTableParameter);
+}
+
+
+Node* InterpreterAssembler::FramePointer() {
+ return raw_assembler_->LoadFramePointer();
+}
+
+
+Node* InterpreterAssembler::RegisterFrameOffset(int index) {
+ DCHECK_LE(index, kMaxRegisterIndex);
+ return Int32Constant(kFirstRegisterOffsetFromFp -
+ (index << kPointerSizeLog2));
+}
+
+
+Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
+ return raw_assembler_->Int32Sub(
+ Int32Constant(kFirstRegisterOffsetFromFp),
+ raw_assembler_->Word32Shl(index, Int32Constant(kPointerSizeLog2)));
+}
+
+
+Node* InterpreterAssembler::BytecodeArg(int delta) {
+ DCHECK_LT(delta, interpreter::Bytecodes::NumberOfArguments(bytecode_));
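+  // Arguments are stored in the bytes immediately following the bytecode itself.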
+ return raw_assembler_->Load(kMachUint8, BytecodePointer(),
+ Int32Constant(1 + delta));
+}
+
+
+Node* InterpreterAssembler::LoadRegister(int index) {
+ return raw_assembler_->Load(kMachPtr, FramePointer(),
+ RegisterFrameOffset(index));
+}
+
+
+Node* InterpreterAssembler::LoadRegister(Node* index) {
+ return raw_assembler_->Load(kMachPtr, FramePointer(),
+ RegisterFrameOffset(index));
+}
+
+
+Node* InterpreterAssembler::StoreRegister(Node* value, int index) {
+ return raw_assembler_->Store(kMachPtr, FramePointer(),
+ RegisterFrameOffset(index), value);
+}
+
+
+Node* InterpreterAssembler::StoreRegister(Node* value, Node* index) {
+ return raw_assembler_->Store(kMachPtr, FramePointer(),
+ RegisterFrameOffset(index), value);
+}
+
+
+Node* InterpreterAssembler::Advance(int delta) {
+ return raw_assembler_->IntPtrAdd(BytecodePointer(), Int32Constant(delta));
+}
+
+
+void InterpreterAssembler::Dispatch() {
+ Node* new_bytecode_pointer = Advance(interpreter::Bytecodes::Size(bytecode_));
+ Node* target_bytecode =
+ raw_assembler_->Load(kMachUint8, new_bytecode_pointer);
+
+ // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
+ // from code object on every dispatch.
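+  // The dispatch table holds one code entry per bytecode value, so scale the
+  // bytecode by the pointer size to compute its entry's offset.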
+ Node* target_code_object = raw_assembler_->Load(
+ kMachPtr, DispatchTablePointer(),
+ raw_assembler_->Word32Shl(target_bytecode,
+ Int32Constant(kPointerSizeLog2)));
+
+  // If the order of the parameters changes, the call signature below needs to
+  // be updated to match.
+ STATIC_ASSERT(0 == Linkage::kInterpreterBytecodeParameter);
+ STATIC_ASSERT(1 == Linkage::kInterpreterDispatchTableParameter);
+ Node* tail_call = graph()->NewNode(common()->TailCall(call_descriptor()),
+ target_code_object, new_bytecode_pointer,
+ DispatchTablePointer(), graph()->start(),
+ graph()->start());
+ schedule()->AddTailCall(raw_assembler_->CurrentBlock(), tail_call);
+
+  // The tail call ends the bytecode handler, so it is used as the graph's end input.
+ SetEndInput(tail_call);
+}
+
+
+void InterpreterAssembler::SetEndInput(Node* input) {
+ DCHECK(!end_node_);
+ end_node_ = input;
+}
+
+
+void InterpreterAssembler::End() {
+ DCHECK(end_node_);
+ // TODO(rmcilroy): Support more than 1 end input.
+ Node* end = graph()->NewNode(common()->End(1), end_node_);
+ graph()->SetEnd(end);
+}
+
+
+// RawMachineAssembler delegate helpers:
+Isolate* InterpreterAssembler::isolate() { return raw_assembler_->isolate(); }
+
+
+Graph* InterpreterAssembler::graph() { return raw_assembler_->graph(); }
+
+
+CallDescriptor* InterpreterAssembler::call_descriptor() const {
+ return raw_assembler_->call_descriptor();
+}
+
+
+Schedule* InterpreterAssembler::schedule() {
+ return raw_assembler_->schedule();
+}
+
+
+MachineOperatorBuilder* InterpreterAssembler::machine() {
+ return raw_assembler_->machine();
+}
+
+
+CommonOperatorBuilder* InterpreterAssembler::common() {
+ return raw_assembler_->common();
+}
+
+
+Node* InterpreterAssembler::Int32Constant(int value) {
+ return raw_assembler_->Int32Constant(value);
+}
+
+
+Node* InterpreterAssembler::NumberConstant(double value) {
+ return raw_assembler_->NumberConstant(value);
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INTERPRETER_ASSEMBLER_H_
+#define V8_COMPILER_INTERPRETER_ASSEMBLER_H_
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+// Do not include anything from src/compiler here!
+#include "src/allocation.h"
+#include "src/base/smart-pointers.h"
+#include "src/frames.h"
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class Zone;
+
+namespace compiler {
+
+class CallDescriptor;
+class CommonOperatorBuilder;
+class Graph;
+class MachineOperatorBuilder;
+class Node;
+class Operator;
+class RawMachineAssembler;
+class Schedule;
+
+class InterpreterAssembler {
+ public:
+ InterpreterAssembler(Isolate* isolate, Zone* zone,
+ interpreter::Bytecode bytecode);
+ virtual ~InterpreterAssembler();
+
+ Handle<Code> GenerateCode();
+
+ // Constants.
+ Node* Int32Constant(int value);
+ Node* NumberConstant(double value);
+
+ // Returns the bytecode argument |index| for the current bytecode.
+ Node* BytecodeArg(int index);
+
+ // Loads from and stores to the interpreter register file.
+ Node* LoadRegister(int index);
+ Node* LoadRegister(Node* index);
+ Node* StoreRegister(Node* value, int index);
+ Node* StoreRegister(Node* value, Node* index);
+
+  // Dispatch to the next bytecode handler.
+ void Dispatch();
+
+ protected:
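+  // Offset of interpreter register 0 from the frame pointer. Registers with
+  // higher indices are stored at successively lower addresses (see
+  // RegisterFrameOffset).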
+ static const int kFirstRegisterOffsetFromFp =
+ -kPointerSize - StandardFrameConstants::kFixedFrameSizeFromFp;
+
+ // TODO(rmcilroy): Increase this when required.
+ static const int kMaxRegisterIndex = 255;
+
+ // Close the graph.
+ void End();
+
+ // Protected helpers (for testing) which delegate to RawMachineAssembler.
+ CallDescriptor* call_descriptor() const;
+ Graph* graph();
+
+ private:
+ // Returns the pointer to the current bytecode.
+ Node* BytecodePointer();
+  // Returns the pointer to the first entry in the interpreter dispatch table.
+ Node* DispatchTablePointer();
+ // Returns the frame pointer for the current function.
+ Node* FramePointer();
+
+ // Returns the offset of register |index|.
+ Node* RegisterFrameOffset(int index);
+ Node* RegisterFrameOffset(Node* index);
+
+  // Returns BytecodePointer() advanced by |delta| bytes. Note: this does not
+  // update BytecodePointer() itself.
+ Node* Advance(int delta);
+
+  // Sets the input for the graph's end node.
+ void SetEndInput(Node* input);
+
+ // Private helpers which delegate to RawMachineAssembler.
+ Isolate* isolate();
+ Schedule* schedule();
+ MachineOperatorBuilder* machine();
+ CommonOperatorBuilder* common();
+
+ interpreter::Bytecode bytecode_;
+ base::SmartPointer<RawMachineAssembler> raw_assembler_;
+ Node* end_node_;
+ bool code_generated_;
+
+ DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_INTERPRETER_ASSEMBLER_H_
"c-call");
}
- static CallDescriptor* GetInterpreterDispatchDescriptor(
- Zone* zone, const MachineSignature* msig) {
- DCHECK_EQ(0U, msig->parameter_count());
- LocationSignature::Builder locations(zone, msig->return_count(),
- msig->parameter_count());
- AddReturnLocations(&locations);
+ static CallDescriptor* GetInterpreterDispatchDescriptor(Zone* zone) {
+ MachineSignature::Builder types(zone, 0, 2);
+ LocationSignature::Builder locations(zone, 0, 2);
+
+ // Add registers for fixed parameters passed via interpreter dispatch.
+ STATIC_ASSERT(0 == Linkage::kInterpreterBytecodeParameter);
+ types.AddParam(kMachPtr);
+ locations.AddParam(regloc(LinkageTraits::InterpreterBytecodePointerReg()));
+
+ STATIC_ASSERT(1 == Linkage::kInterpreterDispatchTableParameter);
+ types.AddParam(kMachPtr);
+ locations.AddParam(regloc(LinkageTraits::InterpreterDispatchTableReg()));
+
LinkageLocation target_loc = LinkageLocation::AnyRegister();
return new (zone) CallDescriptor( // --
CallDescriptor::kInterpreterDispatch, // kind
kMachNone, // target MachineType
target_loc, // target location
- msig, // machine_sig
+ types.Build(), // machine_sig
locations.Build(), // location_sig
0, // js_parameter_count
Operator::kNoProperties, // properties
// Creates a call descriptor for interpreter handler code stubs. These are not
// intended to be called directly but are instead dispatched to by the
// interpreter.
- static CallDescriptor* GetInterpreterDispatchDescriptor(
- Zone* zone, const MachineSignature* sig);
+ static CallDescriptor* GetInterpreterDispatchDescriptor(Zone* zone);
// Get the location of an (incoming) parameter to this function.
LinkageLocation GetParameterLocation(int index) const {
// A special {OsrValue} index to indicate the context spill slot.
static const int kOsrContextSpillSlotIndex = -1;
+ // Special parameter indices used to pass fixed register data through
+ // interpreter dispatches.
+ static const int kInterpreterBytecodeParameter = 0;
+ static const int kInterpreterDispatchTableParameter = 1;
+
private:
CallDescriptor* const incoming_;
static Register ReturnValue2Reg() { return v1; }
static Register JSCallFunctionReg() { return a1; }
static Register ContextReg() { return cp; }
+ static Register InterpreterBytecodePointerReg() { return s0; }
+ static Register InterpreterDispatchTableReg() { return s1; }
static Register RuntimeCallFunctionReg() { return a1; }
static Register RuntimeCallArgCountReg() { return a0; }
static RegList CCalleeSaveRegisters() {
}
-CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(
- Zone* zone, const MachineSignature* sig) {
- return LH::GetInterpreterDispatchDescriptor(zone, sig);
+CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
+ return LH::GetInterpreterDispatchDescriptor(zone);
}
} // namespace compiler
static Register ReturnValue2Reg() { return v1; }
static Register JSCallFunctionReg() { return a1; }
static Register ContextReg() { return cp; }
+ static Register InterpreterBytecodePointerReg() { return s0; }
+ static Register InterpreterDispatchTableReg() { return s1; }
static Register RuntimeCallFunctionReg() { return a1; }
static Register RuntimeCallArgCountReg() { return a0; }
static RegList CCalleeSaveRegisters() {
}
-CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(
- Zone* zone, const MachineSignature* sig) {
- return LH::GetInterpreterDispatchDescriptor(zone, sig);
+CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
+ return LH::GetInterpreterDispatchDescriptor(zone);
}
} // namespace compiler
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/raw-machine-assembler.h"
+
#include "src/code-factory.h"
#include "src/compiler/pipeline.h"
-#include "src/compiler/raw-machine-assembler.h"
#include "src/compiler/scheduler.h"
namespace v8 {
#ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
#define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
+#include "src/assembler.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
-
namespace v8 {
namespace internal {
namespace compiler {
const MachineSignature* machine_sig() const {
return call_descriptor_->GetMachineSignature();
}
+ BasicBlock* CurrentBlock();
// Finalizes the schedule and exports it to be used for code generation. Note
// that this RawMachineAssembler becomes invalid after export.
return NewNode(machine()->Load(rep), base, index, graph()->start(),
graph()->start());
}
- void Store(MachineType rep, Node* base, Node* value) {
- Store(rep, base, IntPtrConstant(0), value);
+ Node* Store(MachineType rep, Node* base, Node* value) {
+ return Store(rep, base, IntPtrConstant(0), value);
}
- void Store(MachineType rep, Node* base, Node* index, Node* value) {
- NewNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)), base,
- index, value, graph()->start(), graph()->start());
+ Node* Store(MachineType rep, Node* base, Node* index, Node* value) {
+ return NewNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)),
+ base, index, value, graph()->start(), graph()->start());
}
// Arithmetic Operations.
Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
return Load(rep, PointerConstant(address), Int32Constant(offset));
}
- void StoreToPointer(void* address, MachineType rep, Node* node) {
- Store(rep, PointerConstant(address), node);
+ Node* StoreToPointer(void* address, MachineType rep, Node* node) {
+ return Store(rep, PointerConstant(address), node);
}
Node* StringConstant(const char* string) {
return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
Node* MakeNode(const Operator* op, int input_count, Node** inputs);
BasicBlock* Use(Label* label);
BasicBlock* EnsureBlock(Label* label);
- BasicBlock* CurrentBlock();
Isolate* isolate_;
Graph* graph_;
static Register ReturnValue2Reg() { return rdx; }
static Register JSCallFunctionReg() { return rdi; }
static Register ContextReg() { return rsi; }
+ static Register InterpreterBytecodePointerReg() { return rbx; }
+ static Register InterpreterDispatchTableReg() { return rdi; }
static Register RuntimeCallFunctionReg() { return rbx; }
static Register RuntimeCallArgCountReg() { return rax; }
static RegList CCalleeSaveRegisters() {
}
-CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(
- Zone* zone, const MachineSignature* sig) {
- return LH::GetInterpreterDispatchDescriptor(zone, sig);
+CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
+ return LH::GetInterpreterDispatchDescriptor(zone);
}
} // namespace compiler
// Flags for optimization types.
DEFINE_BOOL(optimize_for_size, false,
"Enables optimizations which favor memory size over execution "
- "speed.")
+ "speed")
DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_BOOL(string_slices, true, "use string slices")
+// Flags for Ignition.
+DEFINE_BOOL(ignition, false, "use ignition interpreter")
+DEFINE_STRING(ignition_filter, "~~", "filter for ignition interpreter")
+DEFINE_BOOL(trace_ignition_codegen, false,
+ "trace the codegen of ignition interpreter bytecode handlers")
+
// Flags for Crankshaft.
DEFINE_BOOL(crankshaft, true, "use crankshaft")
DEFINE_STRING(hydrogen_filter, "*", "optimization filter")
set_weak_stack_trace_list(Smi::FromInt(0));
+ // Will be filled in by Interpreter::Initialize().
+ set_interpreter_table(empty_fixed_array());
+
set_allocation_sites_scratchpad(
*factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
InitializeAllocationSitesScratchpad();
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
V(Object, weak_stack_trace_list, WeakStackTraceList) \
V(Object, code_stub_context, CodeStubContext) \
- V(JSObject, code_stub_exports_object, CodeStubExportsObject)
+ V(JSObject, code_stub_exports_object, CodeStubExportsObject) \
+ V(FixedArray, interpreter_table, InterpreterTable)
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
roots_[kMaterializedObjectsRootIndex] = objects;
}
+ void public_set_interpreter_table(FixedArray* table) {
+ roots_[kInterpreterTableRootIndex] = table;
+ }
+
// Generated code can embed this address to get access to the roots.
Object** roots_array_start() { return roots_; }
--- /dev/null
+include_rules = [
+ "+src/compiler/interpreter-assembler.h",
+ "-src/v8.h",
+]
--- /dev/null
+rmcilroy@chromium.org
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// static
+const char* Bytecodes::ToString(Bytecode bytecode) {
+ switch (bytecode) {
+#define CASE(Name, _) \
+ case Bytecode::k##Name: \
+ return #Name;
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return "";
+}
+
+
+// static
+int Bytecodes::NumberOfArguments(Bytecode bytecode) {
+ switch (bytecode) {
+#define CASE(Name, arg_count) \
+ case Bytecode::k##Name: \
+ return arg_count;
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+ return 0;
+}
+
+
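+// A bytecode occupies one byte for the bytecode itself plus one byte per
+// argument.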
+// static
+int Bytecodes::Size(Bytecode bytecode) {
+ return NumberOfArguments(bytecode) + 1;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode) {
+ return os << Bytecodes::ToString(bytecode);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODES_H_
+#define V8_INTERPRETER_BYTECODES_H_
+
+#include <iosfwd>
+
+// Clients of this interface shouldn't depend on lots of interpreter internals.
+// Do not include anything from src/interpreter here!
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// The list of bytecodes which are interpreted by the interpreter.
+#define BYTECODE_LIST(V) \
+ V(LoadLiteral0, 1) \
+ V(Return, 0)
+
+enum class Bytecode : uint8_t {
+#define DECLARE_BYTECODE(Name, _) k##Name,
+ BYTECODE_LIST(DECLARE_BYTECODE)
+#undef DECLARE_BYTECODE
+#define COUNT_BYTECODE(x, _) +1
+ // The COUNT_BYTECODE macro will turn this into kLast = -1 +1 +1... which will
+ // evaluate to the same value as the last real bytecode.
+ kLast = -1 BYTECODE_LIST(COUNT_BYTECODE)
+#undef COUNT_BYTECODE
+};
+
+class Bytecodes {
+ public:
+ // Returns string representation of |bytecode|.
+ static const char* ToString(Bytecode bytecode);
+
+ // Returns the number of arguments expected by |bytecode|.
+  static int NumberOfArguments(Bytecode bytecode);
+
+ // Returns the size of the bytecode including its arguments.
+  static int Size(Bytecode bytecode);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Bytecodes);
+};
+
+
+std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODES_H_
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/interpreter.h"
+
+#include "src/compiler.h"
+#include "src/compiler/interpreter-assembler.h"
+#include "src/factory.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+using compiler::Node;
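+// Shorthand used by the bytecode handler generators below.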
+#define __ assembler->
+
+
+Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {}
+
+
+void Interpreter::Initialize(bool create_heap_objects) {
+ DCHECK(FLAG_ignition);
+ if (create_heap_objects) {
+ Zone zone;
+ HandleScope scope(isolate_);
+ Handle<FixedArray> handler_table = isolate_->factory()->NewFixedArray(
+ static_cast<int>(Bytecode::kLast) + 1, TENURED);
+ isolate_->heap()->public_set_interpreter_table(*handler_table);
+
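+// Generate a handler code stub for each bytecode and install it in the handler
+// table at the index equal to the bytecode's value, which is the same index
+// used by InterpreterAssembler::Dispatch().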
+#define GENERATE_CODE(Name, _) \
+ { \
+ compiler::InterpreterAssembler assembler(isolate_, &zone, \
+ Bytecode::k##Name); \
+ Do##Name(&assembler); \
+ handler_table->set(static_cast<int>(Bytecode::k##Name), \
+ *assembler.GenerateCode()); \
+ }
+ BYTECODE_LIST(GENERATE_CODE)
+#undef GENERATE_CODE
+ }
+}
+
+
+// Load literal '0' into the register index specified by the bytecode's
+// argument.
+void Interpreter::DoLoadLiteral0(compiler::InterpreterAssembler* assembler) {
+ Node* register_index = __ BytecodeArg(0);
+ __ StoreRegister(__ NumberConstant(0), register_index);
+ __ Dispatch();
+}
+
+
+// Return the value in register 0.
+void Interpreter::DoReturn(compiler::InterpreterAssembler* assembler) {
+  // TODO(rmcilroy): Jump to the exit trampoline.
+}
+
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_INTERPRETER_H_
+#define V8_INTERPRETER_INTERPRETER_H_
+
+// Clients of this interface shouldn't depend on lots of interpreter internals.
+// Do not include anything from src/interpreter other than
+// src/interpreter/bytecodes.h here!
+#include "src/base/macros.h"
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class CompilationInfo;
+
+namespace compiler {
+class InterpreterAssembler;
+}
+
+namespace interpreter {
+
+class Interpreter {
+ public:
+ explicit Interpreter(Isolate* isolate);
+ virtual ~Interpreter() {}
+
+ void Initialize(bool create_heap_objects);
+
+ private:
+// Bytecode handler generator functions.
+#define DECLARE_BYTECODE_HANDLER_GENERATOR(Name, _) \
+ void Do##Name(compiler::InterpreterAssembler* assembler);
+ BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
+#undef DECLARE_BYTECODE_HANDLER_GENERATOR
+
+ Isolate* isolate_;
+
+ DISALLOW_COPY_AND_ASSIGN(Interpreter);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_INTERPRETER_H_
#include "src/heap-profiler.h"
#include "src/hydrogen.h"
#include "src/ic/stub-cache.h"
+#include "src/interpreter/interpreter.h"
#include "src/lithium-allocator.h"
#include "src/log.h"
#include "src/messages.h"
Sampler* sampler = logger_->sampler();
if (sampler && sampler->IsActive()) sampler->Stop();
+ delete interpreter_;
+ interpreter_ = NULL;
+
delete deoptimizer_data_;
deoptimizer_data_ = NULL;
builtins_.TearDown();
new CallInterfaceDescriptorData[CallDescriptors::NUMBER_OF_DESCRIPTORS];
cpu_profiler_ = new CpuProfiler(this);
heap_profiler_ = new HeapProfiler(heap());
+ interpreter_ = new interpreter::Interpreter(this);
// Enable logging before setting up the heap
logger_->SetUp(this);
bootstrapper_->Initialize(create_heap_objects);
builtins_.SetUp(this, create_heap_objects);
+ if (FLAG_ignition) {
+ interpreter_->Initialize(create_heap_objects);
+ }
+
if (FLAG_log_internal_timer_events) {
set_event_logger(Logger::DefaultEventLoggerSentinel);
}
class Redirection;
class Simulator;
+namespace interpreter {
+class Interpreter;
+}
// Static indirection table for handles to constants. If a frame
// element represents a constant, the data contains an index into
HeapProfiler* heap_profiler_;
FunctionEntryHook function_entry_hook_;
+ interpreter::Interpreter* interpreter_;
+
typedef std::pair<InterruptCallback, void*> InterruptEntry;
std::queue<InterruptEntry> api_interrupts_queue_;
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/interpreter-assembler-unittest.h"
+
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+const interpreter::Bytecode kBytecodes[] = {
+#define DEFINE_BYTECODE(Name, _) interpreter::Bytecode::k##Name,
+ BYTECODE_LIST(DEFINE_BYTECODE)
+#undef DEFINE_BYTECODE
+};
+
+
+Graph*
+InterpreterAssemblerTest::InterpreterAssemblerForTest::GetCompletedGraph() {
+ End();
+ return graph();
+}
+
+
+Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
+ const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher) {
+ return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher,
+ graph()->start(), graph()->start());
+}
+
+
+Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
+ const Matcher<StoreRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher) {
+ return ::i::compiler::IsStore(rep_matcher, base_matcher, index_matcher,
+ value_matcher, graph()->start(),
+ graph()->start());
+}
+
+
+Matcher<Node*> IsIntPtrAdd(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsInt64Add(lhs_matcher, rhs_matcher)
+ : IsInt32Add(lhs_matcher, rhs_matcher);
+}
+
+
+Matcher<Node*> IsIntPtrConstant(intptr_t value) {
+#ifdef V8_TARGET_ARCH_64_BIT
+ return IsInt64Constant(value);
+#else
+ return IsInt32Constant(value);
+#endif
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ m.Dispatch();
+ Graph* graph = m.GetCompletedGraph();
+
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* tail_call_node = end->InputAt(0);
+
+ Matcher<Node*> next_bytecode_matcher =
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeParameter),
+ IsInt32Constant(interpreter::Bytecodes::Size(bytecode)));
+ Matcher<Node*> target_bytecode_matcher =
+ m.IsLoad(kMachUint8, next_bytecode_matcher, IsIntPtrConstant(0));
+ Matcher<Node*> code_target_matcher = m.IsLoad(
+ kMachPtr, IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsWord32Shl(target_bytecode_matcher,
+ IsInt32Constant(kPointerSizeLog2)));
+
+ EXPECT_EQ(CallDescriptor::kInterpreterDispatch,
+ m.call_descriptor()->kind());
+ EXPECT_THAT(
+ tail_call_node,
+ IsTailCall(m.call_descriptor(), code_target_matcher,
+ next_bytecode_matcher,
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ graph->start(), graph->start()));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, BytecodeArg) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ int number_of_args = interpreter::Bytecodes::NumberOfArguments(bytecode);
+ for (int i = 0; i < number_of_args; i++) {
+ Node* load_arg_node = m.BytecodeArg(i);
+ EXPECT_THAT(load_arg_node,
+ m.IsLoad(kMachUint8,
+ IsParameter(Linkage::kInterpreterBytecodeParameter),
+ IsInt32Constant(1 + i)));
+ }
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, LoadRegisterFixed) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ for (int i = 0; i < m.kMaxRegisterIndex; i++) {
+ Node* load_reg_node = m.LoadRegister(i);
+ EXPECT_THAT(load_reg_node,
+ m.IsLoad(kMachPtr, IsLoadFramePointer(),
+ IsInt32Constant(m.kFirstRegisterOffsetFromFp -
+ (i << kPointerSizeLog2))));
+ }
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* reg_index_node = m.Int32Constant(44);
+ Node* load_reg_node = m.LoadRegister(reg_index_node);
+ EXPECT_THAT(
+ load_reg_node,
+ m.IsLoad(kMachPtr, IsLoadFramePointer(),
+ IsInt32Sub(IsInt32Constant(m.kFirstRegisterOffsetFromFp),
+ IsWord32Shl(reg_index_node,
+ IsInt32Constant(kPointerSizeLog2)))));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, StoreRegisterFixed) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* store_value = m.Int32Constant(0xdeadbeef);
+ for (int i = 0; i < m.kMaxRegisterIndex; i++) {
+ Node* store_reg_node = m.StoreRegister(store_value, i);
+ EXPECT_THAT(store_reg_node,
+ m.IsStore(StoreRepresentation(kMachPtr, kNoWriteBarrier),
+ IsLoadFramePointer(),
+ IsInt32Constant(m.kFirstRegisterOffsetFromFp -
+ (i << kPointerSizeLog2)),
+ store_value));
+ }
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* store_value = m.Int32Constant(0xdeadbeef);
+ Node* reg_index_node = m.Int32Constant(44);
+ Node* store_reg_node = m.StoreRegister(store_value, reg_index_node);
+ EXPECT_THAT(
+ store_reg_node,
+ m.IsStore(StoreRepresentation(kMachPtr, kNoWriteBarrier),
+ IsLoadFramePointer(),
+ IsInt32Sub(IsInt32Constant(m.kFirstRegisterOffsetFromFp),
+ IsWord32Shl(reg_index_node,
+ IsInt32Constant(kPointerSizeLog2))),
+ store_value));
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_COMPILER_INTERPRETER_ASSEMBLER_UNITTEST_H_
+#define V8_UNITTESTS_COMPILER_INTERPRETER_ASSEMBLER_UNITTEST_H_
+
+#include "src/compiler/interpreter-assembler.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock-support.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+using ::testing::Matcher;
+
+class InterpreterAssemblerTest : public TestWithIsolateAndZone {
+ public:
+ InterpreterAssemblerTest() {}
+ ~InterpreterAssemblerTest() override {}
+
+ class InterpreterAssemblerForTest final : public InterpreterAssembler {
+ public:
+ InterpreterAssemblerForTest(InterpreterAssemblerTest* test,
+ interpreter::Bytecode bytecode)
+ : InterpreterAssembler(test->isolate(), test->zone(), bytecode) {}
+ ~InterpreterAssemblerForTest() override {}
+
+ Graph* GetCompletedGraph();
+
+ Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher);
+ Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher);
+
+ using InterpreterAssembler::call_descriptor;
+ using InterpreterAssembler::graph;
+ using InterpreterAssembler::kMaxRegisterIndex;
+ using InterpreterAssembler::kFirstRegisterOffsetFromFp;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InterpreterAssemblerForTest);
+ };
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNITTESTS_COMPILER_INTERPRETER_ASSEMBLER_UNITTEST_H_
#include <vector>
#include "src/assembler.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
const Matcher<Node*> input_matcher_;
};
+class IsParameterMatcher final : public NodeMatcher {
+ public:
+ explicit IsParameterMatcher(const Matcher<int>& index_matcher)
+ : NodeMatcher(IrOpcode::kParameter), index_matcher_(index_matcher) {}
+
+ void DescribeTo(std::ostream* os) const override {
+ *os << "is a Parameter node with index(";
+ index_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(ParameterIndexOf(node->op()), "index",
+ index_matcher_, listener));
+ }
+
+ private:
+ const Matcher<int> index_matcher_;
+};
+
} // namespace
}
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,
const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
}
+Matcher<Node*> IsParameter(const Matcher<int> index_matcher) {
+ return MakeMatcher(new IsParameterMatcher(index_matcher));
+}
+
+
+Matcher<Node*> IsLoadFramePointer() {
+ return MakeMatcher(new NodeMatcher(IrOpcode::kLoadFramePointer));
+}
+
+
#define IS_BINOP_MATCHER(Name) \
Matcher<Node*> Is##Name(const Matcher<Node*>& lhs_matcher, \
const Matcher<Node*>& rhs_matcher) { \
IS_BINOP_MATCHER(Int32LessThan)
IS_BINOP_MATCHER(Uint32LessThan)
IS_BINOP_MATCHER(Uint32LessThanOrEqual)
+IS_BINOP_MATCHER(Int64Add)
IS_BINOP_MATCHER(Float32Max)
IS_BINOP_MATCHER(Float32Min)
IS_BINOP_MATCHER(Float32Equal)
const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsBooleanNot(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsUint32LessThanOrEqual(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt64Add(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeFloat64ToUint32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeInt32ToFloat64(const Matcher<Node*>& input_matcher);
const Matcher<Node*>& context_matcher);
Matcher<Node*> IsNumberToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsNumberToUint32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsParameter(const Matcher<int> index_matcher);
+Matcher<Node*> IsLoadFramePointer();
} // namespace compiler
} // namespace internal
'compiler/instruction-selector-unittest.h',
'compiler/instruction-sequence-unittest.cc',
'compiler/instruction-sequence-unittest.h',
+ 'compiler/interpreter-assembler-unittest.cc',
+ 'compiler/interpreter-assembler-unittest.h',
'compiler/js-builtin-reducer-unittest.cc',
'compiler/js-context-relaxation-unittest.cc',
'compiler/js-intrinsic-lowering-unittest.cc',
'../../src/compiler/instruction-selector.h',
'../../src/compiler/instruction.cc',
'../../src/compiler/instruction.h',
+ '../../src/compiler/interpreter-assembler.cc',
+ '../../src/compiler/interpreter-assembler.h',
'../../src/compiler/js-builtin-reducer.cc',
'../../src/compiler/js-builtin-reducer.h',
'../../src/compiler/js-context-relaxation.cc',
'../../src/interface-descriptors.h',
'../../src/interpreter-irregexp.cc',
'../../src/interpreter-irregexp.h',
+ '../../src/interpreter/bytecodes.cc',
+ '../../src/interpreter/bytecodes.h',
+ '../../src/interpreter/interpreter.cc',
+ '../../src/interpreter/interpreter.h',
'../../src/isolate.cc',
'../../src/isolate.h',
'../../src/json-parser.h',