ifeq ($(liveobjectlist), on)
GYPFLAGS += -Dv8_use_liveobjectlist=true
endif
+# vfp2=off
+ifeq ($(vfp2), off)
+ GYPFLAGS += -Dv8_can_use_vfp2_instructions=false
+else
+ GYPFLAGS += -Dv8_can_use_vfp2_instructions=true
+endif
# vfp3=off
ifeq ($(vfp3), off)
GYPFLAGS += -Dv8_can_use_vfp3_instructions=false
namespace internal {
+ArmDoubleRegister ArmDoubleRegister::FromAllocationIndex(int index) {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ return DwVfpRegister::FromAllocationIndex(index);
+ } else {
+ return SoftFloatRegister::FromAllocationIndex(index);
+ }
+}
+
+
int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
ASSERT(!reg.is(kDoubleRegZero));
ASSERT(!reg.is(kScratchDoubleReg));
return reg.code();
}
+int Register::NumAllocatableRegisters() {
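+ // Without VFP2, a double value is emulated in a pair of core registers
+ // (see sfpd_lo/sfpd_hi), so kGPRsPerNonVFP2Double GPRs leave the
+ // allocatable set.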
+ if (CpuFeatures::IsSupported(VFP2)) {
+ return kMaxNumAllocatableRegisters;
+ } else {
+ return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
+ }
+}
+
+
+int DoubleRegister::NumAllocatableRegisters() {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ return DwVfpRegister::kMaxNumAllocatableRegisters;
+ } else {
+ return SoftFloatRegister::kMaxNumAllocatableRegisters;
+ }
+}
+
+
+const char* DoubleRegister::AllocationIndexToString(int index) {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ return DwVfpRegister::AllocationIndexToString(index);
+ } else {
+ return SoftFloatRegister::AllocationIndexToString(index);
+ }
+}
+
+
void CpuFeatures::Probe() {
unsigned standard_features = static_cast<unsigned>(
OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
// Core register
struct Register {
static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 8;
+ static const int kMaxNumAllocatableRegisters = 8;
+ static const int kGPRsPerNonVFP2Double = 2;
+ static int NumAllocatableRegisters();
static const int kSizeInBytes = 4;
static int ToAllocationIndex(Register reg) {
- ASSERT(reg.code() < kNumAllocatableRegisters);
+ ASSERT(reg.code() < NumAllocatableRegisters());
return reg.code();
}
static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
return from_code(index);
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
const char* const names[] = {
"r0",
"r1",
};
-// Double word VFP register.
-struct DwVfpRegister {
- static const int kNumRegisters = 16;
+struct ArmDoubleRegister {
+ static const int kMaxNumRegisters = 16;
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0, which does not fit in the immediate field of vmov instructions.
// d14: 0.0
// d15: scratch register.
static const int kNumReservedRegisters = 2;
- static const int kNumAllocatableRegisters = kNumRegisters -
+ static const int kMaxNumAllocatableRegisters = kMaxNumRegisters -
kNumReservedRegisters;
+ explicit ArmDoubleRegister(int code) { code_ = code; }
+ static int NumAllocatableRegisters();
+ static int NumRegisters() { return kMaxNumRegisters; }
+ static const char* AllocationIndexToString(int index);
+ inline static ArmDoubleRegister FromAllocationIndex(int index);
+ inline static int ToAllocationIndex(ArmDoubleRegister reg) {
+ return reg.code();
+ }
+
+ static ArmDoubleRegister from_code(int code) {
+ ArmDoubleRegister r = ArmDoubleRegister(code);
+ return r;
+ }
+
+ bool is_valid() const {
+ return 0 <= code_ && code_ < NumRegisters();
+ }
+ bool is(ArmDoubleRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+
+ int code_;
+};
+
+
+// Double word VFP register.
+struct DwVfpRegister : ArmDoubleRegister {
+ static const int kNumRegisters = 16;
- inline static int ToAllocationIndex(DwVfpRegister reg);
+ explicit DwVfpRegister(int code) : ArmDoubleRegister(code) {}
+
+ inline static int ToAllocationIndex(DwVfpRegister reg);
static DwVfpRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index);
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"d0",
"d1",
}
static DwVfpRegister from_code(int code) {
- DwVfpRegister r = { code };
- return r;
+ return DwVfpRegister(code);
}
// Supports d0 to d15; can later be extended to d31.
*m = (code_ & 0x10) >> 4;
*vm = code_ & 0x0F;
}
+};
- int code_;
+
+// Software floating point register, used when VFP hardware is unavailable.
+struct SoftFloatRegister : ArmDoubleRegister {
+ static const int kNumRegisters = 1;
+ static const int kMaxNumAllocatableRegisters = kNumRegisters;
+
+ explicit SoftFloatRegister(int code) : ArmDoubleRegister(code) {}
+
+ static SoftFloatRegister FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ return from_code(index);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ const char* const names[] = {
+ "sfpd0"
+ };
+ return names[index];
+ }
+
+ static SoftFloatRegister from_code(int code) {
+ SoftFloatRegister r = SoftFloatRegister(code);
+ return r;
+ }
};
-typedef DwVfpRegister DoubleRegister;
+typedef ArmDoubleRegister DoubleRegister;
// Support for the VFP registers s0 to s31 (d0 to d15).
const SwVfpRegister s30 = { 30 };
const SwVfpRegister s31 = { 31 };
-const DwVfpRegister no_dreg = { -1 };
-const DwVfpRegister d0 = { 0 };
-const DwVfpRegister d1 = { 1 };
-const DwVfpRegister d2 = { 2 };
-const DwVfpRegister d3 = { 3 };
-const DwVfpRegister d4 = { 4 };
-const DwVfpRegister d5 = { 5 };
-const DwVfpRegister d6 = { 6 };
-const DwVfpRegister d7 = { 7 };
-const DwVfpRegister d8 = { 8 };
-const DwVfpRegister d9 = { 9 };
-const DwVfpRegister d10 = { 10 };
-const DwVfpRegister d11 = { 11 };
-const DwVfpRegister d12 = { 12 };
-const DwVfpRegister d13 = { 13 };
-const DwVfpRegister d14 = { 14 };
-const DwVfpRegister d15 = { 15 };
+const DwVfpRegister no_dreg = DwVfpRegister(-1);
+const DwVfpRegister d0 = DwVfpRegister(0);
+const DwVfpRegister d1 = DwVfpRegister(1);
+const DwVfpRegister d2 = DwVfpRegister(2);
+const DwVfpRegister d3 = DwVfpRegister(3);
+const DwVfpRegister d4 = DwVfpRegister(4);
+const DwVfpRegister d5 = DwVfpRegister(5);
+const DwVfpRegister d6 = DwVfpRegister(6);
+const DwVfpRegister d7 = DwVfpRegister(7);
+const DwVfpRegister d8 = DwVfpRegister(8);
+const DwVfpRegister d9 = DwVfpRegister(9);
+const DwVfpRegister d10 = DwVfpRegister(10);
+const DwVfpRegister d11 = DwVfpRegister(11);
+const DwVfpRegister d12 = DwVfpRegister(12);
+const DwVfpRegister d13 = DwVfpRegister(13);
+const DwVfpRegister d14 = DwVfpRegister(14);
+const DwVfpRegister d15 = DwVfpRegister(15);
+
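+// Core register pair holding a soft-float double value when VFP2 is not
+// available (low word in sfpd_lo, high word in sfpd_hi).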
+const Register sfpd_lo = { kRegister_r6_Code };
+const Register sfpd_hi = { kRegister_r7_Code };
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across the notification; this is important for
+ // compiled stubs that tail-call the runtime on deopt, passing their
+ // parameters in registers.
+ __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kNotifyICMiss, 0);
+ __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
+ }
+
+ __ mov(ip, lr); // Stash the miss continuation
+ __ add(sp, sp, Operand(kPointerSize)); // Ignore state
+ __ pop(lr); // Restore LR to continuation in JSFunction
+ __ mov(pc, ip); // Jump to miss handler
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
namespace internal {
+CodeStubInterfaceDescriptor*
+ KeyedLoadFastElementStub::GetInterfaceDescriptor(Isolate* isolate) {
+ static CodeStubInterfaceDescriptor* result = NULL;
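+ // Initialized lazily on first use. The register order follows the
+ // KeyedLoadIC calling convention: receiver in r1, key in r0.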
+ if (result == NULL) {
+ Handle<Code> miss = isolate->builtins()->KeyedLoadIC_Miss();
+ static Register registers[] = { r1, r0 };
+ static CodeStubInterfaceDescriptor info = {
+ 2,
+ registers,
+ miss
+ };
+ result = &info;
+ }
+ return result;
+}
+
+
#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public CodeStub {
+class ConvertToDoubleStub : public PlatformCodeStub {
public:
ConvertToDoubleStub(Register result_reg_1,
Register result_reg_2,
const Register exponent = r2;
const Register heapnumbermap = r5;
const Register heapnumber = r0;
- const DoubleRegister double_base = d1;
- const DoubleRegister double_exponent = d2;
- const DoubleRegister double_result = d3;
- const DoubleRegister double_scratch = d0;
+ const DwVfpRegister double_base = d1;
+ const DwVfpRegister double_exponent = d2;
+ const DwVfpRegister double_result = d3;
+ const DwVfpRegister double_scratch = d0;
const SwVfpRegister single_scratch = s0;
const Register scratch = r9;
const Register scratch2 = r7;
void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- StoreBufferOverflowStub stub(kSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
+ SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ CEntryStub save_doubles(1, mode);
+ StoreBufferOverflowStub stub(mode);
+ // These stubs might already be in the snapshot; detect that and don't
+ // regenerate them, since regenerating would mess up the code stub
+ // initialization state.
+ Code* save_doubles_code = NULL;
+ Code* store_buffer_overflow_code = NULL;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope2(VFP2);
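+ // The scope lets the stub generators emit VFP2 instructions; the else
+ // branch below generates the same stubs restricted to core registers.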
+ save_doubles_code = *save_doubles.GetCode();
+ store_buffer_overflow_code = *stub.GetCode();
+ } else {
+ save_doubles_code = *save_doubles.GetCode();
+ store_buffer_overflow_code = *stub.GetCode();
+ }
+ save_doubles_code->set_is_pregenerated(true);
+ store_buffer_overflow_code->set_is_pregenerated(true);
+ }
+ ISOLATE->set_fp_stubs_generated(true);
}
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
};
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
};
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
};
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
};
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
+class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
};
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
public:
NumberToStringStub() { }
};
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
Register GetRegThatIsNotOneOf(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.
-class RegExpCEntryStub: public CodeStub {
+class RegExpCEntryStub: public PlatformCodeStub {
public:
RegExpCEntryStub() {}
virtual ~RegExpCEntryStub() {}
// keep the code which called into native pinned in the memory. Currently the
// simplest approach is to generate such stub early enough so it can never be
// moved by GC
-class DirectCEntryStub: public CodeStub {
+class DirectCEntryStub: public PlatformCodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
};
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
{
CpuFeatures::Scope use_vfp(VFP2);
- DoubleRegister input = d0;
- DoubleRegister result = d1;
- DoubleRegister double_scratch1 = d2;
- DoubleRegister double_scratch2 = d3;
+ DwVfpRegister input = d0;
+ DwVfpRegister result = d1;
+ DwVfpRegister double_scratch1 = d2;
+ DwVfpRegister double_scratch2 = d3;
Register temp1 = r4;
Register temp2 = r5;
Register temp3 = r6;
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
+ DwVfpRegister input,
+ DwVfpRegister result,
+ DwVfpRegister double_scratch1,
+ DwVfpRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3) {
class CodeGenerator: public AstVisitor {
public:
+ CodeGenerator() {
+ InitializeAstVisitor();
+ }
+
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
int pos,
bool right_here = false);
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
class MathExpGenerator : public AllStatic {
public:
static void EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
+ DwVfpRegister input,
+ DwVfpRegister result,
+ DwVfpRegister double_scratch1,
+ DwVfpRegister double_scratch2,
Register temp1,
Register temp2,
Register temp3);
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
- optimized_code_->entry() + pc_offset);
+ compiled_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
}
+void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index) {
+ //
+ // FROM TO <-fp
+ // | .... | | .... |
+ // +-------------------------+ +-------------------------+
+ // | JSFunction continuation | | JSFunction continuation |
+ // +-------------------------+ +-------------------------+<-sp
+ // | | saved frame (fp) |
+ // | +=========================+<-fp
+ // | | JSFunction context |
+ // v +-------------------------+
+ // | COMPILED_STUB marker | fp = saved frame
+ // +-------------------------+ cp = JSFunction context
+ // | |
+ // | ... |
+ // | |
+ // +-------------------------+<-sp
+ //
+ //
+ int output_frame_size = 1 * kPointerSize;
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, 0);
+ Code* notify_miss =
+ isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
+ output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(notify_miss->entry()));
+
+ ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+ int major_key = compiled_code_->major_key();
+ CodeStubInterfaceDescriptor* descriptor =
+ isolate_->code_stub_interface_descriptors()[major_key];
+ Handle<Code> miss_ic(descriptor->deoptimization_handler);
+ output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
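+ // Recover from the top of the input frame (see the diagram above): the
+ // JSFunction continuation, the caller's frame pointer and the context.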
+ unsigned input_frame_size = input_->GetFrameSize();
+ intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
+ output_frame->SetFrameSlot(0, value);
+ value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
+ output_frame->SetRegister(fp.code(), value);
+ output_frame->SetFp(value);
+ value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
+ output_frame->SetRegister(cp.code(), value);
+
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ ASSERT(opcode == Translation::REGISTER);
+ USE(opcode);
+ int input_reg = iterator->Next();
+ intptr_t input_value = input_->GetRegister(input_reg);
+ output_frame->SetRegister(r1.code(), input_value);
+
+ int32_t next = iterator->Next();
+ opcode = static_cast<Translation::Opcode>(next);
+ ASSERT(opcode == Translation::REGISTER);
+ input_reg = iterator->Next();
+ input_value = input_->GetRegister(input_reg);
+ output_frame->SetRegister(r0.code(), input_value);
+
+ ASSERT(frame_index == 0);
+ output_[frame_index] = output_frame;
+}
+
+
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
Builtins* builtins = isolate_->builtins();
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
Isolate* isolate = masm()->isolate();
- CpuFeatures::Scope scope(VFP3);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
const int kDoubleRegsSize =
- kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
-
- // Save all VFP registers before messing with them.
- DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
- DwVfpRegister last =
- DwVfpRegister::FromAllocationIndex(
- DwVfpRegister::kNumAllocatableRegisters - 1);
- ASSERT(last.code() > first.code());
- ASSERT((last.code() - first.code()) ==
- (DwVfpRegister::kNumAllocatableRegisters - 1));
+ kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
+
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ // Save all VFP registers before messing with them.
+ DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
+ DwVfpRegister last =
+ DwVfpRegister::FromAllocationIndex(
+ DwVfpRegister::kMaxNumAllocatableRegisters - 1);
+ ASSERT(last.code() > first.code());
+ ASSERT((last.code() - first.code()) ==
+ (DwVfpRegister::kMaxNumAllocatableRegisters - 1));
#ifdef DEBUG
- for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
- ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
- (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
- }
+ int max = DwVfpRegister::kMaxNumAllocatableRegisters - 1;
+ for (int i = 0; i <= max; i++) {
+ ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
+ (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
+ }
#endif
- __ vstm(db_w, sp, first, last);
+ __ vstm(db_w, sp, first, last);
+ } else {
+ __ sub(sp, sp, Operand(kDoubleRegsSize));
+ }
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
__ str(r2, MemOperand(r1, offset));
}
- // Copy VFP registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ vldr(d0, sp, src_offset);
- __ vstr(d0, r1, dst_offset);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ // Copy VFP registers to
+ // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ vldr(d0, sp, src_offset);
+ __ vstr(d0, r1, dst_offset);
+ }
}
// Remove the bailout id, eventually return address, and the saved registers
// frame description.
__ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
+ Label pop_loop_header;
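+ // Branch to the loop condition first so that nothing is popped when the
+ // region is empty.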
+ __ b(&pop_loop_header);
__ bind(&pop_loop);
__ pop(r4);
__ str(r4, MemOperand(r3, 0));
__ add(r3, r3, Operand(sizeof(uint32_t)));
+ __ bind(&pop_loop_header);
__ cmp(r2, sp);
__ b(ne, &pop_loop);
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop;
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
// Outer loop state: r0 = current "FrameDescription** output_",
// r1 = one past the last FrameDescription**.
__ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
__ ldr(r0, MemOperand(r0, Deoptimizer::output_offset())); // r0 is output_.
__ add(r1, r0, Operand(r1, LSL, 2));
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: r2 = current FrameDescription*, r3 = loop index.
__ ldr(r2, MemOperand(r0, 0)); // output_[ix]
__ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
__ add(r6, r2, Operand(r3));
__ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
__ push(r7);
+ __ bind(&inner_loop_header);
__ cmp(r3, Operand(0));
__ b(ne, &inner_push_loop); // test for gt?
__ add(r0, r0, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
__ cmp(r0, r1);
__ b(lt, &outer_push_loop);
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
register_spills_[i] = NULL;
}
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
double_register_spills_[i] = NULL;
}
}
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
#ifdef DEBUG
instr->VerifyCall();
#endif
Representation to = instr->to();
if (from.IsTagged()) {
if (to.IsDouble()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
+ // float->double conversion on non-VFP2 requires an extra scratch
+ // register. For convenience, just mark the elements register as "UseTemp"
+ // so that it can be used as a temp during the float->double conversion,
+ // once it is no longer needed after the float load.
+ bool needs_temp =
+ !CpuFeatures::IsSupported(VFP2) &&
+ (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
+ LOperand* external_pointer = needs_temp
+ ? UseTempRegister(instr->elements())
+ : UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
}
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (info()->IsOptimizing()) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
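+ // Stubs have no on-stack parameter area; each parameter arrives in the
+ // fixed register named by the stub's interface descriptor.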
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ Register reg = descriptor->register_params[instr->index()];
+ return DefineFixed(result, reg);
+ }
}
void MarkAsCall() { is_call_ = true; }
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return is_call_; }
+ bool ClobbersRegisters() const { return is_call_; }
+ bool ClobbersDoubleRegisters() const { return is_call_; }
+
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+ LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+ LOperand* double_register_spills_[
+ DoubleRegister::kMaxNumAllocatableRegisters];
};
HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
- CpuFeatures::Scope scope1(VFP3);
- CpuFeatures::Scope scope2(ARMv7);
CodeStub::GenerateFPStubs();
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop_at");
+ }
#endif
- // r1: Callee's JS function.
- // cp: Callee's context.
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
+ // r1: Callee's JS function.
+ // cp: Callee's context.
+ // fp: Caller's frame pointer.
+ // lr: Caller's pc.
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ cmp(r5, Operand(0));
- __ b(eq, &ok);
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). r5 is zero for method calls and non-zero for
+ // function calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ Label ok;
+ __ cmp(r5, Operand(0));
+ __ b(eq, &ok);
+ int receiver_offset = scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ str(r2, MemOperand(sp, receiver_offset));
+ __ bind(&ok);
+ }
}
-
info()->set_prologue_offset(masm_->pc_offset());
- {
+ if (NeedsEagerFrame()) {
PredictableCodeSizeScope predictible_code_size_scope(
masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
// The following three instructions must remain together and unmodified
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
+ frame_is_built_ = true;
}
// Reserve space for the stack slots needed by the code.
}
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is in r1.
}
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred build frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ }
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred destroy frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(frame_is_built_);
+ __ pop(ip);
+ __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
+ frame_is_built_ = false;
+ }
__ jmp(code->exit());
}
}
-// Each entry in the jump table generates one instruction and inlines one
-// 32bit data after it.
+// Each entry in the jump table may generate up to seven instruction slots
+// (including embedded data) if it also has to build a frame.
if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
- deopt_jump_table_.length() * 2)) {
+ deopt_jump_table_.length() * 7)) {
Abort("Generated code is too large");
}
- // Block the constant pool emission during the jump table emission.
- __ BlockConstPoolFor(deopt_jump_table_.length());
__ RecordComment("[ Deoptimisation jump table");
Label table_start;
__ bind(&table_start);
+ Label needs_frame_not_call;
+ Label needs_frame_is_call;
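+ // Entries come in four shapes: with or without frame construction, crossed
+ // with lazy (call-like, linking lr) or eager (plain jump) deoptimization;
+ // the frame-building sequences are emitted once and shared through the two
+ // labels above.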
for (int i = 0; i < deopt_jump_table_.length(); i++) {
__ bind(&deopt_jump_table_[i].label);
- __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
- __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
+ Address entry = deopt_jump_table_[i].address;
+ if (deopt_jump_table_[i].needs_frame) {
+ __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
+ if (deopt_jump_table_[i].is_lazy_deopt) {
+ if (needs_frame_is_call.is_bound()) {
+ __ b(&needs_frame_is_call);
+ } else {
+ __ bind(&needs_frame_is_call);
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ __ mov(lr, Operand(pc), LeaveCC, al);
+ __ mov(pc, ip);
+ }
+ } else {
+ if (needs_frame_not_call.is_bound()) {
+ __ b(&needs_frame_not_call);
+ } else {
+ __ bind(&needs_frame_not_call);
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ __ mov(pc, ip);
+ }
+ }
+ } else {
+ if (deopt_jump_table_[i].is_lazy_deopt) {
+ __ mov(lr, Operand(pc), LeaveCC, al);
+ __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
+ } else {
+ __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
+ }
+ }
+ masm()->CheckConstPool(false, false);
}
- ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
- deopt_jump_table_.length() * 2);
__ RecordComment("]");
+ // Force constant pool emission at the end of the deopt jump table to make
+ // sure that no constant pools are emitted after.
+ masm()->CheckConstPool(true, false);
+
// The deoptimization jump table is the last part of the instruction
// sequence. Mark the generated code as done unless we bailed out.
if (!is_aborted()) status_ = DONE;
}
-DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
- return DoubleRegister::FromAllocationIndex(index);
+DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
+ return DwVfpRegister::FromAllocationIndex(index);
}
}
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
ASSERT(op->IsDoubleRegister());
return ToDoubleRegister(op->index());
}
-DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DoubleRegister dbl_scratch) {
+DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+ SwVfpRegister flt_scratch,
+ DwVfpRegister dbl_scratch) {
if (op->IsDoubleRegister()) {
return ToDoubleRegister(op->index());
} else if (op->IsConstantOperand()) {
translation,
arguments_index,
arguments_count);
- int closure_id = *info()->closure() != *environment->closure()
+ bool has_closure_id = !info()->closure().is_null() &&
+ *info()->closure() != *environment->closure();
+ int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
ASSERT(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
- if (cc == al) {
+ bool needs_lazy_deopt = info()->IsStub();
+ ASSERT(info()->IsStub() || frame_is_built_);
+ if (cc == al && !needs_lazy_deopt) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last().address != entry)) {
- deopt_jump_table_.Add(JumpTableEntry(entry), zone());
+ (deopt_jump_table_.last().address != entry) ||
+ (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
+ (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
+ JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
+ deopt_jump_table_.Add(table_entry, zone());
}
__ b(cc, &deopt_jump_table_.last().label);
}
LOperand* left_argument,
LOperand* right_argument,
Token::Value op) {
+ CpuFeatures::Scope vfp_scope(VFP2);
Register left = ToRegister(left_argument);
Register right = ToRegister(right_argument);
void LCodeGen::DoConstantD(LConstantD* instr) {
ASSERT(instr->result()->IsDoubleRegister());
DwVfpRegister result = ToDoubleRegister(instr->result());
+ CpuFeatures::Scope scope(VFP2);
double v = instr->value();
__ Vmov(result, v, scratch0());
}
__ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
- DoubleRegister left_reg = ToDoubleRegister(left);
- DoubleRegister right_reg = ToDoubleRegister(right);
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister left_reg = ToDoubleRegister(left);
+ DwVfpRegister right_reg = ToDoubleRegister(right);
+ DwVfpRegister result_reg = ToDoubleRegister(instr->result());
Label check_nan_left, check_zero, return_left, return_right, done;
__ VFPCompareAndSetFlags(left_reg, right_reg);
__ b(vs, &check_nan_left);
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->left());
- DoubleRegister right = ToDoubleRegister(instr->right());
- DoubleRegister result = ToDoubleRegister(instr->result());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister left = ToDoubleRegister(instr->left());
+ DwVfpRegister right = ToDoubleRegister(instr->right());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
case Token::ADD:
__ vadd(result, left, right);
__ cmp(reg, Operand(0));
EmitBranch(true_block, false_block, ne);
} else if (r.IsDouble()) {
- DoubleRegister reg = ToDoubleRegister(instr->value());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
// Test the double value. Zero and NaN are false.
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ CpuFeatures::Scope scope(VFP2);
// heap number -> false iff +0, -0, or NaN.
- DoubleRegister dbl_scratch = double_scratch0();
+ DwVfpRegister dbl_scratch = double_scratch0();
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ b(ne, ¬_heap_number);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
+ CpuFeatures::Scope scope(VFP2);
// Compare left and right operands as doubles and load the
// resulting flags into the normal status register.
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in r0.
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- __ add(sp, sp, Operand(sp_delta));
+ if (NeedsEagerFrame()) {
+ int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ __ add(sp, sp, Operand(sp_delta));
+ }
+ if (info()->IsStub()) {
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ }
__ Jump(lr);
}
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(result.low(), scratch0(), additional_offset);
- __ vcvt_f64_f32(result, result.low());
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vldr(result, scratch0(), additional_offset);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ vldr(result.low(), scratch0(), additional_offset);
+ __ vcvt_f64_f32(result, result.low());
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ vldr(result, scratch0(), additional_offset);
+ }
+ } else {
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
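+ // Widen the binary32 value to binary64 manually: extract mantissa,
+ // exponent and sign, rebias the exponent from 127 to 1023 (mapping 0 and
+ // 0xff to 0 and 0x7ff), and reassemble into sfpd_hi:sfpd_lo.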
+ Register value = external_pointer;
+ __ ldr(value, MemOperand(scratch0(), additional_offset));
+ __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask));
+
+ __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits));
+ __ and_(scratch0(), scratch0(),
+ Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+ Label exponent_rebiased;
+ __ teq(scratch0(), Operand(0x00));
+ __ b(eq, &exponent_rebiased);
+
+ __ teq(scratch0(), Operand(0xff));
+ __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq);
+ __ b(eq, &exponent_rebiased);
+
+ // Rebias exponent.
+ __ add(scratch0(),
+ scratch0(),
+ Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+ __ bind(&exponent_rebiased);
+ __ and_(sfpd_hi, value, Operand(kBinary32SignMask));
+ __ orr(sfpd_hi, sfpd_hi,
+ Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord));
+
+ // Shift mantissa.
+ static const int kMantissaShiftForHiWord =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaShiftForLoWord =
+ kBitsPerInt - kMantissaShiftForHiWord;
+
+ __ orr(sfpd_hi, sfpd_hi,
+ Operand(sfpd_lo, LSR, kMantissaShiftForHiWord));
+ __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord));
+
+ } else {
+ __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset));
+ __ ldr(sfpd_hi, MemOperand(scratch0(),
+ additional_offset + kPointerSize));
+ }
}
} else {
Register result = ToRegister(instr->result());
key = ToRegister(instr->key());
}
- Operand operand = key_is_constant
- ? Operand(((constant_key + instr->additional_index()) <<
- element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
- : Operand(key, LSL, shift_size);
- __ add(elements, elements, operand);
+ int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
+ ((constant_key + instr->additional_index()) << element_size_shift);
if (!key_is_constant) {
- __ add(elements, elements,
- Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- (instr->additional_index() << element_size_shift)));
- }
-
- __ vldr(result, elements, 0);
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
+ __ add(elements, elements, Operand(key, LSL, shift_size));
+ }
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ __ add(elements, elements, Operand(base_offset));
+ __ vldr(result, elements, 0);
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
+ }
+ } else {
+ __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
+ __ ldr(sfpd_lo, MemOperand(elements, base_offset));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ ASSERT(kPointerSize == sizeof(kHoleNanLower32));
+ __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
+ }
}
}
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(VFP2);
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
public:
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
Register scratch = scratch0();
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
__ vsqrt(result, input);
}
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister temp = ToDoubleRegister(instr->temp());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ DwVfpRegister temp = ToDoubleRegister(instr->temp());
// Note that according to ECMA-262 15.8.2.13:
// Math.pow(-Infinity, 0.5) == Infinity
void LCodeGen::DoPower(LPower* instr) {
+ CpuFeatures::Scope scope(VFP2);
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
void LCodeGen::DoRandom(LRandom* instr) {
+ CpuFeatures::Scope scope(VFP2);
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
void LCodeGen::DoMathExp(LMathExp* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
- DoubleRegister double_scratch2 = double_scratch0();
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+ DwVfpRegister double_scratch2 = double_scratch0();
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ CpuFeatures::Scope scope(VFP2);
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ CpuFeatures::Scope scope(VFP2);
DwVfpRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register key = no_reg;
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ CpuFeatures::Scope scope(VFP2);
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ CpuFeatures::Scope scope(VFP2);
LOperand* input = instr->value();
LOperand* output = instr->result();
}
+// Convert unsigned integer with specified number of leading zeroes in binary
+// representation to IEEE 754 double.
+// Integer to convert is passed in register hiword.
+// Resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
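+// For example, hiword == 0x80000000 with leading_zeroes == 0 produces the
+// bit pattern hiword:loword == 0x41E00000:0x00000000, i.e. 2^31 as a double.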
+static void GenerateUInt2Double(MacroAssembler* masm,
+ Register hiword,
+ Register loword,
+ Register scratch,
+ int leading_zeroes) {
+ const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+ const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+ const int mantissa_shift_for_hi_word =
+ meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+ const int mantissa_shift_for_lo_word =
+ kBitsPerInt - mantissa_shift_for_hi_word;
+ masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
+ if (mantissa_shift_for_hi_word > 0) {
+ masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
+ masm->orr(hiword, scratch,
+ Operand(hiword, LSR, mantissa_shift_for_hi_word));
+ } else {
+ masm->mov(loword, Operand(0, RelocInfo::NONE));
+ masm->orr(hiword, scratch,
+ Operand(hiword, LSL, -mantissa_shift_for_hi_word));
+ }
+
+ // If the least significant bit of the biased exponent was not 1, it has
+ // been corrupted by the most significant bit of the mantissa; clear it.
+ if (!(biased_exponent & 1)) {
+ masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
+ }
+}
+
+
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness) {
Label slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
- DoubleRegister dbl_scratch = double_scratch0();
+ DwVfpRegister dbl_scratch = double_scratch0();
SwVfpRegister flt_scratch = dbl_scratch.low();
// Preserve the value of all registers.
__ SmiUntag(src, dst);
__ eor(src, src, Operand(0x80000000));
}
- __ vmov(flt_scratch, src);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ __ vmov(flt_scratch, src);
+ __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+ } else {
+ FloatingPointHelper::Destination dest =
+ FloatingPointHelper::kCoreRegisters;
+ FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0,
+ sfpd_lo, sfpd_hi,
+ scratch0(), s0);
+ }
} else {
- __ vmov(flt_scratch, src);
- __ vcvt_f64_u32(dbl_scratch, flt_scratch);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ __ vmov(flt_scratch, src);
+ __ vcvt_f64_u32(dbl_scratch, flt_scratch);
+ } else {
+ Label no_leading_zero, done;
+ __ tst(src, Operand(0x80000000));
+ __ b(ne, &no_leading_zero);
+
+ // Integer has at least one leading zero.
+ GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1);
+ __ b(&done);
+
+ __ bind(&no_leading_zero);
+ GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0);
+ __ bind(&done);
+ }
}
if (FLAG_inline_new) {
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
+ __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
__ Move(dst, r5);
__ b(&done);
}
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
+ } else {
+ __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
+ __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
+ }
__ add(dst, dst, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(dst, dst);
}
LNumberTagD* instr_;
};
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ DwVfpRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
Register reg = ToRegister(instr->result());
Register temp1 = ToRegister(instr->temp());
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- __ vstr(input_reg, reg, HeapNumber::kValueOffset);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ __ vstr(input_reg, reg, HeapNumber::kValueOffset);
+ } else {
+ __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
+ __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
+ }
// Now that we have finished with the object's real address tag it
__ add(reg, reg, Operand(kHeapObjectTag));
}
void LCodeGen::EmitNumberUntagD(Register input_reg,
- DoubleRegister result_reg,
+ DwVfpRegister result_reg,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env) {
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
ASSERT(!result_reg.is(double_scratch0()));
+ CpuFeatures::Scope scope(VFP2);
Label load_smi, heap_number, done;
__ cmp(scratch1, Operand(ip));
if (instr->truncating()) {
+ CpuFeatures::Scope scope(VFP2);
Register scratch3 = ToRegister(instr->temp2());
SwVfpRegister single_scratch = double_scratch.low();
ASSERT(!scratch3.is(input_reg) &&
ASSERT(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
- DoubleRegister result_reg = ToDoubleRegister(result);
+ DwVfpRegister result_reg = ToDoubleRegister(result);
EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+ CpuFeatures::Scope vfp_scope(VFP2);
+ DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+ DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
__ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ CpuFeatures::Scope scope(VFP2);
Register unclamped_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampUint8(result_reg, unclamped_reg);
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ CpuFeatures::Scope scope(VFP2);
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+ DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
Label is_smi, done, heap_number;
// Both smi and heap number cases are handled.
void LCodeGen::EnsureSpaceForLazyDeopt() {
+ if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
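+ // An eager frame is built in the prologue when there are spill slots, when
+ // non-stub code is generated, or when a non-deferred call can clobber
+ // registers; stubs without such needs build a frame lazily, only around
+ // deferred code (see NeedsDeferredFrame).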
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
Register EmitLoadRegister(LOperand* op, Register scratch);
// LOperand must be a double register.
- DoubleRegister ToDoubleRegister(LOperand* op) const;
+ DwVfpRegister ToDoubleRegister(LOperand* op) const;
// LOperand is loaded into dbl_scratch, unless already a double register.
- DoubleRegister EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DoubleRegister dbl_scratch);
+ DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
+ SwVfpRegister flt_scratch,
+ DwVfpRegister dbl_scratch);
int ToInteger32(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
+ int GetParameterCount() const { return info()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
- DoubleRegister ToDoubleRegister(int index) const;
+ DwVfpRegister ToDoubleRegister(int index) const;
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitNumberUntagD(Register input,
- DoubleRegister result,
+ DwVfpRegister result,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
LEnvironment* env);
LEnvironment* environment);
struct JumpTableEntry {
- explicit inline JumpTableEntry(Address entry)
+ inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
: label(),
- address(entry) { }
+ address(entry),
+ needs_frame(frame),
+ is_lazy_deopt(is_lazy) { }
Label label;
Address address;
+ bool needs_frame;
+ bool is_lazy_deopt;
};
void EnsureSpaceForLazyDeopt();
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
+ bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;
} else if (source->IsStackSlot()) {
__ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
+ CpuFeatures::Scope scope(VFP2);
__ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
+ CpuFeatures::Scope scope(VFP2);
__ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
} else if (saved_destination_->IsStackSlot()) {
__ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
+ CpuFeatures::Scope scope(VFP2);
__ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
} else if (saved_destination_->IsDoubleStackSlot()) {
+ CpuFeatures::Scope scope(VFP2);
__ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsUint12Encodable()) {
- // ip is overwritten while saving the value to the destination.
+ CpuFeatures::Scope scope(VFP2);
+ // ip is overwritten while saving the value to the destination.
// Therefore we can't use ip. It is OK if the read from the source
// destroys ip, since that happens before the value is read.
__ vldr(kScratchDoubleReg.low(), source_operand);
}
} else if (source->IsDoubleRegister()) {
- DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(destination), source_register);
} else {
}
} else if (source->IsDoubleStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
+ CpuFeatures::Scope scope(VFP2);
+ MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ vldr(cgen_->ToDoubleRegister(destination), source_operand);
} else {
}
-void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
ASSERT(CpuFeatures::IsSupported(VFP2));
CpuFeatures::Scope scope(VFP2);
if (!dst.is(src)) {
void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
- sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+ sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
kDoubleSize));
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
}
}
void MacroAssembler::PopSafepointRegistersAndDoubles() {
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
}
- add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+ add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
kDoubleSize));
PopSafepointRegisters();
}
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
// General purpose registers are pushed last on the stack.
- int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
+ int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
}
}
-void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(dst, d0);
const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1, kSaveFPRegs);
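+ // Double registers can only be saved across the call when VFP2 is
+ // available.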
+ SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ CEntryStub stub(1, mode);
CallStub(&stub);
}
if (use_eabi_hardfloat()) {
// In the hard floating point calling convention, we can use
// all double registers to pass doubles.
- if (num_double_arguments > DoubleRegister::kNumRegisters) {
+ if (num_double_arguments > DoubleRegister::NumRegisters()) {
stack_passed_words +=
- 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
+ 2 * (num_double_arguments - DoubleRegister::NumRegisters());
}
} else {
// In the soft floating point calling convention, every double
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
- DoubleRegister dreg2) {
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
+ DwVfpRegister dreg2) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
if (dreg2.is(d0)) {
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
Register reg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister temp_double_reg) {
+ DwVfpRegister input_reg,
+ DwVfpRegister temp_double_reg) {
Label above_zero;
Label done;
Label in_bounds;
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al);
- void Move(DoubleRegister dst, DoubleRegister src);
+ void Move(DwVfpRegister dst, DwVfpRegister src);
// Load an object from the root table.
void LoadRoot(Register destination,
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
- void SetCallCDoubleArguments(DoubleRegister dreg);
- void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
- void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
+ void SetCallCDoubleArguments(DwVfpRegister dreg);
+ void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2);
+ void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
int num_reg_arguments,
int num_double_arguments);
- void GetCFunctionDoubleResult(const DoubleRegister dst);
+ void GetCFunctionDoubleResult(const DwVfpRegister dst);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
void ClampUint8(Register output_reg, Register input_reg);
void ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister temp_double_reg);
+ DwVfpRegister input_reg,
+ DwVfpRegister temp_double_reg);
void LoadInstanceDescriptors(Register map, Register descriptors);
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class CompiledFrame;
};
}
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
-
- __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
- if (mantissa_shift_for_hi_word > 0) {
- __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
- __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
- } else {
- __ mov(loword, Operand(0, RelocInfo::NONE));
- __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
- }
-}
-
-
#undef __
#define __ ACCESS_MASM(masm())
// -- r1 : receiver
// -----------------------------------
ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
- __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
+ Handle<Code> stub = KeyedLoadFastElementStub(
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind).GetCode();
+ __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
+ } else {
+ Handle<Code> stub =
+ KeyedLoadDictionaryElementStub().GetCode();
+ __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
+ }
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
}
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow, failed_allocation;
-
- Register key = r0;
- Register receiver = r1;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // r3: elements array
-
- // Check that the index is in range.
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(key, ip);
- // Unsigned comparison catches both negative and too-large values.
- __ b(hs, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
- // r3: base pointer of external storage
-
- // We are not untagging smi key and instead work with it
- // as if it was premultiplied by 2.
- STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
-
- Register value = r2;
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ ldrsb(value, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ ldrb(value, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ ldrsh(value, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ ldrh(value, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ add(r2, r3, Operand(key, LSL, 1));
- __ vldr(s0, r2, 0);
- } else {
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- }
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ add(r2, r3, Operand(key, LSL, 2));
- __ vldr(d0, r2, 0);
- } else {
- __ add(r4, r3, Operand(key, LSL, 2));
- // r4: pointer to the beginning of the double we want to load.
- __ ldr(r2, MemOperand(r4, 0));
- __ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
- }
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // r2: value
- // For float array type:
- // s0: value (if VFP3 is supported)
- // r2: value (if VFP3 is not supported)
- // For double array type:
- // d0: value (if VFP3 is supported)
- // r2/r3: value (if VFP3 is not supported)
-
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- __ cmp(value, Operand(0xC0000000));
- __ b(mi, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
- // Now we can use r0 for the result as key is not needed any more.
- __ add(r0, r5, Operand(kHeapObjectTag));
- __ vmov(s0, value);
- __ vcvt_f64_s32(d0, s0);
- __ vstr(d0, r5, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
- // Now we can use r0 for the result as key is not needed any more.
- __ mov(r0, r5);
- Register dst_mantissa = r1;
- Register dst_exponent = r3;
- FloatingPointHelper::Destination dest =
- FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::ConvertIntToDouble(masm,
- value,
- dest,
- d0,
- dst_mantissa,
- dst_exponent,
- r9,
- s0);
- __ str(dst_mantissa, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ str(dst_exponent, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- Label box_int, done;
- __ tst(value, Operand(0xC0000000));
- __ b(ne, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- __ vmov(s0, value);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
- // registers - also when jumping due to exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
-
- __ vcvt_f64_u32(d0, s0);
- __ vstr(d0, r2, HeapNumber::kValueOffset);
-
- __ add(r0, r2, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Check whether unsigned integer fits into smi.
- Label box_int_0, box_int_1, done;
- __ tst(value, Operand(0x80000000));
- __ b(ne, &box_int_0);
- __ tst(value, Operand(0x40000000));
- __ b(ne, &box_int_1);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- Register hiword = value; // r2.
- Register loword = r3;
-
- __ bind(&box_int_0);
- // Integer does not have leading zeros.
- GenerateUInt2Double(masm, hiword, loword, r4, 0);
- __ b(&done);
-
- __ bind(&box_int_1);
- // Integer has one leading zero.
- GenerateUInt2Double(masm, hiword, loword, r4, 1);
-
-
- __ bind(&done);
- // Integer was converted to double in registers hiword:loword.
- // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
- // clobbers all registers - also when jumping due to exhausted young
- // space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT);
-
- __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
- __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r4);
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
- __ vcvt_f64_f32(d0, s0);
- __ vstr(d0, r2, HeapNumber::kValueOffset);
-
- __ add(r0, r2, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT);
- // VFP is not available, do manual single to double conversion.
-
- // r2: floating point value (binary32)
- // r3: heap number for result
-
- // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
- // the slow case from here.
- __ and_(r0, value, Operand(kBinary32MantissaMask));
-
- // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
- // the slow case from here.
- __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
- __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ teq(r1, Operand(0x00));
- __ b(eq, &exponent_rebiased);
-
- __ teq(r1, Operand(0xff));
- __ mov(r1, Operand(0x7ff), LeaveCC, eq);
- __ b(eq, &exponent_rebiased);
-
- // Rebias exponent.
- __ add(r1,
- r1,
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ and_(r2, value, Operand(kBinary32SignMask));
- value = no_reg;
- __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
- __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
-
- __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
- __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r3);
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
- __ vstr(d0, r2, HeapNumber::kValueOffset);
-
- __ add(r0, r2, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT);
-
- __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
- __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
- __ mov(r0, r4);
- __ Ret();
- }
-
- } else {
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
- }
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
-
- __ Push(r1, r0);
-
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
}
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic);
-
- // Get the elements array.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ AssertFastElements(r2);
-
- // Check that the key is within bounds.
- __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ cmp(r0, Operand(r3));
- __ b(hs, &miss_force_generic);
-
- // Load the result and make sure it's not the hole.
- __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ ldr(r4,
- MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &miss_force_generic);
- __ mov(r0, r4);
- __ Ret();
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
- Register key_reg = r0;
- Register receiver_reg = r1;
- Register elements_reg = r2;
- Register heap_number_reg = r2;
- Register indexed_double_offset = r3;
- Register scratch = r4;
- Register scratch2 = r5;
- Register scratch3 = r6;
- Register heap_number_map = r7;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
-
- // Get the elements array.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ cmp(key_reg, Operand(scratch));
- __ b(hs, &miss_force_generic);
-
- // Load the upper word of the double in the fixed array and test for NaN.
- __ add(indexed_double_offset, elements_reg,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- __ b(&miss_force_generic, eq);
-
- // Non-NaN. Allocate a new heap number and copy the double value into it.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
- heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
-
- // Don't need to reload the upper 32 bits of the double, it's already in
- // scratch.
- __ str(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kExponentOffset));
- __ ldr(scratch, FieldMemOperand(indexed_double_offset,
- FixedArray::kHeaderSize));
- __ str(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kMantissaOffset));
-
- __ mov(r0, heap_number_reg);
- __ Ret();
-
- __ bind(&slow_allocate_heapnumber);
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
}
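+// Deoptimization entries live outside the heap; wrapping their addresses
+// as external references lets generated code refer to them.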
+ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
+ return ExternalReference(entry);
+}
+
+
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
static ExternalReference page_flags(Page* page);
+ static ExternalReference ForDeoptEntry(Address entry);
+
Address address() const { return reinterpret_cast<Address>(address_); }
#ifdef ENABLE_DEBUGGER_SUPPORT
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
-bool AstVisitor::CheckStackOverflow() {
- if (stack_overflow_) return true;
- StackLimitCheck check(isolate_);
- if (!check.HasOverflowed()) return false;
- return (stack_overflow_ = true);
-}
-
-
void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
for (int i = 0; i < declarations->length(); i++) {
Visit(declarations->at(i));
class AstVisitor BASE_EMBEDDED {
public:
- AstVisitor() : isolate_(Isolate::Current()), stack_overflow_(false) { }
+ AstVisitor() {}
virtual ~AstVisitor() { }
// Stack overflow check and dynamic dispatch.
- void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); }
+ virtual void Visit(AstNode* node) = 0;
// Iteration left-to-right.
virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
virtual void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitExpressions(ZoneList<Expression*>* expressions);
- // Stack overflow tracking support.
- bool HasStackOverflow() const { return stack_overflow_; }
- bool CheckStackOverflow();
-
- // If a stack-overflow exception is encountered when visiting a
- // node, calling SetStackOverflow will make sure that the visitor
- // bails out without visiting more nodes.
- void SetStackOverflow() { stack_overflow_ = true; }
- void ClearStackOverflow() { stack_overflow_ = false; }
-
// Individual AST nodes.
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) = 0;
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
- protected:
- Isolate* isolate() { return isolate_; }
- private:
- Isolate* isolate_;
- bool stack_overflow_;
-};
+};
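+
+
+// Concrete visitors expand this macro to get the stack-overflow-checking
+// Visit dispatch plus the isolate and overflow-flag bookkeeping that used
+// to live in AstVisitor itself; a subclass constructor is expected to call
+// InitializeAstVisitor().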
+#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
+public: \
+ virtual void Visit(AstNode* node) { \
+ if (!CheckStackOverflow()) node->Accept(this); \
+ } \
+ \
+ void SetStackOverflow() { stack_overflow_ = true; } \
+ void ClearStackOverflow() { stack_overflow_ = false; } \
+ bool HasStackOverflow() const { return stack_overflow_; } \
+ \
+ bool CheckStackOverflow() { \
+ if (stack_overflow_) return true; \
+ StackLimitCheck check(isolate_); \
+ if (!check.HasOverflowed()) return false; \
+ return (stack_overflow_ = true); \
+ } \
+ \
+private: \
+ void InitializeAstVisitor() { \
+ isolate_ = Isolate::Current(); \
+ stack_overflow_ = false; \
+ } \
+ Isolate* isolate() { return isolate_; } \
+ \
+ Isolate* isolate_; \
+ bool stack_overflow_
// ----------------------------------------------------------------------------
Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(NotifyICMiss, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(NotifyOSR, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyOSR(MacroAssembler* masm);
+ static void Generate_NotifyICMiss(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
static void Generate_FunctionCall(MacroAssembler* masm);
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "code-stubs.h"
+#include "hydrogen.h"
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+
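+// Runs the back end of the hydrogen pipeline on an already-built graph and
+// emits lithium code for it as a COMPILED_STUB code object.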
+Handle<Code> HydrogenCodeStub::CodeFromGraph(HGraph* graph) {
+ graph->OrderBlocks();
+ graph->AssignDominators();
+ graph->CollectPhis();
+ graph->InsertRepresentationChanges();
+ graph->EliminateRedundantBoundsChecks();
+ LChunk* chunk = LChunk::NewChunk(graph);
+ ASSERT(chunk != NULL);
+ Handle<Code> stub = chunk->Codegen(Code::COMPILED_STUB);
+ return stub;
+}
+
+
+class CodeStubGraphBuilderBase : public HGraphBuilder {
+ public:
+ CodeStubGraphBuilderBase(Isolate* isolate, HydrogenCodeStub* stub)
+ : HGraphBuilder(&info_), info_(stub, isolate) {}
+ virtual bool BuildGraph();
+
+ protected:
+ virtual void BuildCodeStub() = 0;
+ HParameter* GetParameter(int parameter) { return parameters_[parameter]; }
+ HydrogenCodeStub* stub() { return info_.code_stub(); }
+
+ private:
+ SmartArrayPointer<HParameter*> parameters_;
+ CompilationInfoWithZone info_;
+};
+
+
+bool CodeStubGraphBuilderBase::BuildGraph() {
+ if (FLAG_trace_hydrogen) {
+ PrintF("-----------------------------------------------------------\n");
+ PrintF("Compiling stub using hydrogen\n");
+ HTracer::Instance()->TraceCompilation(&info_);
+ }
+ HBasicBlock* next_block = graph()->CreateBasicBlock();
+ next_block->SetInitialEnvironment(graph()->start_environment());
+ HGoto* jump = new(zone()) HGoto(next_block);
+ graph()->entry_block()->Finish(jump);
+ set_current_block(next_block);
+
+ int major_key = stub()->MajorKey();
+ CodeStubInterfaceDescriptor** descriptors =
+ info_.isolate()->code_stub_interface_descriptors();
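+ // The interface descriptor for each major key is created lazily, the
+ // first time a stub of that kind is compiled.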
+ if (descriptors[major_key] == NULL) {
+ descriptors[major_key] = stub()->GetInterfaceDescriptor(info_.isolate());
+ }
+
+ CodeStubInterfaceDescriptor* descriptor = descriptors[major_key];
+ parameters_.Reset(new HParameter*[descriptor->number_of_register_params]);
+
+ HGraph* graph = this->graph();
+ Zone* zone = this->zone();
+ for (int i = 0; i < descriptor->number_of_register_params; ++i) {
+ HParameter* param = new(zone) HParameter(i);
+ AddInstruction(param);
+ graph->start_environment()->Push(param);
+ parameters_[i] = param;
+ }
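+ // Record an environment simulation at the stub entry so deoptimization
+ // has a valid environment to start from.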
+ AddSimulate(BailoutId::StubEntry());
+
+ BuildCodeStub();
+
+ return true;
+}
+
+template <class Stub>
+class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
+ public:
+ explicit CodeStubGraphBuilder(Stub* stub)
+ : CodeStubGraphBuilderBase(Isolate::Current(), stub) {}
+
+ protected:
+ virtual void BuildCodeStub();
+ Stub* casted_stub() { return static_cast<Stub*>(stub()); }
+};
+
+
+template <>
+void CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
+ Zone* zone = this->zone();
+
+ HInstruction* load = BuildUncheckedMonomorphicElementAccess(
+ GetParameter(0), GetParameter(1), NULL, NULL,
+ casted_stub()->is_js_array(), casted_stub()->elements_kind(), false);
+ AddInstruction(load);
+
+ HReturn* ret = new(zone) HReturn(load);
+ current_block()->Finish(ret);
+}
+
+
+Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
+ CodeStubGraphBuilder<KeyedLoadFastElementStub> builder(this);
+ return CodeFromGraph(builder.CreateGraph());
+}
+
+
+} } // namespace v8::internal
}
-void CodeStub::GenerateCode(MacroAssembler* masm) {
- // Update the static counter each time a new code stub is generated.
- masm->isolate()->counters()->code_stubs()->Increment();
-
- // Nested stubs are not allowed for leaves.
- AllowStubCallsScope allow_scope(masm, false);
-
- // Generate the code for the stub.
- masm->set_generating_stub(true);
- NoCurrentFrameScope scope(masm);
- Generate(masm);
-}
-
-
SmartArrayPointer<const char> CodeStub::GetName() {
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
}
-void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
- Isolate* isolate = masm->isolate();
+void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) {
SmartArrayPointer<const char> name = GetName();
PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
}
+Handle<Code> PlatformCodeStub::GenerateCode() {
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+
+ // Generate the new code.
+ MacroAssembler masm(isolate, NULL, 256);
+
+ {
+ // Update the static counter each time a new code stub is generated.
+ isolate->counters()->code_stubs()->Increment();
+
+ // Nested stubs are not allowed for leaves.
+ AllowStubCallsScope allow_scope(&masm, false);
+
+ // Generate the code for the stub.
+ masm.set_generating_stub(true);
+ NoCurrentFrameScope scope(&masm);
+ Generate(&masm);
+ }
+
+ // Create the code object.
+ CodeDesc desc;
+ masm.GetCode(&desc);
+
+ // Copy the generated code into a heap object.
+ Code::Flags flags = Code::ComputeFlags(
+ static_cast<Code::Kind>(GetCodeKind()), GetICState());
+ Handle<Code> new_object = factory->NewCode(
+ desc, flags, masm.CodeObject(), NeedsImmovableCode());
+ return new_object;
+}
+
+
Handle<Code> CodeStub::GetCode() {
Isolate* isolate = Isolate::Current();
Factory* factory = isolate->factory();
{
HandleScope scope(isolate);
- // Generate the new code.
- MacroAssembler masm(isolate, NULL, 256);
- GenerateCode(&masm);
-
- // Create the code object.
- CodeDesc desc;
- masm.GetCode(&desc);
-
- // Copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(
- static_cast<Code::Kind>(GetCodeKind()),
- GetICState());
- Handle<Code> new_object = factory->NewCode(
- desc, flags, masm.CodeObject(), NeedsImmovableCode());
+ Handle<Code> new_object = GenerateCode();
new_object->set_major_key(MajorKey());
FinishCode(new_object);
- RecordCodeGeneration(*new_object, &masm);
+ RecordCodeGeneration(*new_object, isolate);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
}
-void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
- switch (elements_kind_) {
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
- break;
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
- break;
- case DICTIONARY_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
- break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
+void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) {
+ KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
}
// Lookup the code in the (possibly custom) cache.
bool FindCodeInCache(Code** code_out, Isolate* isolate);
+ // Returns information for computing the number key.
+ virtual Major MajorKey() = 0;
+ virtual int MinorKey() = 0;
+
protected:
static bool CanUseFPRegisters();
- private:
- // Nonvirtual wrapper around the stub-specific Generate function. Call
- // this function to set up the macro assembler and generate the code.
- void GenerateCode(MacroAssembler* masm);
-
// Generates the assembler code for the stub.
- virtual void Generate(MacroAssembler* masm) = 0;
+ virtual Handle<Code> GenerateCode() = 0;
+ // BinaryOpStub needs to override this.
+ virtual InlineCacheState GetICState() {
+ return UNINITIALIZED;
+ }
+
+ // Returns whether the code generated for this stub needs to be allocated as
+ // a fixed (non-moveable) code object.
+ virtual bool NeedsImmovableCode() { return false; }
+
+ private:
// Perform bookkeeping required after code generation when stub code is
// initially generated.
- void RecordCodeGeneration(Code* code, MacroAssembler* masm);
+ void RecordCodeGeneration(Code* code, Isolate* isolate);
// Finish the code object after it has been generated.
virtual void FinishCode(Handle<Code> code) { }
// registering stub in the stub cache.
virtual void Activate(Code* code) { }
- // Returns information for computing the number key.
- virtual Major MajorKey() = 0;
- virtual int MinorKey() = 0;
-
// BinaryOpStub needs to override this.
virtual int GetCodeKind();
- // BinaryOpStub needs to override this.
- virtual InlineCacheState GetICState() {
- return UNINITIALIZED;
- }
-
// Add the code to a specialized cache, specific to an individual
// stub type. Please note, this method must add the code object to a
// roots object, otherwise we will remove the code during GC.
SmartArrayPointer<const char> GetName();
virtual void PrintName(StringStream* stream);
- // Returns whether the code generated for this stub needs to be allocated as
- // a fixed (non-moveable) code object.
- virtual bool NeedsImmovableCode() { return false; }
-
// Computes the key based on major and minor.
uint32_t GetKey() {
ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
};
+class PlatformCodeStub : public CodeStub {
+ public:
+ // Retrieve the code for the stub. Generate the code if needed.
+ virtual Handle<Code> GenerateCode();
+
+ virtual int GetCodeKind() { return Code::STUB; }
+
+ protected:
+ // Generates the assembler code for the stub.
+ virtual void Generate(MacroAssembler* masm) = 0;
+};
+
+
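+// A CodeStubInterfaceDescriptor (below) records the register calling
+// convention of a hydrogen stub: how many parameters arrive in registers,
+// which registers carry them, and the code to invoke on deoptimization.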
+class HGraph;
+struct Register;
+
+
+struct CodeStubInterfaceDescriptor {
+ int number_of_register_params;
+ Register* register_params;
+ Handle<Code> deoptimization_handler;
+};
+class HydrogenCodeStub : public CodeStub {
+ public:
+ // Retrieve the code for the stub. Generate the code if needed.
+ virtual Handle<Code> GenerateCode() = 0;
+
+ virtual int GetCodeKind() { return Code::COMPILED_STUB; }
+
+ virtual CodeStubInterfaceDescriptor* GetInterfaceDescriptor(
+ Isolate* isolate) = 0;
+
+ protected:
+ Handle<Code> CodeFromGraph(HGraph* graph);
+};
+
+
// Helper interface to prepare to/restore after making runtime calls.
class RuntimeCallHelper {
public:
};
-class StackCheckStub : public CodeStub {
+class StackCheckStub : public PlatformCodeStub {
public:
StackCheckStub() { }
};
-class InterruptStub : public CodeStub {
+class InterruptStub : public PlatformCodeStub {
public:
InterruptStub() { }
};
-class ToNumberStub: public CodeStub {
+class ToNumberStub: public PlatformCodeStub {
public:
ToNumberStub() { }
};
-class FastNewClosureStub : public CodeStub {
+class FastNewClosureStub : public PlatformCodeStub {
public:
explicit FastNewClosureStub(LanguageMode language_mode)
: language_mode_(language_mode) { }
};
-class FastNewContextStub : public CodeStub {
+class FastNewContextStub : public PlatformCodeStub {
public:
static const int kMaximumSlots = 64;
};
-class FastNewBlockContextStub : public CodeStub {
+class FastNewBlockContextStub : public PlatformCodeStub {
public:
static const int kMaximumSlots = 64;
};
-class FastCloneShallowArrayStub : public CodeStub {
+class FastCloneShallowArrayStub : public PlatformCodeStub {
public:
// Maximum length of copied elements array.
static const int kMaximumClonedLength = 8;
};
-class FastCloneShallowObjectStub : public CodeStub {
+class FastCloneShallowObjectStub : public PlatformCodeStub {
public:
// Maximum number of properties in copied object.
static const int kMaximumClonedProperties = 6;
};
-class InstanceofStub: public CodeStub {
+class InstanceofStub: public PlatformCodeStub {
public:
enum Flags {
kNoFlags = 0,
};
-class MathPowStub: public CodeStub {
+class MathPowStub: public PlatformCodeStub {
public:
enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK};
};
-class BinaryOpStub: public CodeStub {
+class BinaryOpStub: public PlatformCodeStub {
public:
BinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
};
-class ICCompareStub: public CodeStub {
+class ICCompareStub: public PlatformCodeStub {
public:
ICCompareStub(Token::Value op,
CompareIC::State left,
};
-class CEntryStub : public CodeStub {
+class CEntryStub : public PlatformCodeStub {
public:
explicit CEntryStub(int result_size,
SaveFPRegsMode save_doubles = kDontSaveFPRegs)
};
-class JSEntryStub : public CodeStub {
+class JSEntryStub : public PlatformCodeStub {
public:
JSEntryStub() { }
};
-class ArgumentsAccessStub: public CodeStub {
+class ArgumentsAccessStub: public PlatformCodeStub {
public:
enum Type {
READ_ELEMENT,
};
-class RegExpExecStub: public CodeStub {
+class RegExpExecStub: public PlatformCodeStub {
public:
RegExpExecStub() { }
};
-class RegExpConstructResultStub: public CodeStub {
+class RegExpConstructResultStub: public PlatformCodeStub {
public:
RegExpConstructResultStub() { }
};
-class CallFunctionStub: public CodeStub {
+class CallFunctionStub: public PlatformCodeStub {
public:
CallFunctionStub(int argc, CallFunctionFlags flags)
: argc_(argc), flags_(flags) { }
};
-class CallConstructStub: public CodeStub {
+class CallConstructStub: public PlatformCodeStub {
public:
explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {}
};
-class KeyedLoadElementStub : public CodeStub {
+class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
public:
- explicit KeyedLoadElementStub(ElementsKind elements_kind)
- : elements_kind_(elements_kind)
- { }
+ KeyedLoadDictionaryElementStub() {}
Major MajorKey() { return KeyedLoadElement; }
- int MinorKey() { return elements_kind_; }
+ int MinorKey() { return DICTIONARY_ELEMENTS; }
void Generate(MacroAssembler* masm);
private:
- ElementsKind elements_kind_;
+ DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
+};
+
+
+class KeyedLoadFastElementStub : public HydrogenCodeStub {
+ public:
+ KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
+ bit_field_ = ElementsKindBits::encode(elements_kind) |
+ IsJSArrayBits::encode(is_js_array);
+ }
+
+ Major MajorKey() { return KeyedLoadElement; }
+ int MinorKey() { return bit_field_; }
+
+ bool is_js_array() const {
+ return IsJSArrayBits::decode(bit_field_);
+ }
+
+ ElementsKind elements_kind() const {
+ return ElementsKindBits::decode(bit_field_);
+ }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual CodeStubInterfaceDescriptor* GetInterfaceDescriptor(
+ Isolate* isolate);
+
+ private:
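+ // The minor key packs the elements kind into bits 0-7 and the JS-array
+ // flag into bit 8.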
+ class IsJSArrayBits: public BitField<bool, 8, 1> {};
+ class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
+ uint32_t bit_field_;
- DISALLOW_COPY_AND_ASSIGN(KeyedLoadElementStub);
+ DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub);
};
-class KeyedStoreElementStub : public CodeStub {
+class KeyedStoreElementStub : public PlatformCodeStub {
public:
KeyedStoreElementStub(bool is_js_array,
ElementsKind elements_kind,
};
-class ToBooleanStub: public CodeStub {
+class ToBooleanStub: public PlatformCodeStub {
public:
enum Type {
UNDEFINED,
};
-class ElementsTransitionAndStoreStub : public CodeStub {
+class ElementsTransitionAndStoreStub : public PlatformCodeStub {
public:
ElementsTransitionAndStoreStub(ElementsKind from,
ElementsKind to,
};
-class StoreArrayLiteralElementStub : public CodeStub {
+class StoreArrayLiteralElementStub : public PlatformCodeStub {
public:
StoreArrayLiteralElementStub()
: fp_registers_(CanUseFPRegisters()) { }
};
-class ProfileEntryHookStub : public CodeStub {
+class ProfileEntryHookStub : public PlatformCodeStub {
public:
explicit ProfileEntryHookStub() {}
if (print_code) {
// Print the source code if available.
FunctionLiteral* function = info->function();
- Handle<Script> script = info->script();
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- PrintF("--- Raw source ---\n");
- StringInputBuffer stream(String::cast(script->source()));
- stream.Seek(function->start_position());
- // fun->end_position() points to the last character in the stream. We
- // need to compensate by adding one to calculate the length.
- int source_len =
- function->end_position() - function->start_position() + 1;
- for (int i = 0; i < source_len; i++) {
- if (stream.has_more()) PrintF("%c", stream.GetNext());
+ if (code->kind() != Code::COMPILED_STUB) {
+ Handle<Script> script = info->script();
+ if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ PrintF("--- Raw source ---\n");
+ StringInputBuffer stream(String::cast(script->source()));
+ stream.Seek(function->start_position());
+ // fun->end_position() points to the last character in the stream. We
+ // need to compensate by adding one to calculate the length.
+ int source_len =
+ function->end_position() - function->start_position() + 1;
+ for (int i = 0; i < source_len; i++) {
+ if (stream.has_more()) PrintF("%c", stream.GetNext());
+ }
+ PrintF("\n\n");
}
- PrintF("\n\n");
}
if (info->IsOptimizing()) {
if (FLAG_print_unopt_code) {
} else {
PrintF("--- Code ---\n");
}
- code->Disassemble(*function->debug_name()->ToCString());
+ if (info->IsStub()) {
+ CodeStub::Major major_key = info->code_stub()->MajorKey();
+ code->Disassemble(CodeStub::MajorName(major_key, false));
+ } else {
+ code->Disassemble(*function->debug_name()->ToCString());
+ }
}
#endif // ENABLE_DISASSEMBLER
}
: flags_(LanguageModeField::encode(CLASSIC_MODE)),
script_(script),
osr_ast_id_(BailoutId::None()) {
- Initialize(zone);
+ Initialize(script->GetIsolate(), BASE, zone);
}
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
osr_ast_id_(BailoutId::None()) {
- Initialize(zone);
+ Initialize(script_->GetIsolate(), BASE, zone);
}
script_(Handle<Script>(Script::cast(shared_info_->script()))),
context_(closure->context()),
osr_ast_id_(BailoutId::None()) {
- Initialize(zone);
+ Initialize(script_->GetIsolate(), BASE, zone);
}
-void CompilationInfo::Initialize(Zone* zone) {
- isolate_ = script_->GetIsolate();
+CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
+ Isolate* isolate, Zone* zone)
+ : flags_(LanguageModeField::encode(CLASSIC_MODE) |
+ IsLazy::encode(true)),
+ osr_ast_id_(BailoutId::None()) {
+ Initialize(isolate, STUB, zone);
+ code_stub_ = stub;
+}
+
+
+void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
+ isolate_ = isolate;
function_ = NULL;
scope_ = NULL;
global_scope_ = NULL;
pre_parse_data_ = NULL;
zone_ = zone;
deferred_handles_ = NULL;
+ code_stub_ = NULL;
prologue_offset_ = kPrologueOffsetNotSet;
- mode_ = V8::UseCrankshaft() ? BASE : NONOPT;
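+ // Stubs have no script, so the script-dependent setup below is skipped.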
+ if (mode == STUB) {
+ mode_ = STUB;
+ return;
+ }
+ mode_ = V8::UseCrankshaft() ? mode : NONOPT;
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
}
}
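+// Stubs have no scope, so scope-derived quantities default to zero.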
+int CompilationInfo::num_parameters() const {
+ if (IsStub()) {
+ return 0;
+ } else {
+ return scope()->num_parameters();
+ }
+}
+
+
+int CompilationInfo::num_heap_slots() const {
+ if (IsStub()) {
+ return 0;
+ } else {
+ return scope()->num_heap_slots();
+ }
+}
+
+
+Code::Flags CompilationInfo::flags() const {
+ if (IsStub()) {
+ return Code::ComputeFlags(Code::COMPILED_STUB);
+ } else {
+ return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
+ }
+}
+
+
// Disable optimization for the rest of the compilation pipeline.
void CompilationInfo::DisableOptimization() {
bool is_optimizable_closure =
if (FLAG_trace_hydrogen) {
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
- HTracer::Instance()->TraceCompilation(info()->function());
+ HTracer::Instance()->TraceCompilation(info());
}
Handle<Context> native_context(
info()->closure()->context()->native_context());
oracle_ = new(info()->zone()) TypeFeedbackOracle(
code, native_context, info()->isolate(), info()->zone());
- graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_);
+ graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info(), oracle_);
Timer t(this, &time_taken_to_create_graph_);
graph_ = graph_builder_->CreateGraph();
Timer timer(this, &time_taken_to_codegen_);
ASSERT(chunk_ != NULL);
ASSERT(graph_ != NULL);
- Handle<Code> optimized_code = chunk_->Codegen();
+ Handle<Code> optimized_code = chunk_->Codegen(Code::OPTIMIZED_FUNCTION);
if (optimized_code.is_null()) {
info()->set_bailout_reason("code generation failed");
return AbortOptimization();
static const int kPrologueOffsetNotSet = -1;
class ScriptDataImpl;
+class HydrogenCodeStub;
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
CompilationInfo(Handle<Script> script, Zone* zone);
CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
+ CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
virtual ~CompilationInfo();
Handle<JSFunction> closure() const { return closure_; }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
Handle<Script> script() const { return script_; }
+ HydrogenCodeStub* code_stub() { return code_stub_; }
v8::Extension* extension() const { return extension_; }
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
+ int num_parameters() const;
+ int num_heap_slots() const;
+ Code::Flags flags() const;
void MarkAsEval() {
ASSERT(!is_lazy());
void MarkAsNative() {
flags_ |= IsNative::encode(true);
}
+
bool is_native() const {
return IsNative::decode(flags_);
}
+
+ bool is_calling() const {
+ return is_deferred_calling() || is_non_deferred_calling();
+ }
+
+ void MarkAsDeferredCalling() {
+ flags_ |= IsDeferredCalling::encode(true);
+ }
+
+ bool is_deferred_calling() const {
+ return IsDeferredCalling::decode(flags_);
+ }
+
+ void MarkAsNonDeferredCalling() {
+ flags_ |= IsNonDeferredCalling::encode(true);
+ }
+
+ bool is_non_deferred_calling() const {
+ return IsNonDeferredCalling::decode(flags_);
+ }
+
void SetFunction(FunctionLiteral* literal) {
ASSERT(function_ == NULL);
function_ = literal;
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsOptimizable() const { return mode_ == BASE; }
+ bool IsStub() const { return mode_ == STUB; }
void SetOptimizing(BailoutId osr_ast_id) {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
enum Mode {
BASE,
OPTIMIZE,
- NONOPT
+ NONOPT,
+ STUB
};
- void Initialize(Zone* zone);
+ void Initialize(Isolate* isolate, Mode mode, Zone* zone);
void SetMode(Mode mode) {
ASSERT(V8::UseCrankshaft());
// If compiling for debugging produce just full code matching the
// initial mode setting.
class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
+ // If the compiled code contains calls that require building a frame.
+ class IsCalling: public BitField<bool, 9, 1> {};
+ // If the compiled code contains calls from deferred code that require
+ // building a frame.
+ class IsDeferredCalling: public BitField<bool, 10, 1> {};
+ // If the compiled code contains calls from non-deferred code that require
+ // building a frame.
+ class IsNonDeferredCalling: public BitField<bool, 11, 1> {};
unsigned flags_;
Scope* scope_;
// The global scope provided as a convenience.
Scope* global_scope_;
+ // For compiled stubs, the stub object
+ HydrogenCodeStub* code_stub_;
// The compiled code.
Handle<Code> code_;
: CompilationInfo(closure, &zone_),
zone_(closure->GetIsolate()),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
+ explicit CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
+ : CompilationInfo(stub, isolate, &zone_),
+ zone_(isolate),
+ zone_scope_(&zone_, DELETE_ON_EXIT) {}
private:
Zone zone_;
class HGraph;
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
class LChunk;
// A helper class that calls the three compilation phases in
private:
CompilationInfo* info_;
TypeFeedbackOracle* oracle_;
- HGraphBuilder* graph_builder_;
+ HOptimizedGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
int64_t time_taken_to_create_graph_;
reinterpret_cast<intptr_t>(from),
fp_to_sp_delta - (2 * kPointerSize));
}
- function->shared()->increment_deopt_count();
+ // For COMPILED_STUBs called from builtins, the function pointer
+ // is a SMI indicating an internal frame.
+ if (function->IsSmi()) {
+ function = NULL;
+ }
+ if (function != NULL && function->IsOptimized()) {
+ function->shared()->increment_deopt_count();
+ }
// Find the optimized code.
if (type == EAGER) {
ASSERT(from == NULL);
- optimized_code_ = function_->code();
+ compiled_code_ = function_->code();
if (FLAG_trace_deopt && FLAG_code_comments) {
// Print instruction associated with this bailout.
const char* last_comment = NULL;
int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
| RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
- for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
+ for (RelocIterator it(compiled_code_, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
if (info->rmode() == RelocInfo::COMMENT) {
last_comment = reinterpret_cast<const char*>(info->data());
}
}
} else if (type == LAZY) {
- optimized_code_ = FindDeoptimizingCodeFromAddress(from);
- ASSERT(optimized_code_ != NULL);
+ compiled_code_ = FindDeoptimizingCodeFromAddress(from);
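+ // Compiled stubs are not on the deoptimizing-code list, so fall back to
+ // looking the code object up by address in the heap.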
+ if (compiled_code_ == NULL) {
+ compiled_code_ =
+ static_cast<Code*>(isolate->heap()->FindCodeObject(from));
+ }
+ ASSERT(compiled_code_ != NULL);
} else if (type == OSR) {
// The function has already been optimized and we're transitioning
// from the unoptimized shared version to the optimized one in the
// function. The return address (from) points to unoptimized code.
- optimized_code_ = function_->code();
- ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
- ASSERT(!optimized_code_->contains(from));
+ compiled_code_ = function_->code();
+ ASSERT(compiled_code_->kind() == Code::OPTIMIZED_FUNCTION);
+ ASSERT(!compiled_code_->contains(from));
} else if (type == DEBUGGER) {
- optimized_code_ = optimized_code;
- ASSERT(optimized_code_->contains(from));
+ compiled_code_ = optimized_code;
+ ASSERT(compiled_code_->contains(from));
}
ASSERT(HEAP->allow_allocation(false));
unsigned size = ComputeInputFrameSize();
// Determine basic deoptimization information. The optimized frame is
// described by the input data.
DeoptimizationInputData* input_data =
- DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
+ DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
case Translation::SETTER_STUB_FRAME:
DoComputeAccessorStubFrame(&iterator, i, true);
break;
+ case Translation::COMPILED_STUB_FRAME:
+ DoCompiledStubFrame(&iterator, i);
+ break;
case Translation::BEGIN:
case Translation::REGISTER:
case Translation::INT32_REGISTER:
case Translation::LITERAL:
case Translation::ARGUMENTS_OBJECT:
case Translation::DUPLICATE:
+ default:
UNREACHABLE();
break;
}
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
+ case Translation::COMPILED_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE();
return;
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
+ case Translation::COMPILED_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE(); // Malformed input.
return false;
// environment at the OSR entry. The code for that is built into
// the DoComputeOsrOutputFrame function for now.
} else {
- unsigned stack_slots = optimized_code_->stack_slots();
- unsigned outgoing_size = ComputeOutgoingArgumentSize();
+ unsigned stack_slots = compiled_code_->stack_slots();
+ unsigned outgoing_size = compiled_code_->kind() == Code::COMPILED_STUB
+ ? 0 : ComputeOutgoingArgumentSize();
ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
}
#endif
unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
// The incoming arguments are the values for the formal parameters and
// the receiver. Every slot contains a pointer.
+ if (function->IsSmi()) {
+ ASSERT(Smi::cast(function) == Smi::FromInt(StackFrame::STUB));
+ return 0;
+ }
unsigned arguments = function->shared()->formal_parameter_count() + 1;
return arguments * kPointerSize;
}
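// A sketch of the marker convention the Smi check above relies on; the
// prologue shown is an assumption for illustration, not part of this patch.
// Stub frames push a Smi frame-type tag where a JSFunction pointer would
// otherwise live, so the deoptimizer can recognize them without a function:
//
//   __ push(Immediate(Smi::FromInt(StackFrame::STUB)));  // hypothetical
//                                                        // stub prologue
//   ...
//   if (raw_function->IsSmi()) {
//     // No JSFunction: the frame has no incoming JS arguments.
//   }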
unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
return height * kPointerSize;
}
Object* Deoptimizer::ComputeLiteral(int index) const {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
FixedArray* literals = data->LiteralArray();
return literals->get(index);
}
// cause us to emit relocation information for the external
// references. This is fine because the deoptimizer's code section
// isn't meant to be serialized at all.
- ASSERT(!Serializer::enabled());
-
ASSERT(type == EAGER || type == LAZY);
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
int entry_count = (type == EAGER)
GenerateDeoptimizationEntries(&masm, entry_count, type);
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
VirtualMemory* memory = type == EAGER
? data->eager_deoptimization_entry_code_
}
+void Translation::BeginCompiledStubFrame() {
+ buffer_->Add(COMPILED_STUB_FRAME, zone());
+}
+
+
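// A minimal usage sketch for the new opcode, assuming a Translation object
// 'translation' under construction and a placeholder register; the actual
// emitting site is not shown in this patch:
//
//   translation.BeginCompiledStubFrame();
//   translation.StoreRegister(reg);  // one entry per live stub value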
void Translation::StoreRegister(Register reg) {
buffer_->Add(REGISTER, zone());
buffer_->Add(reg.code(), zone());
case UINT32_STACK_SLOT:
case DOUBLE_STACK_SLOT:
case LITERAL:
+ case COMPILED_STUB_FRAME:
return 1;
case BEGIN:
case ARGUMENTS_ADAPTOR_FRAME:
return "GETTER_STUB_FRAME";
case SETTER_STUB_FRAME:
return "SETTER_STUB_FRAME";
+ case COMPILED_STUB_FRAME:
+ return "COMPILED_STUB_FRAME";
case REGISTER:
return "REGISTER";
case INT32_REGISTER:
int literal_index = iterator->Next();
return SlotRef(data->LiteralArray()->get(literal_index));
}
+
+ case Translation::COMPILED_STUB_FRAME:
+ UNREACHABLE();
+ break;
}
UNREACHABLE();
int output_count() const { return output_count_; }
+ Code::Kind compiled_code_kind() const { return compiled_code_->kind(); }
+
// Number of created JS frames. Not all created frames are necessarily JS.
int jsframe_count() const { return jsframe_count_; }
static size_t GetMaxDeoptTableSize();
+ static void EnsureCodeForDeoptimizationEntry(BailoutType type,
+ int max_entry_id);
+
private:
static const int kMinNumberOfEntries = 64;
static const int kMaxNumberOfEntries = 16384;
void DoComputeAccessorStubFrame(TranslationIterator* iterator,
int frame_index,
bool is_setter_stub_frame);
+ void DoCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index);
void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
void AddArgumentsObjectValue(intptr_t value);
void AddDoubleValue(intptr_t slot_address, double value);
- static void EnsureCodeForDeoptimizationEntry(BailoutType type,
- int max_entry_id);
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
Isolate* isolate_;
JSFunction* function_;
- Code* optimized_code_;
+ Code* compiled_code_;
unsigned bailout_id_;
BailoutType bailout_type_;
Address from_;
uintptr_t frame_size_; // Number of bytes.
JSFunction* function_;
intptr_t registers_[Register::kNumRegisters];
- double double_registers_[DoubleRegister::kNumAllocatableRegisters];
+ double double_registers_[DoubleRegister::kMaxNumAllocatableRegisters];
intptr_t top_;
intptr_t pc_;
intptr_t fp_;
GETTER_STUB_FRAME,
SETTER_STUB_FRAME,
ARGUMENTS_ADAPTOR_FRAME,
+ COMPILED_STUB_FRAME,
REGISTER,
INT32_REGISTER,
UINT32_REGISTER,
// Commands.
void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
+ void BeginCompiledStubFrame();
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void BeginConstructStubFrame(int literal_id, unsigned height);
void BeginGetterStubFrame(int literal_id);
Address addr = relocinfo.target_address();
int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
- out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
+ id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::LAZY);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
+ } else {
+ out.AddFormatted(" ;; lazy deoptimization bailout %d", id);
+ }
} else {
out.AddFormatted(" ;; deoptimization bailout %d", id);
}
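// With lazy entries now resolved as well, an annotated disassembly line for
// a deoptimization jump would look like this (address is a placeholder):
//
//   jmp 0x2b80a7a0  ;; lazy deoptimization bailout 3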
// Called by Code::CodePrint.
void Disassembler::Decode(FILE* f, Code* code) {
- int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
+ int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION ||
+ code->kind() == Code::COMPILED_STUB)
? static_cast<int>(code->safepoint_table_offset())
: code->instruction_size();
// If there might be a stack check table, stop before reaching it.
}
-inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
+inline CompiledFrame::CompiledFrame(StackFrameIterator* iterator)
: JavaScriptFrame(iterator) {
}
+inline StubFrame::StubFrame(StackFrameIterator* iterator)
+ : CompiledFrame(iterator) {
+}
+
+
+inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
+ : CompiledFrame(iterator) {
+}
+
+
inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
}
}
-void OptimizedFrame::Iterate(ObjectVisitor* v) const {
+void CompiledFrame::Iterate(ObjectVisitor* v) const {
#ifdef DEBUG
// Make sure that compiled frames do not contain any stack handlers.
StackHandlerIterator it(this, top_handler());
// Skip saved double registers.
if (safepoint_entry.has_doubles()) {
- parameters_base += DoubleRegister::kNumAllocatableRegisters *
+ parameters_base += DoubleRegister::NumAllocatableRegisters() *
kDoubleSize / kPointerSize;
}
}
}
+ // Visit the return address in the callee and incoming arguments.
+ IteratePc(v, pc_address(), code);
+}
+
+
+void StubFrame::Iterate(ObjectVisitor* v) const {
+ CompiledFrame::Iterate(v);
+}
+
+
+void OptimizedFrame::Iterate(ObjectVisitor* v) const {
+ CompiledFrame::Iterate(v);
+
// Visit the context and the function.
Object** fixed_base = &Memory::Object_at(
fp() + JavaScriptFrameConstants::kFunctionOffset);
Object** fixed_limit = &Memory::Object_at(fp());
v->VisitPointers(fixed_base, fixed_limit);
-
- // Visit the return address in the callee and incoming arguments.
- IteratePc(v, pc_address(), code);
}
V(EXIT, ExitFrame) \
V(JAVA_SCRIPT, JavaScriptFrame) \
V(OPTIMIZED, OptimizedFrame) \
+ V(STUB, StubFrame) \
V(INTERNAL, InternalFrame) \
V(CONSTRUCT, ConstructFrame) \
V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
};
-class OptimizedFrame : public JavaScriptFrame {
+class CompiledFrame : public JavaScriptFrame {
+ public:
+ virtual Type type() const = 0;
+
+ // GC support.
+ virtual void Iterate(ObjectVisitor* v) const;
+
+ protected:
+ inline explicit CompiledFrame(StackFrameIterator* iterator);
+};
+
+
+class StubFrame : public CompiledFrame {
+ public:
+ virtual Type type() const { return STUB; }
+
+ // GC support.
+ virtual void Iterate(ObjectVisitor* v) const;
+
+ protected:
+ inline explicit StubFrame(StackFrameIterator* iterator);
+
+ friend class StackFrameIterator;
+};
+
+
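// The resulting hierarchy, sketched for orientation (a summary of the
// classes in this patch, not additional code):
//
//   JavaScriptFrame
//     CompiledFrame        safepoint-based GC iteration shared below
//       StubFrame          type STUB, code of kind COMPILED_STUB
//       OptimizedFrame     type OPTIMIZED, also visits function and context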
+class OptimizedFrame : public CompiledFrame {
public:
virtual Type type() const { return OPTIMIZED; }
!Snapshot::HaveASnapshotToStartFrom();
masm_->set_emit_debug_code(generate_debug_code_);
masm_->set_predictable_code_size(true);
+ InitializeAstVisitor();
}
// debugger to piggyback on.
class BreakableStatementChecker: public AstVisitor {
public:
- BreakableStatementChecker() : is_breakable_(false) {}
+ BreakableStatementChecker() : is_breakable_(false) {
+ InitializeAstVisitor();
+ }
void Check(Statement* stmt);
void Check(Expression* stmt);
bool is_breakable_;
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(BreakableStatementChecker);
};
friend class NestedStatement;
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator);
};
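// The visitor pattern assumed by the two changes above, as a minimal sketch
// (ExampleChecker is hypothetical): concrete AstVisitor subclasses now call
// InitializeAstVisitor() in their constructor and declare the subclass
// members macro alongside their other private members.
//
//   class ExampleChecker : public AstVisitor {
//    public:
//     ExampleChecker() { InitializeAstVisitor(); }
//    private:
//     DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
//   };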
ASSERT(HasEnvironment());
HEnvironment* environment = last_environment();
ASSERT(ast_id.IsNone() ||
+ ast_id == BailoutId::StubEntry() ||
environment->closure()->shared()->VerifyBailoutId(ast_id));
int push_count = environment->push_count();
}
-HGraphBuilder::HGraphBuilder(CompilationInfo* info,
- TypeFeedbackOracle* oracle)
- : function_state_(NULL),
+HGraph* HGraphBuilder::CreateGraph() {
+ graph_ = new(zone()) HGraph(info_);
+ if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info_);
+ HPhase phase("H_Block building");
+ set_current_block(graph()->entry_block());
+ if (!BuildGraph()) return NULL;
+ return graph_;
+}
+
+
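// CreateGraph() is now a template method: a subclass supplies BuildGraph(),
// which populates the graph starting from the entry block and reports
// failure by returning false. A minimal conforming subclass, sketched with
// a hypothetical name:
//
//   class HExampleGraphBuilder : public HGraphBuilder {
//    public:
//     explicit HExampleGraphBuilder(CompilationInfo* info)
//         : HGraphBuilder(info) {}
//     virtual bool BuildGraph() {
//       // ... emit instructions into current_block() ...
//       return !HasStackOverflow();
//     }
//   };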
+HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
+ ASSERT(current_block() != NULL);
+ current_block()->AddInstruction(instr);
+ return instr;
+}
+
+
+void HGraphBuilder::AddSimulate(BailoutId id,
+ RemovableSimulate removable) {
+ ASSERT(current_block() != NULL);
+ current_block()->AddSimulate(id, removable);
+}
+
+
+HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
+ HValue* external_elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store) {
+ Zone* zone = this->zone();
+ if (is_store) {
+ ASSERT(val != NULL);
+ switch (elements_kind) {
+ case EXTERNAL_PIXEL_ELEMENTS: {
+ val = AddInstruction(new(zone) HClampToUint8(val));
+ break;
+ }
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+ break;
+ }
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ break;
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ return new(zone) HStoreKeyed(external_elements, checked_key,
+ val, elements_kind);
+ } else {
+ ASSERT(val == NULL);
+ HLoadKeyed* load =
+ new(zone) HLoadKeyed(
+ external_elements, checked_key, dependency, elements_kind);
+ if (FLAG_opt_safe_uint32_operations &&
+ elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ graph()->RecordUint32Instruction(load);
+ }
+ return load;
+ }
+}
+
+
+HInstruction* HGraphBuilder::BuildFastElementAccess(
+ HValue* elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* load_dependency,
+ ElementsKind elements_kind,
+ bool is_store) {
+ Zone* zone = this->zone();
+ if (is_store) {
+ ASSERT(val != NULL);
+ switch (elements_kind) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ // Smi-only arrays need a smi check.
+ AddInstruction(new(zone) HCheckSmi(val));
+ // Fall through.
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ return new(zone) HStoreKeyed(elements, checked_key, val, elements_kind);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+ }
+ // It's an element load (!is_store).
+ return new(zone) HLoadKeyed(elements,
+ checked_key,
+ load_dependency,
+ elements_kind);
+}
+
+
+HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ HCheckMaps* mapcheck,
+ bool is_js_array,
+ ElementsKind elements_kind,
+ bool is_store) {
+ Zone* zone = this->zone();
+ // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
+ // on a HElementsTransition instruction. The flag can also be removed if the
+ // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
+ // ElementsKind transitions. Finally, the dependency can be removed for
+ // FAST_ELEMENTS stores, since a transition to HOLEY elements won't change
+ // the generated store code.
+ if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
+ (elements_kind == FAST_ELEMENTS && is_store)) {
+ if (mapcheck != NULL) {
+ mapcheck->ClearGVNFlag(kDependsOnElementsKind);
+ }
+ }
+ bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
+ bool fast_elements = IsFastObjectElementsKind(elements_kind);
+ HInstruction* elements =
+ AddInstruction(new(zone) HLoadElements(object, mapcheck));
+ if (is_store && (fast_elements || fast_smi_only_elements)) {
+ HCheckMaps* check_cow_map = new(zone) HCheckMaps(
+ elements, Isolate::Current()->factory()->fixed_array_map(), zone);
+ check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+ AddInstruction(check_cow_map);
+ }
+ HInstruction* length = NULL;
+ HInstruction* checked_key = NULL;
+ if (IsExternalArrayElementsKind(elements_kind)) {
+ length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+ checked_key = AddInstruction(new(zone) HBoundsCheck(key, length,
+ ALLOW_SMI_KEY));
+ HLoadExternalArrayPointer* external_elements =
+ new(zone) HLoadExternalArrayPointer(elements);
+ AddInstruction(external_elements);
+ return BuildExternalArrayElementAccess(
+ external_elements, checked_key, val, mapcheck,
+ elements_kind, is_store);
+ }
+ ASSERT(fast_smi_only_elements ||
+ fast_elements ||
+ IsFastDoubleElementsKind(elements_kind));
+ if (is_js_array) {
+ length = AddInstruction(new(zone) HJSArrayLength(object, mapcheck,
+ HType::Smi()));
+ } else {
+ length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+ }
+ checked_key = AddInstruction(new(zone) HBoundsCheck(key, length,
+ ALLOW_SMI_KEY));
+ return BuildFastElementAccess(elements, checked_key, val, mapcheck,
+ elements_kind, is_store);
+}
+
+
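// For reference, the check sequence the helper above emits for a fast
// (non-external) keyed access, summarizing the code rather than adding to
// it:
//
//   1. HLoadElements             load the elements backing store
//   2. HCheckMaps(fixed_array)   COW check, only for stores to fast elements
//   3. HJSArrayLength or
//      HFixedArrayBaseLength     length source depends on is_js_array
//   4. HBoundsCheck              the key is checked once and reused
//   5. HLoadKeyed / HStoreKeyed  the element access itself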
+HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
+ TypeFeedbackOracle* oracle)
+ : HGraphBuilder(info),
+ function_state_(NULL),
initial_function_state_(this, info, oracle, NORMAL_RETURN),
ast_context_(NULL),
break_scope_(NULL),
- graph_(NULL),
- current_block_(NULL),
inlined_count_(0),
globals_(10, info->zone()),
- zone_(info->zone()),
inline_bailout_(false) {
// This is not initialized in the initializer list because the
// constructor for the initial state relies on function_state_ == NULL
// to know it's the initial state.
function_state_= &initial_function_state_;
+ InitializeAstVisitor();
}
-HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
- HBasicBlock* second,
- BailoutId join_id) {
+
+HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first,
+ HBasicBlock* second,
+ BailoutId join_id) {
if (first == NULL) {
return second;
} else if (second == NULL) {
return first;
} else {
- HBasicBlock* join_block = graph_->CreateBasicBlock();
+ HBasicBlock* join_block = graph()->CreateBasicBlock();
first->Goto(join_block);
second->Goto(join_block);
join_block->SetJoinId(join_id);
}
-HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement,
- HBasicBlock* exit_block,
- HBasicBlock* continue_block) {
+HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement,
+ HBasicBlock* exit_block,
+ HBasicBlock* continue_block) {
if (continue_block != NULL) {
if (exit_block != NULL) exit_block->Goto(continue_block);
continue_block->SetJoinId(statement->ContinueId());
}
-HBasicBlock* HGraphBuilder::CreateLoop(IterationStatement* statement,
- HBasicBlock* loop_entry,
- HBasicBlock* body_exit,
- HBasicBlock* loop_successor,
- HBasicBlock* break_block) {
+HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement,
+ HBasicBlock* loop_entry,
+ HBasicBlock* body_exit,
+ HBasicBlock* loop_successor,
+ HBasicBlock* break_block) {
if (body_exit != NULL) body_exit->Goto(loop_entry);
loop_entry->PostProcessLoopHeader(statement);
if (break_block != NULL) {
is_recursive_(false),
use_optimistic_licm_(false),
type_change_checksum_(0) {
- start_environment_ =
- new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
+ if (info->IsStub()) {
+ start_environment_ =
+ new(zone_) HEnvironment(zone_);
+ } else {
+ start_environment_ =
+ new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
+ }
start_environment_->set_ast_id(BailoutId::FunctionEntry());
entry_block_ = CreateBasicBlock();
entry_block_->SetInitialEnvironment(start_environment_);
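// The two start-environment shapes used above, for comparison (constructor
// signatures as they appear in this patch; the empty slot layout of the
// stub variant is an assumption):
//
//   HEnvironment(zone)                        stubs: no closure and no
//                                             parameter or local slots
//   HEnvironment(NULL, scope, closure, zone)  functions: receiver,
//                                             parameters and locals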
// Implementation of utility class to encapsulate the translation state for
// a (possibly inlined) function.
-FunctionState::FunctionState(HGraphBuilder* owner,
+FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
InliningKind inlining_kind)
// Implementation of utility classes to represent an expression's context in
// the AST.
-AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
+AstContext::AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind)
: owner_(owner),
kind_(kind),
outer_(owner->ast_context()),
void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
- HGraphBuilder* builder = owner();
+ HOptimizedGraphBuilder* builder = owner();
builder->AddInstruction(instr);
// We expect a simulate after every expression with side effects, though
// this one isn't actually needed (and wouldn't work if it were targeted).
// connects a branch node to a join node. We conservatively ensure that
// property by always adding an empty block on the outgoing edges of this
// branch.
- HGraphBuilder* builder = owner();
+ HOptimizedGraphBuilder* builder = owner();
if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
builder->Bailout("arguments object value in a test context");
}
}
-// HGraphBuilder infrastructure for bailing out and checking bailouts.
+// HOptimizedGraphBuilder infrastructure for bailing out and checking bailouts.
#define CHECK_BAILOUT(call) \
do { \
call; \
} while (false)
-void HGraphBuilder::Bailout(const char* reason) {
+void HOptimizedGraphBuilder::Bailout(const char* reason) {
info()->set_bailout_reason(reason);
SetStackOverflow();
}
-void HGraphBuilder::VisitForEffect(Expression* expr) {
+void HOptimizedGraphBuilder::VisitForEffect(Expression* expr) {
EffectContext for_effect(this);
Visit(expr);
}
-void HGraphBuilder::VisitForValue(Expression* expr, ArgumentsAllowedFlag flag) {
+void HOptimizedGraphBuilder::VisitForValue(Expression* expr,
+ ArgumentsAllowedFlag flag) {
ValueContext for_value(this, flag);
Visit(expr);
}
-void HGraphBuilder::VisitForTypeOf(Expression* expr) {
+void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
for_value.set_for_typeof(true);
Visit(expr);
-void HGraphBuilder::VisitForControl(Expression* expr,
- HBasicBlock* true_block,
- HBasicBlock* false_block) {
+void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
+ HBasicBlock* true_block,
+ HBasicBlock* false_block) {
TestContext for_test(this, expr, oracle(), true_block, false_block);
Visit(expr);
}
-void HGraphBuilder::VisitArgument(Expression* expr) {
+void HOptimizedGraphBuilder::VisitArgument(Expression* expr) {
CHECK_ALIVE(VisitForValue(expr));
Push(AddInstruction(new(zone()) HPushArgument(Pop())));
}
-void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) {
+void HOptimizedGraphBuilder::VisitArgumentList(
+ ZoneList<Expression*>* arguments) {
for (int i = 0; i < arguments->length(); i++) {
CHECK_ALIVE(VisitArgument(arguments->at(i)));
}
}
-void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) {
+void HOptimizedGraphBuilder::VisitExpressions(
+ ZoneList<Expression*>* exprs) {
for (int i = 0; i < exprs->length(); ++i) {
CHECK_ALIVE(VisitForValue(exprs->at(i)));
}
}
-HGraph* HGraphBuilder::CreateGraph() {
- graph_ = new(zone()) HGraph(info());
- if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
-
- {
- HPhase phase("H_Block building");
- current_block_ = graph()->entry_block();
-
- Scope* scope = info()->scope();
- if (scope->HasIllegalRedeclaration()) {
- Bailout("function with illegal redeclaration");
- return NULL;
- }
- if (scope->calls_eval()) {
- Bailout("function calls eval");
- return NULL;
- }
- SetUpScope(scope);
-
- // Add an edge to the body entry. This is warty: the graph's start
- // environment will be used by the Lithium translation as the initial
- // environment on graph entry, but it has now been mutated by the
- // Hydrogen translation of the instructions in the start block. This
- // environment uses values which have not been defined yet. These
- // Hydrogen instructions will then be replayed by the Lithium
- // translation, so they cannot have an environment effect. The edge to
- // the body's entry block (along with some special logic for the start
- // block in HInstruction::InsertAfter) seals the start block from
- // getting unwanted instructions inserted.
- //
- // TODO(kmillikin): Fix this. Stop mutating the initial environment.
- // Make the Hydrogen instructions in the initial block into Hydrogen
- // values (but not instructions), present in the initial environment and
- // not replayed by the Lithium translation.
- HEnvironment* initial_env = environment()->CopyWithoutHistory();
- HBasicBlock* body_entry = CreateBasicBlock(initial_env);
- current_block()->Goto(body_entry);
- body_entry->SetJoinId(BailoutId::FunctionEntry());
- set_current_block(body_entry);
-
- // Handle implicit declaration of the function name in named function
- // expressions before other declarations.
- if (scope->is_function_scope() && scope->function() != NULL) {
- VisitVariableDeclaration(scope->function());
- }
- VisitDeclarations(scope->declarations());
- AddSimulate(BailoutId::Declarations());
-
- HValue* context = environment()->LookupContext();
- AddInstruction(
- new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
+bool HOptimizedGraphBuilder::BuildGraph() {
+ Scope* scope = info()->scope();
+ if (scope->HasIllegalRedeclaration()) {
+ Bailout("function with illegal redeclaration");
+ return false;
+ }
+ if (scope->calls_eval()) {
+ Bailout("function calls eval");
+ return false;
+ }
+ SetUpScope(scope);
+
+ // Add an edge to the body entry. This is warty: the graph's start
+ // environment will be used by the Lithium translation as the initial
+ // environment on graph entry, but it has now been mutated by the
+ // Hydrogen translation of the instructions in the start block. This
+ // environment uses values which have not been defined yet. These
+ // Hydrogen instructions will then be replayed by the Lithium
+ // translation, so they cannot have an environment effect. The edge to
+ // the body's entry block (along with some special logic for the start
+ // block in HInstruction::InsertAfter) seals the start block from
+ // getting unwanted instructions inserted.
+ //
+ // TODO(kmillikin): Fix this. Stop mutating the initial environment.
+ // Make the Hydrogen instructions in the initial block into Hydrogen
+ // values (but not instructions), present in the initial environment and
+ // not replayed by the Lithium translation.
+ HEnvironment* initial_env = environment()->CopyWithoutHistory();
+ HBasicBlock* body_entry = CreateBasicBlock(initial_env);
+ current_block()->Goto(body_entry);
+ body_entry->SetJoinId(BailoutId::FunctionEntry());
+ set_current_block(body_entry);
+
+ // Handle implicit declaration of the function name in named function
+ // expressions before other declarations.
+ if (scope->is_function_scope() && scope->function() != NULL) {
+ VisitVariableDeclaration(scope->function());
+ }
+ VisitDeclarations(scope->declarations());
+ AddSimulate(BailoutId::Declarations());
- VisitStatements(info()->function()->body());
- if (HasStackOverflow()) return NULL;
+ HValue* context = environment()->LookupContext();
+ AddInstruction(
+ new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
- if (current_block() != NULL) {
- HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined());
- current_block()->FinishExit(instr);
- set_current_block(NULL);
- }
+ VisitStatements(info()->function()->body());
+ if (HasStackOverflow()) return false;
- // If the checksum of the number of type info changes is the same as the
- // last time this function was compiled, then this recompile is likely not
- // due to missing/inadequate type feedback, but rather too aggressive
- // optimization. Disable optimistic LICM in that case.
- Handle<Code> unoptimized_code(info()->shared_info()->code());
- ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Handle<TypeFeedbackInfo> type_info(
- TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
- int checksum = type_info->own_type_change_checksum();
- int composite_checksum = graph()->update_type_change_checksum(checksum);
- graph()->set_use_optimistic_licm(
- !type_info->matches_inlined_type_change_checksum(composite_checksum));
- type_info->set_inlined_type_change_checksum(composite_checksum);
+ if (current_block() != NULL) {
+ HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined());
+ current_block()->FinishExit(instr);
+ set_current_block(NULL);
}
- return graph();
+ // If the checksum of the number of type info changes is the same as the
+ // last time this function was compiled, then this recompile is likely not
+ // due to missing/inadequate type feedback, but rather too aggressive
+ // optimization. Disable optimistic LICM in that case.
+ Handle<Code> unoptimized_code(info()->shared_info()->code());
+ ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+ Handle<TypeFeedbackInfo> type_info(
+ TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
+ int checksum = type_info->own_type_change_checksum();
+ int composite_checksum = graph()->update_type_change_checksum(checksum);
+ graph()->set_use_optimistic_licm(
+ !type_info->matches_inlined_type_change_checksum(composite_checksum));
+ type_info->set_inlined_type_change_checksum(composite_checksum);
+
+ return true;
}
+
bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
*bailout_reason = SmartArrayPointer<char>();
OrderBlocks();
}
-HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
- ASSERT(current_block() != NULL);
- current_block()->AddInstruction(instr);
- return instr;
-}
-
-
-void HGraphBuilder::AddSimulate(BailoutId ast_id, RemovableSimulate removable) {
- ASSERT(current_block() != NULL);
- current_block()->AddSimulate(ast_id, removable);
-}
-
-
-void HGraphBuilder::AddPhi(HPhi* instr) {
+void HOptimizedGraphBuilder::AddPhi(HPhi* instr) {
ASSERT(current_block() != NULL);
current_block()->AddPhi(instr);
}
-void HGraphBuilder::PushAndAdd(HInstruction* instr) {
+void HOptimizedGraphBuilder::PushAndAdd(HInstruction* instr) {
Push(instr);
AddInstruction(instr);
}
template <class Instruction>
-HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) {
+HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
int count = call->argument_count();
ZoneList<HValue*> arguments(count, zone());
for (int i = 0; i < count; ++i) {
}
-void HGraphBuilder::SetUpScope(Scope* scope) {
+void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
HConstant* undefined_constant = new(zone()) HConstant(
isolate()->factory()->undefined_value(), Representation::Tagged());
AddInstruction(undefined_constant);
- graph_->set_undefined_constant(undefined_constant);
+ graph()->set_undefined_constant(undefined_constant);
HArgumentsObject* object = new(zone()) HArgumentsObject;
AddInstruction(object);
}
-void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
+void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
CHECK_ALIVE(Visit(statements->at(i)));
}
}
-HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
+HBasicBlock* HOptimizedGraphBuilder::CreateBasicBlock(HEnvironment* env) {
HBasicBlock* b = graph()->CreateBasicBlock();
b->SetInitialEnvironment(env);
return b;
}
-HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
+HBasicBlock* HOptimizedGraphBuilder::CreateLoopHeaderBlock() {
HBasicBlock* header = graph()->CreateBasicBlock();
HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
header->SetInitialEnvironment(entry_env);
}
-void HGraphBuilder::VisitBlock(Block* stmt) {
+void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+void HOptimizedGraphBuilder::VisitExpressionStatement(
+ ExpressionStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+void HOptimizedGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
+HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get(
BreakableStatement* stmt,
BreakType type,
int* drop_extra) {
}
-void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+void HOptimizedGraphBuilder::VisitContinueStatement(
+ ContinueStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitWithStatement(WithStatement* stmt) {
+void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-bool HGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
+bool HOptimizedGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
return statement->OsrEntryId() == info()->osr_ast_id();
}
-bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
+bool HOptimizedGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
if (!HasOsrEntryAt(statement)) return false;
HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
}
-void HGraphBuilder::VisitLoopBody(IterationStatement* stmt,
- HBasicBlock* loop_entry,
- BreakAndContinueInfo* break_info) {
+void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
+ HBasicBlock* loop_entry,
+ BreakAndContinueInfo* break_info) {
BreakAndContinueScope push(break_info, this);
AddSimulate(stmt->StackCheckId());
HValue* context = environment()->LookupContext();
}
-void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
+void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+void HOptimizedGraphBuilder::VisitTryFinallyStatement(
+ TryFinallyStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitSharedFunctionInfoLiteral(
+void HOptimizedGraphBuilder::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
}
-void HGraphBuilder::VisitConditional(Conditional* expr) {
+void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
- Variable* var, LookupResult* lookup, bool is_store) {
+HOptimizedGraphBuilder::GlobalPropertyAccess
+ HOptimizedGraphBuilder::LookupGlobalProperty(
+ Variable* var, LookupResult* lookup, bool is_store) {
if (var->is_this() || !info()->has_global_object()) {
return kUseGeneric;
}
}
-HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
+HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
ASSERT(var->IsContextSlot());
HValue* context = environment()->LookupContext();
int length = info()->scope()->ContextChainLength(var->scope());
}
-void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitLiteral(Literal* expr) {
+void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
- Handle<Map> map) {
+void HOptimizedGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
+ Handle<Map> map) {
AddInstruction(new(zone()) HCheckNonSmi(object));
AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
}
-HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map,
- LookupResult* lookup) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ Handle<Map> map,
+ LookupResult* lookup) {
ASSERT(lookup->IsFound());
// If the property does not exist yet, we have to check that it wasn't made
// readonly or turned into a setter by some meanwhile modifications on the
}
-HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
- Handle<String> name,
- HValue* value) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
+ HValue* object,
+ Handle<String> name,
+ HValue* value) {
HValue* context = environment()->LookupContext();
return new(zone()) HStoreNamedGeneric(
context,
}
-HInstruction* HGraphBuilder::BuildCallSetter(HValue* object,
- HValue* value,
- Handle<Map> map,
- Handle<JSFunction> setter,
- Handle<JSObject> holder) {
+HInstruction* HOptimizedGraphBuilder::BuildCallSetter(
+ HValue* object,
+ HValue* value,
+ Handle<Map> map,
+ Handle<JSFunction> setter,
+ Handle<JSObject> holder) {
AddCheckConstantFunction(holder, object, map);
AddInstruction(new(zone()) HPushArgument(object));
AddInstruction(new(zone()) HPushArgument(value));
}
-HInstruction* HGraphBuilder::BuildStoreNamedMonomorphic(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ Handle<Map> map) {
// Handle a store to a known field.
LookupResult lookup(isolate());
if (ComputeLoadStoreField(map, name, &lookup, true)) {
}
-void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
- HValue* object,
- SmallMapList* types,
- Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
+ Property* expr,
+ HValue* object,
+ SmallMapList* types,
+ Handle<String> name) {
int count = 0;
int previous_field_offset = 0;
bool previous_field_is_in_object = false;
}
-void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
- HValue* object,
- HValue* value,
- SmallMapList* types,
- Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
+ Assignment* expr,
+ HValue* object,
+ HValue* value,
+ SmallMapList* types,
+ Handle<String> name) {
// TODO(ager): We should recognize when the prototype chains for different
// maps are identical. In that case we can avoid repeatedly generating the
// same prototype map checks.
}
-void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
expr->RecordTypeFeedback(oracle(), zone());
// Because not every expression has a position and there is not common
// superclass of Assignment and CountOperation, we cannot just pass the
// owning expression instead of position and ast_id separately.
-void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
- HValue* value,
- int position,
- BailoutId ast_id) {
+void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
+ Variable* var,
+ HValue* value,
+ int position,
+ BailoutId ast_id) {
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) {
}
-void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Expression* target = expr->target();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
}
-void HGraphBuilder::VisitAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitThrow(Throw* expr) {
+void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
- Handle<Map> map,
- LookupResult* lookup) {
+HLoadNamedField* HOptimizedGraphBuilder::BuildLoadNamedField(
+ HValue* object,
+ Handle<Map> map,
+ LookupResult* lookup) {
int index = lookup->GetLocalFieldIndexFromMap(*map);
if (index < 0) {
// Negative property indices are in-object properties, indexed
}
-HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* object,
- Handle<String> name,
- Property* expr) {
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
+ HValue* object,
+ Handle<String> name,
+ Property* expr) {
if (expr->IsUninitialized() && !FLAG_always_opt) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
}
-HInstruction* HGraphBuilder::BuildCallGetter(HValue* object,
- Handle<Map> map,
- Handle<JSFunction> getter,
- Handle<JSObject> holder) {
+HInstruction* HOptimizedGraphBuilder::BuildCallGetter(
+ HValue* object,
+ Handle<Map> map,
+ Handle<JSFunction> getter,
+ Handle<JSObject> holder) {
AddCheckConstantFunction(holder, object, map);
AddInstruction(new(zone()) HPushArgument(object));
return new(zone()) HCallConstantFunction(getter, 1);
}
-HInstruction* HGraphBuilder::BuildLoadNamedMonomorphic(HValue* object,
- Handle<String> name,
- Property* expr,
- Handle<Map> map) {
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
+ HValue* object,
+ Handle<String> name,
+ Property* expr,
+ Handle<Map> map) {
// Handle a load from a known field.
ASSERT(!map->is_dictionary_map());
LookupResult lookup(isolate());
}
-HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
- HValue* key) {
+HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
+ HValue* key) {
HValue* context = environment()->LookupContext();
return new(zone()) HLoadKeyedGeneric(context, object, key);
}
-HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
+HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
+ HValue* object,
+ HValue* key,
HValue* val,
HValue* dependency,
- ElementsKind elements_kind,
+ Handle<Map> map,
bool is_store) {
- if (is_store) {
- ASSERT(val != NULL);
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- val = AddInstruction(new(zone()) HClampToUint8(val));
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- break;
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- return new(zone()) HStoreKeyed(external_elements,
- checked_key,
- val,
- elements_kind);
- } else {
- ASSERT(val == NULL);
- HLoadKeyed* load =
- new(zone()) HLoadKeyed(
- external_elements, checked_key, dependency, elements_kind);
- if (FLAG_opt_safe_uint32_operations &&
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- graph()->RecordUint32Instruction(load);
- }
- return load;
- }
-}
-
-
-HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* load_dependency,
- ElementsKind elements_kind,
- bool is_store) {
- if (is_store) {
- ASSERT(val != NULL);
- switch (elements_kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- // Smi-only arrays need a smi check.
- AddInstruction(new(zone()) HCheckSmi(val));
- // Fall through.
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return new(zone()) HStoreKeyed(
- elements, checked_key, val, elements_kind);
- default:
- UNREACHABLE();
- return NULL;
- }
- }
- // It's an element load (!is_store).
- return new(zone()) HLoadKeyed(elements,
- checked_key,
- load_dependency,
- elements_kind);
-}
-
-
-HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
- HValue* key,
- HValue* val,
- HValue* dependency,
- Handle<Map> map,
- bool is_store) {
HCheckMaps* mapcheck = new(zone()) HCheckMaps(object, map,
zone(), dependency);
AddInstruction(mapcheck);
if (dependency) {
mapcheck->ClearGVNFlag(kDependsOnElementsKind);
}
- return BuildUncheckedMonomorphicElementAccess(object, key, val,
- mapcheck, map, is_store);
-}
-
-
-HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
- HValue* object,
- HValue* key,
- HValue* val,
- HCheckMaps* mapcheck,
- Handle<Map> map,
- bool is_store) {
- // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
- // on a HElementsTransition instruction. The flag can also be removed if the
- // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
- // ElementsKind transitions. Finally, the dependency can be removed for stores
- // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
- // generated store code.
- if ((map->elements_kind() == FAST_HOLEY_ELEMENTS) ||
- (map->elements_kind() == FAST_ELEMENTS && is_store)) {
- mapcheck->ClearGVNFlag(kDependsOnElementsKind);
- }
- bool fast_smi_only_elements = map->has_fast_smi_elements();
- bool fast_elements = map->has_fast_object_elements();
- HInstruction* elements =
- AddInstruction(new(zone()) HLoadElements(object, mapcheck));
- if (is_store && (fast_elements || fast_smi_only_elements)) {
- HCheckMaps* check_cow_map = new(zone()) HCheckMaps(
- elements, isolate()->factory()->fixed_array_map(), zone());
- check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
- AddInstruction(check_cow_map);
- }
- HInstruction* length = NULL;
- HInstruction* checked_key = NULL;
- if (map->has_external_array_elements()) {
- length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
- ALLOW_SMI_KEY));
- HLoadExternalArrayPointer* external_elements =
- new(zone()) HLoadExternalArrayPointer(elements);
- AddInstruction(external_elements);
- return BuildExternalArrayElementAccess(
- external_elements, checked_key, val, mapcheck,
- map->elements_kind(), is_store);
- }
- ASSERT(fast_smi_only_elements ||
- fast_elements ||
- map->has_fast_double_elements());
- if (map->instance_type() == JS_ARRAY_TYPE) {
- length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck,
- HType::Smi()));
- } else {
- length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- }
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
- ALLOW_SMI_KEY));
- return BuildFastElementAccess(elements, checked_key, val, mapcheck,
- map->elements_kind(), is_store);
+ return BuildUncheckedMonomorphicElementAccess(
+ object, key, val,
+ mapcheck, map->instance_type() == JS_ARRAY_TYPE,
+ map->elements_kind(), is_store);
}
-HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad(
+HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
HValue* object,
HValue* key,
HValue* val,
HCheckMaps* check_maps = new(zone()) HCheckMaps(object, maps, zone());
AddInstruction(check_maps);
HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
- object, key, val, check_maps, most_general_consolidated_map, false);
+ object, key, val, check_maps,
+ most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
+ most_general_consolidated_map->elements_kind(),
+ false);
return instr;
}
-HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
- HValue* key,
- HValue* val,
- Expression* prop,
- BailoutId ast_id,
- int position,
- bool is_store,
- bool* has_side_effects) {
+HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ Expression* prop,
+ BailoutId ast_id,
+ int position,
+ bool is_store,
+ bool* has_side_effects) {
*has_side_effects = false;
AddInstruction(new(zone()) HCheckNonSmi(object));
SmallMapList* maps = prop->GetReceiverTypes();
}
} else { // External array elements.
access = AddInstruction(BuildExternalArrayElementAccess(
- external_elements, checked_key, val, elements_kind_branch,
- elements_kind, is_store));
+ external_elements, checked_key, val,
+ elements_kind_branch, elements_kind, is_store));
}
*has_side_effects |= access->HasObservableSideEffects();
if (position != RelocInfo::kNoPosition) access->set_position(position);
}
-HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
- HValue* key,
- HValue* val,
- Expression* expr,
- BailoutId ast_id,
- int position,
- bool is_store,
- bool* has_side_effects) {
+HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
+ HValue* obj,
+ HValue* key,
+ HValue* val,
+ Expression* expr,
+ BailoutId ast_id,
+ int position,
+ bool is_store,
+ bool* has_side_effects) {
ASSERT(!expr->IsPropertyName());
HInstruction* instr = NULL;
if (expr->IsMonomorphic()) {
}
-HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
- HValue* key,
- HValue* value) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
+ HValue* object,
+ HValue* key,
+ HValue* value) {
HValue* context = environment()->LookupContext();
return new(zone()) HStoreKeyedGeneric(
context,
}
-void HGraphBuilder::EnsureArgumentsArePushedForAccess() {
+void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
// Outermost function already has arguments on the stack.
if (function_state()->outer() == NULL) return;
}
-bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
+bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
VariableProxy* proxy = expr->obj()->AsVariableProxy();
if (proxy == NULL) return false;
if (!proxy->var()->IsStackAllocated()) return false;
}
-void HGraphBuilder::VisitProperty(Property* expr) {
+void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
- Handle<Map> receiver_map) {
+void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
+ Handle<Map> receiver_map) {
if (!holder.is_null()) {
AddInstruction(new(zone()) HCheckPrototypeMaps(
Handle<JSObject>(JSObject::cast(receiver_map->prototype())), holder));
}
-void HGraphBuilder::AddCheckConstantFunction(Handle<JSObject> holder,
- HValue* receiver,
- Handle<Map> receiver_map) {
+void HOptimizedGraphBuilder::AddCheckConstantFunction(
+ Handle<JSObject> holder,
+ HValue* receiver,
+ Handle<Map> receiver_map) {
// Constant functions have the nice property that the map will change if they
// are overwritten. Therefore it is enough to check the map of the holder and
// its prototypes.
}
-void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
- HValue* receiver,
- SmallMapList* types,
- Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
+ Call* expr,
+ HValue* receiver,
+ SmallMapList* types,
+ Handle<String> name) {
// TODO(ager): We should recognize when the prototype chains for different
// maps are identical. In that case we can avoid repeatedly generating the
// same prototype map checks.
}
-void HGraphBuilder::TraceInline(Handle<JSFunction> target,
- Handle<JSFunction> caller,
- const char* reason) {
+void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
+ Handle<JSFunction> caller,
+ const char* reason) {
if (FLAG_trace_inlining) {
SmartArrayPointer<char> target_name =
target->shared()->DebugName()->ToCString();
static const int kNotInlinable = 1000000000;
-int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
+int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
if (!FLAG_use_inlining) return kNotInlinable;
// Precondition: call is monomorphic and we have found a target with the
}
-bool HGraphBuilder::TryInline(CallKind call_kind,
- Handle<JSFunction> target,
- int arguments_count,
- HValue* implicit_return_value,
- BailoutId ast_id,
- BailoutId return_id,
- InliningKind inlining_kind) {
+bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
+ Handle<JSFunction> target,
+ int arguments_count,
+ HValue* implicit_return_value,
+ BailoutId ast_id,
+ BailoutId return_id,
+ InliningKind inlining_kind) {
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
}
-bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
+bool HOptimizedGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
// The function call we are inlining is a method call if the call
// is a property call.
CallKind call_kind = (expr->expression()->AsProperty() == NULL)
}
-bool HGraphBuilder::TryInlineConstruct(CallNew* expr,
- HValue* implicit_return_value) {
+bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
+ HValue* implicit_return_value) {
return TryInline(CALL_AS_FUNCTION,
expr->target(),
expr->arguments()->length(),
}
-bool HGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
- Property* prop) {
+bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
+ Property* prop) {
return TryInline(CALL_AS_METHOD,
getter,
0,
}
-bool HGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
- Assignment* assignment,
- HValue* implicit_return_value) {
+bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
+ Assignment* assignment,
+ HValue* implicit_return_value) {
return TryInline(CALL_AS_METHOD,
setter,
1,
}
-bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
+bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
+ bool drop_extra) {
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
switch (id) {
}
-bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
- HValue* receiver,
- Handle<Map> receiver_map,
- CheckType check_type) {
+bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
+ Call* expr,
+ HValue* receiver,
+ Handle<Map> receiver_map,
+ CheckType check_type) {
ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
// Try to inline calls like Math.* as operations in the calling function.
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
}
-bool HGraphBuilder::TryCallApply(Call* expr) {
+bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
Expression* callee = expr->expression();
Property* prop = callee->AsProperty();
ASSERT(prop != NULL);
}
-void HGraphBuilder::VisitCall(Call* expr) {
+void HOptimizedGraphBuilder::VisitCall(Call* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitCallNew(CallNew* expr) {
+void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
// Support for generating inlined runtime functions.
-// Lookup table for generators for runtime calls that are generated inline.
-// Elements of the table are member pointers to functions of HGraphBuilder.
+// Lookup table for generators for runtime calls that are generated inline.
+// Elements of the table are member pointers to functions of
+// HOptimizedGraphBuilder.
#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
- &HGraphBuilder::Generate##Name,
+ &HOptimizedGraphBuilder::Generate##Name,
-const HGraphBuilder::InlineFunctionGenerator
- HGraphBuilder::kInlineFunctionGenerators[] = {
+const HOptimizedGraphBuilder::InlineFunctionGenerator
+ HOptimizedGraphBuilder::kInlineFunctionGenerators[] = {
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
};
#undef INLINE_FUNCTION_GENERATOR_ADDRESS
-void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
}
-void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
Property* prop = expr->expression()->AsProperty();
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (prop != NULL) {
}
-void HGraphBuilder::VisitVoid(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitVoid(UnaryOperation* expr) {
CHECK_ALIVE(VisitForEffect(expr->expression()));
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
-void HGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
CHECK_ALIVE(VisitForTypeOf(expr->expression()));
HValue* value = Pop();
HValue* context = environment()->LookupContext();
}
-void HGraphBuilder::VisitAdd(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitAdd(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
HValue* context = environment()->LookupContext();
HInstruction* instr =
- new(zone()) HMul(context, value, graph_->GetConstant1());
+ new(zone()) HMul(context, value, graph()->GetConstant1());
return ast_context()->ReturnInstruction(instr, expr->id());
}
-void HGraphBuilder::VisitSub(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
HValue* context = environment()->LookupContext();
HInstruction* instr =
- new(zone()) HMul(context, value, graph_->GetConstantMinus1());
+ new(zone()) HMul(context, value, graph()->GetConstantMinus1());
TypeInfo info = oracle()->UnaryType(expr);
Representation rep = ToRepresentation(info);
if (info.IsUninitialized()) {
}
-void HGraphBuilder::VisitBitNot(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
TypeInfo info = oracle()->UnaryType(expr);
}
-void HGraphBuilder::VisitNot(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitNot(UnaryOperation* expr) {
if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
VisitForControl(expr->expression(),
}
-HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
- CountOperation* expr) {
+HInstruction* HOptimizedGraphBuilder::BuildIncrement(
+ bool returns_original_input,
+ CountOperation* expr) {
// The input to the count operation is on top of the expression stack.
TypeInfo info = oracle()->IncrementType(expr);
Representation rep = ToRepresentation(info);
// to simulate the expression stack after this instruction.
// Any later failures deopt to the load of the input or earlier.
HConstant* delta = (expr->op() == Token::INC)
- ? graph_->GetConstant1()
- : graph_->GetConstantMinus1();
+ ? graph()->GetConstant1()
+ : graph()->GetConstantMinus1();
HValue* context = environment()->LookupContext();
HInstruction* instr = new(zone()) HAdd(context, Top(), delta);
// We can't insert a simulate here, because it would break deoptimization,
}
-void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
+void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
if (prop->key()->IsPropertyName()) {
// Named property.
- if (returns_original_input) Push(graph_->GetConstantUndefined());
+ if (returns_original_input) Push(graph()->GetConstantUndefined());
CHECK_ALIVE(VisitForValue(prop->obj()));
HValue* object = Top();
} else {
// Keyed property.
- if (returns_original_input) Push(graph_->GetConstantUndefined());
+ if (returns_original_input) Push(graph()->GetConstantUndefined());
CHECK_ALIVE(VisitForValue(prop->obj()));
CHECK_ALIVE(VisitForValue(prop->key()));
}
-HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* context,
- HValue* string,
- HValue* index) {
+HStringCharCodeAt* HOptimizedGraphBuilder::BuildStringCharCodeAt(
+ HValue* context,
+ HValue* string,
+ HValue* index) {
AddInstruction(new(zone()) HCheckNonSmi(string));
AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
HStringLength* length = new(zone()) HStringLength(string);
// directions that can be replaced by one rotate right instruction or not.
// Returns the operand and the shift amount for the rotate instruction in the
// former case.
-bool HGraphBuilder::MatchRotateRight(HValue* left,
- HValue* right,
- HValue** operand,
- HValue** shift_amount) {
+bool HOptimizedGraphBuilder::MatchRotateRight(HValue* left,
+ HValue* right,
+ HValue** operand,
+ HValue** shift_amount) {
HShl* shl;
HShr* shr;
if (left->IsShl() && right->IsShr()) {
}
-HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
- HValue* left,
- HValue* right) {
+HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
+ BinaryOperation* expr,
+ HValue* left,
+ HValue* right) {
HValue* context = environment()->LookupContext();
TypeInfo left_info, right_info, result_info, combined_info;
oracle()->BinaryType(expr, &left_info, &right_info, &result_info);
}
-void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitComma(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitComma(BinaryOperation* expr) {
CHECK_ALIVE(VisitForEffect(expr->left()));
// Visit the right subexpression in the same AST context as the entire
// expression.
}
-void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
bool is_logical_and = expr->op() == Token::AND;
if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
}
-void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
HValue* right = Pop();
}
-Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
+Representation HOptimizedGraphBuilder::ToRepresentation(TypeInfo info) {
if (info.IsUninitialized()) return Representation::None();
if (info.IsSmi()) return Representation::Integer32();
if (info.IsInteger32()) return Representation::Integer32();
}
-void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
- HTypeof* typeof_expr,
- Handle<String> check) {
+void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
+ HTypeof* typeof_expr,
+ Handle<String> check) {
// Note: The HTypeof itself is removed during canonicalization, if possible.
HValue* value = typeof_expr->value();
HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
}
-void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
- HValue* value,
- NilValue nil) {
+void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
+ HValue* value,
+ NilValue nil) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-HInstruction* HGraphBuilder::BuildThisFunction() {
+HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
// If we share optimized code between different closures, the
// this-function is not a constant, except inside an inlined body.
if (function_state()->outer() != NULL) {
}
-void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+void HOptimizedGraphBuilder::VisitDeclarations(
+ ZoneList<Declaration*>* declarations) {
ASSERT(globals_.is_empty());
AstVisitor::VisitDeclarations(declarations);
if (!globals_.is_empty()) {
}
-void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitVariableDeclaration(
+ VariableDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
}
-void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitFunctionDeclaration(
+ FunctionDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
}
-void HGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitModuleDeclaration(
+ ModuleDeclaration* declaration) {
UNREACHABLE();
}
-void HGraphBuilder::VisitImportDeclaration(ImportDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitImportDeclaration(
+ ImportDeclaration* declaration) {
UNREACHABLE();
}
-void HGraphBuilder::VisitExportDeclaration(ExportDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitExportDeclaration(
+ ExportDeclaration* declaration) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
+void HOptimizedGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
+void HOptimizedGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModulePath(ModulePath* module) {
+void HOptimizedGraphBuilder::VisitModulePath(ModulePath* module) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
+void HOptimizedGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
+void HOptimizedGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
UNREACHABLE();
}
// Generators for inline runtime functions.
// Support for types.
-void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
}
-void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
}
-void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
}
-void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
}
-void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
}
-void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
}
-void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
}
-void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
return Bailout("inlined runtime function: IsNonNegativeSmi");
}
-void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
}
-void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
+void HOptimizedGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
CallRuntime* call) {
return Bailout(
"inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
// Support for construct call checks.
-void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
ASSERT(call->arguments()->length() == 0);
if (function_state()->outer() != NULL) {
// We are generating graph for inlined function.
// Support for arguments.length and arguments[?].
-void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
// Our implementation of arguments (based on this stack frame or an
// adapter below it) does not work for inlined functions. This runtime
// function is blacklisted by AstNode::IsInlineable.
}
-void HGraphBuilder::GenerateArguments(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
// Our implementation of arguments (based on this stack frame or an
// adapter below it) does not work for inlined functions. This runtime
// function is blacklisted by AstNode::IsInlineable.
// Support for accessing the class and value fields of an object.
-void HGraphBuilder::GenerateClassOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateClassOf(CallRuntime* call) {
// The special form detected by IsClassOfTest is handled before we get here
// and does not cause a bailout.
return Bailout("inlined runtime function: ClassOf");
}
-void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
}
-void HGraphBuilder::GenerateDateField(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral());
Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->handle()));
}
-void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
// Fast support for charCodeAt(n).
-void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
// Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* char_code = Pop();
// Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
// Fast support for object equality testing.
-void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
}
-void HGraphBuilder::GenerateLog(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) {
// %_Log is ignored in optimized code.
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
// Fast support for Math.random().
-void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
AddInstruction(global_object);
// Fast support for StringAdd.
-void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
// Fast support for SubString.
-void HGraphBuilder::GenerateSubString(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
// Fast support for StringCompare.
-void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
// Support for direct calls from JavaScript to native RegExp code.
-void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
ASSERT_EQ(4, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
// Construct a RegExp exec result with two in-object properties.
-void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
// Support for fast native caches.
-void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
return Bailout("inlined runtime function: GetFromCache");
}
// Fast support for number to string.
-void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
// Fast call for custom callbacks.
-void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
// 1 ~ The function to call is not itself an argument to the call.
int arg_count = call->arguments()->length() - 1;
ASSERT(arg_count >= 1); // There's always at least a receiver.
// Fast call to math functions.
-void HGraphBuilder::GenerateMathPow(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
}
-void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
}
-void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
}
-void HGraphBuilder::GenerateMathTan(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
}
-void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
}
-void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
return Bailout("inlined runtime function: MathSqrt");
}
// Check whether two RegExps are equivalent
-void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
return Bailout("inlined runtime function: IsRegExpEquivalent");
}
-void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
}
-void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
return Bailout("inlined runtime function: FastAsciiArrayJoin");
}
}
+HEnvironment::HEnvironment(Zone* zone)
+ : values_(0, zone),
+ assigned_variables_(0, zone),
+ frame_type_(STUB),
+ parameter_count_(0),
+ specials_count_(0),
+ local_count_(0),
+ outer_(NULL),
+ entry_(NULL),
+ pop_count_(0),
+ push_count_(0),
+ ast_id_(BailoutId::None()),
+ zone_(zone) {
+ Initialize(0, 0, 0);
+}
+
+
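The zero-argument constructor presumably gives stub graphs their empty starting environment: no parameters, no locals, a STUB frame type, with Initialize(0, 0, 0) zeroing the remaining counts. A toy model of the constructor pair, with stand-in names rather than V8's:

#include <vector>

enum FrameType { JS_FUNCTION, STUB };

struct Env {
  FrameType frame_type;
  std::vector<int> values;
  Env() : frame_type(STUB) {}                // new: empty stub environment
  explicit Env(const Env* other)             // existing: copy for inlining
      : frame_type(other->frame_type), values(other->values) {}
};

int main() {
  Env stub_env;          // what a stub graph starts from
  Env copy(&stub_env);   // the pre-existing copying form
}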
HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone)
: values_(0, zone),
assigned_variables_(0, zone),
}
-void HTracer::TraceCompilation(FunctionLiteral* function) {
+void HTracer::TraceCompilation(CompilationInfo* info) {
Tag tag(this, "compilation");
- Handle<String> name = function->debug_name();
- PrintStringProperty("name", *name->ToCString());
- PrintStringProperty("method", *name->ToCString());
+ if (info->IsOptimizing()) {
+ Handle<String> name = info->function()->debug_name();
+ PrintStringProperty("name", *name->ToCString());
+ PrintStringProperty("method", *name->ToCString());
+ } else {
+ CodeStub::Major major_key = info->code_stub()->MajorKey();
+ PrintStringProperty("name", CodeStub::MajorName(major_key, false));
+ PrintStringProperty("method", "stub");
+ }
PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis()));
}
JS_CONSTRUCT,
JS_GETTER,
JS_SETTER,
- ARGUMENTS_ADAPTOR
+ ARGUMENTS_ADAPTOR,
+ STUB
};
Handle<JSFunction> closure,
Zone* zone);
+ explicit HEnvironment(Zone* zone);
+
HEnvironment* arguments_environment() {
return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
}
};
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
enum ArgumentsAllowedFlag {
ARGUMENTS_NOT_ALLOWED,
bool is_for_typeof() { return for_typeof_; }
protected:
- AstContext(HGraphBuilder* owner, Expression::Context kind);
+ AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind);
virtual ~AstContext();
- HGraphBuilder* owner() const { return owner_; }
+ HOptimizedGraphBuilder* owner() const { return owner_; }
inline Zone* zone() const;
#endif
private:
- HGraphBuilder* owner_;
+ HOptimizedGraphBuilder* owner_;
Expression::Context kind_;
AstContext* outer_;
bool for_typeof_;
class EffectContext: public AstContext {
public:
- explicit EffectContext(HGraphBuilder* owner)
+ explicit EffectContext(HOptimizedGraphBuilder* owner)
: AstContext(owner, Expression::kEffect) {
}
virtual ~EffectContext();
class ValueContext: public AstContext {
public:
- explicit ValueContext(HGraphBuilder* owner, ArgumentsAllowedFlag flag)
+ ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
: AstContext(owner, Expression::kValue), flag_(flag) {
}
virtual ~ValueContext();
class TestContext: public AstContext {
public:
- TestContext(HGraphBuilder* owner,
+ TestContext(HOptimizedGraphBuilder* owner,
Expression* condition,
TypeFeedbackOracle* oracle,
HBasicBlock* if_true,
class FunctionState {
public:
- FunctionState(HGraphBuilder* owner,
+ FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
InliningKind inlining_kind);
bool arguments_pushed() { return arguments_elements() != NULL; }
private:
- HGraphBuilder* owner_;
+ HOptimizedGraphBuilder* owner_;
CompilationInfo* compilation_info_;
TypeFeedbackOracle* oracle_;
};
-class HGraphBuilder: public AstVisitor {
+class HGraphBuilder {
+ public:
+ explicit HGraphBuilder(CompilationInfo* info)
+ : info_(info), graph_(NULL), current_block_(NULL) {}
+ virtual ~HGraphBuilder() {}
+
+ HBasicBlock* current_block() const { return current_block_; }
+ void set_current_block(HBasicBlock* block) { current_block_ = block; }
+ HEnvironment* environment() const {
+ return current_block()->last_environment();
+ }
+ Zone* zone() const { return info_->zone(); }
+ HGraph* graph() { return graph_; }
+
+ HGraph* CreateGraph();
+
+ // Adding instructions.
+ HInstruction* AddInstruction(HInstruction* instr);
+ void AddSimulate(BailoutId id,
+ RemovableSimulate removable = FIXED_SIMULATE);
+
+ protected:
+ virtual bool BuildGraph() = 0;
+
+ // Building common constructs
+ HInstruction* BuildExternalArrayElementAccess(
+ HValue* external_elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store);
+
+ HInstruction* BuildFastElementAccess(
+ HValue* elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store);
+
+ HInstruction* BuildUncheckedMonomorphicElementAccess(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ HCheckMaps* mapcheck,
+ bool is_js_array,
+ ElementsKind elements_kind,
+ bool is_store);
+
+ private:
+ HGraphBuilder();
+ CompilationInfo* info_;
+ HGraph* graph_;
+ HBasicBlock* current_block_;
+};
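The refactoring shrinks HGraphBuilder to a base class that owns the graph, the current block, and instruction insertion, while the AST-driven logic moves to HOptimizedGraphBuilder. Judging from the declarations, CreateGraph() acts as a template method around the pure virtual BuildGraph(); a minimal sketch under that assumption:

#include <cassert>

class GraphBuilderBase {
 public:
  virtual ~GraphBuilderBase() {}
  bool CreateGraph() { return BuildGraph(); }   // shared driver
 protected:
  virtual bool BuildGraph() = 0;                // per-builder graph body
};

class StubGraphBuilder : public GraphBuilderBase {
 protected:
  virtual bool BuildGraph() { return true; }    // e.g. a code-stub builder
};

int main() {
  StubGraphBuilder builder;
  assert(builder.CreateGraph());
}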
+
+
+class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
public:
enum BreakType { BREAK, CONTINUE };
enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
// structures mirroring BreakableStatement nesting.
class BreakAndContinueScope BASE_EMBEDDED {
public:
- BreakAndContinueScope(BreakAndContinueInfo* info, HGraphBuilder* owner)
+ BreakAndContinueScope(BreakAndContinueInfo* info,
+ HOptimizedGraphBuilder* owner)
: info_(info), owner_(owner), next_(owner->break_scope()) {
owner->set_break_scope(this);
}
~BreakAndContinueScope() { owner_->set_break_scope(next_); }
BreakAndContinueInfo* info() { return info_; }
- HGraphBuilder* owner() { return owner_; }
+ HOptimizedGraphBuilder* owner() { return owner_; }
BreakAndContinueScope* next() { return next_; }
// Search the break stack for a break or continue target.
private:
BreakAndContinueInfo* info_;
- HGraphBuilder* owner_;
+ HOptimizedGraphBuilder* owner_;
BreakAndContinueScope* next_;
};
- HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
+ HOptimizedGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
- HGraph* CreateGraph();
+ virtual bool BuildGraph();
// Simple accessors.
- HGraph* graph() const { return graph_; }
BreakAndContinueScope* break_scope() const { return break_scope_; }
void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
- HBasicBlock* current_block() const { return current_block_; }
- void set_current_block(HBasicBlock* block) { current_block_ = block; }
- HEnvironment* environment() const {
- return current_block()->last_environment();
- }
-
bool inline_bailout() { return inline_bailout_; }
- // Adding instructions.
- HInstruction* AddInstruction(HInstruction* instr);
- void AddSimulate(BailoutId ast_id,
- RemovableSimulate removable = FIXED_SIMULATE);
-
// Bailout environment manipulation.
void Push(HValue* value) { environment()->Push(value); }
HValue* Pop() { return environment()->Pop(); }
void operator delete(void* pointer, Zone* zone) { }
void operator delete(void* pointer) { }
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
private:
// Type of a member function that generates inline code for a native function.
- typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
+ typedef void (HOptimizedGraphBuilder::*InlineFunctionGenerator)
+ (CallRuntime* call);
// Forward declarations for inner scope classes.
class SubgraphScope;
HValue* right);
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
- HInstruction* BuildFastElementAccess(HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store);
+ HInstruction* BuildLoadKeyedGeneric(HValue* object,
+ HValue* key);
HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
HValue* key,
HValue* val,
SmallMapList* maps);
- HInstruction* BuildUncheckedMonomorphicElementAccess(HValue* object,
- HValue* key,
- HValue* val,
- HCheckMaps* mapcheck,
- Handle<Map> map,
- bool is_store);
-
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
Handle<String> name,
Property* expr,
Handle<Map> map);
- HInstruction* BuildLoadKeyedGeneric(HValue* object, HValue* key);
- HInstruction* BuildExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store);
void AddCheckMapsWithTransitions(HValue* object,
Handle<Map> map);
HValue** operand,
HValue** shift_amount);
- Zone* zone() const { return zone_; }
-
// The translation state of the currently-being-translated function.
FunctionState* function_state_;
// A stack of breakable statements entered.
BreakAndContinueScope* break_scope_;
- HGraph* graph_;
- HBasicBlock* current_block_;
-
int inlined_count_;
ZoneList<Handle<Object> > globals_;
- Zone* zone_;
-
bool inline_bailout_;
friend class FunctionState; // Pushes and pops the state stack.
friend class AstContext; // Pushes and pops the AST context stack.
+ friend class KeyedLoadFastElementStub;
- DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
+ DISALLOW_COPY_AND_ASSIGN(HOptimizedGraphBuilder);
};
class HTracer: public Malloced {
public:
- void TraceCompilation(FunctionLiteral* function);
+ void TraceCompilation(CompilationInfo* info);
void TraceHydrogen(const char* name, HGraph* graph);
void TraceLithium(const char* name, LChunk* chunk);
void TraceLiveRanges(const char* name, LAllocator* allocator);
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
+int IntelDoubleRegister::NumAllocatableRegisters() {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ return XMMRegister::kNumAllocatableRegisters;
+ } else {
+ return X87TopOfStackRegister::kNumAllocatableRegisters;
+ }
+}
+
+
+int IntelDoubleRegister::NumRegisters() {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ return XMMRegister::kNumRegisters;
+ } else {
+ return X87TopOfStackRegister::kNumRegisters;
+ }
+}
+
+
+const char* IntelDoubleRegister::AllocationIndexToString(int index) {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ return XMMRegister::AllocationIndexToString(index);
+ } else {
+ return X87TopOfStackRegister::AllocationIndexToString(index);
+ }
+}
+
+
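These dispatchers replace compile-time register counts with runtime queries, so one binary can allocate XMM registers on SSE2 hardware and fall back to the single x87 top-of-stack register elsewhere. A reduced model of the pattern (sse2_supported stands in for CpuFeatures::IsSupported(SSE2)):

#include <cassert>

static bool sse2_supported = false;  // flip to model either kind of CPU

struct DoubleRegs {
  static const int kMaxNumAllocatableRegisters = 7;  // compile-time bound
  static int NumAllocatableRegisters() {             // runtime truth
    return sse2_supported ? 7 : 1;                   // XMM vs x87 top
  }
};

int main() {
  // Allocator loops must now bound by the runtime count, not the constant.
  for (int i = 0; i < DoubleRegs::NumAllocatableRegisters(); ++i) {
    assert(i < DoubleRegs::kMaxNumAllocatableRegisters);
  }
}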
// The Probe method needs executable memory, so it uses Heap::CreateCode.
// Allocation failure is silent and leads to safe default.
void CpuFeatures::Probe() {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x18);
- XMMRegister code = { level }; // Emit hint number in Reg position of RegR/M.
+ // Emit hint number in Reg position of RegR/M.
+ XMMRegister code = XMMRegister(level);
emit_sse_operand(code, src);
}
// and best performance in optimized code.
//
struct Register {
- static const int kNumAllocatableRegisters = 6;
+ static const int kMaxNumAllocatableRegisters = 6;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
static const int kNumRegisters = 8;
static inline const char* AllocationIndexToString(int index);
inline const char* Register::AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
// This is the mapping of allocation indices to registers.
const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
return kNames[index];
inline Register Register::FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return (index >= 4) ? from_code(index + 2) : from_code(index);
}
-struct XMMRegister {
- static const int kNumAllocatableRegisters = 7;
- static const int kNumRegisters = 8;
+struct IntelDoubleRegister {
+ static const int kMaxNumAllocatableRegisters = 7;
+ explicit IntelDoubleRegister(int code) { code_ = code; }
+ static int NumAllocatableRegisters();
+ static int NumRegisters();
+ static const char* AllocationIndexToString(int index);
- static int ToAllocationIndex(XMMRegister reg) {
+ static int ToAllocationIndex(IntelDoubleRegister reg) {
ASSERT(reg.code() != 0);
return reg.code() - 1;
}
+ static IntelDoubleRegister FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ return from_code(index + 1);
+ }
+
+ static IntelDoubleRegister from_code(int code) {
+ return IntelDoubleRegister(code);
+ }
+
+ bool is_valid() const {
+ return 0 <= code_ && code_ < NumRegisters();
+ }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+
+ int code_;
+};
+
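Note the off-by-one in To/FromAllocationIndex above: register code 0 appears to be reserved as a scratch register, so allocation index i maps to code i + 1 and codes 1..7 are the allocatable set. A quick standalone check of the round trip, with free functions standing in for the struct's methods:

#include <cassert>

int ToAllocationIndex(int code) { assert(code != 0); return code - 1; }
int FromAllocationIndex(int index) {
  assert(index >= 0 && index < 7);
  return index + 1;  // skip the reserved register 0
}

int main() {
  for (int index = 0; index < 7; ++index) {
    assert(ToAllocationIndex(FromAllocationIndex(index)) == index);
  }
}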
+struct XMMRegister : IntelDoubleRegister {
+ static const int kNumAllocatableRegisters = 7;
+ static const int kNumRegisters = 8;
+
+ explicit XMMRegister(int code) : IntelDoubleRegister(code) {}
+
+ static XMMRegister from_code(int code) {
+ XMMRegister r = XMMRegister(code);
+ return r;
+ }
+
+ bool is(XMMRegister reg) const { return code_ == reg.code_; }
+
static XMMRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
return from_code(index + 1);
}
};
return names[index];
}
+};
- static XMMRegister from_code(int code) {
- XMMRegister r = { code };
- return r;
+
+const XMMRegister xmm0 = XMMRegister(0);
+const XMMRegister xmm1 = XMMRegister(1);
+const XMMRegister xmm2 = XMMRegister(2);
+const XMMRegister xmm3 = XMMRegister(3);
+const XMMRegister xmm4 = XMMRegister(4);
+const XMMRegister xmm5 = XMMRegister(5);
+const XMMRegister xmm6 = XMMRegister(6);
+const XMMRegister xmm7 = XMMRegister(7);
+
+struct X87TopOfStackRegister : IntelDoubleRegister {
+ static const int kNumAllocatableRegisters = 1;
+ static const int kNumRegisters = 1;
+
+ explicit X87TopOfStackRegister(int code)
+ : IntelDoubleRegister(code) {}
+
+ bool is(X87TopOfStackRegister reg) const {
+ return code_ == reg.code_;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(XMMRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
+ static const char* AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ const char* const names[] = {
+ "st0",
+ };
+ return names[index];
}
- int code_;
+ static int ToAllocationIndex(X87TopOfStackRegister reg) {
+ ASSERT(reg.code() == 0);
+ return 0;
+ }
};
+const X87TopOfStackRegister x87tos = X87TopOfStackRegister(0);
-const XMMRegister xmm0 = { 0 };
-const XMMRegister xmm1 = { 1 };
-const XMMRegister xmm2 = { 2 };
-const XMMRegister xmm3 = { 3 };
-const XMMRegister xmm4 = { 4 };
-const XMMRegister xmm5 = { 5 };
-const XMMRegister xmm6 = { 6 };
-const XMMRegister xmm7 = { 7 };
-
-
-typedef XMMRegister DoubleRegister;
+typedef IntelDoubleRegister DoubleRegister;
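Making XMMRegister and X87TopOfStackRegister subclasses of IntelDoubleRegister, and retargeting the DoubleRegister typedef at the base, works because neither subtype adds data members: slicing a register back to the base preserves its only state, code_. A sketch of why by-value passing stays safe (names are stand-ins):

struct BaseDoubleReg {
  explicit BaseDoubleReg(int code) : code_(code) {}
  int code_;
};

struct XmmLikeReg : BaseDoubleReg {
  explicit XmmLikeReg(int code) : BaseDoubleReg(code) {}
};

// Slicing is harmless here: the base carries all the state there is.
int CodeOf(BaseDoubleReg reg) { return reg.code_; }

int main() { return CodeOf(XmmLikeReg(3)) == 3 ? 0 : 1; }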
enum Condition {
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve all registers across the notification; this is important for
+    // compiled stubs that tail-call the runtime on deopt and pass their
+    // parameters in registers.
+ __ pushad();
+ __ CallRuntime(Runtime::kNotifyICMiss, 0);
+ __ popad();
+ // Tear down internal frame.
+ }
+
+  __ pop(MemOperand(esp, 0)); // Ignore the state offset.
+ __ ret(0); // Return to IC Miss stub, continuation still on stack.
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
namespace v8 {
namespace internal {
+
+CodeStubInterfaceDescriptor*
+ KeyedLoadFastElementStub::GetInterfaceDescriptor(Isolate* isolate) {
+ static CodeStubInterfaceDescriptor* result = NULL;
+ if (result == NULL) {
+ Handle<Code> miss = isolate->builtins()->KeyedLoadIC_Miss();
+ static Register registers[] = { edx, ecx };
+ static CodeStubInterfaceDescriptor info = {
+ 2,
+ registers,
+ miss
+ };
+ result = &info;
+ }
+ return result;
+}
+
+
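GetInterfaceDescriptor builds its descriptor once and hands out the same pointer ever after, which is safe here because the contents never change. The core of the pattern, reduced to stand-in types (Descriptor and the register codes below are illustrative, not the V8 declarations):

struct Descriptor {
  int register_param_count;
  const int* register_params;
};

const Descriptor* GetDescriptor() {
  static const int kParams[] = { 2, 1 };           // e.g. edx, ecx by code
  static const Descriptor kInfo = { 2, kParams };  // built on first use
  return &kInfo;
}

int main() { return GetDescriptor()->register_param_count == 2 ? 0 : 1; }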
#define __ ACCESS_MASM(masm)
void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&loaded);
} else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope sse4_scope(SSE4_1);
__ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
__ fstp(0);
__ ret(kPointerSize);
} else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
if (tagged) {
__ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
} else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ sub(esp, Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), xmm1);
if (tagged) {
__ ret(kPointerSize);
} else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
ExternalReference(RuntimeFunction(), masm->isolate());
__ TailCallExternalReference(runtime, 1, 1);
} else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
__ bind(&runtime_call_clear_stack);
__ bind(&runtime_call);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CEntryStub save_doubles(1, kSaveFPRegs);
+    // Stubs might already be in the snapshot; detect that and don't
+    // regenerate, since regenerating would corrupt the code stub
+    // initialization state.
+ Code* save_doubles_code;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
+ save_doubles_code = *(save_doubles.GetCode());
+ }
+ save_doubles_code->set_is_pregenerated(true);
+ save_doubles_code->GetIsolate()->set_fp_stubs_generated(true);
+ }
}
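The new guard first looks the CEntry stub up in the cache so that a copy already baked into the snapshot is reused rather than regenerated. The shape of the check, sketched with a plain map standing in for the stub cache and for FindCodeInCache/GetCode:

#include <map>

static std::map<int, int> stub_cache;  // key -> code, both stand-ins

int GetOrGenerateStub(int major_key) {
  std::map<int, int>::iterator it = stub_cache.find(major_key);
  if (it != stub_cache.end()) return it->second;  // FindCodeInCache hit
  int code = major_key * 100;                     // stand-in for GetCode()
  stub_cache[major_key] = code;
  return code;
}

int main() {
  int a = GetOrGenerateStub(7);
  int b = GetOrGenerateStub(7);  // second call reuses the cached stub
  return a == b ? 0 : 1;
}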
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0,
};
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
};
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
};
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
};
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
};
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
};
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
public:
NumberToStringStub() { }
};
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
};
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
Register GetRegThatIsNotEcxOr(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(ecx)) continue;
if (candidate.is(r1)) continue;
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
// TODO(kasperl): This should not be the bailout_id_. It should be
// the ast id. Confusing.
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
- optimized_code_->entry() + pc_offset);
+ compiled_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation =
}
+void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index) {
+ //
+ // FROM TO <-ebp
+ // | .... | | .... |
+ // +-------------------------+ +-------------------------+
+ // | JSFunction continuation | | JSFunction continuation |
+ // +-------------------------+ +-------------------------+<-esp
+ // | | saved frame (ebp) |
+ // | +=========================+<-ebp
+ // | | JSFunction context |
+ // v +-------------------------+
+ // | COMPILED_STUB marker | ebp = saved frame
+ // +-------------------------+ esi = JSFunction context
+ // | |
+ // | ... |
+ // | |
+ // +-------------------------+<-esp
+ //
+ //
+ int output_frame_size = 1 * kPointerSize;
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, 0);
+ Code* notify_miss =
+ isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
+ output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+ output_frame->SetContinuation(
+ reinterpret_cast<uint32_t>(notify_miss->entry()));
+
+ ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+ int major_key = compiled_code_->major_key();
+ CodeStubInterfaceDescriptor* descriptor =
+ isolate_->code_stub_interface_descriptors()[major_key];
+ Handle<Code> miss_ic(descriptor->deoptimization_handler);
+ output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
+ unsigned input_frame_size = input_->GetFrameSize();
+ intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
+ output_frame->SetFrameSlot(0, value);
+ value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
+ output_frame->SetRegister(ebp.code(), value);
+ output_frame->SetFp(value);
+ value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
+ output_frame->SetRegister(esi.code(), value);
+
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ ASSERT(opcode == Translation::REGISTER);
+ USE(opcode);
+ int input_reg = iterator->Next();
+ intptr_t input_value = input_->GetRegister(input_reg);
+ output_frame->SetRegister(edx.code(), input_value);
+
+ int32_t next = iterator->Next();
+ opcode = static_cast<Translation::Opcode>(next);
+ ASSERT(opcode == Translation::REGISTER);
+ input_reg = iterator->Next();
+ input_value = input_->GetRegister(input_reg);
+ output_frame->SetRegister(ecx.code(), input_value);
+
+ ASSERT(frame_index == 0);
+ output_[frame_index] = output_frame;
+}
+
+
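After the diagram's frame is laid out, the function consumes exactly two REGISTER records from the translation stream to learn which input-frame registers hold the stub's parameters (edx and ecx here). A reduced model of that consumption, with Iter standing in for TranslationIterator and the opcode constant for Translation::Opcode:

#include <cassert>
#include <vector>

enum Opcode { REGISTER = 1 };

struct Iter {
  std::vector<int> data;
  size_t pos;
  int Next() { return data[pos++]; }
};

int main() {
  Iter it;
  it.data.push_back(REGISTER); it.data.push_back(4);  // e.g. edx <- reg 4
  it.data.push_back(REGISTER); it.data.push_back(2);  // e.g. ecx <- reg 2
  it.pos = 0;
  for (int param = 0; param < 2; ++param) {
    assert(it.Next() == REGISTER);  // opcode check mirrors the ASSERTs
    int input_reg = it.Next();      // source register in the input frame
    assert(input_reg >= 0);
  }
}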
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
Builtins* builtins = isolate_->builtins();
}
input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
- CpuFeatures::Scope scope(SSE2);
Isolate* isolate = masm()->isolate();
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::kNumAllocatableRegisters;
__ sub(esp, Immediate(kDoubleRegsSize));
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movdbl(Operand(esp, offset), xmm_reg);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ movdbl(Operand(esp, offset), xmm_reg);
+ }
}
__ pushad();
__ pop(Operand(ebx, offset));
}
- // Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize;
- __ movdbl(xmm0, Operand(esp, src_offset));
- __ movdbl(Operand(ebx, dst_offset), xmm0);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ // Fill in the double input registers.
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize;
+ __ movdbl(xmm0, Operand(esp, src_offset));
+ __ movdbl(Operand(ebx, dst_offset), xmm0);
+ }
}
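+  // Reset the x87 FPU to a clean state so no stale stack entries survive.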
+ __ fninit();
// Remove the bailout id and the double registers from the stack.
if (type() == EAGER) {
// limit and copy the contents of the activation frame to the input
// frame description.
__ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
+ Label pop_loop_header;
+ __ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(edx, 0));
__ add(edx, Immediate(sizeof(uint32_t)));
+ __ bind(&pop_loop_header);
__ cmp(ecx, esp);
__ j(not_equal, &pop_loop);
}
// Replace the current frame with the output frames.
- Label outer_push_loop, inner_push_loop;
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
// Outer loop state: eax = current FrameDescription**, edx = one past the
// last FrameDescription**.
__ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
__ mov(eax, Operand(eax, Deoptimizer::output_offset()));
__ lea(edx, Operand(eax, edx, times_4, 0));
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: ebx = current FrameDescription*, ecx = loop index.
__ mov(ebx, Operand(eax, 0));
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(ecx, Immediate(sizeof(uint32_t)));
__ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
+ __ bind(&inner_loop_header);
__ test(ecx, ecx);
__ j(not_zero, &inner_push_loop);
__ add(eax, Immediate(kPointerSize));
+ __ bind(&outer_loop_header);
__ cmp(eax, edx);
__ j(below, &outer_push_loop);
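The newly inserted *_loop_header labels and jumps convert the emitted do-while loops into while loops, which matters now that a frame or copy region can be empty for stubs. The same transformation in plain C++ terms, using gotos to mirror the emitted control flow:

#include <cassert>

int CopyWords(int count) {
  int copied = 0;
  goto header;               // new: test before the first iteration
 body:
  ++copied;
  --count;
 header:
  if (count != 0) goto body; // the old code fell straight into the body
  return copied;
}

int main() { assert(CopyWords(0) == 0 && CopyWords(3) == 3); }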
// In case of OSR, we have to restore the XMM registers.
if (type() == OSR) {
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movdbl(xmm_reg, Operand(ebx, src_offset));
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int src_offset = i * kDoubleSize + double_regs_offset;
+ __ movdbl(xmm_reg, Operand(ebx, src_offset));
+ }
}
}
#if defined(V8_TARGET_ARCH_IA32)
#include "ia32/lithium-codegen-ia32.h"
+#include "ic.h"
#include "code-stubs.h"
#include "deoptimizer.h"
#include "stub-cache.h"
HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
- CpuFeatures::Scope scope(SSE2);
CodeStub::GenerateFPStubs();
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
- !chunk()->graph()->is_recursive()) ||
- !info()->osr_ast_id().IsNone();
+ dynamic_frame_alignment_ = info()->IsOptimizing() &&
+ ((chunk()->num_double_slots() > 2 &&
+ !chunk()->graph()->is_recursive()) ||
+ !info()->osr_ast_id().IsNone());
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
+ GenerateJumpTable() &&
GenerateSafepointTable();
}
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
- Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+ if (!info()->IsStub()) {
+ Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+ }
}
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ __ int3();
+ }
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). ecx is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ test(ecx, Operand(ecx));
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ mov(Operand(esp, receiver_offset),
- Immediate(isolate()->factory()->undefined_value()));
- __ bind(&ok);
- }
-
-
- if (dynamic_frame_alignment_) {
- // Move state of dynamic frame alignment into edx.
- __ mov(edx, Immediate(kNoAlignmentPadding));
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). ecx is zero for method calls and non-zero for
+ // function calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ Label ok;
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ __ mov(Operand(esp, receiver_offset),
+ Immediate(isolate()->factory()->undefined_value()));
+ __ bind(&ok);
+ }
- Label do_not_pad, align_loop;
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- // Align esp + 4 to a multiple of 2 * kPointerSize.
- __ test(esp, Immediate(kPointerSize));
- __ j(not_zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- __ mov(edx, Immediate(kAlignmentPaddingPushed));
- // Copy arguments, receiver, and return address.
- __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
- __ bind(&do_not_pad);
+ if (dynamic_frame_alignment_) {
+ // Move state of dynamic frame alignment into edx.
+ __ mov(edx, Immediate(kNoAlignmentPadding));
+
+ Label do_not_pad, align_loop;
+ STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+ // Align esp + 4 to a multiple of 2 * kPointerSize.
+ __ test(esp, Immediate(kPointerSize));
+ __ j(not_zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+ // Copy arguments, receiver, and return address.
+ __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ bind(&do_not_pad);
+ }
}
info()->set_prologue_offset(masm_->pc_offset());
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS function.
+ if (NeedsEagerFrame()) {
+ ASSERT(!frame_is_built_);
+ frame_is_built_ = true;
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ if (info()->IsStub()) {
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ } else {
+ __ push(edi); // Callee's JS function.
+ }
+ }
- if (dynamic_frame_alignment_ && FLAG_debug_code) {
+ if (info()->IsOptimizing() &&
+ dynamic_frame_alignment_ &&
+ FLAG_debug_code) {
__ test(esp, Immediate(kPointerSize));
__ Assert(zero, "frame is expected to be aligned");
}
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
- ASSERT_GE(slots, 1);
- if (slots == 1) {
- if (dynamic_frame_alignment_) {
- __ push(edx);
- } else {
- __ push(Immediate(kNoAlignmentPadding));
- }
- } else {
- if (FLAG_debug_code) {
- __ mov(Operand(eax), Immediate(slots));
- Label loop;
- __ bind(&loop);
- __ push(Immediate(kSlotsZapValue));
- __ dec(eax);
- __ j(not_zero, &loop);
+ ASSERT(slots != 0 || !info()->IsOptimizing());
+ if (slots > 0) {
+ if (slots == 1) {
+ if (dynamic_frame_alignment_) {
+ __ push(edx);
+ } else {
+ __ push(Immediate(kNoAlignmentPadding));
+ }
} else {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
- #ifdef _MSC_VER
- // On windows, you may not access the stack more than one page below
- // the most recently mapped page. To make the allocated area randomly
- // accessible, we write to each page in turn (the value is irrelevant).
- const int kPageSize = 4 * KB;
- for (int offset = slots * kPointerSize - kPageSize;
- offset > 0;
- offset -= kPageSize) {
- __ mov(Operand(esp, offset), eax);
+ if (FLAG_debug_code) {
+ __ mov(Operand(eax), Immediate(slots));
+ Label loop;
+ __ bind(&loop);
+ __ push(Immediate(kSlotsZapValue));
+ __ dec(eax);
+ __ j(not_zero, &loop);
+ } else {
+ __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+ // On windows, you may not access the stack more than one page below
+ // the most recently mapped page. To make the allocated area randomly
+ // accessible, we write to each page in turn (the value is irrelevant).
+ const int kPageSize = 4 * KB;
+ for (int offset = slots * kPointerSize - kPageSize;
+ offset > 0;
+ offset -= kPageSize) {
+ __ mov(Operand(esp, offset), eax);
+ }
+#endif
}
- #endif
- }
- // Store dynamic frame alignment state in the first local.
- if (dynamic_frame_alignment_) {
- __ mov(Operand(ebp,
- JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
- edx);
- } else {
- __ mov(Operand(ebp,
- JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
- Immediate(kNoAlignmentPadding));
+ // Store dynamic frame alignment state in the first local.
+ if (dynamic_frame_alignment_) {
+ __ mov(Operand(ebp,
+ JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+ edx);
+ } else {
+ __ mov(Operand(ebp,
+ JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
+ Immediate(kNoAlignmentPadding));
+ }
}
}
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in edi.
}
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// We have not executed any compiled code yet, so esi still holds the
// incoming context.
__ CallRuntime(Runtime::kTraceEnter, 0);
}
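+// Emit the deoptimization trampolines collected in jump_table_. Entries
+// that need a frame build a STUB frame before entering the deoptimizer.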
+bool LCodeGen::GenerateJumpTable() {
+ Label needs_frame_not_call;
+ Label needs_frame_is_call;
+ for (int i = 0; i < jump_table_.length(); i++) {
+ __ bind(&jump_table_[i].label);
+ Address entry = jump_table_[i].address;
+ if (jump_table_[i].needs_frame) {
+ __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
+ if (jump_table_[i].is_lazy_deopt) {
+ if (needs_frame_is_call.is_bound()) {
+ __ jmp(&needs_frame_is_call);
+ } else {
+ __ bind(&needs_frame_is_call);
+ __ push(esi);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ // Push a PC inside the function so that the deopt code can find where
+ // the deopt comes from. It doesn't have to be the precise return
+ // address of a "calling" LAZY deopt; it only has to be somewhere
+ // inside the code body.
+ Label push_approx_pc;
+ __ call(&push_approx_pc);
+ __ bind(&push_approx_pc);
+ // Push the continuation which was stashed where the ebp should
+ // be. Replace it with the saved ebp.
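+ // The approximate-PC push above adds one extra slot, so the
+ // continuation sits at 3 * kPointerSize here rather than at
+ // 2 * kPointerSize as in the non-call variant below.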
+ __ push(MemOperand(esp, 3 * kPointerSize));
+ __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
+ __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+ __ ret(0); // Call the continuation without clobbering registers.
+ }
+ } else {
+ if (needs_frame_not_call.is_bound()) {
+ __ jmp(&needs_frame_not_call);
+ } else {
+ __ bind(&needs_frame_not_call);
+ __ push(esi);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ // Push the continuation which was stashed where the ebp should
+ // be. Replace it with the saved ebp.
+ __ push(MemOperand(esp, 2 * kPointerSize));
+ __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
+ __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
+ __ ret(0); // Call the continuation without clobbering registers.
+ }
+ }
+ } else {
+ if (jump_table_[i].is_lazy_deopt) {
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+ }
+ }
+ }
+ return !is_aborted();
+}
+
+
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred build frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ // Build the frame in such a way that esi isn't trashed.
+ __ push(ebp); // Caller's frame pointer.
+ __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ __ lea(ebp, Operand(esp, 2 * kPointerSize));
+ }
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred destroy frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(frame_is_built_);
+ frame_is_built_ = false;
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ }
__ jmp(code->exit());
}
}
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
+ if (!info()->IsStub()) {
+ // For lazy deoptimization we need space to patch a call after every call.
+ // Ensure there is always space for such patching, even if the code ends
+ // in a call.
+ int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
+ while (masm()->pc_offset() < target_offset) {
+ masm()->nop();
+ }
+ }
safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
}
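+// Without SSE2, double values are kept on the x87 FP stack, so a double
+// register operand denotes the top of that stack.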
+bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
+ return op->IsDoubleRegister();
+}
+
+
Register LCodeGen::ToRegister(LOperand* op) const {
ASSERT(op->IsRegister());
return ToRegister(op->index());
translation,
arguments_index,
arguments_count);
- int closure_id = *info()->closure() != *environment->closure()
+ bool has_closure_id = !info()->closure().is_null() &&
+ *info()->closure() != *environment->closure();
+ int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
switch (environment->frame_type()) {
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
+ default:
+ UNREACHABLE();
}
// Inlined frames which push their arguments cause the index to be
__ CallRuntime(fun, argc);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+
+ ASSERT(info()->is_calling());
}
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+
+ ASSERT(info()->is_calling());
}
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Deoptimizer::BailoutType bailout_type = frame_is_built_
+ ? Deoptimizer::EAGER
+ : Deoptimizer::LAZY;
+ Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
__ popfd();
}
+ ASSERT(info()->IsStub() || frame_is_built_);
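+ // Stubs enter the deoptimizer through a call (lazy-style) so that a
+ // return address marking the deopt site is left on the stack.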
+ bool lazy_deopt_needed = info()->IsStub();
if (cc == no_condition) {
if (FLAG_trap_on_deopt) __ int3();
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+ if (lazy_deopt_needed) {
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+ }
} else {
+ Label done;
if (FLAG_trap_on_deopt) {
- Label done;
__ j(NegateCondition(cc), &done, Label::kNear);
__ int3();
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&done);
+ }
+ if (!lazy_deopt_needed && frame_is_built_) {
+ if (FLAG_trap_on_deopt) {
+ __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
+ }
} else {
- __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
+ // We often have several deopts to the same entry; reuse the last
+ // jump entry if this is the case.
+ if (jump_table_.is_empty() ||
+ jump_table_.last().address != entry ||
+ jump_table_.last().needs_frame != !frame_is_built_ ||
+ jump_table_.last().is_lazy_deopt != lazy_deopt_needed) {
+ JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt_needed);
+ jump_table_.Add(table_entry, zone());
+ }
+ if (FLAG_trap_on_deopt) {
+ __ jmp(&jump_table_.last().label);
+ } else {
+ __ j(cc, &jump_table_.last().label);
+ }
}
+ __ bind(&done);
}
}
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope scope(SSE4_1);
+ CpuFeatures::Scope scope1(SSE2);
+ CpuFeatures::Scope scope2(SSE4_1);
if (lower != 0) {
__ Set(temp, Immediate(lower));
__ movd(res, Operand(temp));
__ pinsrd(res, Operand(temp), 1);
}
} else {
+ CpuFeatures::Scope scope(SSE2);
__ Set(temp, Immediate(upper));
__ movd(res, Operand(temp));
__ psllq(res, 32);
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ CpuFeatures::Scope scope(SSE2);
LOperand* left = instr->left();
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister left = ToDoubleRegister(instr->left());
XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
__ addsd(left, right);
break;
case Token::SUB:
- __ subsd(left, right);
- break;
+ __ subsd(left, right);
+ break;
case Token::MUL:
__ mulsd(left, right);
break;
void LCodeGen::DoBranch(LBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
+ CpuFeatures::Scope scope(SSE2);
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ CpuFeatures::Scope scope(SSE2);
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Preserve the return value on the stack and rely on the runtime call
// to return the value in the same register. We're leaving the code
// managed by the register allocator and tearing down the frame, it's
__ mov(edx, Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
}
- __ mov(esp, ebp);
- __ pop(ebp);
+ if (NeedsEagerFrame()) {
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ }
if (dynamic_frame_alignment_) {
Label no_padding;
__ cmp(edx, Immediate(kNoAlignmentPadding));
__ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
__ bind(&no_padding);
}
- __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
+ if (info()->IsStub()) {
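+ // Stub parameters arrive in registers (see the interface descriptor),
+ // so there are no stack arguments to pop; restore the context and
+ // return.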
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ Ret();
+ } else {
+ __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
+ }
}
0,
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ XMMRegister result(ToDoubleRegister(instr->result()));
+ __ movss(result, operand);
+ __ cvtss2sd(result, result);
+ } else {
+ __ fld_s(operand);
+ HandleX87FPReturnValue(instr);
+ }
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movdbl(ToDoubleRegister(instr->result()), operand);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ __ movdbl(ToDoubleRegister(instr->result()), operand);
+ } else {
+ __ fld_d(operand);
+ HandleX87FPReturnValue(instr);
+ }
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
}
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- XMMRegister result = ToDoubleRegister(instr->result());
+void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) {
+ if (IsX87TopOfStack(instr->result())) {
+ // Return value is already on the stack. If the value has no uses, then
+ // pop it off the FP stack. Otherwise, make sure that there are enough
+ // copies of the value on the stack to feed all of the usages, e.g.
+ // when the following instruction uses the return value in multiple
+ // inputs.
+ int count = instr->hydrogen_value()->UseCount();
+ if (count == 0) {
+ __ fstp(0);
+ } else {
+ count--;
+ ASSERT(count <= 7);
+ while (count-- > 0) {
+ __ fld(0);
+ }
+ }
+ } else {
+ __ fstp_d(ToOperand(instr->result()));
+ }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
sizeof(kHoleNanLower32);
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
- __ movdbl(result, double_load_operand);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ XMMRegister result = ToDoubleRegister(instr->result());
+ __ movdbl(result, double_load_operand);
+ } else {
+ __ fld_d(double_load_operand);
+ HandleX87FPReturnValue(instr);
+ }
}
ASSERT(instr->value()->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
+ CpuFeatures::Scope scope(SSE2);
if (r.IsDouble()) {
XMMRegister scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->value());
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
}
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ sqrtsd(input_reg, input_reg);
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = ToRegister(instr->temp());
DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
+ CpuFeatures::Scope scope(SSE2);
// Having marked this instruction as a call we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(SSE2);
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
Label positive, done, zero;
void LCodeGen::DoMathExp(LMathExp* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
Register temp1 = ToRegister(instr->temp1());
}
DeoptimizeIf(below_equal, instr->environment());
} else {
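+ // A tagged index is not necessarily a smi; check it dynamically before
+ // the unsigned length comparison below.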
+ if (instr->hydrogen()->index()->representation().IsTagged() &&
+ !instr->hydrogen()->index()->type().IsSmi()) {
+ __ test(ToRegister(instr->index()), Immediate(kSmiTagMask));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
__ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
DeoptimizeIf(above_equal, instr->environment());
}
0,
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ CpuFeatures::Scope scope(SSE2);
__ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
__ movss(operand, xmm0);
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ CpuFeatures::Scope scope(SSE2);
__ movdbl(operand, ToDoubleRegister(instr->value()));
} else {
Register value = ToRegister(instr->value());
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister value = ToDoubleRegister(instr->value());
if (instr->NeedsCanonicalization()) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
- __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister() || input->IsStackSlot());
+ LOperand* output = instr->result();
+ ASSERT(output->IsDoubleRegister());
+ __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+ } else {
+ UNREACHABLE();
+ }
}
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ CpuFeatures::Scope scope(SSE2);
LOperand* input = instr->value();
LOperand* output = instr->result();
LOperand* temp = instr->temp();
// the value in there. If that fails, call the runtime system.
__ SmiUntag(reg);
__ xor_(reg, 0x80000000);
- __ cvtsi2sd(xmm0, Operand(reg));
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope feature_scope(SSE2);
+ __ cvtsi2sd(xmm0, Operand(reg));
+ } else {
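+ // Without SSE2, convert via the x87 FPU: spill the untagged integer to
+ // the stack and load it with fild_s.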
+ __ push(reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(reg);
+ }
} else {
- __ LoadUint32(xmm0, reg, xmm1);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope feature_scope(SSE2);
+ __ LoadUint32(xmm0, reg, xmm1);
+ } else {
+ UNREACHABLE();
+ }
}
if (FLAG_inline_new) {
// Done. Put the value in xmm0 into the value of the allocated heap
// number.
__ bind(&done);
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope feature_scope(SSE2);
+ __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ }
__ StoreToSafepointRegisterSlot(reg, reg);
}
LNumberTagD* instr_;
};
- XMMRegister input_reg = ToDoubleRegister(instr->value());
Register reg = ToRegister(instr->result());
Register tmp = ToRegister(instr->temp());
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+ } else {
+ if (!IsX87TopOfStack(instr->value())) {
+ __ fld_d(ToOperand(instr->value()));
+ }
+ __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ }
}
DeoptimizeIf(not_equal, instr->environment());
DeoptimizeIf(parity_even, instr->environment()); // NaN.
}
- } else {
+ } else if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
// Deoptimize if we don't have a heap number.
__ RecordComment("Deferred TaggedToI: not a heap number");
DeoptimizeIf(not_equal, instr->environment());
__ RecordComment("Deferred TaggedToI: minus zero");
DeoptimizeIf(not_zero, instr->environment());
}
+ } else {
+ UNREACHABLE();
}
__ bind(&done);
}
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
- Register input_reg = ToRegister(input);
- XMMRegister result_reg = ToDoubleRegister(result);
-
- bool deoptimize_on_minus_zero =
- instr->hydrogen()->deoptimize_on_minus_zero();
- Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
-
- EmitNumberUntagD(input_reg,
- temp_reg,
- result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
- deoptimize_on_minus_zero,
- instr->environment());
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ Register input_reg = ToRegister(input);
+ XMMRegister result_reg = ToDoubleRegister(result);
+
+ bool deoptimize_on_minus_zero =
+ instr->hydrogen()->deoptimize_on_minus_zero();
+ Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
+
+ EmitNumberUntagD(input_reg,
+ temp_reg,
+ result_reg,
+ instr->hydrogen()->deoptimize_on_undefined(),
+ deoptimize_on_minus_zero,
+ instr->environment());
+ } else {
+ UNIMPLEMENTED();
+ }
}
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
+ CpuFeatures::Scope scope(SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
void LCodeGen::DoCheckMapCommon(Register reg,
Handle<Map> map,
CompareMapMode mode,
- LEnvironment* env) {
+ LInstruction* instr) {
Label success;
__ CompareMap(reg, map, &success, mode);
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr->environment());
__ bind(&success);
}
__ j(equal, &success);
}
Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
+ DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
__ bind(&success);
}
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampDoubleToUint8(value_reg, xmm0, result_reg);
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ CpuFeatures::Scope scope(SSE2);
+
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
Label is_smi, done, heap_number;
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr);
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Check the holder map.
DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr);
}
void LCodeGen::EnsureSpaceForLazyDeopt() {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
- __ Nop(padding_size);
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ int patch_size = Deoptimizer::patch_size();
+ if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+ int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ __ Nop(padding_size);
+ }
}
last_lazy_deopt_pc_ = masm()->pc_offset();
}
current_instruction_(-1),
instructions_(chunk->instructions()),
deoptimizations_(4, info->zone()),
+ jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
dynamic_frame_alignment_(false),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
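+ // Optimized functions always build a frame eagerly; stubs need one only
+ // if they use stack slots or make calls outside deferred code.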
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
// Support for converting LOperands to assembler types.
Operand ToOperand(LOperand* op) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
+ bool IsX87TopOfStack(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
Immediate ToInteger32Immediate(LOperand* op) const {
Handle<Object> ToHandle(LConstantOperand* op) const;
+ // A utility for instructions that return floating point values on X87.
+ void HandleX87FPReturnValue(LInstruction* instr);
+
// The operand denoting the second word (the one with a higher address) of
// a double stack slot.
Operand HighOperand(LOperand* op);
Label* map_check);
void DoCheckMapCommon(Register reg, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
+ CompareMapMode mode, LInstruction* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
+ int GetParameterCount() const { return info()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
bool GeneratePrologue();
bool GenerateBody();
bool GenerateDeferredCode();
- // Pad the reloc info to ensure that we have enough space to patch during
- // deoptimization.
- bool GenerateRelocPadding();
+ bool GenerateJumpTable();
bool GenerateSafepointTable();
enum SafepointMode {
MacroAssembler* const masm_;
CompilationInfo* const info_;
+ struct JumpTableEntry {
+ inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
+ : label(),
+ address(entry),
+ needs_frame(frame),
+ is_lazy_deopt(is_lazy) { }
+ Label label;
+ Address address;
+ bool needs_frame;
+ bool is_lazy_deopt;
+ };
+
int current_block_;
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;
bool dynamic_frame_alignment_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
+ bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->masm_->PushSafepointRegisters();
codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+ ASSERT(codegen_->info()->is_calling());
}
~PushSafepointRegistersScope() {
Register LGapResolver::GetFreeRegisterNot(Register reg) {
int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
return Register::FromAllocationIndex(i);
}
if (!moves_.is_empty()) return false;
if (spilled_register_ >= 0) return false;
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
if (source_uses_[i] != 0) return false;
if (destination_uses_[i] != 0) return false;
}
// 3. Prefer to spill a register that is not used in any remaining move
// because it will not need to be restored until the end.
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
Register scratch = Register::FromAllocationIndex(i);
__ push(scratch);
}
} else if (source->IsDoubleRegister()) {
- XMMRegister src = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movaps(dst, src);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ XMMRegister src = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movaps(dst, src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ __ movdbl(dst, src);
+ }
} else {
- ASSERT(destination->IsDoubleStackSlot());
- Operand dst = cgen_->ToOperand(destination);
- __ movdbl(dst, src);
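+ // Without SSE2 the allocator never assigns XMM registers, so a
+ // double-register source cannot occur here.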
+ UNREACHABLE();
}
} else if (source->IsDoubleStackSlot()) {
- ASSERT(destination->IsDoubleRegister() ||
- destination->IsDoubleStackSlot());
- Operand src = cgen_->ToOperand(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movdbl(dst, src);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ ASSERT(destination->IsDoubleRegister() ||
+ destination->IsDoubleStackSlot());
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movdbl(dst, src);
+ } else {
+ // We rely on having xmm0 available as a fixed scratch register.
+ Operand dst = cgen_->ToOperand(destination);
+ __ movdbl(xmm0, src);
+ __ movdbl(dst, xmm0);
+ }
} else {
- // We rely on having xmm0 available as a fixed scratch register.
- Operand dst = cgen_->ToOperand(destination);
- __ movdbl(xmm0, src);
- __ movdbl(dst, xmm0);
+ UNREACHABLE();
}
-
} else {
UNREACHABLE();
}
__ mov(src, tmp0);
}
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ CpuFeatures::Scope scope(SSE2);
// XMM register-register swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = cgen_->ToDoubleRegister(source);
ZoneList<LMoveOperands> moves_;
// Source and destination use counts for the general purpose registers.
- int source_uses_[Register::kNumAllocatableRegisters];
- int destination_uses_[Register::kNumAllocatableRegisters];
+ int source_uses_[Register::kMaxNumAllocatableRegisters];
+ int destination_uses_[Register::kMaxNumAllocatableRegisters];
// If we had to spill on demand, the currently spilled register's
// allocation index.
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
register_spills_[i] = NULL;
}
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
double_register_spills_[i] = NULL;
}
}
status_ = BUILDING;
// Reserve the first spill slot for the state of dynamic alignment.
- int alignment_state_index = chunk_->GetNextSpillIndex(false);
- ASSERT_EQ(alignment_state_index, 0);
- USE(alignment_state_index);
+ if (info()->IsOptimizing()) {
+ int alignment_state_index = chunk_->GetNextSpillIndex(false);
+ ASSERT_EQ(alignment_state_index, 0);
+ USE(alignment_state_index);
+ }
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
}
+LUnallocated* LChunkBuilder::ToUnallocated(X87TopOfStackRegister reg) {
+ return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ X87TopOfStackRegister::ToAllocationIndex(reg));
+}
+
+
LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
return Use(value, ToUnallocated(fixed_register));
}
}
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineX87TOS(
+ LTemplateInstruction<1, I, T>* instr) {
+ return Define(instr, ToUnallocated(x87tos));
+}
+
+
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+
#ifdef DEBUG
instr->VerifyCall();
#endif
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ // Only mark conversions that might need to allocate as calling rather
+ // than all changes, so that simple, non-allocating conversions do not
+ // force building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
// Temp register only necessary for minus zero check.
LOperand* temp = instr->deoptimize_on_minus_zero()
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
+ info()->MarkAsDeferredCalling();
+ LOperand* value = CpuFeatures::IsSupported(SSE2)
+ ? UseRegisterAtStart(instr->value())
+ : UseAtStart(instr->value());
LOperand* temp = TempRegister();
// Make sure that temp and result_temp are different registers.
DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (info()->IsOptimizing()) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ Register reg = descriptor->register_params[instr->index()];
+ return DefineFixed(result, reg);
+ }
}
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ info()->MarkAsDeferredCalling();
if (instr->is_function_entry()) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(new(zone()) LStackCheck(context), instr);
void MarkAsCall() { is_call_ = true; }
// Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
+ bool ClobbersTemps() const { return is_call_; }
+ bool ClobbersRegisters() const { return is_call_; }
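+ // Without SSE2, doubles live on the x87 FP stack, whose contents are
+ // not tracked across instructions, so every instruction is treated as
+ // clobbering double registers.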
+ virtual bool ClobbersDoubleRegisters() const {
+ return is_call_ || !CpuFeatures::IsSupported(SSE2);
+ }
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
class LInstructionGap: public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+ virtual bool ClobbersDoubleRegisters() const { return false; }
DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
};
inputs_[0] = elements;
inputs_[1] = key;
}
-
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
ElementsKind elements_kind() const {
return hydrogen()->is_external();
}
+ virtual bool ClobbersDoubleRegisters() const {
+ return !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
virtual void PrintDataTo(StringStream* stream);
uint32_t additional_index() const { return hydrogen()->index_offset(); }
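+ // Keys with tagged representation are assumed to be smis at this point.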
+ bool key_is_smi() {
+ return hydrogen()->key()->representation().IsTagged();
+ }
};
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+ LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+ LOperand* double_register_spills_[
+ DoubleRegister::kMaxNumAllocatableRegisters];
};
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(XMMRegister reg);
+ LUnallocated* ToUnallocated(X87TopOfStackRegister reg);
// Methods for setting up define-use relationships.
MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
template<int I, int T>
LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
XMMRegister reg);
+ template<int I, int T>
+ LInstruction* DefineX87TOS(LTemplateInstruction<1, I, T>* instr);
// Assigns an environment to an instruction. An instruction which can
// deoptimize must have an environment.
LInstruction* AssignEnvironment(LInstruction* instr);
const Runtime::Function* function = Runtime::FunctionForId(id);
Set(eax, Immediate(function->nargs));
mov(ebx, Immediate(ExternalReference(function, isolate())));
- CEntryStub ces(1, kSaveFPRegs);
+ CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs
+ : kDontSaveFPRegs);
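+ // Saving double registers uses SSE2 stores, so only request it when
+ // SSE2 is available.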
CallStub(&ces);
}
Operand SafepointRegisterSlot(Register reg);
static int SafepointRegisterStackIndex(int reg_code);
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class CompiledFrame;
};
// -----------------------------------
ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
- __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
+ Handle<Code> stub = KeyedLoadFastElementStub(
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind).GetCode();
+ __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+ } else {
+ Handle<Code> stub =
+ KeyedLoadDictionaryElementStub().GetCode();
+ __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+ }
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
}
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, failed_allocation, slow;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &miss_force_generic);
- __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
- // ebx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ SmiUntag(ecx); // Untag the index.
- __ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- __ SmiUntag(ecx); // Untag the index.
- __ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsx_w(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzx_w(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- __ mov(eax, Operand(ebx, ecx, times_2, 0));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- __ fld_s(Operand(ebx, ecx, times_2, 0));
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ fld_d(Operand(ebx, ecx, times_4, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // eax: value
- // For floating-point array type:
- // FP(0): value
-
- if (elements_kind == EXTERNAL_INT_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- __ cmp(eax, 0xc0000000);
- __ j(sign, &box_int);
- } else {
- ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &box_int);
- }
-
- __ SmiTag(eax);
- __ ret(0);
-
- __ bind(&box_int);
-
- // Allocate a HeapNumber for the int and perform int-to-double
- // conversion.
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- __ push(eax);
- __ fild_s(Operand(esp, 0));
- __ pop(eax);
- } else {
- ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
- // Need to zero-extend the value.
- // There's no fild variant for unsigned values, so zero-extend
- // to a 64-bit int manually.
- __ push(Immediate(0));
- __ push(eax);
- __ fild_d(Operand(esp, 0));
- __ pop(eax);
- __ pop(eax);
- }
- // FP(0): value
- __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
- // Set the value.
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
- // Set the value.
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- } else {
- __ SmiTag(eax);
- __ ret(0);
- }
-
- // If we fail allocation of the HeapNumber, we still have a value on
- // top of the FPU stack. Remove it.
- __ bind(&failed_allocation);
- __ fstp(0);
- // Fall through to slow case.
-
- // Slow case: Jump to runtime.
- __ bind(&slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
-
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- // Miss case: Jump to runtime.
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
}
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(eax);
-
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Load the result and make sure it's not the hole.
- __ mov(ebx, Operand(eax, ecx, times_2,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
- __ j(equal, &miss_force_generic);
- __ mov(eax, ebx);
- __ ret(0);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(eax);
-
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(eax, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Check for the hole
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmp(FieldOperand(eax, ecx, times_4, offset), Immediate(kHoleNanUpper32));
- __ j(equal, &miss_force_generic);
-
- // Always allocate a heap number for the result.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(xmm0, FieldOperand(eax, ecx, times_4,
- FixedDoubleArray::kHeaderSize));
- } else {
- __ fld_d(FieldOperand(eax, ecx, times_4, FixedDoubleArray::kHeaderSize));
- }
- __ AllocateHeapNumber(eax, ebx, edi, &slow_allocate_heapnumber);
- // Set the value.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(0);
-
- __ bind(&slow_allocate_heapnumber);
- // A value was pushed on the floating point stack before the allocation, if
- // the allocation fails it needs to be removed.
- if (!CpuFeatures::IsSupported(SSE2)) {
- __ fstp(0);
- }
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
ElementsKind elements_kind,
KeyedAccessGrowMode grow_mode) {
ASSERT(grow_mode == DO_NOT_ALLOW_JSARRAY_GROWTH);
- return KeyedLoadElementStub(elements_kind).GetCode();
+ if (IsFastElementsKind(elements_kind) ||
+ IsExternalArrayElementsKind(elements_kind)) {
+ return KeyedLoadFastElementStub(is_js_array, elements_kind).GetCode();
+ } else {
+ ASSERT(elements_kind == DICTIONARY_ELEMENTS);
+ return KeyedLoadDictionaryElementStub().GetCode();
+ }
}
string_tracker_(NULL),
regexp_stack_(NULL),
date_cache_(NULL),
+ code_stub_interface_descriptors_(NULL),
context_exit_happened_(false),
deferred_handles_head_(NULL),
optimizing_compiler_thread_(this) {
delete date_cache_;
date_cache_ = NULL;
+ delete[] code_stub_interface_descriptors_;
+ code_stub_interface_descriptors_ = NULL;
+
delete regexp_stack_;
regexp_stack_ = NULL;
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
+ code_stub_interface_descriptors_ =
+ new CodeStubInterfaceDescriptor*[CodeStub::NUMBER_OF_IDS];
+ memset(code_stub_interface_descriptors_, 0,
+ kPointerSize * CodeStub::NUMBER_OF_IDS);
// Enable logging before setting up the heap
logger_->SetUp();
debug_->SetUp(create_heap_objects);
#endif
+ deoptimizer_data_ = new DeoptimizerData;
+
// If we are deserializing, read the state into the now-empty heap.
if (!create_heap_objects) {
des->Deserialize();
// Quiet the heap NaN if needed on target platform.
if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
- deoptimizer_data_ = new DeoptimizerData;
runtime_profiler_ = new RuntimeProfiler(this);
runtime_profiler_->SetUp();
state_ = INITIALIZED;
time_millis_at_init_ = OS::TimeCurrentMillis();
+
+ if (!create_heap_objects) {
+ // Now that the heap is consistent, it's OK to generate the code for the
+ // deopt entry table that might have been referred to by optimized code in
+ // the snapshot.
+ HandleScope scope(this);
+ Deoptimizer::EnsureCodeForDeoptimizationEntry(
+ Deoptimizer::LAZY,
+ kDeoptTableSerializeEntryCount - 1);
+ }
+
if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
return true;
}
class Bootstrapper;
class CodeGenerator;
class CodeRange;
+struct CodeStubInterfaceDescriptor;
class CompilationCache;
class ContextSlotCache;
class ContextSwitcher;
date_cache_ = date_cache;
}
+ CodeStubInterfaceDescriptor** code_stub_interface_descriptors() {
+ return code_stub_interface_descriptors_;
+ }
+
void IterateDeferredHandles(ObjectVisitor* visitor);
void LinkDeferredHandles(DeferredHandles* deferred_handles);
void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
RegExpStack* regexp_stack_;
DateCache* date_cache_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
+ CodeStubInterfaceDescriptor** code_stub_interface_descriptors_;
// The garbage collector should be a little more aggressive when it knows
// that a context was recently exited.
int LAllocator::FixedDoubleLiveRangeID(int index) {
- return -index - 1 - Register::kNumAllocatableRegisters;
+ return -index - 1 - Register::kMaxNumAllocatableRegisters;
}
LiveRange* LAllocator::FixedLiveRangeFor(int index) {
- ASSERT(index < Register::kNumAllocatableRegisters);
+ ASSERT(index < Register::kMaxNumAllocatableRegisters);
LiveRange* result = fixed_live_ranges_[index];
if (result == NULL) {
result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_);
LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
- ASSERT(index < DoubleRegister::kNumAllocatableRegisters);
+ ASSERT(index < DoubleRegister::NumAllocatableRegisters());
LiveRange* result = fixed_double_live_ranges_[index];
if (result == NULL) {
result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_);
void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
int start = block->first_instruction_index();
int end = block->last_instruction_index();
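+ // Nothing to do for blocks without instructions.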
+ if (start == -1) return;
for (int i = start; i <= end; ++i) {
if (IsGapAt(i)) {
LInstruction* instr = NULL;
Define(curr_position, output, NULL);
}
- if (instr->IsMarkedAsCall()) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ if (instr->ClobbersRegisters()) {
+ for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
if (output == NULL || !output->IsRegister() ||
output->index() != i) {
LiveRange* range = FixedLiveRangeFor(i);
}
}
- if (instr->IsMarkedAsCall()) {
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ if (instr->ClobbersDoubleRegisters()) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
if (output == NULL || !output->IsDoubleRegister() ||
output->index() != i) {
LiveRange* range = FixedDoubleLiveRangeFor(i);
for (TempIterator it(instr); !it.Done(); it.Advance()) {
LOperand* temp = it.Current();
- if (instr->IsMarkedAsCall()) {
+ if (instr->ClobbersTemps()) {
if (temp->IsRegister()) continue;
if (temp->IsUnallocated()) {
LUnallocated* temp_unalloc = LUnallocated::cast(temp);
while (!iterator.Done()) {
found = true;
int operand_index = iterator.Current();
- PrintF("Function: %s\n",
- *chunk_->info()->function()->debug_name()->ToCString());
+ if (chunk_->info()->IsStub()) {
+ CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey();
+ PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
+ } else {
+ ASSERT(chunk_->info()->IsOptimizing());
+ PrintF("Function: %s\n",
+ *chunk_->info()->function()->debug_name()->ToCString());
+ }
PrintF("Value %d used before first definition!\n", operand_index);
LiveRange* range = LiveRangeFor(operand_index);
PrintF("First use is at %d\n", range->first_pos()->pos().Value());
void LAllocator::AllocateGeneralRegisters() {
HPhase phase("L_Allocate general registers", this);
- num_registers_ = Register::kNumAllocatableRegisters;
+ num_registers_ = Register::NumAllocatableRegisters();
AllocateRegisters();
}
void LAllocator::AllocateDoubleRegisters() {
HPhase phase("L_Allocate double registers", this);
- num_registers_ = DoubleRegister::kNumAllocatableRegisters;
+ num_registers_ = DoubleRegister::NumAllocatableRegisters();
mode_ = DOUBLE_REGISTERS;
AllocateRegisters();
}
// TryAllocateFreeReg and AllocateBlockedReg assume this
// when allocating local arrays.
-STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >=
- Register::kNumAllocatableRegisters);
+STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
+ Register::kMaxNumAllocatableRegisters);
bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
- LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters];
+ LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; i++) {
free_until_pos[i] = LifetimePosition::MaxPosition();
}
}
- LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters];
- LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters];
+ LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+ LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
}
ZoneList<LiveRange*> live_ranges_;
// Lists of live ranges
- EmbeddedVector<LiveRange*, Register::kNumAllocatableRegisters>
+ EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
fixed_live_ranges_;
- EmbeddedVector<LiveRange*, DoubleRegister::kNumAllocatableRegisters>
+ EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
fixed_double_live_ranges_;
ZoneList<LiveRange*> unhandled_live_ranges_;
ZoneList<LiveRange*> active_live_ranges_;
}
-Handle<Code> LChunk::Codegen() {
+Handle<Code> LChunk::Codegen(Code::Kind kind) {
MacroAssembler assembler(info()->isolate(), NULL, 0);
LCodeGen generator(this, &assembler, info());
PrintF("Crankshaft Compiler - ");
}
CodeGenerator::MakeCodePrologue(info());
- Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
+ Code::Flags flags = Code::ComputeFlags(kind);
Handle<Code> code =
CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
generator.FinishCode(code);
Zone* zone() const { return info_->zone(); }
- Handle<Code> Codegen();
+ Handle<Code> Codegen(Code::Kind kind);
protected:
LChunk(CompilationInfo* info, HGraph* graph)
case Code::BINARY_OP_IC: // fall through
case Code::COMPARE_IC: // fall through
case Code::TO_BOOLEAN_IC: // fall through
+ case Code::COMPILED_STUB: // fall through
case Code::STUB:
description =
CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
int pos,
bool right_here = false);
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
int Code::major_key() {
ASSERT(kind() == STUB ||
+ kind() == COMPILED_STUB ||
kind() == UNARY_OP_IC ||
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
void Code::set_major_key(int major) {
ASSERT(kind() == STUB ||
+ kind() == COMPILED_STUB ||
kind() == UNARY_OP_IC ||
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
unsigned Code::stack_slots() {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
return StackSlotsField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_stack_slots(unsigned slots) {
CHECK(slots <= (1 << kStackSlotsBitCount));
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
int updated = StackSlotsField::update(previous, slots);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
unsigned Code::safepoint_table_offset() {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
return SafepointTableOffsetField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
void Code::set_safepoint_table_offset(unsigned offset) {
CHECK(offset <= (1 << kSafepointTableOffsetBitCount));
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = SafepointTableOffsetField::update(previous, offset);
break;
}
+ case Translation::COMPILED_STUB_FRAME: {
+ Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next());
+ PrintF(out, "{kind=%d}", stub_kind);
+ break;
+ }
+
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME: {
int function_id = iterator.Next();
switch (kind) {
case FUNCTION: return "FUNCTION";
case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
+ case COMPILED_STUB: return "COMPILED_STUB";
case STUB: return "STUB";
case BUILTIN: return "BUILTIN";
case LOAD_IC: return "LOAD_IC";
}
PrintF("\n");
- if (kind() == OPTIMIZED_FUNCTION) {
+ if (kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB) {
SafepointTable table(this);
PrintF(out, "Safepoints (size = %u)\n", table.size());
for (unsigned i = 0; i < table.length(); i++) {
V(FUNCTION) \
V(OPTIMIZED_FUNCTION) \
V(STUB) \
+ V(COMPILED_STUB) \
V(BUILTIN) \
V(LOAD_IC) \
V(KEYED_LOAD_IC) \
return IsFastDoubleElementsKind(elements_kind());
}
+ inline bool has_fast_elements() {
+ return IsFastElementsKind(elements_kind());
+ }
+
inline bool has_non_strict_arguments_elements() {
return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
}
namespace v8 {
namespace internal {
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
class OptimizingCompiler;
class SharedFunctionInfo;
output_ = NULL;
size_ = 0;
pos_ = 0;
+ InitializeAstVisitor();
}
void PrintDeclarations(ZoneList<Declaration*>* declarations);
void PrintFunctionLiteral(FunctionLiteral* function);
void PrintCaseClause(CaseClause* clause);
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
};
result_assigned_(false),
is_set_(false),
in_try_(false),
- factory_(isolate(), zone) { }
+ factory_(Isolate::Current(), zone) {
+ InitializeAstVisitor();
+ }
virtual ~Processor() { }
#undef DEF_VISIT
void VisitIterationStatement(IterationStatement* stmt);
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
};
};
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyICMiss) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+ Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+ ASSERT(isolate->heap()->IsAllocationAllowed());
+ ASSERT(deoptimizer->compiled_code_kind() == Code::COMPILED_STUB);
+ delete deoptimizer;
+ return isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
ASSERT(isolate->heap()->IsAllocationAllowed());
- JavaScriptFrameIterator it(isolate);
+
+ ASSERT(deoptimizer->compiled_code_kind() != Code::COMPILED_STUB);
// Make sure to materialize objects before causing any allocation.
+ JavaScriptFrameIterator it(isolate);
deoptimizer->MaterializeHeapObjects(&it);
delete deoptimizer;
F(ForceParallelRecompile, 1, 1) \
F(InstallRecompiledCode, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
+ F(NotifyICMiss, 0, 1) \
F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
SafepointTable::SafepointTable(Code* code) {
- ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION ||
+ code->kind() == Code::COMPILED_STUB);
code_ = code;
Address header = code->instruction_start() + code->safepoint_table_offset();
length_ = Memory::uint32_at(header + kLengthOffset);
void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
- // For lazy deoptimization we need space to patch a call after every call.
- // Ensure there is always space for such patching, even if the code ends
- // in a call.
- int target_offset = assembler->pc_offset() + Deoptimizer::patch_size();
- while (assembler->pc_offset() < target_offset) {
- assembler->nop();
- }
-
// Make sure the safepoint table is properly aligned. Pad with nops.
assembler->Align(kIntSize);
assembler->RecordComment(";;; Safepoint table.");
#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
+#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
#include "ic-inl.h"
UNCLASSIFIED,
51,
"Code::MakeCodeYoung");
+
+ // Add a small set of deopt entry addresses to the encoder without generating
+ // the deopt table code, which isn't possible at deserialization time.
+ HandleScope scope(Isolate::Current());
+ for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
+ Address address = Deoptimizer::GetDeoptimizationEntry(
+ entry,
+ Deoptimizer::LAZY,
+ Deoptimizer::CALCULATE_ENTRY_ADDRESS);
+ Add(address, LAZY_DEOPTIMIZATION, 52 + entry, "lazy_deopt");
+ }
}
EXTENSION,
ACCESSOR,
RUNTIME_ENTRY,
- STUB_CACHE_TABLE
+ STUB_CACHE_TABLE,
+ LAZY_DEOPTIMIZATION
};
-const int kTypeCodeCount = STUB_CACHE_TABLE + 1;
+const int kTypeCodeCount = LAZY_DEOPTIMIZATION + 1;
const int kFirstTypeCode = UNCLASSIFIED;
const int kReferenceIdBits = 16;
const int kDebugRegisterBits = 4;
const int kDebugIdShift = kDebugRegisterBits;
+const int kDeoptTableSerializeEntryCount = 8;
// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
// You can get the underlying pointer out with the * operator.
inline T* operator*() { return p_; }
- // You can use [n] to index as if it was a plain pointer
+ // You can use [n] to index as if it was a plain pointer.
inline T& operator[](size_t i) {
return p_[i];
}
+ // You can use [n] to index as if it was a plain pointer.
+ const inline T& operator[](size_t i) const {
+ return p_[i];
+ }
+
// We don't have implicit conversion to a T* since that hinders migration:
// You would not be able to change a method from returning a T* to
// returning a SmartArrayPointer<T> and then get errors wherever it is used.
return temp;
}
+ inline void Reset(T* new_value) {
+ if (p_) Deallocator::Delete(p_);
+ p_ = new_value;
+ }
+
// Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
// the copy constructor it removes the pointer in the original to avoid
// double freeing.
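Editor's note: the two additions in this hunk round out SmartArrayPointer with a const overload of operator[] and a Reset() that frees the previous array before adopting the new one. A standalone analog (not V8's class, and assuming array delete as the deallocator) showing both in use:

    #include <cstddef>
    #include <cstdio>

    template <typename T>
    class ScopedArraySketch {
     public:
      explicit ScopedArraySketch(T* p) : p_(p) {}
      ~ScopedArraySketch() { delete[] p_; }
      T& operator[](size_t i) { return p_[i]; }
      // The new const overload: permits indexing through const references.
      const T& operator[](size_t i) const { return p_[i]; }
      // Mirrors Reset(): deallocate the old array, take over the new one.
      void Reset(T* new_value) {
        if (p_) delete[] p_;
        p_ = new_value;
      }
     private:
      T* p_;
    };

    int main() {
      ScopedArraySketch<int> a(new int[4]());
      a[0] = 42;
      const ScopedArraySketch<int>& ca = a;
      printf("%d\n", ca[0]);    // reads through the const operator[]
      a.Reset(new int[2]());    // the original array is freed first
      return 0;
    }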
CASE(FUNCTION);
CASE(OPTIMIZED_FUNCTION);
CASE(STUB);
+ CASE(COMPILED_STUB);
CASE(BUILTIN);
CASE(LOAD_IC);
CASE(KEYED_LOAD_IC);
Handle<Code> CompileLoadPolymorphic(MapHandleList* receiver_maps,
CodeHandleList* handler_ics);
- static void GenerateLoadExternalArray(MacroAssembler* masm,
- ElementsKind elements_kind);
-
- static void GenerateLoadFastElement(MacroAssembler* masm);
-
- static void GenerateLoadFastDoubleElement(MacroAssembler* masm);
-
static void GenerateLoadDictionaryElement(MacroAssembler* masm);
private:
static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
+ static BailoutId StubEntry() { return BailoutId(kStubEntryId); }
bool IsNone() const { return id_ == kNoneId; }
bool operator==(const BailoutId& other) const { return id_ == other.id_; }
// code (function declarations).
static const int kDeclarationsId = 3;
- // Ever FunctionState starts with this id.
+ // Every FunctionState starts with this id.
static const int kFirstUsableId = 4;
+ // Every compiled stub starts with this id.
+ static const int kStubEntryId = 5;
+
int id_;
};
// -----------------------------------------------------------------------------
// Register constants.
-const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
+const int
+ Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
// rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
0, 3, 2, 1, 7, 8, 9, 11, 14, 15
};
// r10 - fixed scratch register
// r12 - smi constant register
// r13 - root register
+ static const int kMaxNumAllocatableRegisters = 10;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 10;
static int ToAllocationIndex(Register reg) {
return kAllocationIndexByRegisterCode[reg.code()];
}
static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
Register result = { kRegisterCodeByAllocationIndex[index] };
return result;
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"rax",
"rbx",
int code_;
private:
- static const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters];
+ static const int kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters];
static const int kAllocationIndexByRegisterCode[kNumRegisters];
};
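Editor's note: the two tables above are inverse mappings, one from allocation index to register code and one back. A standalone sketch (not V8 source) that derives the inverse table from the allocation order shown in this hunk; registers absent from the order (rsp, rbp, rsi, r10, r12, r13) remain unallocatable at -1:

    #include <cstdio>

    static const int kMaxNumAllocatableRegisters = 10;
    static const int kNumRegisters = 16;

    // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
    static const int
        kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] =
        { 0, 3, 2, 1, 7, 8, 9, 11, 14, 15 };

    int main() {
      int index_by_code[kNumRegisters];
      for (int code = 0; code < kNumRegisters; ++code) index_by_code[code] = -1;
      for (int index = 0; index < kMaxNumAllocatableRegisters; ++index) {
        index_by_code[kRegisterCodeByAllocationIndex[index]] = index;
      }
      for (int code = 0; code < kNumRegisters; ++code) {
        printf("register code %2d -> allocation index %2d\n",
               code, index_by_code[code]);
      }
      return 0;
    }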
struct XMMRegister {
static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 15;
+ static const int kMaxNumAllocatableRegisters = 15;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
static int ToAllocationIndex(XMMRegister reg) {
ASSERT(reg.code() != 0);
return reg.code() - 1;
}
static XMMRegister FromAllocationIndex(int index) {
- ASSERT(0 <= index && index < kNumAllocatableRegisters);
+ ASSERT(0 <= index && index < kMaxNumAllocatableRegisters);
XMMRegister result = { index + 1 };
return result;
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"xmm1",
"xmm2",
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+void Builtins::Generate_NotifyICMiss(MacroAssembler* masm) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across the notification; this is important for
+ // compiled stubs that tail call the runtime on deopts, passing their
+ // parameters in registers.
+ __ Pushad();
+ __ CallRuntime(Runtime::kNotifyICMiss, 0);
+ __ Popad();
+ // Tear down internal frame.
+ }
+
+ __ pop(MemOperand(rsp, 0)); // Ignore state offset
+ __ ret(0); // Return to IC Miss stub, continuation still on stack.
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
// Enter an internal frame.
}
// Get the full codegen state from the stack and untag it.
- __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+ __ SmiToInteger32(r10, Operand(rsp, 1 * kPointerSize));
// Switch on the state.
Label not_no_registers, not_tos_rax;
- __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ cmpq(r10, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, ¬_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(¬_no_registers);
__ movq(rax, Operand(rsp, 2 * kPointerSize));
- __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
+ __ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, ¬_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
namespace v8 {
namespace internal {
+
+CodeStubInterfaceDescriptor*
+ KeyedLoadFastElementStub::GetInterfaceDescriptor(Isolate* isolate) {
+ static CodeStubInterfaceDescriptor* result = NULL;
+ if (result == NULL) {
+ Handle<Code> miss = isolate->builtins()->KeyedLoadIC_Miss();
+ static Register registers[] = { rdx, rax };
+ static CodeStubInterfaceDescriptor info = {
+ 2,
+ registers,
+ miss
+ };
+ result = &info;
+ }
+ return result;
+}
+
+
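Editor's note: the descriptor above is what lets Lithium compile a stub without a JavaScript frame: it records how many parameters the stub takes, the fixed registers that hold them (rdx = receiver, rax = key, matching the KeyedLoadIC register state), and the code to tail-call on a miss. A simplified analog (hypothetical types, not V8's declarations) with a DoParameter-style consumer:

    #include <cassert>
    #include <cstdio>

    struct RegisterSketch { int code; };

    // Stands in for CodeStubInterfaceDescriptor: count, fixed parameter
    // registers, and the miss handler (a name here instead of Handle<Code>).
    struct DescriptorSketch {
      int register_param_count;
      const RegisterSketch* register_params;
      const char* deoptimization_handler;
    };

    int main() {
      static const RegisterSketch rdx = { 2 };
      static const RegisterSketch rax = { 0 };
      static const RegisterSketch params[] = { rdx, rax };  // receiver, key
      DescriptorSketch keyed_load = { 2, params, "KeyedLoadIC_Miss" };

      // DoParameter-style lookup: stub parameter i lives in
      // register_params[i] instead of a stack spill slot.
      for (int i = 0; i < keyed_load.register_param_count; ++i) {
        printf("param %d -> register code %d\n",
               i, keyed_load.register_params[i].code);
      }
      assert(keyed_load.register_params[0].code == 2);  // rdx
      return 0;
    }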
#define __ ACCESS_MASM(masm)
void ToNumberStub::Generate(MacroAssembler* masm) {
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0,
};
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
};
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
};
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
};
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
};
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() {}
};
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
public:
NumberToStringStub() { }
};
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
};
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
Register GetRegThatIsNotRcxOr(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(rcx)) continue;
if (candidate.is(r1)) continue;
class CodeGenerator: public AstVisitor {
public:
+ CodeGenerator() {
+ InitializeAstVisitor();
+ }
+
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
int pos,
bool right_here = false);
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
// TODO(kasperl): This should not be the bailout_id_. It should be
// the ast id. Confusing.
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
unsigned pc_offset = data->OsrPcOffset()->value();
intptr_t pc = reinterpret_cast<intptr_t>(
- optimized_code_->entry() + pc_offset);
+ compiled_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation =
}
+void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index) {
+ //
+ //               FROM                                  TO            <-rbp
+ //    |          ....           |          |          ....           |
+ //    +-------------------------+          +-------------------------+
+ //    | JSFunction continuation |          | JSFunction continuation |
+ //    +-------------------------+          +-------------------------+<-rsp
+ //    |                                    |  saved frame (rbp)      |
+ //    |                                    +=========================+<-rbp
+ //    |                                    |   JSFunction context    |
+ //    v                                    +-------------------------+
+ //                                         | COMPILED_STUB marker    |  rbp = saved frame
+ //                                         +-------------------------+  rsi = JSFunction context
+ //                                         |                         |
+ //                                         | ...                     |
+ //                                         |                         |
+ //                                         +-------------------------+<-rsp
+ //
+ //
+ int output_frame_size = 1 * kPointerSize;
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, 0);
+ Code* notify_miss =
+ isolate_->builtins()->builtin(Builtins::kNotifyICMiss);
+ output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(notify_miss->entry()));
+
+ ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+ int major_key = compiled_code_->major_key();
+ CodeStubInterfaceDescriptor* descriptor =
+ isolate_->code_stub_interface_descriptors()[major_key];
+ Handle<Code> miss_ic(descriptor->deoptimization_handler);
+ output_frame->SetPc(reinterpret_cast<intptr_t>(miss_ic->instruction_start()));
+ unsigned input_frame_size = input_->GetFrameSize();
+ intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
+ output_frame->SetFrameSlot(0, value);
+ value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
+ output_frame->SetRegister(rbp.code(), value);
+ output_frame->SetFp(value);
+ value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
+ output_frame->SetRegister(rsi.code(), value);
+
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ ASSERT(opcode == Translation::REGISTER);
+ USE(opcode);
+ int input_reg = iterator->Next();
+ intptr_t input_value = input_->GetRegister(input_reg);
+ output_frame->SetRegister(rdx.code(), input_value);
+
+ int32_t next = iterator->Next();
+ opcode = static_cast<Translation::Opcode>(next);
+ ASSERT(opcode == Translation::REGISTER);
+ input_reg = iterator->Next();
+ input_value = input_->GetRegister(input_reg);
+ output_frame->SetRegister(rax.code(), input_value);
+
+ ASSERT(frame_index == 0);
+ output_[frame_index] = output_frame;
+}
+
+
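Editor's note: DoCompiledStubFrame consumes a fixed-shape translation record: the COMPILED_STUB_FRAME opcode plus stub kind written by BeginCompiledStubFrame, followed by one REGISTER record per stub parameter (here placed in rdx and rax, matching the KeyedLoadIC descriptor). A standalone sketch of that stream layout, with hypothetical opcode and register values:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    enum OpcodeSketch { COMPILED_STUB_FRAME_OP, REGISTER_OP };

    int main() {
      // Encoded as in the patch: frame marker + stub kind, then parameters.
      std::vector<int> stream;
      stream.push_back(COMPILED_STUB_FRAME_OP);
      stream.push_back(4);            // stub kind (illustrative value)
      stream.push_back(REGISTER_OP);
      stream.push_back(2);            // first parameter's input register
      stream.push_back(REGISTER_OP);
      stream.push_back(0);            // second parameter's input register

      // Decoded in the exact order DoCompiledStubFrame calls iterator->Next().
      size_t pos = 0;
      assert(stream[pos++] == COMPILED_STUB_FRAME_OP);
      int stub_kind = stream[pos++];
      assert(stream[pos++] == REGISTER_OP);
      int first_param_reg = stream[pos++];
      assert(stream[pos++] == REGISTER_OP);
      int second_param_reg = stream[pos++];
      (void)stub_kind; (void)first_param_reg; (void)second_param_reg;
      return 0;
    }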
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
int frame_index) {
Builtins* builtins = isolate_->builtins();
}
input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::kNumAllocatableRegisters;
+ XMMRegister::NumAllocatableRegisters();
__ subq(rsp, Immediate(kDoubleRegsSize));
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ movsd(Operand(rsp, offset), xmm_reg);
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
__ pop(Operand(rbx, dst_offset));
}
// limit and copy the contents of the activation frame to the input
// frame description.
__ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+ Label pop_loop_header;
+ __ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(rdx, 0));
__ addq(rdx, Immediate(sizeof(intptr_t)));
+ __ bind(&pop_loop_header);
__ cmpq(rcx, rsp);
__ j(not_equal, &pop_loop);
__ pop(rax);
// Replace the current frame with the output frames.
- Label outer_push_loop, inner_push_loop;
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
// Outer loop state: rax = current FrameDescription**, rdx = one past the
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movq(rax, Operand(rax, Deoptimizer::output_offset()));
__ lea(rdx, Operand(rax, rdx, times_8, 0));
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
__ movq(rbx, Operand(rax, 0));
__ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ subq(rcx, Immediate(sizeof(intptr_t)));
__ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+ __ bind(&inner_loop_header);
__ testq(rcx, rcx);
__ j(not_zero, &inner_push_loop);
__ addq(rax, Immediate(kPointerSize));
+ __ bind(&outer_loop_header);
__ cmpq(rax, rdx);
__ j(below, &outer_push_loop);
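Editor's note: the jmp-to-header edits above rotate the copy loops so the body is emitted first and the test sits at the bottom, with an initial jump straight to the test. A zero-length frame (count register already zero on entry) then falls through without executing the body once. The same shape in standalone C++ (a sketch, not V8 source):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    void PushFrameContents(const intptr_t* frame, size_t count) {
      size_t i = count;
      goto header;              // __ jmp(&inner_loop_header);
    body:
      --i;                      // __ subq(rcx, Immediate(sizeof(intptr_t)));
      printf("push slot %u -> %lld\n",
             static_cast<unsigned>(i), static_cast<long long>(frame[i]));
    header:                     // __ bind(&inner_loop_header);
      if (i != 0) goto body;    // __ testq(rcx, rcx); __ j(not_zero, ...);
    }

    int main() {
      intptr_t frame[3] = { 10, 20, 30 };
      PushFrameContents(frame, 3);  // pushes slots 2, 1, 0
      PushFrameContents(frame, 0);  // safe: the body never runs
      return 0;
    }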
// In case of OSR, we have to restore the XMM registers.
if (type() == OSR) {
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
__ movsd(xmm_reg, Operand(rbx, src_offset));
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ __ int3();
+ }
#endif
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). rcx is zero for method calls and non-zero for function
- // calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ testq(rcx, rcx);
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, receiver_offset), kScratchRegister);
- __ bind(&ok);
+ // Strict mode functions need to replace the receiver with undefined
+ // when called as functions (without an explicit receiver
+ // object). rcx is zero for method calls and non-zero for function
+ // calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ Label ok;
+ __ testq(rcx, rcx);
+ __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+ __ bind(&ok);
+ }
}
info()->set_prologue_offset(masm_->pc_offset());
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS function.
+ if (NeedsEagerFrame()) {
+ ASSERT(!frame_is_built_);
+ frame_is_built_ = true;
+ __ push(rbp); // Caller's frame pointer.
+ __ movq(rbp, rsp);
+ __ push(rsi); // Callee's context.
+ if (info()->IsStub()) {
+ __ Push(Smi::FromInt(StackFrame::STUB));
+ } else {
+ __ push(rdi); // Callee's JS function.
+ }
+ }
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
}
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
}
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
bool LCodeGen::GenerateJumpTable() {
+ Label needs_frame_not_call;
+ Label needs_frame_is_call;
for (int i = 0; i < jump_table_.length(); i++) {
__ bind(&jump_table_[i].label);
- __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
+ Address entry = jump_table_[i].address;
+ if (jump_table_[i].needs_frame) {
+ __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
+ if (jump_table_[i].is_lazy_deopt) {
+ if (needs_frame_is_call.is_bound()) {
+ __ jmp(&needs_frame_is_call);
+ } else {
+ __ bind(&needs_frame_is_call);
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(rsi);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ Move(rsi, Smi::FromInt(StackFrame::STUB));
+ __ push(rsi);
+ __ movq(rsi, MemOperand(rsp, kPointerSize));
+ __ call(kScratchRegister);
+ }
+ } else {
+ if (needs_frame_not_call.is_bound()) {
+ __ jmp(&needs_frame_not_call);
+ } else {
+ __ bind(&needs_frame_not_call);
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(rsi);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ Move(rsi, Smi::FromInt(StackFrame::STUB));
+ __ push(rsi);
+ __ movq(rsi, MemOperand(rsp, kPointerSize));
+ __ jmp(kScratchRegister);
+ }
+ }
+ } else {
+ if (jump_table_[i].is_lazy_deopt) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ }
+ }
}
return !is_aborted();
}
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred build frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ // Build the frame in such a way that rsi isn't trashed.
+ __ push(rbp); // Caller's frame pointer.
+ __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Push(Smi::FromInt(StackFrame::STUB));
+ __ lea(rbp, Operand(rsp, 2 * kPointerSize));
+ }
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred destroy frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(frame_is_built_);
+ frame_is_built_ = false;
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+ }
__ jmp(code->exit());
}
}
translation,
arguments_index,
arguments_count);
- int closure_id = *info()->closure() != *environment->closure()
+ bool has_closure_id = !info()->closure().is_null() &&
+ *info()->closure() != *environment->closure();
+ int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
}
// Inlined frames which push their arguments cause the index to be
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
}
+ ASSERT(info()->IsStub() || frame_is_built_);
+ bool lazy_deopt = info()->IsStub();
if (cc == no_condition) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ if (lazy_deopt) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ }
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (jump_table_.is_empty() ||
- jump_table_.last().address != entry) {
- jump_table_.Add(JumpTableEntry(entry), zone());
+ jump_table_.last().address != entry ||
+ jump_table_.last().needs_frame != !frame_is_built_ ||
+ jump_table_.last().is_lazy_deopt != lazy_deopt) {
+ JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt);
+ jump_table_.Add(table_entry, zone());
}
__ j(cc, &jump_table_.last().label);
}
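Editor's note: the widened reuse test above matters because the jump-table entry is no longer keyed by target address alone; the code emitted at the entry also depends on whether a frame must be built and whether the deopt is lazy. A hypothetical helper (not in the patch) making the comparison explicit:

    #include <cassert>

    struct EntryKeySketch {
      const void* address;
      bool needs_frame;
      bool is_lazy_deopt;
    };

    // Reuse the last entry only if every field affecting the emitted code
    // matches; otherwise two incompatible deopts would share one label.
    bool CanReuseLast(const EntryKeySketch& last, const EntryKeySketch& next) {
      return last.address == next.address &&
             last.needs_frame == next.needs_frame &&
             last.is_lazy_deopt == next.is_lazy_deopt;
    }

    int main() {
      int dummy;
      EntryKeySketch a = { &dummy, true, true };
      EntryKeySketch b = { &dummy, false, true };  // same target, new frame need
      assert(!CanReuseLast(a, b));  // must get its own label and stub sequence
      return 0;
    }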
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Preserve the return value on the stack and rely on the runtime
// call to return the value in the same register.
__ push(rax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- __ movq(rsp, rbp);
- __ pop(rbp);
- __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
+ if (NeedsEagerFrame()) {
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+ }
+ if (info()->IsStub()) {
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Ret(0, r10);
+ } else {
+ __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
+ }
}
void LCodeGen::DoCheckMapCommon(Register reg,
Handle<Map> map,
CompareMapMode mode,
- LEnvironment* env) {
+ LInstruction* instr) {
Label success;
__ CompareMap(reg, map, &success, mode);
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr->environment());
__ bind(&success);
}
__ j(equal, &success);
}
Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
+ DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
__ bind(&success);
}
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr);
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
// Check the holder map.
DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+ ALLOW_ELEMENT_TRANSITION_MAPS, instr);
}
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
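Editor's note: the two predicates above select between three frame strategies. A standalone sketch of the decision, with hypothetical flags mirroring the CompilationInfo bits the patch consults (is_non_deferred_calling, is_deferred_calling):

    #include <cassert>

    struct InfoSketch {
      bool is_stub;
      bool non_deferred_calling;  // calls out on the main code path
      bool deferred_calling;      // calls out only from deferred code
      int stack_slots;
    };

    static bool NeedsEagerFrame(const InfoSketch& info) {
      return info.stack_slots > 0 || info.non_deferred_calling || !info.is_stub;
    }

    static bool NeedsDeferredFrame(const InfoSketch& info) {
      return !NeedsEagerFrame(info) && info.deferred_calling;
    }

    int main() {
      InfoSketch optimized_function = { false, false, false, 4 };
      assert(NeedsEagerFrame(optimized_function));  // frame built in prologue

      InfoSketch leaf_stub = { true, false, false, 0 };
      assert(!NeedsEagerFrame(leaf_stub));          // runs entirely frameless
      assert(!NeedsDeferredFrame(leaf_stub));

      InfoSketch deferred_stub = { true, false, true, 0 };
      assert(NeedsDeferredFrame(deferred_stub));    // frame built on demand
      return 0;
    }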
// Support for converting LOperands to assembler types.
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
Label* map_check);
void DoCheckMapCommon(Register reg, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
+ CompareMapMode mode, LInstruction* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
Register scratch);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
+ int GetParameterCount() const { return info()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
int* offset);
struct JumpTableEntry {
- explicit inline JumpTableEntry(Address entry)
+ inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
: label(),
- address(entry) { }
+ address(entry),
+ needs_frame(frame),
+ is_lazy_deopt(is_lazy) { }
Label label;
Address address;
+ bool needs_frame;
+ bool is_lazy_deopt;
};
void EnsureSpaceForLazyDeopt(int space_needed);
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
+ bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->masm_->PushSafepointRegisters();
codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
register_spills_[i] = NULL;
}
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
double_register_spills_[i] = NULL;
}
}
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+
#ifdef DEBUG
instr->VerifyCall();
#endif
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ // Only mark conversions that might need to allocate as calling rather than
+ // all changes, so that simple, non-allocating conversions don't force
+ // building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp = TempRegister();
return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToI(value)));
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (info()->IsOptimizing()) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ Register reg = descriptor->register_params[instr->index()];
+ return DefineFixed(result, reg);
+ }
}
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ info()->MarkAsDeferredCalling();
if (instr->is_function_entry()) {
return MarkAsCall(new(zone()) LStackCheck, instr);
} else {
void MarkAsCall() { is_call_ = true; }
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return is_call_; }
+ bool ClobbersRegisters() const { return is_call_; }
+ bool ClobbersDoubleRegisters() const { return is_call_; }
+
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
// Interface to the register allocator and iterators.
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+ LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+ LOperand* double_register_spills_[
+ DoubleRegister::kMaxNumAllocatableRegisters];
};
arg_stack_space * kPointerSize;
subq(rsp, Immediate(space));
int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
// r15 : argv
if (save_doubles) {
int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
}
return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
}
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class CompiledFrame;
};
// -- rsp[0] : return address
// -----------------------------------
ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
- __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
+ Handle<Code> stub = KeyedLoadFastElementStub(
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind).GetCode();
+ __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+ } else {
+ Handle<Code> stub =
+ KeyedLoadDictionaryElementStub().GetCode();
+ __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+ }
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
return GetCode(Code::NORMAL, factory()->empty_string());
}
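Editor's note: the dispatch above replaces three hand-written assembly load paths (removed below) with two stubs: fast and external-array element kinds now share the Hydrogen-generated KeyedLoadFastElementStub, and everything else falls back to the dictionary stub. A simplified sketch of the decision, with hypothetical enum values:

    #include <cstdio>

    enum ElementsKindSketch {
      FAST_SMI, FAST_HOLEY, FAST_DOUBLE,   // "fast" kinds
      EXTERNAL_INT, EXTERNAL_DOUBLE,       // external-array kinds
      DICTIONARY, NON_STRICT_ARGUMENTS     // everything else
    };

    static bool IsFastOrExternal(ElementsKindSketch kind) {
      return kind <= EXTERNAL_DOUBLE;
    }

    static const char* ChooseStub(ElementsKindSketch kind, bool is_js_array) {
      if (IsFastOrExternal(kind)) {
        return is_js_array ? "KeyedLoadFastElementStub(is_js_array=true)"
                           : "KeyedLoadFastElementStub(is_js_array=false)";
      }
      return "KeyedLoadDictionaryElementStub";
    }

    int main() {
      printf("%s\n", ChooseStub(FAST_DOUBLE, true));
      printf("%s\n", ChooseStub(DICTIONARY, false));
      return 0;
    }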
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(rcx, rax);
- __ cmpq(rax, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &miss_force_generic);
-
- // rax: index (as a smi)
- // rdx: receiver (JSObject)
- // rcx: untagged index
- // rbx: elements array
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ movsd(xmm0, Operand(rbx, rcx, times_8, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // rax: index
- // rdx: receiver
- // For integer array types:
- // rcx: value
- // For floating-point array type:
- // xmm0: value as double.
-
- ASSERT(kSmiValueSize == 32);
- if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // For the UnsignedInt array type, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
-
- __ JumpIfUIntNotValidSmiValue(rcx, &box_int, Label::kNear);
-
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
-
- __ bind(&box_int);
-
- // Allocate a HeapNumber for the int and perform int-to-double
- // conversion.
- // The value is zero-extended since we loaded the value from memory
- // with movl.
- __ cvtqsi2sd(xmm0, rcx);
-
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else {
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
- }
-
- // Slow case: Jump to runtime.
- __ bind(&slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
-
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Miss case: Jump to runtime.
- __ bind(&miss_force_generic);
-
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
}
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rcx);
-
- // Check that the key is within bounds.
- __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Load the result and make sure it's not the hole.
- SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
- __ movq(rbx, FieldOperand(rcx,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss_force_generic);
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&miss_force_generic);
- Code* code = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- Handle<Code> ic(code);
- __ jmp(ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rcx);
-
- // Check that the key is within bounds.
- __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Check for the hole
- __ SmiToInteger32(kScratchRegister, rax);
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmpl(FieldOperand(rcx, kScratchRegister, times_8, offset),
- Immediate(kHoleNanUpper32));
- __ j(equal, &miss_force_generic);
-
- // Always allocate a heap number for the result.
- __ movsd(xmm0, FieldOperand(rcx, kScratchRegister, times_8,
- FixedDoubleArray::kHeaderSize));
- __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
- // Set the value.
- __ movq(rax, rcx);
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
-
- __ bind(&slow_allocate_heapnumber);
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
}
} else {
if (v8::internal::Snapshot::IsEnabled()) {
- CHECK_LE(delta, 2500 * 1024); // 2400.
+ CHECK_LE(delta, 2600 * 1024); // 2400.
} else {
CHECK_LE(delta, 2860 * 1024); // 2760.
}
"LazyRecompile": true,
"ParallelRecompile": true,
"NotifyDeoptimized": true,
+ "NotifyICMiss": true,
"NotifyOSR": true,
"CreateObjectLiteralBoilerplate": true,
"CloneLiteralBoilerplate": true,
'../../src/circular-queue.h',
'../../src/code-stubs.cc',
'../../src/code-stubs.h',
+ '../../src/code-stubs-hydrogen.cc',
'../../src/code.h',
'../../src/codegen.cc',
'../../src/codegen.h',