+2011-01-24: Version 3.0.10
+
+ Fixed External::Wrap for 64-bit addresses (issue 1037).
+
+ Fixed incorrect .arguments variable proxy handling in the full
+ code generator (issue 1060).
+
+ Introduced partial strict mode support.
+
+ Changed formatting of recursive error messages to match Firefox and Safari
+ (issue http://crbug.com/70334).
+
+ Fixed incorrect rounding for float-to-integer conversions for external
+ array types, which implement the Typed Array spec
+ (issue http://crbug.com/50972).
+
+ Performance improvements on the IA32 platform.
+
+
2011-01-19: Version 3.0.9
Added basic GDB JIT Interface integration.
// For 32-bit systems any 2-byte-aligned pointer can be encoded as a smi
// with a plain reinterpret_cast.
- static const intptr_t kEncodablePointerMask = 0x1;
+ static const uintptr_t kEncodablePointerMask = 0x1;
static const int kPointerToSmiShift = 0;
};
// It might not be enough to cover stack-allocated objects on some platforms.
static const int kPointerAlignment = 3;
- static const intptr_t kEncodablePointerMask =
- ~(intptr_t(0xffffffff) << kPointerAlignment);
+ static const uintptr_t kEncodablePointerMask =
+ ~(uintptr_t(0xffffffff) << kPointerAlignment);
static const int kPointerToSmiShift =
kSmiTagSize + kSmiShiftSize - kPointerAlignment;
typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
-const intptr_t kEncodablePointerMask =
+const uintptr_t kEncodablePointerMask =
PlatformSmiTagging::kEncodablePointerMask;
const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;
}
static inline void* GetExternalPointerFromSmi(internal::Object* value) {
- const intptr_t address = reinterpret_cast<intptr_t>(value);
+ const uintptr_t address = reinterpret_cast<uintptr_t>(value);
return reinterpret_cast<void*>(address >> kPointerToSmiShift);
}
lithium-allocator.cc
lithium.cc
liveedit.cc
+ liveobjectlist.cc
log-utils.cc
log.cc
mark-compact.cc
static bool CanBeEncodedAsSmi(void* ptr) {
- const intptr_t address = reinterpret_cast<intptr_t>(ptr);
+ const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
return ((address & i::kEncodablePointerMask) == 0);
}
static i::Smi* EncodeAsSmi(void* ptr) {
ASSERT(CanBeEncodedAsSmi(ptr));
- const intptr_t address = reinterpret_cast<intptr_t>(ptr);
+ const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
i::Smi* result = reinterpret_cast<i::Smi*>(address << i::kPointerToSmiShift);
ASSERT(i::Internals::HasSmiTag(result));
ASSERT_EQ(result, i::Smi::FromInt(result->value()));
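
The Wrap/Unwrap fix above rests on this encoding: a sufficiently aligned external pointer is stored directly in a smi, and switching the mask and address arithmetic from intptr_t to uintptr_t keeps the shifts unsigned, which matters once 64-bit addresses have their top bit set. A minimal standalone sketch of the 32-bit round trip, using a stand-in Smi type rather than the real v8::internal declarations:

#include <cassert>
#include <cstdint>

// Sketch of the 32-bit scheme: with kPointerToSmiShift == 0 and a mask of 0x1,
// any 2-byte-aligned pointer already carries the smi tag (low bit clear), so
// encoding is a plain reinterpret_cast. Smi here is an opaque stand-in.
static const uintptr_t kEncodablePointerMask = 0x1;
static const int kPointerToSmiShift = 0;

struct Smi;

static bool CanBeEncodedAsSmi(void* ptr) {
  const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
  return (address & kEncodablePointerMask) == 0;
}

static Smi* EncodeAsSmi(void* ptr) {
  assert(CanBeEncodedAsSmi(ptr));
  const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
  return reinterpret_cast<Smi*>(address << kPointerToSmiShift);
}

static void* DecodeSmi(Smi* value) {
  const uintptr_t address = reinterpret_cast<uintptr_t>(value);
  return reinterpret_cast<void*>(address >> kPointerToSmiShift);
}

int main() {
  static int external_object = 0;            // at least 2-byte aligned
  void* p = &external_object;
  assert(DecodeSmi(EncodeAsSmi(p)) == p);    // encoding round-trips the pointer
}
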
emit(reinterpret_cast<Instr>(msg));
#else // def __arm__
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
- ASSERT(cond == al);
- bkpt(0);
+ if (cond != al) {
+ Label skip;
+ b(&skip, NegateCondition(cond));
+ bkpt(0);
+ bind(&skip);
+ } else {
+ bkpt(0);
+ }
#else // ndef CAN_USE_ARMV5_INSTRUCTIONS
svc(0x9f0001, cond);
#endif // ndef CAN_USE_ARMV5_INSTRUCTIONS
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
// Attempt to allocate the context in new space.
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
+ __ AllocateInNewSpace(FixedArray::SizeFor(slots_),
r0,
r1,
r2,
// Setup the object header.
__ LoadRoot(r2, Heap::kContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ mov(r2, Operand(Smi::FromInt(length)));
+ __ mov(r2, Operand(Smi::FromInt(slots_)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
// Setup the fixed slots.
// Initialize the rest of the slots to undefined.
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) {
__ str(r1, MemOperand(r0, Context::SlotOffset(i)));
}
}
-// Uses registers r0 to r4. Expected input is
-// object in r0 (or at sp+1*kPointerSize) and function in
-// r1 (or at sp), depending on whether or not
-// args_in_registers() is true.
+// Uses registers r0 to r4.
+// Expected input (depending on whether args are in registers or on the stack):
+// * object: r0 or at sp + 1 * kPointerSize.
+// * function: r1 or at sp.
+//
+// An inlined call site may have been generated before calling this stub.
+// In this case the offset to the inline site to patch is passed on the stack,
+// in the safepoint slot for register r4.
+// (See LCodeGen::DoInstanceOfKnownGlobal)
void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Call site inlining and patching implies arguments in registers.
+ ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+ // ReturnTrueFalse is only implemented for inlined call sites.
+ ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
+
// Fixed register usage throughout the stub:
const Register object = r0; // Object (lhs).
- const Register map = r3; // Map of the object.
+ Register map = r3; // Map of the object.
const Register function = r1; // Function (rhs).
const Register prototype = r4; // Prototype of the function.
+ const Register inline_site = r9;
const Register scratch = r2;
+
+ const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize;
+
Label slow, loop, is_instance, is_not_instance, not_js_object;
+
if (!HasArgsInRegisters()) {
__ ldr(object, MemOperand(sp, 1 * kPointerSize));
__ ldr(function, MemOperand(sp, 0));
__ BranchOnSmi(object, &not_js_object);
__ IsObjectJSObjectType(object, map, scratch, &not_js_object);
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
- __ cmp(function, ip);
- __ b(ne, &miss);
- __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
- __ cmp(map, ip);
- __ b(ne, &miss);
- __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret(HasArgsInRegisters() ? 0 : 2);
+ // If there is a call site cache, don't look in the global cache, but do the
+ // real lookup and update the call site cache.
+ if (!HasCallSiteInlineCheck()) {
+ Label miss;
+ __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
+ __ cmp(function, ip);
+ __ b(ne, &miss);
+ __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
+ __ cmp(map, ip);
+ __ b(ne, &miss);
+ __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+ __ bind(&miss);
+ }
- __ bind(&miss);
+ // Get the prototype of the function.
__ TryGetFunctionPrototype(function, prototype, scratch, &slow);
// Check that the function prototype is a JS object.
__ BranchOnSmi(prototype, &slow);
__ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ // Update the global instanceof or call site inlined cache with the current
+ // map and function. The cached answer will be set when it is known below.
+ if (!HasCallSiteInlineCheck()) {
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+ } else {
+ ASSERT(HasArgsInRegisters());
+ // Patch the (relocated) inlined map check.
+
+ // The offset was stored in the r4 safepoint slot.
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
+ __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4));
+ __ sub(inline_site, lr, scratch);
+ // Get the map location in scratch and patch it.
+ __ GetRelocatedValueLocation(inline_site, scratch);
+ __ str(map, MemOperand(scratch));
+ }
// Register mapping: r3 is object map and r4 is function prototype.
// Get prototype of object into r2.
__ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+ // We don't need map any more. Use it as a scratch register.
+ Register scratch2 = map;
+ map = no_reg;
+
// Loop through the prototype chain looking for the function prototype.
+ __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
__ bind(&loop);
__ cmp(scratch, Operand(prototype));
__ b(eq, &is_instance);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(scratch, ip);
+ __ cmp(scratch, scratch2);
__ b(eq, &is_not_instance);
__ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
__ jmp(&loop);
__ bind(&is_instance);
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ if (!HasCallSiteInlineCheck()) {
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Patch the call site to return true.
+ __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+ __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // Get the boolean result location in scratch and patch it.
+ __ GetRelocatedValueLocation(inline_site, scratch);
+ __ str(r0, MemOperand(scratch));
+
+ if (!ReturnTrueFalseObject()) {
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ }
+ }
__ Ret(HasArgsInRegisters() ? 0 : 2);
__ bind(&is_not_instance);
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ if (!HasCallSiteInlineCheck()) {
+ __ mov(r0, Operand(Smi::FromInt(1)));
+ __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ } else {
+ // Patch the call site to return false.
+ __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+ __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // Get the boolean result location in scratch and patch it.
+ __ GetRelocatedValueLocation(inline_site, scratch);
+ __ str(r0, MemOperand(scratch));
+
+ if (!ReturnTrueFalseObject()) {
+ __ mov(r0, Operand(Smi::FromInt(1)));
+ }
+ }
__ Ret(HasArgsInRegisters() ? 0 : 2);
Label object_not_null, object_not_null_or_smi;
// Before null, smi and string value checks, check that the rhs is a function
// as for a non-function rhs an exception needs to be thrown.
__ BranchOnSmi(function, &slow);
- __ CompareObjectType(function, map, scratch, JS_FUNCTION_TYPE);
+ __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
__ b(ne, &slow);
// Null is not instance of anything.
// Slow-case. Tail call builtin.
__ bind(&slow);
- if (HasArgsInRegisters()) {
+ if (!ReturnTrueFalseObject()) {
+ if (HasArgsInRegisters()) {
+ __ Push(r0, r1);
+ }
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
+ } else {
+ __ EnterInternalFrame();
__ Push(r0, r1);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS);
+ __ LeaveInternalFrame();
+ __ cmp(r0, Operand(0));
+ __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
+ __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
+ __ Ret(HasArgsInRegisters() ? 0 : 2);
}
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
}
+Register InstanceofStub::left() { return r0; }
+
+
+Register InstanceofStub::right() { return r1; }
+
+
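The fast path of the stub above is a plain prototype-chain walk: follow the object's map to its prototype until it equals the function's prototype (instance) or reaches null (not an instance). A sketch of that walk in ordinary C++, with Object and Map as stand-ins for the heap objects the stub reads through r2/r3/r4, not real V8 declarations:

#include <cstddef>

struct Map;
struct Object { Map* map; };
struct Map { Object* prototype; };

// True iff `prototype` occurs on the prototype chain of `object`; NULL plays
// the role of the JS null value that terminates the chain. This mirrors the
// loop between the is_instance and is_not_instance labels.
static bool IsInstance(const Object* object, const Object* prototype) {
  const Object* current = object->map->prototype;
  while (current != NULL) {
    if (current == prototype) return true;   // -> is_instance
    current = current->map->prototype;       // follow one prototype link
  }
  return false;                              // hit null -> is_not_instance
}
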
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
// StringCharCodeAtGenerator
-
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
Label ascii_string;
}
+void StringCharAtStub::Generate(MacroAssembler* masm) {
+ // Expects two arguments (object, index) on the stack:
+ // lr: return address
+ // sp[0]: index
+ // sp[4]: object
+ Register object = r1;
+ Register index = r0;
+ Register scratch1 = r2;
+ Register scratch2 = r3;
+ Register result = r0;
+
+ // Get object and index from the stack.
+ __ pop(index);
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ b(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ mov(result, Operand(Smi::FromInt(0)));
+ __ b(&done);
+
+ StubRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm, call_helper);
+
+ __ bind(&done);
+ __ Ret();
+}
+
+
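For reference, the behaviour the new stub implements (one-character result, empty string when the index is out of range, conversion for non-smi indices handled on the slow path) can be written out in a few lines. A standalone sketch, not V8 code, covering just the in-range/out-of-range cases:

#include <cassert>
#include <string>

// charAt semantics for an already-flat ASCII string: the spec requires the
// empty string for an out-of-range index, otherwise a one-character string.
std::string CharAt(const std::string& s, int index) {
  if (index < 0 || index >= static_cast<int>(s.size())) {
    return std::string();               // out of range -> ""
  }
  return std::string(1, s[index]);      // one-character result
}

int main() {
  assert(CharAt("abc", 1) == "b");
  assert(CharAt("abc", 7) == "");       // matches the index_out_of_range path
}
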
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMIS);
Label miss;
frame_->AllocateStackSlots();
frame_->AssertIsSpilled();
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = scope()->num_heap_slots();
if (heap_slots > 0) {
// Allocate local context.
// Get outer context and create a new context based on it.
&& (Bit(20) == 0)
&& ((Bit(7) == 0)); }
+ // Test for a stop instruction.
+ inline bool IsStop() const {
+ return (TypeField() == 7) && (Bit(24) == 1) && (SvcField() >= stop);
+ }
+
// Special accessors that test for existence of a value.
inline bool HasS() const { return SField() == 1; }
inline bool HasB() const { return BField() == 1; }
1 << 6 | // r6 v3
1 << 7 | // r7 v4
1 << 8 | // r8 v5 (cp in JavaScript code)
- kR9Available
- << 9 | // r9 v6
+ kR9Available << 9 | // r9 v6
1 << 10 | // r10 v7
1 << 11; // r11 v8 (fp in JavaScript code)
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = scope()->num_heap_slots();
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in r1.
}
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
-
- __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
- if (mantissa_shift_for_hi_word > 0) {
- __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
- __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
- } else {
- __ mov(loword, Operand(0, RelocInfo::NONE));
- __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
- }
-}
-
-
-void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label slow, failed_allocation;
-
- Register key = r0;
- Register receiver = r1;
-
- // Check that the object isn't a smi
- __ BranchOnSmi(receiver, &slow);
-
- // Check that the key is a smi.
- __ BranchOnNotSmi(key, &slow);
-
- // Check that the object is a JS object. Load map into r2.
- __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
- __ b(lt, &slow);
-
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks.
- __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &slow);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
- __ cmp(r2, ip);
- __ b(ne, &slow);
-
- // Check that the index is in range.
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(ip, Operand(key, ASR, kSmiTagSize));
- // Unsigned comparison catches both negative and too-large values.
- __ b(lo, &slow);
-
- // r3: elements array
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
- // r3: base pointer of external storage
-
- // We are not untagging smi key and instead work with it
- // as if it was premultiplied by 2.
- ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
-
- Register value = r2;
- switch (array_type) {
- case kExternalByteArray:
- __ ldrsb(value, MemOperand(r3, key, LSR, 1));
- break;
- case kExternalUnsignedByteArray:
- __ ldrb(value, MemOperand(r3, key, LSR, 1));
- break;
- case kExternalShortArray:
- __ ldrsh(value, MemOperand(r3, key, LSL, 0));
- break;
- case kExternalUnsignedShortArray:
- __ ldrh(value, MemOperand(r3, key, LSL, 0));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- break;
- case kExternalFloatArray:
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ add(r2, r3, Operand(key, LSL, 1));
- __ vldr(s0, r2, 0);
- } else {
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- }
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // r2: value
- // For floating-point array type
- // s0: value (if VFP3 is supported)
- // r2: value (if VFP3 is not supported)
-
- if (array_type == kExternalIntArray) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- __ cmp(value, Operand(0xC0000000));
- __ b(mi, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
- // Now we can use r0 for the result as key is not needed any more.
- __ mov(r0, r5);
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, value);
- __ vcvt_f64_s32(d0, s0);
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- WriteInt32ToHeapNumberStub stub(value, r0, r3);
- __ TailCallStub(&stub);
- }
- } else if (array_type == kExternalUnsignedIntArray) {
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- Label box_int, done;
- __ tst(value, Operand(0xC0000000));
- __ b(ne, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- __ vmov(s0, value);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
- // registers - also when jumping due to exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
-
- __ vcvt_f64_u32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
-
- __ mov(r0, r2);
- __ Ret();
- } else {
- // Check whether unsigned integer fits into smi.
- Label box_int_0, box_int_1, done;
- __ tst(value, Operand(0x80000000));
- __ b(ne, &box_int_0);
- __ tst(value, Operand(0x40000000));
- __ b(ne, &box_int_1);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- Register hiword = value; // r2.
- Register loword = r3;
-
- __ bind(&box_int_0);
- // Integer does not have leading zeros.
- GenerateUInt2Double(masm, hiword, loword, r4, 0);
- __ b(&done);
-
- __ bind(&box_int_1);
- // Integer has one leading zero.
- GenerateUInt2Double(masm, hiword, loword, r4, 1);
-
-
- __ bind(&done);
- // Integer was converted to double in registers hiword:loword.
- // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
- // clobbers all registers - also when jumping due to exhausted young
- // space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
-
- __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
- __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r4);
- __ Ret();
- }
- } else if (array_type == kExternalFloatArray) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
- __ vcvt_f64_f32(d0, s0);
- __ sub(r1, r2, Operand(kHeapObjectTag));
- __ vstr(d0, r1, HeapNumber::kValueOffset);
-
- __ mov(r0, r2);
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
- // VFP is not available, do manual single to double conversion.
-
- // r2: floating point value (binary32)
- // r3: heap number for result
-
- // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
- // the slow case from here.
- __ and_(r0, value, Operand(kBinary32MantissaMask));
-
- // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
- // the slow case from here.
- __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
- __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ teq(r1, Operand(0x00, RelocInfo::NONE));
- __ b(eq, &exponent_rebiased);
-
- __ teq(r1, Operand(0xff));
- __ mov(r1, Operand(0x7ff), LeaveCC, eq);
- __ b(eq, &exponent_rebiased);
-
- // Rebias exponent.
- __ add(r1,
- r1,
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ and_(r2, value, Operand(kBinary32SignMask));
- value = no_reg;
- __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
- __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
-
- __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
- __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r3);
- __ Ret();
- }
-
- } else {
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
- }
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);
- GenerateRuntimeGetProperty(masm);
-}
-
-
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
}
-// Convert and store int passed in register ival to IEEE 754 single precision
-// floating point value at memory location (dst + 4 * wordoffset)
-// If VFP3 is available use it for conversion.
-static void StoreIntAsFloat(MacroAssembler* masm,
- Register dst,
- Register wordoffset,
- Register ival,
- Register fval,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, ival);
- __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
- __ vcvt_f32_s32(s0, s0);
- __ vstr(s0, scratch1, 0);
- } else {
- Label not_special, done;
- // Move sign bit from source to destination. This works because the sign
- // bit in the exponent word of the double has the same position and polarity
- // as the 2's complement sign bit in a Smi.
- ASSERT(kBinary32SignMask == 0x80000000u);
-
- __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
- // Negate value if it is negative.
- __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
-
- // We have -1, 0 or 1, which we treat specially. Register ival contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ cmp(ival, Operand(1));
- __ b(gt, &not_special);
-
- // For 1 or -1 we need to or in the 0 exponent (biased).
- static const uint32_t exponent_word_for_1 =
- kBinary32ExponentBias << kBinary32ExponentShift;
-
- __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
- __ b(&done);
-
- __ bind(&not_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- Register zeros = scratch2;
- __ CountLeadingZeros(zeros, ival, scratch1);
-
- // Compute exponent and or it into the exponent register.
- __ rsb(scratch1,
- zeros,
- Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
-
- __ orr(fval,
- fval,
- Operand(scratch1, LSL, kBinary32ExponentShift));
-
- // Shift up the source chopping the top bit off.
- __ add(zeros, zeros, Operand(1));
- // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
- __ mov(ival, Operand(ival, LSL, zeros));
- // And the top (top 20 bits).
- __ orr(fval,
- fval,
- Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
-
- __ bind(&done);
- __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
- }
-}
-
-
-static bool IsElementTypeSigned(ExternalArrayType array_type) {
- switch (array_type) {
- case kExternalByteArray:
- case kExternalShortArray:
- case kExternalIntArray:
- return true;
-
- case kExternalUnsignedByteArray:
- case kExternalUnsignedShortArray:
- case kExternalUnsignedIntArray:
- return false;
-
- default:
- UNREACHABLE();
- return false;
- }
-}
-
-
-void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, check_heap_number;
-
- // Register usage.
- Register value = r0;
- Register key = r1;
- Register receiver = r2;
- // r3 mostly holds the elements array or the destination external array.
-
- // Check that the object isn't a smi.
- __ BranchOnSmi(receiver, &slow);
-
- // Check that the object is a JS object. Load map into r3.
- __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
- __ b(le, &slow);
-
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &slow);
-
- // Check that the key is a smi.
- __ BranchOnNotSmi(key, &slow);
-
- // Check that the elements array is the appropriate type of ExternalArray.
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
- __ cmp(r4, ip);
- __ b(ne, &slow);
-
- // Check that the index is in range.
- __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index.
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(r4, ip);
- // Unsigned comparison catches both negative and too-large values.
- __ b(hs, &slow);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // r3: external array.
- // r4: key (integer).
- __ BranchOnNotSmi(value, &check_heap_number);
- __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
- // r4: key (integer).
- // r5: value (integer).
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ strb(r5, MemOperand(r3, r4, LSL, 0));
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ strh(r5, MemOperand(r3, r4, LSL, 1));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- break;
- case kExternalFloatArray:
- // Perform int-to-float conversion and store to memory.
- StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // Entry registers are intact, r0 holds the value which is the return value.
- __ Ret();
-
-
- // r3: external array.
- // r4: index (integer).
- __ bind(&check_heap_number);
- __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
- __ b(ne, &slow);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
-
- // r3: base pointer of external storage.
- // r4: key (integer).
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
-
-
- if (array_type == kExternalFloatArray) {
- // vldr requires offset to be a multiple of 4 so we can not
- // include -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(r4, LSL, 2));
- __ vcvt_f32_f64(s0, d0);
- __ vstr(s0, r5, 0);
- } else {
- // Need to perform float-to-int conversion.
- // Test for NaN or infinity (both give zero).
- __ ldr(r6, FieldMemOperand(r5, HeapNumber::kExponentOffset));
-
- // Hoisted load. vldr requires offset to be a multiple of 4 so we can not
- // include -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
-
- __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs and Infinities have all-one exponents so they sign extend to -1.
- __ cmp(r6, Operand(-1));
- __ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq);
-
- // Not infinity or NaN simply convert to int.
- if (IsElementTypeSigned(array_type)) {
- __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
- } else {
- __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
- }
- __ vmov(r5, s0, ne);
-
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ strb(r5, MemOperand(r3, r4, LSL, 0));
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ strh(r5, MemOperand(r3, r4, LSL, 1));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
-
- // Entry registers are intact, r0 holds the value which is the return value.
- __ Ret();
- } else {
- // VFP3 is not available do manual conversions.
- __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-
- if (array_type == kExternalFloatArray) {
- Label done, nan_or_infinity_or_zero;
- static const int kMantissaInHiWordShift =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaInLoWordShift =
- kBitsPerInt - kMantissaInHiWordShift;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ b(eq, &nan_or_infinity_or_zero);
-
- __ teq(r9, Operand(r7));
- __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
- __ b(eq, &nan_or_infinity_or_zero);
-
- // Rebias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ add(r9,
- r9,
- Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
-
- __ cmp(r9, Operand(kBinary32MaxExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
- __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
- __ b(gt, &done);
-
- __ cmp(r9, Operand(kBinary32MinExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
- __ b(lt, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
- __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
-
- __ bind(&done);
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
-
- __ bind(&nan_or_infinity_or_zero);
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r9, r9, r7);
- __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
- __ b(&done);
- } else {
- bool is_signed_type = IsElementTypeSigned(array_type);
- int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
- int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
-
- Label done, sign;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ b(eq, &done);
-
- __ teq(r9, Operand(r7));
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
- __ b(eq, &done);
-
- // Unbias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
- // If exponent is negative than result is 0.
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
- __ b(mi, &done);
-
- // If exponent is too big than result is minimal value.
- __ cmp(r9, Operand(meaningfull_bits - 1));
- __ mov(r5, Operand(min_value), LeaveCC, ge);
- __ b(ge, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
-
- __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
- __ b(pl, &sign);
-
- __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
- __ mov(r5, Operand(r5, LSL, r9));
- __ rsb(r9, r9, Operand(meaningfull_bits));
- __ orr(r5, r5, Operand(r6, LSR, r9));
-
- __ bind(&sign);
- __ teq(r7, Operand(0, RelocInfo::NONE));
- __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
-
- __ bind(&done);
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ strb(r5, MemOperand(r3, r4, LSL, 0));
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ strh(r5, MemOperand(r3, r4, LSL, 1));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ str(r5, MemOperand(r3, r4, LSL, 2));
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
-
- // Entry registers are intact.
- // r0: value
- // r1: key
- // r2: receiver
- GenerateRuntimeSetProperty(masm);
-}
-
-
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
}
-void LInstruction::PrintTo(StringStream* stream) const {
+void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
if (HasResult()) {
- result()->PrintTo(stream);
- stream->Add(" ");
+ PrintOutputOperandTo(stream);
}
+
PrintDataTo(stream);
if (HasEnvironment()) {
}
-void LLabel::PrintDataTo(StringStream* stream) const {
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ inputs_.PrintOperandsTo(stream);
+}
+
+
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
+ results_.PrintOperandsTo(stream);
+}
+
+
+template<typename T, int N>
+void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
+ for (int i = 0; i < N; i++) {
+ if (i > 0) stream->Add(" ");
+ elems_[i]->PrintTo(stream);
+ }
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
LGap::PrintDataTo(stream);
LLabel* rep = replacement();
if (rep != NULL) {
}
-
-void LBinaryOperation::PrintDataTo(StringStream* stream) const {
- stream->Add("= ");
- left()->PrintTo(stream);
- stream->Add(" ");
- right()->PrintTo(stream);
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) const {
+void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id());
}
-void LBranch::PrintDataTo(StringStream* stream) const {
+void LBranch::PrintDataTo(StringStream* stream) {
stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
}
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) const {
+void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- left()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(" %s ", Token::String(op()));
- right()->PrintTo(stream);
+ InputAt(1)->PrintTo(stream);
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if ");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(is_strict() ? " === null" : " == null");
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_object(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) const {
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_smi(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) const {
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_instance_type(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) const {
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if has_cached_array_index(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) const {
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(", \"%o\") then B%d else B%d",
*hydrogen()->class_name(),
true_block_id(),
}
-void LTypeofIs::PrintDataTo(StringStream* stream) const {
- input()->PrintTo(stream);
+void LTypeofIs::PrintDataTo(StringStream* stream) {
+ InputAt(0)->PrintTo(stream);
stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
}
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) const {
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(" == \"%s\" then B%d else B%d",
*hydrogen()->type_literal()->ToCString(),
true_block_id(), false_block_id());
}
-void LCallConstantFunction::PrintDataTo(StringStream* stream) const {
+void LCallConstantFunction::PrintDataTo(StringStream* stream) {
stream->Add("#%d / ", arity());
}
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) const {
+void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
stream->Add("/%s ", hydrogen()->OpName());
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
}
}
-void LCallKeyed::PrintDataTo(StringStream* stream) const {
+void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[r2] #%d / ", arity());
}
-void LCallNamed::PrintDataTo(StringStream* stream) const {
+void LCallNamed::PrintDataTo(StringStream* stream) {
SmartPointer<char> name_string = name()->ToCString();
stream->Add("%s #%d / ", *name_string, arity());
}
-void LCallGlobal::PrintDataTo(StringStream* stream) const {
+void LCallGlobal::PrintDataTo(StringStream* stream) {
SmartPointer<char> name_string = name()->ToCString();
stream->Add("%s #%d / ", *name_string, arity());
}
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) const {
+void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
stream->Add("#%d / ", arity());
}
-void LCallNew::PrintDataTo(StringStream* stream) const {
- LUnaryOperation::PrintDataTo(stream);
+void LCallNew::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ InputAt(0)->PrintTo(stream);
stream->Add(" #%d / ", arity());
}
-void LClassOfTest::PrintDataTo(StringStream* stream) const {
+void LClassOfTest::PrintDataTo(StringStream* stream) {
stream->Add("= class_of_test(");
- input()->PrintTo(stream);
+ InputAt(0)->PrintTo(stream);
stream->Add(", \"%o\")", *hydrogen()->class_name());
}
-void LUnaryOperation::PrintDataTo(StringStream* stream) const {
- stream->Add("= ");
- input()->PrintTo(stream);
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
stream->Add(" length ");
}
+void LStoreNamed::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add(".");
+ stream->Add(*String::cast(*name())->ToCString());
+ stream->Add(" <- ");
+ value()->PrintTo(stream);
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
LChunk::LChunk(HGraph* graph)
: spill_slot_count_(0),
graph_(graph),
}
-void LChunk::Verify() const {
- // TODO(twuerthinger): Implement verification for chunk.
-}
-
-
int LChunk::GetNextSpillIndex(bool is_double) {
// Skip a slot if allocating a double-width slot.
if (is_double) spill_slot_count_++;
}
-void LStoreNamed::PrintDataTo(StringStream* stream) const {
- object()->PrintTo(stream);
- stream->Add(".");
- stream->Add(*String::cast(*name())->ToCString());
- stream->Add(" <- ");
- value()->PrintTo(stream);
-}
-
-
-void LStoreKeyed::PrintDataTo(StringStream* stream) const {
- object()->PrintTo(stream);
- stream->Add("[");
- key()->PrintTo(stream);
- stream->Add("] <- ");
- value()->PrintTo(stream);
-}
-
-
int LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
LGap* gap = new LGap(block);
int index = -1;
}
-LInstruction* LChunkBuilder::Define(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
+ LUnallocated* result) {
+ allocator_->RecordDefinition(current_instruction_, result);
+ instr->set_result(result);
+ return instr;
+}
+
+
+template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
return Define(instr, new LUnallocated(LUnallocated::NONE));
}
-LInstruction* LChunkBuilder::DefineAsRegister(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsRegister(
+ LTemplateInstruction<1, I, T>* instr) {
return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
-LInstruction* LChunkBuilder::DefineAsSpilled(LInstruction* instr, int index) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineAsSpilled(
+ LTemplateInstruction<1, I, T>* instr, int index) {
return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
}
-LInstruction* LChunkBuilder::DefineSameAsFirst(LInstruction* instr) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+ LTemplateInstruction<1, I, T>* instr) {
return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
}
-LInstruction* LChunkBuilder::DefineFixed(LInstruction* instr, Register reg) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixed(
+ LTemplateInstruction<1, I, T>* instr, Register reg) {
return Define(instr, ToUnallocated(reg));
}
-LInstruction* LChunkBuilder::DefineFixedDouble(LInstruction* instr,
- DoubleRegister reg) {
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineFixedDouble(
+ LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
return Define(instr, ToUnallocated(reg));
}
}
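
The templated Define/DefineAsRegister/DefineFixed overloads above restrict "give this instruction a result" to instructions whose result count is exactly one. A minimal sketch of that pattern with made-up names (not the real lithium classes): the result count is a template parameter, and Define() only matches R == 1, so defining a result on a result-less control instruction becomes a compile-time error.

struct LOperand {};

template <int R, int I>
struct LTemplateInstruction {
  LOperand* results_[R + 1];   // +1 so R == 0 still compiles; slot then unused
  LOperand* inputs_[I + 1];
  void set_result(LOperand* op) { results_[0] = op; }
};

template <int I>
LTemplateInstruction<1, I>* Define(LTemplateInstruction<1, I>* instr,
                                   LOperand* result) {
  instr->set_result(result);   // only single-result instructions reach here
  return instr;
}

// Usage: Define() deduces I for an LTemplateInstruction<1, 2> (one result,
// two inputs) but fails to compile for an LTemplateInstruction<0, 2>.
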
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
- ASSERT(!instr->HasPointerMap());
- instr->set_pointer_map(new LPointerMap(position_));
+LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
+ allocator_->MarkAsSaveDoubles();
return instr;
}
-LInstruction* LChunkBuilder::Define(LInstruction* instr, LUnallocated* result) {
- allocator_->RecordDefinition(current_instruction_, result);
- instr->set_result(result);
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+ ASSERT(!instr->HasPointerMap());
+ instr->set_pointer_map(new LPointerMap(position_));
return instr;
}
ASSERT(right->representation().IsTagged());
LOperand* left_operand = UseFixed(left, r1);
LOperand* right_operand = UseFixed(right, r0);
- LInstruction* result = new LArithmeticT(op, left_operand, right_operand);
+ LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
return MarkAsCall(DefineFixed(result, r0), instr);
}
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsBranch()) {
- instr->set_hydrogen_value(HBranch::cast(current)->value());
+ if (current->IsTest() && !instr->IsGoto()) {
+ ASSERT(instr->IsControl());
+ HTest* test = HTest::cast(current);
+ instr->set_hydrogen_value(test->value());
+ HBasicBlock* first = test->FirstSuccessor();
+ HBasicBlock* second = test->SecondSuccessor();
+ ASSERT(first != NULL && second != NULL);
+ instr->SetBranchTargets(first->block_id(), second->block_id());
} else {
instr->set_hydrogen_value(current);
}
}
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+LInstruction* LChunkBuilder::DoTest(HTest* instr) {
HValue* v = instr->value();
- HBasicBlock* first = instr->FirstSuccessor();
- HBasicBlock* second = instr->SecondSuccessor();
- ASSERT(first != NULL && second != NULL);
- int first_id = first->block_id();
- int second_id = second->block_id();
-
if (v->EmitAtUses()) {
if (v->IsClassOfTest()) {
HClassOfTest* compare = HClassOfTest::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister(),
- first_id,
- second_id);
+ TempRegister());
} else if (v->IsCompare()) {
HCompare* compare = HCompare::cast(v);
Token::Value op = compare->token();
ASSERT(left->representation().IsInteger32());
ASSERT(right->representation().IsInteger32());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseOrConstantAtStart(right),
- first_id,
- second_id);
+ UseOrConstantAtStart(right));
} else if (r.IsDouble()) {
ASSERT(left->representation().IsDouble());
ASSERT(right->representation().IsDouble());
return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right),
- first_id,
- second_id);
+ UseRegisterAtStart(right));
} else {
ASSERT(left->representation().IsTagged());
ASSERT(right->representation().IsTagged());
LOperand* left_operand = UseFixed(left, reversed ? r0 : r1);
LOperand* right_operand = UseFixed(right, reversed ? r1 : r0);
LInstruction* result = new LCmpTAndBranch(left_operand,
- right_operand,
- first_id,
- second_id);
+ right_operand);
return MarkAsCall(result, instr);
}
} else if (v->IsIsSmi()) {
HIsSmi* compare = HIsSmi::cast(v);
ASSERT(compare->value()->representation().IsTagged());
- return new LIsSmiAndBranch(Use(compare->value()),
- first_id,
- second_id);
+ return new LIsSmiAndBranch(Use(compare->value()));
} else if (v->IsHasInstanceType()) {
HHasInstanceType* compare = HHasInstanceType::cast(v);
ASSERT(compare->value()->representation().IsTagged());
-
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()),
- first_id,
- second_id);
+ return new LHasInstanceTypeAndBranch(
+ UseRegisterAtStart(compare->value()));
} else if (v->IsHasCachedArrayIndex()) {
HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
ASSERT(compare->value()->representation().IsTagged());
return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()), first_id, second_id);
+ UseRegisterAtStart(compare->value()));
} else if (v->IsIsNull()) {
HIsNull* compare = HIsNull::cast(v);
ASSERT(compare->value()->representation().IsTagged());
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
- first_id,
- second_id);
+ return new LIsNullAndBranch(UseRegisterAtStart(compare->value()));
} else if (v->IsIsObject()) {
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
LOperand* temp2 = TempRegister();
return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
temp1,
- temp2,
- first_id,
- second_id);
+ temp2);
} else if (v->IsCompareJSObjectEq()) {
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()),
- first_id,
- second_id);
+ UseRegisterAtStart(compare->right()));
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
LInstruction* result =
new LInstanceOfAndBranch(Use(instance_of->left()),
- Use(instance_of->right()),
- first_id,
- second_id);
+ Use(instance_of->right()));
return MarkAsCall(result, instr);
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()),
- first_id,
- second_id);
+ return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
} else {
if (v->IsConstant()) {
if (HConstant::cast(v)->handle()->IsTrue()) {
- return new LGoto(first_id);
+ return new LGoto(instr->FirstSuccessor()->block_id());
} else if (HConstant::cast(v)->handle()->IsFalse()) {
- return new LGoto(second_id);
+ return new LGoto(instr->SecondSuccessor()->block_id());
}
}
Abort("Undefined compare before branch");
return NULL;
}
}
- return new LBranch(UseRegisterAtStart(v), first_id, second_id);
+ return new LBranch(UseRegisterAtStart(v));
}
-LInstruction* LChunkBuilder::DoCompareMapAndBranch(
- HCompareMapAndBranch* instr) {
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
LOperand* temp = TempRegister();
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LInstruction* result =
+ LInstanceOf* result =
new LInstanceOf(UseFixed(instr->left(), r0),
UseFixed(instr->right(), r1));
return MarkAsCall(DefineFixed(result, r0), instr);
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
- LInstruction* result =
- new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0));
- return MarkAsCall(DefineFixed(result, r0), instr);
+ LInstanceOfKnownGlobal* result =
+ new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0), FixedTemp(r4));
+ MarkAsSaveDoubles(result);
+ return AssignEnvironment(AssignPointerMap(DefineFixed(result, r0)));
}
LOperand* receiver = UseFixed(instr->receiver(), r0);
LOperand* length = UseRegisterAtStart(instr->length());
LOperand* elements = UseRegisterAtStart(instr->elements());
- LInstruction* result = new LApplyArguments(function,
- receiver,
- length,
- elements);
+ LApplyArguments* result = new LApplyArguments(function,
+ receiver,
+ length,
+ elements);
return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
}
BuiltinFunctionId op = instr->op();
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
- LInstruction* result = new LUnaryMathOperation(input, temp);
+ LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
switch (op) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
argument_count_ -= instr->argument_count();
- UseFixed(instr->key(), r2);
- return MarkAsCall(DefineFixed(new LCallKeyed, r0), instr);
+ LOperand* key = UseFixed(instr->key(), r2);
+ return MarkAsCall(DefineFixed(new LCallKeyed(key), r0), instr);
}
LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LOperand* constructor = UseFixed(instr->constructor(), r1);
argument_count_ -= instr->argument_count();
- LInstruction* result = new LCallNew(constructor);
+ LCallNew* result = new LCallNew(constructor);
return MarkAsCall(DefineFixed(result, r0), instr);
}
bool reversed = (op == Token::GT || op == Token::LTE);
LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
- LInstruction* result = new LCmpT(left, right);
+ LCmpT* result = new LCmpT(left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
}
HCompareJSObjectEq* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LInstruction* result = new LCmpJSObjectEq(left, right);
+ LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
- LInstruction* result = new LValueOf(object, TempRegister());
+ LValueOf* result = new LValueOf(object, TempRegister());
return AssignEnvironment(DefineSameAsFirst(result));
}
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
- LInstruction* res = new LNumberUntagD(value);
+ LNumberUntagD* res = new LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
// Make sure that the temp and result_temp registers are
// different.
LUnallocated* result_temp = TempRegister();
- LInstruction* result = new LNumberTagD(value, temp1, temp2);
+ LNumberTagD* result = new LNumberTagD(value, temp1, temp2);
Define(result, result_temp);
return AssignPointerMap(result);
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- LInstruction* res = new LDoubleToI(value);
+ LDoubleToI* res = new LDoubleToI(value);
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineSameAsFirst(new LSmiTag(value));
} else {
- LInstruction* result = new LNumberTagI(value);
+ LNumberTagI* result = new LNumberTagI(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
}
} else {
LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
- LInstruction* result = new LLoadGlobal();
+ LLoadGlobal* result = new LLoadGlobal();
return instr->check_hole_value()
? AssignEnvironment(DefineAsRegister(result))
: DefineAsRegister(result);
ASSERT(instr->key()->representation().IsInteger32());
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
- LInstruction* result = new LLoadKeyedFastElement(obj, key);
+ LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
return AssignEnvironment(DefineSameAsFirst(result));
}
}
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegisterOrConstant(instr->index());
+ LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
+ LOperand* string = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LStringLength(string));
+}
+
+
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr);
}
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LOperand* object = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
- LInstruction* result = new LDeleteProperty(object, key);
+ LDeleteProperty* result = new LDeleteProperty(object, key);
return MarkAsCall(DefineFixed(result, r0), instr);
}
LOperand* arguments = UseRegister(instr->arguments());
LOperand* length = UseTempRegister(instr->length());
LOperand* index = UseRegister(instr->index());
- LInstruction* result = new LAccessArgumentsAt(arguments, length, index);
- return DefineAsRegister(AssignEnvironment(result));
+ LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
+ return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
- LInstruction* result = new LTypeof(UseRegisterAtStart(instr->value()));
+ LTypeof* result = new LTypeof(UseRegisterAtStart(instr->value()));
return MarkAsCall(DefineFixed(result, r0), instr);
}
// Type hierarchy:
//
// LInstruction
-// LAccessArgumentsAt
-// LArgumentsElements
-// LArgumentsLength
-// LBinaryOperation
+// LTemplateInstruction
+// LControlInstruction
+// LBranch
+// LClassOfTestAndBranch
+// LCmpJSObjectEqAndBranch
+// LCmpIDAndBranch
+// LHasCachedArrayIndexAndBranch
+// LHasInstanceTypeAndBranch
+// LInstanceOfAndBranch
+// LIsNullAndBranch
+// LIsObjectAndBranch
+// LIsSmiAndBranch
+// LTypeofIsAndBranch
+// LAccessArgumentsAt
+// LArgumentsElements
+// LArgumentsLength
// LAddI
// LApplyArguments
// LArithmeticD
// LBitI
// LBoundsCheck
// LCmpID
-// LCmpIDAndBranch
// LCmpJSObjectEq
-// LCmpJSObjectEqAndBranch
// LCmpT
// LDivI
// LInstanceOf
-// LInstanceOfAndBranch
// LInstanceOfKnownGlobal
// LLoadKeyedFastElement
// LLoadKeyedGeneric
// LModI
// LMulI
+// LPower
// LShiftI
// LSubI
-// LCallConstantFunction
-// LCallFunction
-// LCallGlobal
-// LCallKeyed
-// LCallKnownGlobal
-// LCallNamed
-// LCallRuntime
-// LCallStub
-// LCheckPrototypeMaps
-// LConstant
-// LConstantD
-// LConstantI
-// LConstantT
-// LDeoptimize
-// LFunctionLiteral
-// LGlobalObject
-// LGlobalReceiver
-// LLabel
-// LLazyBailout
-// LLoadContextSlot
-// LLoadGlobal
-// LMaterializedLiteral
+// LCallConstantFunction
+// LCallFunction
+// LCallGlobal
+// LCallKeyed
+// LCallKnownGlobal
+// LCallNamed
+// LCallRuntime
+// LCallStub
+// LConstant
+// LConstantD
+// LConstantI
+// LConstantT
+// LDeoptimize
+// LFunctionLiteral
+// LGap
+// LLabel
+// LGlobalObject
+// LGlobalReceiver
+// LGoto
+// LLazyBailout
+// LLoadGlobal
+// LCheckPrototypeMaps
+// LLoadContextSlot
// LArrayLiteral
// LObjectLiteral
// LRegExpLiteral
-// LOsrEntry
-// LParameter
-// LStackCheck
-// LStoreKeyed
-// LStoreKeyedFastElement
-// LStoreKeyedGeneric
-// LStoreNamed
-// LStoreNamedField
-// LStoreNamedGeneric
-// LUnaryOperation
-// LJSArrayLength
-// LFixedArrayLength
+// LOsrEntry
+// LParameter
+// LRegExpConstructResult
+// LStackCheck
+// LStoreKeyed
+// LStoreKeyedFastElement
+// LStoreKeyedGeneric
+// LStoreNamed
+// LStoreNamedField
+// LStoreNamedGeneric
+// LStringCharCodeAt
// LBitNotI
-// LBranch
// LCallNew
// LCheckFunction
+// LCheckPrototypeMaps
// LCheckInstanceType
// LCheckMap
// LCheckSmi
// LClassOfTest
-// LClassOfTestAndBranch
// LDeleteProperty
// LDoubleToI
+// LFixedArrayLength
// LHasCachedArrayIndex
-// LHasCachedArrayIndexAndBranch
// LHasInstanceType
-// LHasInstanceTypeAndBranch
// LInteger32ToDouble
// LIsNull
-// LIsNullAndBranch
// LIsObject
-// LIsObjectAndBranch
// LIsSmi
-// LIsSmiAndBranch
+// LJSArrayLength
// LLoadNamedField
// LLoadNamedGeneric
// LLoadFunctionPrototype
// LReturn
// LSmiTag
// LStoreGlobal
+// LStringLength
// LTaggedToI
// LThrow
// LTypeof
// LTypeofIs
-// LTypeofIsAndBranch
// LUnaryMathOperation
// LValueOf
-// LUnknownOSRValue
+// LUnknownOSRValue
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
- V(BinaryOperation) \
+ V(ControlInstruction) \
V(Constant) \
V(Call) \
- V(MaterializedLiteral) \
V(StoreKeyed) \
V(StoreNamed) \
- V(UnaryOperation) \
LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(SubI) \
+ V(StringCharCodeAt) \
+ V(StringLength) \
V(TaggedToI) \
V(Throw) \
V(Typeof) \
virtual void CompileToNative(LCodeGen* generator) = 0;
virtual const char* Mnemonic() const = 0;
- virtual void PrintTo(StringStream* stream) const;
- virtual void PrintDataTo(StringStream* stream) const { }
+ virtual void PrintTo(StringStream* stream);
+ virtual void PrintDataTo(StringStream* stream) = 0;
+ virtual void PrintOutputOperandTo(StringStream* stream) = 0;
// Declare virtual type testers.
#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+
virtual bool IsControl() const { return false; }
+ virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
void set_environment(LEnvironment* env) { environment_.set(env); }
LEnvironment* environment() const { return environment_.get(); }
LPointerMap* pointer_map() const { return pointer_map_.get(); }
bool HasPointerMap() const { return pointer_map_.is_set(); }
- void set_result(LOperand* operand) { result_.set(operand); }
- LOperand* result() const { return result_.get(); }
- bool HasResult() const { return result_.is_set(); }
+ virtual bool HasResult() const = 0;
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
private:
SetOncePointer<LEnvironment> environment_;
SetOncePointer<LPointerMap> pointer_map_;
- SetOncePointer<LOperand> result_;
HValue* hydrogen_value_;
SetOncePointer<LEnvironment> deoptimization_environment_;
};
-class LGap: public LInstruction {
+template<typename ElementType, int NumElements>
+class OperandContainer {
+ public:
+ OperandContainer() {
+ for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
+ }
+ int length() { return NumElements; }
+ ElementType& operator[](int i) {
+ ASSERT(i < length());
+ return elems_[i];
+ }
+ void PrintOperandsTo(StringStream* stream);
+
+ private:
+ ElementType elems_[NumElements];
+};
+
+
+template<typename ElementType>
+class OperandContainer<ElementType, 0> {
+ public:
+ int length() { return 0; }
+ void PrintOperandsTo(StringStream* stream) { }
+};
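+
+(Editorial note, not part of the patch: the empty specialization above is presumably needed because a zero-length array member would not be valid standard C++; instructions with no results, inputs, or temps still get a well-formed container. Illustratively, assuming the declarations above:)
+
+    OperandContainer<LOperand*, 2> inputs;   // real array of two slots
+    OperandContainer<LOperand*, 0> results;  // empty specialization, no array
+    // inputs.length() == 2, results.length() == 0; PrintOperandsTo on the
+    // empty container is a no-op.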
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction: public LInstruction {
+ public:
+ // Allow 0 or 1 output operands.
+ STATIC_ASSERT(R == 0 || R == 1);
+ virtual bool HasResult() const { return R != 0; }
+ void set_result(LOperand* operand) { results_[0] = operand; }
+ LOperand* result() { return results_[0]; }
+
+ int InputCount() { return I; }
+ LOperand* InputAt(int i) { return inputs_[i]; }
+
+ int TempCount() { return T; }
+ LOperand* TempAt(int i) { return temps_[i]; }
+
+ virtual void PrintDataTo(StringStream* stream);
+ virtual void PrintOutputOperandTo(StringStream* stream);
+
+ protected:
+ OperandContainer<LOperand*, R> results_;
+ OperandContainer<LOperand*, I> inputs_;
+ OperandContainer<LOperand*, T> temps_;
+};
+
+
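+(Editorial note: with results_, inputs_ and temps_ held in fixed-size containers, a pass can walk any instruction's operands uniformly instead of relying on per-class accessors such as left()/right(). A minimal sketch of such a walk, assuming the declarations above; Visit is a hypothetical callback and the sketch is not part of the patch:)
+
+    template<int R, int I, int T>
+    void VisitOperands(LTemplateInstruction<R, I, T>* instr,
+                       void (*Visit)(LOperand* op)) {
+      for (int i = 0; i < instr->InputCount(); i++) Visit(instr->InputAt(i));
+      for (int t = 0; t < instr->TempCount(); t++) Visit(instr->TempAt(t));
+      if (instr->HasResult()) Visit(instr->result());
+    }
+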
+class LGap: public LTemplateInstruction<0, 0, 0> {
public:
explicit LGap(HBasicBlock* block)
: block_(block) {
};
-class LGoto: public LInstruction {
+class LGoto: public LTemplateInstruction<0, 0, 0> {
public:
LGoto(int block_id, bool include_stack_check = false)
: block_id_(block_id), include_stack_check_(include_stack_check) { }
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int block_id() const { return block_id_; }
};
-class LLazyBailout: public LInstruction {
+class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
};
-class LDeoptimize: public LInstruction {
+class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
};
DECLARE_CONCRETE_INSTRUCTION(Label, "label")
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
int block_id() const { return block()->block_id(); }
bool is_loop_header() const { return block()->IsLoopHeader(); }
};
-class LParameter: public LInstruction {
+class LParameter: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
};
-class LCallStub: public LInstruction {
+class LCallStub: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
DECLARE_HYDROGEN_ACCESSOR(CallStub)
};
-class LUnknownOSRValue: public LInstruction {
+class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
};
-class LUnaryOperation: public LInstruction {
- public:
- explicit LUnaryOperation(LOperand* input) : input_(input) { }
-
- DECLARE_INSTRUCTION(UnaryOperation)
-
- LOperand* input() const { return input_; }
-
- virtual void PrintDataTo(StringStream* stream) const;
-
- private:
- LOperand* input_;
-};
-
-
-class LBinaryOperation: public LInstruction {
+template<int I, int T>
+class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
- LBinaryOperation(LOperand* left, LOperand* right)
- : left_(left), right_(right) { }
-
- DECLARE_INSTRUCTION(BinaryOperation)
+ DECLARE_INSTRUCTION(ControlInstruction)
+ virtual bool IsControl() const { return true; }
- LOperand* left() const { return left_; }
- LOperand* right() const { return right_; }
- virtual void PrintDataTo(StringStream* stream) const;
+ int true_block_id() const { return true_block_id_; }
+ int false_block_id() const { return false_block_id_; }
+ void SetBranchTargets(int true_block_id, int false_block_id) {
+ true_block_id_ = true_block_id;
+ false_block_id_ = false_block_id;
+ }
private:
- LOperand* left_;
- LOperand* right_;
+ int true_block_id_;
+ int false_block_id_;
};
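(Editorial note: with the branch targets moved out of the constructors and into LControlInstruction, a control instruction can be built from its operands alone and wired to its successors afterwards. A hypothetical sketch of that wiring, assuming the builder has the hydrogen block ids at hand; the variable names are illustrative and not from the patch:)

    LCmpJSObjectEqAndBranch* branch = new LCmpJSObjectEqAndBranch(left, right);
    branch->SetBranchTargets(true_block->block_id(), false_block->block_id());
    // SetBranchTargets is a virtual no-op on LInstruction itself, so the same
    // code path can handle plain instructions and control instructions alike.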
-class LApplyArguments: public LBinaryOperation {
+class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
public:
LApplyArguments(LOperand* function,
LOperand* receiver,
LOperand* length,
- LOperand* elements)
- : LBinaryOperation(function, receiver),
- length_(length),
- elements_(elements) { }
+ LOperand* elements) {
+ inputs_[0] = function;
+ inputs_[1] = receiver;
+ inputs_[2] = length;
+ inputs_[3] = elements;
+ }
DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
- LOperand* function() const { return left(); }
- LOperand* receiver() const { return right(); }
- LOperand* length() const { return length_; }
- LOperand* elements() const { return elements_; }
-
- private:
- LOperand* length_;
- LOperand* elements_;
+ LOperand* function() { return inputs_[0]; }
+ LOperand* receiver() { return inputs_[1]; }
+ LOperand* length() { return inputs_[2]; }
+ LOperand* elements() { return inputs_[3]; }
};
-class LAccessArgumentsAt: public LInstruction {
+class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
public:
- LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index)
- : arguments_(arguments), length_(length), index_(index) { }
+ LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+ inputs_[0] = arguments;
+ inputs_[1] = length;
+ inputs_[2] = index;
+ }
DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
- LOperand* arguments() const { return arguments_; }
- LOperand* length() const { return length_; }
- LOperand* index() const { return index_; }
+ LOperand* arguments() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
+ LOperand* index() { return inputs_[2]; }
- virtual void PrintDataTo(StringStream* stream) const;
-
- private:
- LOperand* arguments_;
- LOperand* length_;
- LOperand* index_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LArgumentsLength: public LUnaryOperation {
+class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LArgumentsLength(LOperand* elements) : LUnaryOperation(elements) {}
+ explicit LArgumentsLength(LOperand* elements) {
+ inputs_[0] = elements;
+ }
DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
};
-class LArgumentsElements: public LInstruction {
+class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
public:
LArgumentsElements() { }
};
-class LModI: public LBinaryOperation {
+class LModI: public LTemplateInstruction<1, 2, 0> {
public:
- LModI(LOperand* left, LOperand* right) : LBinaryOperation(left, right) { }
+ LModI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
DECLARE_HYDROGEN_ACCESSOR(Mod)
};
-class LDivI: public LBinaryOperation {
+class LDivI: public LTemplateInstruction<1, 2, 0> {
public:
- LDivI(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ LDivI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
-class LMulI: public LBinaryOperation {
+class LMulI: public LTemplateInstruction<1, 2, 1> {
public:
- LMulI(LOperand* left, LOperand* right, LOperand* temp)
- : LBinaryOperation(left, right), temp_(temp) { }
+ LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
DECLARE_HYDROGEN_ACCESSOR(Mul)
-
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
-class LCmpID: public LBinaryOperation {
+class LCmpID: public LTemplateInstruction<1, 2, 0> {
public:
- LCmpID(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ LCmpID(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
+ DECLARE_HYDROGEN_ACCESSOR(Compare)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
return hydrogen()->GetInputRepresentation().IsDouble();
}
-
- DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
};
-class LCmpIDAndBranch: public LCmpID {
+class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpIDAndBranch(LOperand* left,
- LOperand* right,
- int true_block_id,
- int false_block_id)
- : LCmpID(left, right),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LCmpIDAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
+ DECLARE_HYDROGEN_ACCESSOR(Compare)
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
+ Token::Value op() const { return hydrogen()->token(); }
+ bool is_double() const {
+ return hydrogen()->GetInputRepresentation().IsDouble();
+ }
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LUnaryMathOperation: public LUnaryOperation {
+class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LUnaryMathOperation(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temp_(temp) { }
+ LUnaryMathOperation(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
BuiltinFunctionId op() const { return hydrogen()->op(); }
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
-class LCmpJSObjectEq: public LBinaryOperation {
+class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
- LCmpJSObjectEq(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) {}
+ LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
};
-class LCmpJSObjectEqAndBranch: public LCmpJSObjectEq {
+class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpJSObjectEqAndBranch(LOperand* left,
- LOperand* right,
- int true_block_id,
- int false_block_id)
- : LCmpJSObjectEq(left, right),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
"cmp-jsobject-eq-and-branch")
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
};
-class LIsNull: public LUnaryOperation {
+class LIsNull: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LIsNull(LOperand* value) : LUnaryOperation(value) {}
+ explicit LIsNull(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
- DECLARE_HYDROGEN_ACCESSOR(IsNull);
+ DECLARE_HYDROGEN_ACCESSOR(IsNull)
bool is_strict() const { return hydrogen()->is_strict(); }
};
-
-class LIsNullAndBranch: public LIsNull {
+class LIsNullAndBranch: public LControlInstruction<1, 0> {
public:
- LIsNullAndBranch(LOperand* value,
- int true_block_id,
- int false_block_id)
- : LIsNull(value),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LIsNullAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
+ DECLARE_HYDROGEN_ACCESSOR(IsNull)
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
+ bool is_strict() const { return hydrogen()->is_strict(); }
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LIsObject: public LUnaryOperation {
+class LIsObject: public LTemplateInstruction<1, 1, 1> {
public:
- LIsObject(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temp_(temp) {}
+ LIsObject(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
-class LIsObjectAndBranch: public LIsObject {
+class LIsObjectAndBranch: public LControlInstruction<1, 2> {
public:
- LIsObjectAndBranch(LOperand* value,
- LOperand* temp,
- LOperand* temp2,
- int true_block_id,
- int false_block_id)
- : LIsObject(value, temp),
- temp2_(temp2),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ temps_[1] = temp2;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- LOperand* temp2() const { return temp2_; }
- private:
- LOperand* temp2_;
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LIsSmi: public LUnaryOperation {
+class LIsSmi: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LIsSmi(LOperand* value) : LUnaryOperation(value) {}
+ explicit LIsSmi(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
DECLARE_HYDROGEN_ACCESSOR(IsSmi)
};
-class LIsSmiAndBranch: public LIsSmi {
+class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
- LIsSmiAndBranch(LOperand* value,
- int true_block_id,
- int false_block_id)
- : LIsSmi(value),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LIsSmiAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LHasInstanceType: public LUnaryOperation {
+class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LHasInstanceType(LOperand* value)
- : LUnaryOperation(value) { }
+ explicit LHasInstanceType(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-
- InstanceType TestType(); // The type to test against when generating code.
- Condition BranchCondition(); // The branch condition for 'true'.
};
-class LHasInstanceTypeAndBranch: public LHasInstanceType {
+class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
- LHasInstanceTypeAndBranch(LOperand* value,
- int true_block_id,
- int false_block_id)
- : LHasInstanceType(value),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LHasInstanceTypeAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LHasCachedArrayIndex: public LUnaryOperation {
+class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LHasCachedArrayIndex(LOperand* value) : LUnaryOperation(value) {}
+ explicit LHasCachedArrayIndex(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
};
-class LHasCachedArrayIndexAndBranch: public LHasCachedArrayIndex {
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
- LHasCachedArrayIndexAndBranch(LOperand* value,
- int true_block_id,
- int false_block_id)
- : LHasCachedArrayIndex(value),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LClassOfTest: public LUnaryOperation {
+class LClassOfTest: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LClassOfTest(LOperand* value) : LUnaryOperation(value) {}
+ explicit LClassOfTest(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LClassOfTestAndBranch: public LClassOfTest {
+class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
public:
- LClassOfTestAndBranch(LOperand* value,
- LOperand* temporary,
- int true_block_id,
- int false_block_id)
- : LClassOfTest(value),
- temporary_(temporary),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
- LOperand* temporary() { return temporary_; }
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
- private:
- LOperand* temporary_;
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LCmpT: public LBinaryOperation {
+class LCmpT: public LTemplateInstruction<1, 2, 0> {
public:
- LCmpT(LOperand* left, LOperand* right) : LBinaryOperation(left, right) {}
+ LCmpT(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(Compare)
};
-class LCmpTAndBranch: public LCmpT {
+class LCmpTAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpTAndBranch(LOperand* left,
- LOperand* right,
- int true_block_id,
- int false_block_id)
- : LCmpT(left, right),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LCmpTAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(Compare)
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
+ Token::Value op() const { return hydrogen()->token(); }
};
-class LInstanceOf: public LBinaryOperation {
+class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
public:
- LInstanceOf(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ LInstanceOf(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfAndBranch: public LInstanceOf {
+class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
public:
- LInstanceOfAndBranch(LOperand* left,
- LOperand* right,
- int true_block_id,
- int false_block_id)
- : LInstanceOf(left, right),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ LInstanceOfAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
};
-class LInstanceOfKnownGlobal: public LUnaryOperation {
+class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LInstanceOfKnownGlobal(LOperand* left)
- : LUnaryOperation(left) { }
+ LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
"instance-of-known-global")
};
-class LBoundsCheck: public LBinaryOperation {
+class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
public:
- LBoundsCheck(LOperand* index, LOperand* length)
- : LBinaryOperation(index, length) { }
+ LBoundsCheck(LOperand* index, LOperand* length) {
+ inputs_[0] = index;
+ inputs_[1] = length;
+ }
- LOperand* index() const { return left(); }
- LOperand* length() const { return right(); }
+ LOperand* index() { return inputs_[0]; }
+ LOperand* length() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
};
-class LBitI: public LBinaryOperation {
+class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
LBitI(Token::Value op, LOperand* left, LOperand* right)
- : LBinaryOperation(left, right), op_(op) { }
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
Token::Value op() const { return op_; }
};
-class LShiftI: public LBinaryOperation {
+class LShiftI: public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
- : LBinaryOperation(left, right), op_(op), can_deopt_(can_deopt) { }
+ : op_(op), can_deopt_(can_deopt) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
Token::Value op() const { return op_; }
};
-class LSubI: public LBinaryOperation {
+class LSubI: public LTemplateInstruction<1, 2, 0> {
public:
- LSubI(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ LSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
DECLARE_HYDROGEN_ACCESSOR(Sub)
};
-class LConstant: public LInstruction {
+class LConstant: public LTemplateInstruction<1, 0, 0> {
DECLARE_INSTRUCTION(Constant)
};
};
-class LBranch: public LUnaryOperation {
+class LBranch: public LControlInstruction<1, 0> {
public:
- LBranch(LOperand* input, int true_block_id, int false_block_id)
- : LUnaryOperation(input),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
DECLARE_HYDROGEN_ACCESSOR(Value)
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
-
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LCmpMapAndBranch: public LUnaryOperation {
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
public:
- LCmpMapAndBranch(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temp_(temp) { }
+ LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch)
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
virtual bool IsControl() const { return true; }
- LOperand* temp() const { return temp_; }
Handle<Map> map() const { return hydrogen()->map(); }
int true_block_id() const {
- return hydrogen()->true_destination()->block_id();
+ return hydrogen()->FirstSuccessor()->block_id();
}
int false_block_id() const {
- return hydrogen()->false_destination()->block_id();
+ return hydrogen()->SecondSuccessor()->block_id();
}
-
- private:
- LOperand* temp_;
};
-class LJSArrayLength: public LUnaryOperation {
+class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LJSArrayLength(LOperand* input) : LUnaryOperation(input) { }
+ explicit LJSArrayLength(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
};
-class LFixedArrayLength: public LUnaryOperation {
+class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LFixedArrayLength(LOperand* input) : LUnaryOperation(input) { }
+ explicit LFixedArrayLength(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(FixedArrayLength, "fixed-array-length")
DECLARE_HYDROGEN_ACCESSOR(FixedArrayLength)
};
-class LValueOf: public LUnaryOperation {
+class LValueOf: public LTemplateInstruction<1, 1, 1> {
public:
- LValueOf(LOperand* input, LOperand* temporary)
- : LUnaryOperation(input), temporary_(temporary) { }
-
- LOperand* temporary() const { return temporary_; }
+ LValueOf(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-
- private:
- LOperand* temporary_;
};
-class LThrow: public LUnaryOperation {
+class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LThrow(LOperand* value) : LUnaryOperation(value) { }
+ explicit LThrow(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
-class LBitNotI: public LUnaryOperation {
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LBitNotI(LOperand* use) : LUnaryOperation(use) { }
+ explicit LBitNotI(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
};
-class LAddI: public LBinaryOperation {
+class LAddI: public LTemplateInstruction<1, 2, 0> {
public:
- LAddI(LOperand* left, LOperand* right)
- : LBinaryOperation(left, right) { }
+ LAddI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
DECLARE_HYDROGEN_ACCESSOR(Add)
};
-class LArithmeticD: public LBinaryOperation {
+class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
- : LBinaryOperation(left, right), op_(op) { }
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
Token::Value op() const { return op_; }
};
-class LArithmeticT: public LBinaryOperation {
+class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
- : LBinaryOperation(left, right), op_(op) { }
+ : op_(op) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
};
-class LReturn: public LUnaryOperation {
+class LReturn: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LReturn(LOperand* use) : LUnaryOperation(use) { }
+ explicit LReturn(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
-class LLoadNamedField: public LUnaryOperation {
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LLoadNamedField(LOperand* object) : LUnaryOperation(object) { }
+ explicit LLoadNamedField(LOperand* object) {
+ inputs_[0] = object;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
};
-class LLoadNamedGeneric: public LUnaryOperation {
+class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LLoadNamedGeneric(LOperand* object) : LUnaryOperation(object) { }
+ explicit LLoadNamedGeneric(LOperand* object) {
+ inputs_[0] = object;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
- LOperand* object() const { return input(); }
+ LOperand* object() { return inputs_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
};
-class LLoadFunctionPrototype: public LUnaryOperation {
+class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LLoadFunctionPrototype(LOperand* function)
- : LUnaryOperation(function) { }
+ explicit LLoadFunctionPrototype(LOperand* function) {
+ inputs_[0] = function;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
- LOperand* function() const { return input(); }
+ LOperand* function() { return inputs_[0]; }
};
-class LLoadElements: public LUnaryOperation {
+class LLoadElements: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LLoadElements(LOperand* obj) : LUnaryOperation(obj) { }
+ explicit LLoadElements(LOperand* object) {
+ inputs_[0] = object;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
};
-class LLoadKeyedFastElement: public LBinaryOperation {
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedFastElement(LOperand* elements, LOperand* key)
- : LBinaryOperation(elements, key) { }
+ LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
- LOperand* elements() const { return left(); }
- LOperand* key() const { return right(); }
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
};
-class LLoadKeyedGeneric: public LBinaryOperation {
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyedGeneric(LOperand* obj, LOperand* key)
- : LBinaryOperation(obj, key) { }
+ LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ }
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
- LOperand* object() const { return left(); }
- LOperand* key() const { return right(); }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
};
-class LLoadGlobal: public LInstruction {
+class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
};
-class LStoreGlobal: public LUnaryOperation {
+class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LStoreGlobal(LOperand* value) : LUnaryOperation(value) {}
+ explicit LStoreGlobal(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
};
-class LLoadContextSlot: public LInstruction {
+class LLoadContextSlot: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
- int context_chain_length() const {
- return hydrogen()->context_chain_length();
- }
- int slot_index() const { return hydrogen()->slot_index(); }
+ int context_chain_length() { return hydrogen()->context_chain_length(); }
+ int slot_index() { return hydrogen()->slot_index(); }
virtual void PrintDataTo(StringStream* stream);
};
-class LPushArgument: public LUnaryOperation {
+class LPushArgument: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LPushArgument(LOperand* argument) : LUnaryOperation(argument) {}
+ explicit LPushArgument(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
};
-class LGlobalObject: public LInstruction {
+class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
};
-class LGlobalReceiver: public LInstruction {
+class LGlobalReceiver: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
};
-class LCallConstantFunction: public LInstruction {
+class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- Handle<JSFunction> function() const { return hydrogen()->function(); }
+ Handle<JSFunction> function() { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKeyed: public LInstruction {
+class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
public:
+ explicit LCallKeyed(LOperand* key) {
+ inputs_[0] = key;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNamed: public LInstruction {
+
+class LCallNamed: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
DECLARE_HYDROGEN_ACCESSOR(CallNamed)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallFunction: public LInstruction {
+class LCallFunction: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
};
-class LCallGlobal: public LInstruction {
+class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
  Handle<String> name() const { return hydrogen()->name(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallKnownGlobal: public LInstruction {
+class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
Handle<JSFunction> target() const { return hydrogen()->target(); }
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallNew: public LUnaryOperation {
+class LCallNew: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LCallNew(LOperand* constructor) : LUnaryOperation(constructor) { }
+ explicit LCallNew(LOperand* constructor) {
+ inputs_[0] = constructor;
+ }
DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
DECLARE_HYDROGEN_ACCESSOR(CallNew)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
};
-class LCallRuntime: public LInstruction {
+class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
};
-class LInteger32ToDouble: public LUnaryOperation {
+class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LInteger32ToDouble(LOperand* use) : LUnaryOperation(use) { }
+ explicit LInteger32ToDouble(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
};
-class LNumberTagI: public LUnaryOperation {
+class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LNumberTagI(LOperand* use) : LUnaryOperation(use) { }
+ explicit LNumberTagI(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
};
-class LNumberTagD: public LUnaryOperation {
+class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
public:
- LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2)
- : LUnaryOperation(value), temp1_(temp1), temp2_(temp2) { }
+ LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
-
- LOperand* temp1() const { return temp1_; }
- LOperand* temp2() const { return temp2_; }
-
- private:
- LOperand* temp1_;
- LOperand* temp2_;
};
// Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LUnaryOperation {
+class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LDoubleToI(LOperand* value) : LUnaryOperation(value) { }
+ explicit LDoubleToI(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(Change)
// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LUnaryOperation {
+class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
public:
- LTaggedToI(LOperand* value, LOperand* temp)
- : LUnaryOperation(value), temp_(temp) { }
+ LTaggedToI(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(Change)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
-class LSmiTag: public LUnaryOperation {
+class LSmiTag: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LSmiTag(LOperand* use) : LUnaryOperation(use) { }
+ explicit LSmiTag(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
};
-class LNumberUntagD: public LUnaryOperation {
+class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LNumberUntagD(LOperand* value) : LUnaryOperation(value) { }
+ explicit LNumberUntagD(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
};
-class LSmiUntag: public LUnaryOperation {
+class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
public:
- LSmiUntag(LOperand* use, bool needs_check)
- : LUnaryOperation(use), needs_check_(needs_check) { }
+ LSmiUntag(LOperand* value, bool needs_check)
+ : needs_check_(needs_check) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
};
-class LStoreNamed: public LInstruction {
+class LStoreNamed: public LTemplateInstruction<0, 2, 0> {
public:
- LStoreNamed(LOperand* obj, LOperand* val)
- : object_(obj), value_(val) { }
+ LStoreNamed(LOperand* obj, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = val;
+ }
DECLARE_INSTRUCTION(StoreNamed)
DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
- virtual void PrintDataTo(StringStream* stream) const;
+ virtual void PrintDataTo(StringStream* stream);
- LOperand* object() const { return object_; }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
- LOperand* value() const { return value_; }
-
- private:
- LOperand* object_;
- LOperand* value_;
};
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
- Handle<Map> transition() { return hydrogen()->transition(); }
+ Handle<Map> transition() const { return hydrogen()->transition(); }
};
};
-class LStoreKeyed: public LInstruction {
+class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val)
- : object_(obj), key_(key), value_(val) { }
+ LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
DECLARE_INSTRUCTION(StoreKeyed)
- virtual void PrintDataTo(StringStream* stream) const;
-
- LOperand* object() const { return object_; }
- LOperand* key() const { return key_; }
- LOperand* value() const { return value_; }
+ virtual void PrintDataTo(StringStream* stream);
- private:
- LOperand* object_;
- LOperand* key_;
- LOperand* value_;
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
};
};
-class LCheckFunction: public LUnaryOperation {
+class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringCharCodeAt(LOperand* string, LOperand* index) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+};
+
+
+class LStringLength: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LCheckFunction(LOperand* use) : LUnaryOperation(use) { }
+ explicit LStringLength(LOperand* string) {
+ inputs_[0] = string;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
+ DECLARE_HYDROGEN_ACCESSOR(StringLength)
+
+ LOperand* string() { return inputs_[0]; }
+};
+
+
+class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
+ public:
+ explicit LCheckFunction(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
-class LCheckInstanceType: public LUnaryOperation {
+class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckInstanceType(LOperand* use) : LUnaryOperation(use) { }
+ explicit LCheckInstanceType(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-
- LOperand* temp() const { return temp_; }
-
- private:
- LOperand* temp_;
};
-class LCheckMap: public LUnaryOperation {
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
public:
- explicit LCheckMap(LOperand* use) : LUnaryOperation(use) { }
+ explicit LCheckMap(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
DECLARE_HYDROGEN_ACCESSOR(CheckMap)
};
-class LCheckPrototypeMaps: public LInstruction {
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
public:
- LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2)
- : temp1_(temp1), temp2_(temp2) { }
+ LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) {
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
Handle<JSObject> holder() const { return hydrogen()->holder(); }
-
- LOperand* temp1() const { return temp1_; }
- LOperand* temp2() const { return temp2_; }
-
- private:
- LOperand* temp1_;
- LOperand* temp2_;
};
-class LCheckSmi: public LUnaryOperation {
+class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
public:
- LCheckSmi(LOperand* use, Condition condition)
- : LUnaryOperation(use), condition_(condition) { }
+ LCheckSmi(LOperand* value, Condition condition)
+ : condition_(condition) {
+ inputs_[0] = value;
+ }
Condition condition() const { return condition_; }
};
-class LMaterializedLiteral: public LInstruction {
- public:
- DECLARE_INSTRUCTION(MaterializedLiteral)
-};
-
-
-class LArrayLiteral: public LMaterializedLiteral {
+class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
};
-class LObjectLiteral: public LMaterializedLiteral {
+class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
};
-class LRegExpLiteral: public LMaterializedLiteral {
+class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
};
-class LFunctionLiteral: public LInstruction {
+class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
};
-class LTypeof: public LUnaryOperation {
+class LTypeof: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LTypeof(LOperand* input) : LUnaryOperation(input) { }
+ explicit LTypeof(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
};
-class LTypeofIs: public LUnaryOperation {
+class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LTypeofIs(LOperand* input) : LUnaryOperation(input) { }
- virtual void PrintDataTo(StringStream* stream) const;
+ explicit LTypeofIs(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+ virtual void PrintDataTo(StringStream* stream);
};
-class LTypeofIsAndBranch: public LTypeofIs {
+class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
- LTypeofIsAndBranch(LOperand* value,
- int true_block_id,
- int false_block_id)
- : LTypeofIs(value),
- true_block_id_(true_block_id),
- false_block_id_(false_block_id) { }
+ explicit LTypeofIsAndBranch(LOperand* value) {
+ inputs_[0] = value;
+ }
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
- virtual void PrintDataTo(StringStream* stream) const;
- virtual bool IsControl() const { return true; }
-
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
+ Handle<String> type_literal() { return hydrogen()->type_literal(); }
- private:
- int true_block_id_;
- int false_block_id_;
+ virtual void PrintDataTo(StringStream* stream);
};
-class LDeleteProperty: public LBinaryOperation {
+class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
public:
- LDeleteProperty(LOperand* obj, LOperand* key) : LBinaryOperation(obj, key) {}
+ LDeleteProperty(LOperand* obj, LOperand* key) {
+ inputs_[0] = obj;
+ inputs_[1] = key;
+ }
DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
- LOperand* object() const { return left(); }
- LOperand* key() const { return right(); }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
};
-class LOsrEntry: public LInstruction {
+class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
public:
LOsrEntry();
};
-class LStackCheck: public LInstruction {
+class LStackCheck: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
};
inlined_closures_.Add(closure);
}
- void Verify() const;
-
private:
int spill_slot_count_;
HGraph* const graph_;
LUnallocated* ToUnallocated(DoubleRegister reg);
// Methods for setting up define-use relationships.
- LOperand* Use(HValue* value, LUnallocated* operand);
- LOperand* UseFixed(HValue* value, Register fixed_register);
- LOperand* UseFixedDouble(HValue* value, DoubleRegister fixed_register);
+ MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+ MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+ MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+ DoubleRegister fixed_register);
  // A value that is guaranteed to be allocated to a register.
  // An operand created by UseRegister is guaranteed to be live until the end
  // of the instruction, so the register allocator will not reuse its register
  // for any other operand inside the instruction. An operand created by
  // UseRegisterAtStart is guaranteed to be live only at instruction start;
  // the register allocator is free to assign the same register to some other
  // operand used inside the instruction (i.e. temporary or output).
- LOperand* UseRegister(HValue* value);
- LOperand* UseRegisterAtStart(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
// An input operand in a register that may be trashed.
- LOperand* UseTempRegister(HValue* value);
+ MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
// An input operand in a register or stack slot.
- LOperand* Use(HValue* value);
- LOperand* UseAtStart(HValue* value);
+ MUST_USE_RESULT LOperand* Use(HValue* value);
+ MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
// An input operand in a register, stack slot or a constant operand.
- LOperand* UseOrConstant(HValue* value);
- LOperand* UseOrConstantAtStart(HValue* value);
+ MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
// An input operand in a register or a constant operand.
- LOperand* UseRegisterOrConstant(HValue* value);
- LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+ MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
  // An input operand in a register, stack slot or a constant operand.
// Will not be moved to a register even if one is freely available.
- LOperand* UseAny(HValue* value);
+ MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
+ // Temporary operand that must be in a register.
+ MUST_USE_RESULT LUnallocated* TempRegister();
+ MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+ MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
- LInstruction* Define(LInstruction* instr, LUnallocated* result);
- LInstruction* Define(LInstruction* instr);
- LInstruction* DefineAsRegister(LInstruction* instr);
- LInstruction* DefineAsSpilled(LInstruction* instr, int index);
- LInstruction* DefineSameAsFirst(LInstruction* instr);
- LInstruction* DefineFixed(LInstruction* instr, Register reg);
- LInstruction* DefineFixedDouble(LInstruction* instr, DoubleRegister reg);
+ template<int I, int T>
+ LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
+ LUnallocated* result);
+ template<int I, int T>
+ LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
+ int index);
+ template<int I, int T>
+ LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
+ template<int I, int T>
+ LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
+ Register reg);
+ template<int I, int T>
+ LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
+ DoubleRegister reg);
LInstruction* AssignEnvironment(LInstruction* instr);
LInstruction* AssignPointerMap(LInstruction* instr);
LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+ LInstruction* MarkAsSaveDoubles(LInstruction* instr);
LInstruction* SetInstructionPendingDeoptimizationEnvironment(
LInstruction* instr, int ast_id);
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
- // Temporary operand that must be in a register.
- LUnallocated* TempRegister();
- LOperand* FixedTemp(Register reg);
- LOperand* FixedTemp(DoubleRegister reg);
-
void VisitInstruction(HInstruction* current);
void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
break;
}
case CodeStub::StringCharAt: {
- Abort("StringCharAtStub unimplemented.");
+ StringCharAtStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::MathPow: {
LModI* instr_;
};
// These registers hold untagged 32 bit values.
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
Register scratch = scratch0();
__ bind(&ok);
}
+ // Try a few common cases before using the generic stub.
+ Label call_stub;
+ const int kUnfolds = 3;
+ // Skip if either side is negative.
+ __ cmp(left, Operand(0));
+ __ cmp(right, Operand(0), NegateCondition(mi));
+ __ b(mi, &call_stub);
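+ // (The second cmp above executes only when the first result was
+ // non-negative (pl == NegateCondition(mi)), so the mi branch is taken
+ // exactly when left < 0 or right < 0.)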
+ // If the right hand side is smaller than the (nonnegative)
+ // left hand side, it is the result. Else try a few subtractions
+ // of the left hand side.
+ __ mov(scratch, left);
+ for (int i = 0; i < kUnfolds; i++) {
+ // Check whether the left hand side is less than
+ // the right hand side.
+ __ cmp(scratch, right);
+ __ mov(result, scratch, LeaveCC, lt);
+ __ b(lt, &done);
+ // If not, reduce the left hand side by the right hand
+ // side and check again.
+ if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
+ }
+
+ // Check for power of two on the right hand side.
+ __ sub(scratch, right, Operand(1), SetCC);
+ __ b(mi, &call_stub);
+ __ tst(scratch, right);
+ __ b(ne, &call_stub);
+ // Perform the modulo operation (scratch contains right - 1).
+ __ and_(result, scratch, Operand(left));
+ __ b(al, &done);
+
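+ // A rough scalar sketch of the fast path above (illustrative only; assumes
+ // nonnegative 32-bit operands, as established by the sign check above):
+ //   int32_t scratch = left;
+ //   for (int i = 0; i < kUnfolds; i++) {     // kUnfolds == 3 comparisons
+ //     if (scratch < right) return scratch;   // remainder found
+ //     if (i < kUnfolds - 1) scratch -= right;
+ //   }
+ //   if (right != 0 && (right & (right - 1)) == 0)
+ //     return left & (right - 1);             // right is a power of two
+ //   // otherwise fall through to the generic stub below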
+ __ bind(&call_stub);
// Call the generic stub. The numbers in r0 and r1 have
// to be tagged to Smis. If that is not possible, deoptimize.
DeferredModI* deferred = new DeferredModI(this, instr);
// If the result in r0 is a Smi, untag it, else deoptimize.
__ BranchOnNotSmi(result, &deoptimize);
- __ mov(result, Operand(result, ASR, 1));
+ __ SmiUntag(result);
__ b(al, &done);
__ bind(&deoptimize);
LDivI* instr_;
};
- const Register left = ToRegister(instr->left());
- const Register right = ToRegister(instr->right());
+ const Register left = ToRegister(instr->InputAt(0));
+ const Register right = ToRegister(instr->InputAt(1));
const Register scratch = scratch0();
const Register result = ToRegister(instr->result());
}
-void LCodeGen::DoDeferredGenericBinaryStub(LBinaryOperation* instr,
+template<int T>
+void LCodeGen::DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr,
Token::Value op) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
__ PushSafepointRegistersAndDoubles();
GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right);
void LCodeGen::DoMulI(LMulI* instr) {
Register scratch = scratch0();
- Register left = ToRegister(instr->left());
- Register right = EmitLoadRegister(instr->right(), scratch);
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = EmitLoadRegister(instr->InputAt(1), scratch);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) &&
- !instr->right()->IsConstantOperand()) {
- __ orr(ToRegister(instr->temp()), left, right);
+ !instr->InputAt(1)->IsConstantOperand()) {
+ __ orr(ToRegister(instr->TempAt(0)), left, right);
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
Label done;
__ tst(left, Operand(left));
__ b(ne, &done);
- if (instr->right()->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(instr->right())) < 0) {
+ if (instr->InputAt(1)->IsConstantOperand()) {
+ if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) < 0) {
DeoptimizeIf(no_condition, instr->environment());
}
} else {
// Test the non-zero operand for negative sign.
- __ cmp(ToRegister(instr->temp()), Operand(0));
+ __ cmp(ToRegister(instr->TempAt(0)), Operand(0));
DeoptimizeIf(mi, instr->environment());
}
__ bind(&done);
void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
Register result = ToRegister(left);
void LCodeGen::DoShiftI(LShiftI* instr) {
Register scratch = scratch0();
- LOperand* left = instr->left();
- LOperand* right = instr->right();
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
ASSERT(left->IsRegister());
Register result = ToRegister(left);
void LCodeGen::DoSubI(LSubI* instr) {
- Register left = ToRegister(instr->left());
- Register right = EmitLoadRegister(instr->right(), ip);
- ASSERT(instr->left()->Equals(instr->result()));
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = EmitLoadRegister(instr->InputAt(1), ip);
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
__ sub(left, left, right, SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(vs, instr->environment());
void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->input());
+ Register array = ToRegister(instr->InputAt(0));
__ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
}
void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
Register result = ToRegister(instr->result());
- Register array = ToRegister(instr->input());
+ Register array = ToRegister(instr->InputAt(0));
__ ldr(result, FieldMemOperand(array, FixedArray::kLengthOffset));
}
void LCodeGen::DoValueOf(LValueOf* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->temporary());
+ Register map = ToRegister(instr->TempAt(0));
ASSERT(input.is(result));
Label done;
void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->Equals(instr->result()));
__ mvn(ToRegister(input), Operand(ToRegister(input)));
}
void LCodeGen::DoThrow(LThrow* instr) {
- Register input_reg = EmitLoadRegister(instr->input(), ip);
+ Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
__ push(input_reg);
CallRuntime(Runtime::kThrow, 1, instr);
void LCodeGen::DoAddI(LAddI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
ASSERT(left->Equals(instr->result()));
Register right_reg = EmitLoadRegister(right, ip);
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->left());
- DoubleRegister right = ToDoubleRegister(instr->right());
+ DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
+ DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
switch (instr->op()) {
case Token::ADD:
__ vadd(left, left, right);
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
- ASSERT(ToRegister(instr->left()).is(r1));
- ASSERT(ToRegister(instr->right()).is(r0));
+ ASSERT(ToRegister(instr->InputAt(0)).is(r1));
+ ASSERT(ToRegister(instr->InputAt(1)).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
// TODO(regis): Implement TypeRecordingBinaryOpStub and replace current
Representation r = instr->hydrogen()->representation();
if (r.IsInteger32()) {
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, Operand(0));
EmitBranch(true_block, false_block, nz);
} else if (r.IsDouble()) {
- DoubleRegister reg = ToDoubleRegister(instr->input());
+ DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
Register scratch = scratch0();
// Test the double value. Zero and NaN are false.
EmitBranch(true_block, false_block, ne);
} else {
ASSERT(r.IsTagged());
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
if (instr->hydrogen()->type().IsBoolean()) {
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(reg, ip);
void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
- Register left = ToRegister(instr->left());
- Register right = ToRegister(instr->right());
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
Register result = ToRegister(instr->result());
__ cmp(left, Operand(right));
__ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
__ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
- Abort("DoCmpJSObjectEq untested.");
}
void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
- Abort("DoCmpJSObjectEqAndBranch unimplemented.");
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ __ cmp(left, Operand(right));
+ EmitBranch(true_block, false_block, eq);
}
void LCodeGen::DoIsNull(LIsNull* instr) {
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
__ LoadRoot(ip, Heap::kNullValueRootIndex);
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register scratch = scratch0();
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
// TODO(fsc): If the expression is known to be a smi, then it's
// definitely not null. Jump to the false block.
void LCodeGen::DoIsSmi(LIsSmi* instr) {
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
Register result = ToRegister(instr->result());
- Register input_reg = EmitLoadRegister(instr->input(), ip);
+ Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
__ tst(input_reg, Operand(kSmiTagMask));
__ LoadRoot(result, Heap::kTrueValueRootIndex);
Label done;
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Register input_reg = EmitLoadRegister(instr->input(), ip);
+ Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
__ tst(input_reg, Operand(kSmiTagMask));
EmitBranch(true_block, false_block, eq);
}
-InstanceType LHasInstanceType::TestType() {
- InstanceType from = hydrogen()->from();
- InstanceType to = hydrogen()->to();
+static InstanceType TestType(HHasInstanceType* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
ASSERT(from == to || to == LAST_TYPE);
return from;
}
-Condition LHasInstanceType::BranchCondition() {
- InstanceType from = hydrogen()->from();
- InstanceType to = hydrogen()->to();
+static Condition BranchCondition(HHasInstanceType* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
if (from == to) return eq;
if (to == LAST_TYPE) return hs;
if (from == FIRST_TYPE) return ls;
void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Abort("DoHasInstanceType unimplemented.");
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+ Label done;
+ __ tst(input, Operand(kSmiTagMask));
+ __ LoadRoot(result, Heap::kFalseValueRootIndex, eq);
+ __ b(eq, &done);
+ __ CompareObjectType(input, result, result, TestType(instr->hydrogen()));
+ Condition cond = BranchCondition(instr->hydrogen());
+ __ LoadRoot(result, Heap::kTrueValueRootIndex, cond);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex, NegateCondition(cond));
+ __ bind(&done);
}
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
__ tst(input, Operand(kSmiTagMask));
__ b(eq, false_label);
- __ CompareObjectType(input, scratch, scratch, instr->TestType());
- EmitBranch(true_block, false_block, instr->BranchCondition());
+ __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
+ EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}
void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
ASSERT(input.is(result));
Handle<String> class_name = instr->hydrogen()->class_name();
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
Register temp = scratch0();
- Register temp2 = ToRegister(instr->temporary());
+ Register temp2 = ToRegister(instr->TempAt(0));
Handle<String> class_name = instr->hydrogen()->class_name();
int true_block = chunk_->LookupDestination(instr->true_block_id());
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Register reg = ToRegister(instr->input());
- Register temp = ToRegister(instr->temp());
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
int true_block = instr->true_block_id();
int false_block = instr->false_block_id();
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
- ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
+ ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
+ ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- Abort("DoInstanceOfKnownGlobal unimplemented.");
+ class DeferredInstanceOfKnownGlobal: public LDeferredCode {
+ public:
+ DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+ LInstanceOfKnownGlobal* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
+ }
+
+ Label* map_check() { return &map_check_; }
+
+ private:
+ LInstanceOfKnownGlobal* instr_;
+ Label map_check_;
+ };
+
+ DeferredInstanceOfKnownGlobal* deferred;
+ deferred = new DeferredInstanceOfKnownGlobal(this, instr);
+
+ Label done, false_result;
+ Register object = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+ Register result = ToRegister(instr->result());
+
+ ASSERT(object.is(r0));
+ ASSERT(result.is(r0));
+
+ // A Smi is not an instance of anything.
+ __ BranchOnSmi(object, &false_result);
+
+ // This is the inlined call site instanceof cache. The two occurrences of the
+ // hole value will be patched to the last map/result pair generated by the
+ // instanceof stub.
+ Label cache_miss;
+ Register map = temp;
+ __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ bind(deferred->map_check()); // Label for calculating code patching.
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch with
+ // the cached map.
+ __ mov(ip, Operand(Factory::the_hole_value()));
+ __ cmp(map, Operand(ip));
+ __ b(ne, &cache_miss);
+ // We use Factory::the_hole_value() on purpose instead of loading from the
+ // root array to force relocation to be able to later patch
+ // with true or false.
+ __ mov(result, Operand(Factory::the_hole_value()));
+ __ b(&done);
+
+ // The inlined call site cache did not match. Check null and string before
+ // calling the deferred code.
+ __ bind(&cache_miss);
+ // Null is not an instance of anything.
+ __ LoadRoot(ip, Heap::kNullValueRootIndex);
+ __ cmp(object, Operand(ip));
+ __ b(eq, &false_result);
+
+ // String values are not instances of anything.
+ Condition is_string = masm_->IsObjectStringType(object, temp);
+ __ b(is_string, &false_result);
+
+ // Go to the deferred code.
+ __ b(deferred->entry());
+
+ __ bind(&false_result);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+ // Here result has either true or false. Deferred code also produces true or
+ // false object.
+ __ bind(deferred->exit());
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check) {
+ Register result = ToRegister(instr->result());
+ ASSERT(result.is(r0));
+
+ InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kArgsInRegisters);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kCallSiteInlineCheck);
+ flags = static_cast<InstanceofStub::Flags>(
+ flags | InstanceofStub::kReturnTrueFalseObject);
+ InstanceofStub stub(flags);
+
+ __ PushSafepointRegisters();
+
+ // Get the temp register reserved by the instruction. This needs to be r4 as
+ // its slot in the pushed safepoint register area is used to communicate the
+ // offset to the location of the map check.
+ Register temp = ToRegister(instr->TempAt(0));
+ ASSERT(temp.is(r4));
+ __ mov(InstanceofStub::right(), Operand(instr->function()));
+ static const int kAdditionalDelta = 4;
+ int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
+ Label before_push_delta;
+ __ bind(&before_push_delta);
+ __ BlockConstPoolFor(kAdditionalDelta);
+ __ mov(temp, Operand(delta * kPointerSize));
+ __ StoreToSafepointRegisterSlot(temp);
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ ASSERT_EQ(kAdditionalDelta,
+ masm_->InstructionsGeneratedSince(&before_push_delta));
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ // Put the result value into the result register slot and
+ // restore all registers.
+ __ StoreToSafepointRegisterSlot(result);
+
+ __ PopSafepointRegisters();
}
void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
- Register value = ToRegister(instr->input());
+ Register value = ToRegister(instr->InputAt(0));
__ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
__ str(value, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
}
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
- Register object = ToRegister(instr->input());
+ Register object = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
if (instr->hydrogen()->is_in_object()) {
__ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
void LCodeGen::DoLoadElements(LLoadElements* instr) {
- ASSERT(instr->result()->Equals(instr->input()));
- Register reg = ToRegister(instr->input());
+ ASSERT(instr->result()->Equals(instr->InputAt(0)));
+ Register reg = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
__ ldr(reg, FieldMemOperand(reg, JSObject::kElementsOffset));
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
- Register elem = ToRegister(instr->input());
+ Register elem = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label done;
void LCodeGen::DoPushArgument(LPushArgument* instr) {
- LOperand* argument = instr->input();
+ LOperand* argument = instr->InputAt(0);
if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
Abort("DoPushArgument not implemented for double type.");
} else {
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- Abort("DoDeferredMathAbsTaggedHeapNumber unimplemented.");
+ Register input = ToRegister(instr->InputAt(0));
+ Register scratch = scratch0();
+
+ // Deoptimize if not a heap number.
+ __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch, Operand(ip));
+ DeoptimizeIf(ne, instr->environment());
+
+ Label done;
+
+ Label negative;
+ __ ldr(scratch, FieldMemOperand(input, HeapNumber::kExponentOffset));
+ // Check the sign of the argument. If the argument is positive, just
+ // return it. We do not need to patch the stack since |input| and
+ // |result| are the same register and |input| will be restored
+ // unchanged by popping safepoint registers.
+ __ tst(scratch, Operand(HeapNumber::kSignMask));
+ __ b(ne, &negative);
+ __ jmp(&done);
+
+ __ bind(&negative);
+ // Preserve the value of all registers.
+ __ PushSafepointRegisters();
+
+ Register tmp = input.is(r0) ? r1 : r0;
+ Register tmp2 = input.is(r2) ? r3 : r2;
+ Register tmp3 = input.is(r4) ? r5 : r4;
+
+ Label allocated, slow;
+ __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(tmp, tmp2, tmp3, scratch, &slow);
+ __ b(&allocated);
+
+ // Slow case: Call the runtime system to do the number allocation.
+ __ bind(&slow);
+
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ // Set the pointer to the new heap number in tmp.
+ if (!tmp.is(r0)) __ mov(tmp, Operand(r0));
+
+ // Restore input_reg after call to runtime.
+ MemOperand input_register_slot = masm()->SafepointRegisterSlot(input);
+ __ ldr(input, input_register_slot);
+
+ __ bind(&allocated);
+ __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kExponentOffset));
+ __ bic(tmp2, tmp2, Operand(HeapNumber::kSignMask));
+ __ str(tmp2, FieldMemOperand(tmp, HeapNumber::kExponentOffset));
+ __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+ __ str(tmp2, FieldMemOperand(tmp, HeapNumber::kMantissaOffset));
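+ // (Scalar view: the freshly allocated heap number holds the input value
+ // with the IEEE 754 sign bit cleared, i.e. fabs(input).)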
+
+ __ str(tmp, input_register_slot);
+ __ PopSafepointRegisters();
+
+ __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+ Label is_positive;
+ uint32_t kSignMask = 0x80000000u;
+ Register input = ToRegister(instr->InputAt(0));
+ __ tst(input, Operand(kSignMask));
+ __ b(eq, &is_positive);
+ __ rsb(input, input, Operand(0), SetCC);
+ // Deoptimize on overflow.
+ DeoptimizeIf(vs, instr->environment());
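+ // (The rsb computes 0 - input; the only overflow (V set) is for
+ // input == kMinInt, whose absolute value does not fit in 32 bits.)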
+ __ bind(&is_positive);
}
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- Abort("DoMathAbs unimplemented.");
+ // Class for deferred case.
+ class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+ public:
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+ LUnaryMathOperation* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() {
+ codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+ }
+ private:
+ LUnaryMathOperation* instr_;
+ };
+
+ ASSERT(instr->InputAt(0)->Equals(instr->result()));
+ Representation r = instr->hydrogen()->value()->representation();
+ if (r.IsDouble()) {
+ DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
+ // __ vabs(input, input);
+ Abort("Double DoMathAbs unimplemented");
+ } else if (r.IsInteger32()) {
+ EmitIntegerMathAbs(instr);
+ } else {
+ // Representation is tagged.
+ DeferredMathAbsTaggedHeapNumber* deferred =
+ new DeferredMathAbsTaggedHeapNumber(this, instr);
+ Register input = ToRegister(instr->InputAt(0));
+ // Smi check.
+ __ BranchOnNotSmi(input, deferred->entry());
+ // If smi, handle it directly.
+ EmitIntegerMathAbs(instr);
+ __ bind(deferred->exit());
+ }
}
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->input());
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
- Register prev_fpscr = ToRegister(instr->temp());
+ Register prev_fpscr = ToRegister(instr->TempAt(0));
SwVfpRegister single_scratch = double_scratch0().low();
Register scratch = scratch0();
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->input());
+ DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input));
__ vsqrt(input, input);
}
void LCodeGen::DoCallNew(LCallNew* instr) {
- ASSERT(ToRegister(instr->input()).is(r1));
+ ASSERT(ToRegister(instr->InputAt(0)).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
}
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt: public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ Register scratch = scratch0();
+ Register string = ToRegister(instr->string());
+ Register index = no_reg;
+ int const_index = -1;
+ if (instr->index()->IsConstantOperand()) {
+ const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (!Smi::IsValid(const_index)) {
+ // Guaranteed to be out of bounds because of the assert above.
+ // So the bounds check that must dominate this instruction must
+ // have deoptimized already.
+ if (FLAG_debug_code) {
+ __ Abort("StringCharCodeAt: out of bounds index.");
+ }
+ // No code needs to be generated.
+ return;
+ }
+ } else {
+ index = ToRegister(instr->index());
+ }
+ Register result = ToRegister(instr->result());
+
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(this, instr);
+
+ Label flat_string, ascii_string, done;
+
+ // Fetch the instance type of the receiver into result register.
+ __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(result, Operand(kStringRepresentationMask));
+ __ b(eq, &flat_string);
+
+ // Handle non-flat strings.
+ __ tst(result, Operand(kIsConsStringMask));
+ __ b(eq, deferred->entry());
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ ldr(scratch, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
+ __ cmp(scratch, ip);
+ __ b(ne, deferred->entry());
+ // Get the first of the two strings and load its instance type.
+ __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
+ __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ tst(result, Operand(kStringRepresentationMask));
+ __ b(ne, deferred->entry());
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ tst(result, Operand(kStringEncodingMask));
+ __ b(ne, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ if (instr->index()->IsConstantOperand()) {
+ __ ldrh(result,
+ FieldMemOperand(string,
+ SeqTwoByteString::kHeaderSize + 2 * const_index));
+ } else {
+ __ add(scratch,
+ string,
+ Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ ldrh(result, MemOperand(scratch, index, LSL, 1));
+ }
+ __ jmp(&done);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ if (instr->index()->IsConstantOperand()) {
+ __ ldrb(result, FieldMemOperand(string,
+ SeqAsciiString::kHeaderSize + const_index));
+ } else {
+ __ add(scratch,
+ string,
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ ldrb(result, MemOperand(scratch, index));
+ }
+ __ bind(&done);
+ __ bind(deferred->exit());
+}
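+// A scalar sketch of the flat-string loads above (illustrative only; 'index'
+// is an untagged int32 here, the deferred path re-tags it before calling out):
+//   two-byte: code = *(uint16_t*)(string - kHeapObjectTag +
+//                                 SeqTwoByteString::kHeaderSize + 2 * index);
+//   ASCII:    code = *(uint8_t*)(string - kHeapObjectTag +
+//                                SeqAsciiString::kHeaderSize + index);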
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, Operand(0));
+
+ __ PushSafepointRegisters();
+ __ push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ __ mov(scratch, Operand(Smi::FromInt(const_index)));
+ __ push(scratch);
+ } else {
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ push(index);
+ }
+ __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(r0);
+ }
+ __ SmiUntag(r0);
+ MemOperand result_stack_slot = masm()->SafepointRegisterSlot(result);
+ __ str(r0, result_stack_slot);
+ __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::DoStringLength(LStringLength* instr) {
+ Register string = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
+}
+
+
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
ASSERT(output->IsDoubleRegister());
LNumberTagI* instr_;
};
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
Label slow;
- Register reg = ToRegister(instr->input());
+ Register reg = ToRegister(instr->InputAt(0));
DoubleRegister dbl_scratch = d0;
SwVfpRegister flt_scratch = s0;
LNumberTagD* instr_;
};
- DoubleRegister input_reg = ToDoubleRegister(instr->input());
+ DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
Register scratch = scratch0();
Register reg = ToRegister(instr->result());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
+ Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->TempAt(1));
DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
__ SmiTag(ToRegister(input));
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() && input->Equals(instr->result()));
if (instr->needs_check()) {
__ tst(ToRegister(input), Operand(kSmiTagMask));
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label done;
- Register input_reg = ToRegister(instr->input());
+ Register input_reg = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
DoubleRegister dbl_scratch = d0;
SwVfpRegister flt_scratch = s0;
- DoubleRegister dbl_tmp = ToDoubleRegister(instr->temp());
+ DoubleRegister dbl_tmp = ToDoubleRegister(instr->TempAt(0));
// Heap number map check.
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
ASSERT(input->Equals(instr->result()));
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
__ tst(ToRegister(input), Operand(kSmiTagMask));
DeoptimizeIf(instr->condition(), instr->environment());
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
Register scratch = scratch0();
InstanceType first = instr->hydrogen()->first();
InstanceType last = instr->hydrogen()->last();
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- ASSERT(instr->input()->IsRegister());
- Register reg = ToRegister(instr->input());
+ ASSERT(instr->InputAt(0)->IsRegister());
+ Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, Operand(instr->hydrogen()->target()));
DeoptimizeIf(ne, instr->environment());
}
void LCodeGen::DoCheckMap(LCheckMap* instr) {
Register scratch = scratch0();
- LOperand* input = instr->input();
+ LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
__ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
+ Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->TempAt(1));
Handle<JSObject> holder = instr->holder();
Handle<JSObject> current_prototype = instr->prototype();
void LCodeGen::DoTypeof(LTypeof* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
__ push(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}
void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Label true_label;
Label false_label;
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Register input = ToRegister(instr->input());
+ Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
void FinishCode(Handle<Code> code);
// Deferred code support.
- void DoDeferredGenericBinaryStub(LBinaryOperation* instr, Token::Value op);
+ template<int T>
+ void DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr,
+ Token::Value op);
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+ void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+ Label* map_check);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
MemOperand ToMemOperand(LOperand* op) const;
// Specific math operations - used from DoUnaryMathOperation.
+ void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
PopSafepointRegisters();
}
+void MacroAssembler::StoreToSafepointRegisterSlot(Register reg) {
+ str(reg, SafepointRegisterSlot(reg));
+}
+
+
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
}
+MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
+ return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
void MacroAssembler::Ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
ASSERT(src.rm().is(no_reg));
}
+void MacroAssembler::AbortIfNotSmi(Register object) {
+ ASSERT_EQ(0, kSmiTag);
+ tst(object, Operand(kSmiTagMask));
+ Assert(eq, "Operand is not smi");
+}
+
+
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
Register first,
Register second,
}
+void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
+ Register result) {
+ const uint32_t kLdrOffsetMask = (1 << 12) - 1;
+ const int32_t kPCRegOffset = 2 * kPointerSize;
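+ // (Reading pc on ARM yields the address of the current instruction plus 8,
+ // i.e. two instructions ahead, hence kPCRegOffset = 2 * kPointerSize. For a
+ // positive-offset "ldr rd, [pc, #off]" the constant therefore lives at
+ // ldr_location + 8 + off, which is what is computed below.)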
+ ldr(result, MemOperand(ldr_location));
+ if (FLAG_debug_code) {
+ // Check that the instruction is a ldr reg, [pc + offset] .
+ and_(result, result, Operand(kLdrPCPattern));
+ cmp(result, Operand(kLdrPCPattern));
+ Check(eq, "The instruction to patch should be a load from pc.");
+ // Result was clobbered. Restore it.
+ ldr(result, MemOperand(ldr_location));
+ }
+ // Get the address of the constant.
+ and_(result, result, Operand(kLdrOffsetMask));
+ add(result, ldr_location, Operand(result));
+ add(result, result, Operand(kPCRegOffset));
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
void PopSafepointRegisters();
void PushSafepointRegistersAndDoubles();
void PopSafepointRegistersAndDoubles();
-
+ void StoreToSafepointRegisterSlot(Register reg);
static int SafepointRegisterStackIndex(int reg_code);
+ static MemOperand SafepointRegisterSlot(Register reg);
// Load two consecutive registers with two consecutive memory locations.
void Ldrd(Register dst1,
// Abort execution if argument is a smi. Used in debug code.
void AbortIfSmi(Register object);
+ void AbortIfNotSmi(Register object);
// ---------------------------------------------------------------------------
// String utilities
Label* failure);
+ // ---------------------------------------------------------------------------
+ // Patching helpers.
+
+ // Get the location of a relocated constant (its address in the constant pool)
+ // from its load site.
+ void GetRelocatedValueLocation(Register ldr_location,
+ Register result);
+
+
private:
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
if (sim_->isWatchedStop(code) && !sim_->watched_stops[code].desc) {
sim_->watched_stops[code].desc = msg;
}
- PrintF("Simulator hit %s\n", msg);
+ // Print the stop message and code if it is not the default code.
+ if (code != kMaxStopCode) {
+ PrintF("Simulator hit stop %u: %s\n", code, msg);
+ } else {
+ PrintF("Simulator hit %s\n", msg);
+ }
sim_->set_pc(sim_->get_pc() + 2 * Instr::kInstrSize);
Debug();
}
PrintF("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_);
PrintF("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_);
PrintF("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_);
- PrintF("INEXACT flag: %d; ", sim_->inexact_vfp_flag_);
+ PrintF("INEXACT flag: %d;\n", sim_->inexact_vfp_flag_);
} else if (strcmp(cmd, "stop") == 0) {
int32_t value;
intptr_t stop_pc = sim_->get_pc() - 2 * Instr::kInstrSize;
break;
}
}
+ // If the instruction is a not-taken conditional stop, we need to skip the
+ // inlined message address.
+ } else if (instr->IsStop()) {
+ set_pc(get_pc() + 2 * Instr::kInstrSize);
}
if (!pc_modified_) {
set_register(pc, reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
}
+// Convert the int passed in register ival to an IEEE 754 single precision
+// floating point value and store it at memory location (dst + 4 * wordoffset).
+// If VFP3 is available, use it for the conversion.
+static void StoreIntAsFloat(MacroAssembler* masm,
+ Register dst,
+ Register wordoffset,
+ Register ival,
+ Register fval,
+ Register scratch1,
+ Register scratch2) {
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, ival);
+ __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
+ __ vcvt_f32_s32(s0, s0);
+ __ vstr(s0, scratch1, 0);
+ } else {
+ Label not_special, done;
+ // Move the sign bit from the source to the destination. This works because
+ // the sign bit of a binary32 value has the same position and polarity as the
+ // 2's complement sign bit of a 32-bit integer.
+ ASSERT(kBinary32SignMask == 0x80000000u);
+
+ __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
+ // Negate value if it is negative.
+ __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+
+ // We have -1, 0 or 1, which we treat specially. Register ival contains
+ // absolute value: it is either equal to 1 (special case of -1 and 1),
+ // greater than 1 (not a special case) or less than 1 (special case of 0).
+ __ cmp(ival, Operand(1));
+ __ b(gt, &not_special);
+
+ // For 1 or -1 we need to or in the 0 exponent (biased).
+ static const uint32_t exponent_word_for_1 =
+ kBinary32ExponentBias << kBinary32ExponentShift;
+
+ __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
+ __ b(&done);
+
+ __ bind(&not_special);
+ // Count leading zeros.
+ // Gets the wrong answer for 0, but we already checked for that case above.
+ Register zeros = scratch2;
+ __ CountLeadingZeros(zeros, ival, scratch1);
+
+ // Compute exponent and or it into the exponent register.
+ __ rsb(scratch1,
+ zeros,
+ Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
+
+ __ orr(fval,
+ fval,
+ Operand(scratch1, LSL, kBinary32ExponentShift));
+
+ // Shift up the source chopping the top bit off.
+ __ add(zeros, zeros, Operand(1));
+ // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
+ __ mov(ival, Operand(ival, LSL, zeros));
+ // Or the top kBinary32MantissaBits bits of ival into the mantissa.
+ __ orr(fval,
+ fval,
+ Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
+
+ __ bind(&done);
+ __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
+ }
+}
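+// A rough scalar equivalent of the non-VFP path above (illustrative only; the
+// hand-rolled code truncates rather than rounds the extra mantissa bits when
+// |ival| >= 2^24):
+//   float f = static_cast<float>(ival);
+//   uint32_t bits;
+//   memcpy(&bits, &f, sizeof(bits));  // raw binary32 bits
+//   // ... then store 'bits' at byte offset 4 * wordoffset from dst.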
+
+
+// Convert unsigned integer with specified number of leading zeroes in binary
+// representation to IEEE 754 double.
+// Integer to convert is passed in register hiword.
+// Resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
+static void GenerateUInt2Double(MacroAssembler* masm,
+ Register hiword,
+ Register loword,
+ Register scratch,
+ int leading_zeroes) {
+ const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+ const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+ const int mantissa_shift_for_hi_word =
+ meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+
+ const int mantissa_shift_for_lo_word =
+ kBitsPerInt - mantissa_shift_for_hi_word;
+
+ __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
+ if (mantissa_shift_for_hi_word > 0) {
+ __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
+ __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
+ } else {
+ __ mov(loword, Operand(0, RelocInfo::NONE));
+ __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
+ }
+
+ // If the least significant bit of the biased exponent was not 1, it was
+ // corrupted by the most significant bit of the mantissa, so fix that up.
+ if (!(biased_exponent & 1)) {
+ __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
+ }
+}
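+// In scalar terms (illustrative only), GenerateUInt2Double produces the two
+// 32-bit words of the IEEE 754 double for a nonzero unsigned int:
+//   double d = static_cast<double>(value);                 // value != 0
+//   uint64_t bits;
+//   memcpy(&bits, &d, sizeof(bits));
+//   uint32_t hiword = static_cast<uint32_t>(bits >> 32);   // sign/exponent/top mantissa
+//   uint32_t loword = static_cast<uint32_t>(bits);         // low mantissa bits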
+
#undef __
#define __ ACCESS_MASM(masm())
}
+static bool IsElementTypeSigned(ExternalArrayType array_type) {
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalShortArray:
+ case kExternalIntArray:
+ return true;
+
+ case kExternalUnsignedByteArray:
+ case kExternalUnsignedShortArray:
+ case kExternalUnsignedIntArray:
+ return false;
+
+ default:
+ UNREACHABLE();
+ return false;
+ }
+}
+
+
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ Label slow, failed_allocation;
+
+ Register key = r0;
+ Register receiver = r1;
+
+ // Check that the object isn't a smi
+ __ BranchOnSmi(receiver, &slow);
+
+ // Check that the key is a smi.
+ __ BranchOnNotSmi(key, &slow);
+
+ // Check that the object is a JS object. Load map into r2.
+ __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
+ __ b(lt, &slow);
+
+ // Check that the receiver does not require access checks. We need
+ // to check this explicitly since this generic stub does not perform
+ // map checks.
+ __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ b(ne, &slow);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
+ __ cmp(r2, ip);
+ __ b(ne, &slow);
+
+ // Check that the index is in range.
+ __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
+ __ cmp(ip, Operand(key, ASR, kSmiTagSize));
+ // Unsigned comparison catches both negative and too-large values.
+ __ b(lo, &slow);
+
+ // r3: elements array
+ __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
+ // r3: base pointer of external storage
+
+ // We do not untag the smi key; instead we work with it
+ // as if it were premultiplied by 2.
+ ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
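+ // With the key left as a smi (index << 1), the byte offsets used below are:
+ //   1-byte elements: key >> 1  (LSR 1)
+ //   2-byte elements: key       (LSL 0)
+ //   4-byte elements: key << 1  (LSL 1)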
+
+ Register value = r2;
+ switch (array_type) {
+ case kExternalByteArray:
+ __ ldrsb(value, MemOperand(r3, key, LSR, 1));
+ break;
+ case kExternalUnsignedByteArray:
+ __ ldrb(value, MemOperand(r3, key, LSR, 1));
+ break;
+ case kExternalShortArray:
+ __ ldrsh(value, MemOperand(r3, key, LSL, 0));
+ break;
+ case kExternalUnsignedShortArray:
+ __ ldrh(value, MemOperand(r3, key, LSL, 0));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ ldr(value, MemOperand(r3, key, LSL, 1));
+ break;
+ case kExternalFloatArray:
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ add(r2, r3, Operand(key, LSL, 1));
+ __ vldr(s0, r2, 0);
+ } else {
+ __ ldr(value, MemOperand(r3, key, LSL, 1));
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // For integer array types:
+ // r2: value
+ // For the floating-point array type:
+ // s0: value (if VFP3 is supported)
+ // r2: value (if VFP3 is not supported)
+
+ if (array_type == kExternalIntArray) {
+ // For the Int and UnsignedInt array types, we need to see whether
+ // the value can be represented in a Smi. If not, we need to convert
+ // it to a HeapNumber.
+ Label box_int;
+ __ cmp(value, Operand(0xC0000000));
+ __ b(mi, &box_int);
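+ // (The check works because value - 0xC0000000 == value + 2^30 mod 2^32,
+ // whose sign bit is set exactly when value lies outside the smi range
+ // [-2^30, 2^30).)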
+ // Tag integer as smi and return it.
+ __ mov(r0, Operand(value, LSL, kSmiTagSize));
+ __ Ret();
+
+ __ bind(&box_int);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion. Don't touch r0 or r1 as they are needed if allocation
+ // fails.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
+ // Now we can use r0 for the result as key is not needed any more.
+ __ mov(r0, r5);
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vmov(s0, value);
+ __ vcvt_f64_s32(d0, s0);
+ __ sub(r3, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r3, HeapNumber::kValueOffset);
+ __ Ret();
+ } else {
+ WriteInt32ToHeapNumberStub stub(value, r0, r3);
+ __ TailCallStub(&stub);
+ }
+ } else if (array_type == kExternalUnsignedIntArray) {
+ // The test is different for unsigned int values. Since we need
+ // the value to be in the range of a positive smi, we can't
+ // handle either of the top two bits being set in the value.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ Label box_int, done;
+ __ tst(value, Operand(0xC0000000));
+ __ b(ne, &box_int);
+ // Tag integer as smi and return it.
+ __ mov(r0, Operand(value, LSL, kSmiTagSize));
+ __ Ret();
+
+ __ bind(&box_int);
+ __ vmov(s0, value);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
+ // registers - also when jumping due to exhausted young space.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+
+ __ vcvt_f64_u32(d0, s0);
+ __ sub(r1, r2, Operand(kHeapObjectTag));
+ __ vstr(d0, r1, HeapNumber::kValueOffset);
+
+ __ mov(r0, r2);
+ __ Ret();
+ } else {
+ // Check whether unsigned integer fits into smi.
+ Label box_int_0, box_int_1, done;
+ __ tst(value, Operand(0x80000000));
+ __ b(ne, &box_int_0);
+ __ tst(value, Operand(0x40000000));
+ __ b(ne, &box_int_1);
+ // Tag integer as smi and return it.
+ __ mov(r0, Operand(value, LSL, kSmiTagSize));
+ __ Ret();
+
+ Register hiword = value; // r2.
+ Register loword = r3;
+
+ __ bind(&box_int_0);
+ // Integer does not have leading zeros.
+ GenerateUInt2Double(masm(), hiword, loword, r4, 0);
+ __ b(&done);
+
+ __ bind(&box_int_1);
+ // Integer has one leading zero.
+ GenerateUInt2Double(masm(), hiword, loword, r4, 1);
+
+
+ __ bind(&done);
+ // Integer was converted to double in registers hiword:loword.
+ // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
+ // clobbers all registers - also when jumping due to exhausted young
+ // space.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
+
+ __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
+ __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
+
+ __ mov(r0, r4);
+ __ Ret();
+ }
+ } else if (array_type == kExternalFloatArray) {
+ // For the floating-point array type, we need to always allocate a
+ // HeapNumber.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+ // Allocate a HeapNumber for the result. Don't use r0 and r1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
+ __ vcvt_f64_f32(d0, s0);
+ __ sub(r1, r2, Operand(kHeapObjectTag));
+ __ vstr(d0, r1, HeapNumber::kValueOffset);
+
+ __ mov(r0, r2);
+ __ Ret();
+ } else {
+ // Allocate a HeapNumber for the result. Don't use r0 and r1 as
+ // AllocateHeapNumber clobbers all registers - also when jumping due to
+ // exhausted young space.
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
+ // VFP is not available; do a manual single-to-double conversion.
+
+ // r2: floating point value (binary32)
+ // r3: heap number for result
+
+ // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
+ // the slow case from here.
+ __ and_(r0, value, Operand(kBinary32MantissaMask));
+
+ // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
+ // the slow case from here.
+ __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
+ __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+ Label exponent_rebiased;
+ __ teq(r1, Operand(0x00));
+ __ b(eq, &exponent_rebiased);
+
+ __ teq(r1, Operand(0xff));
+ __ mov(r1, Operand(0x7ff), LeaveCC, eq);
+ __ b(eq, &exponent_rebiased);
+
+ // Rebias exponent.
+ __ add(r1,
+ r1,
+ Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
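+ // (Rebias in scalar terms: double_exp = float_exp + (1023 - 127); the
+ // special exponents 0x00 and 0xff were mapped to 0x000 and 0x7ff above.)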
+
+ __ bind(&exponent_rebiased);
+ __ and_(r2, value, Operand(kBinary32SignMask));
+ value = no_reg;
+ __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
+
+ // Shift mantissa.
+ static const int kMantissaShiftForHiWord =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaShiftForLoWord =
+ kBitsPerInt - kMantissaShiftForHiWord;
+
+ __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
+ __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
+
+ __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
+ __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
+
+ __ mov(r0, r3);
+ __ Ret();
+ }
+
+ } else {
+ // Tag integer as smi and return it.
+ __ mov(r0, Operand(value, LSL, kSmiTagSize));
+ __ Ret();
+ }
+
+ // Slow case, key and receiver still in r0 and r1.
+ __ bind(&slow);
+ __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);
+
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+
+ __ Push(r1, r0);
+
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+ return GetCode(flags);
+}
+
+
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ // ---------- S t a t e --------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+ Label slow, check_heap_number;
+
+ // Register usage.
+ Register value = r0;
+ Register key = r1;
+ Register receiver = r2;
+ // r3 mostly holds the elements array or the destination external array.
+
+ // Check that the object isn't a smi.
+ __ BranchOnSmi(receiver, &slow);
+
+ // Check that the object is a JS object. Load map into r3.
+ __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
+ __ b(le, &slow);
+
+ // Check that the receiver does not require access checks. We need
+ // to do this because this generic stub does not perform map checks.
+ __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ b(ne, &slow);
+
+ // Check that the key is a smi.
+ __ BranchOnNotSmi(key, &slow);
+
+ // Check that the elements array is the appropriate type of ExternalArray.
+ __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
+ __ cmp(r4, ip);
+ __ b(ne, &slow);
+
+ // Check that the index is in range.
+ __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index.
+ __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
+ __ cmp(r4, ip);
+ // Unsigned comparison catches both negative and too-large values.
+ __ b(hs, &slow);
+
+ // Handle both smis and HeapNumbers in the fast path. Go to the
+ // runtime for all other kinds of values.
+ // r3: external array.
+ // r4: key (integer).
+ __ BranchOnNotSmi(value, &check_heap_number);
+ __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
+ __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
+
+ // r3: base pointer of external storage.
+ // r4: key (integer).
+ // r5: value (integer).
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ strb(r5, MemOperand(r3, r4, LSL, 0));
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ strh(r5, MemOperand(r3, r4, LSL, 1));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
+ break;
+ case kExternalFloatArray:
+ // Perform int-to-float conversion and store to memory.
+ StoreIntAsFloat(masm(), r3, r4, r5, r6, r7, r9);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // Entry registers are intact, r0 holds the value which is the return value.
+ __ Ret();
+
+
+ // r3: external array.
+ // r4: index (integer).
+ __ bind(&check_heap_number);
+ __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
+ __ b(ne, &slow);
+
+ __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
+
+ // r3: base pointer of external storage.
+ // r4: key (integer).
+
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+
+
+ if (array_type == kExternalFloatArray) {
+ // vldr requires the offset to be a multiple of 4, so we cannot
+ // fold -kHeapObjectTag into it.
+ __ sub(r5, r0, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
+ __ add(r5, r3, Operand(r4, LSL, 2));
+ __ vcvt_f32_f64(s0, d0);
+ __ vstr(s0, r5, 0);
+ } else {
+ // Need to perform float-to-int conversion.
+ // Test for NaN or infinity (both give zero).
+ __ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset));
+
+ // Hoisted load. vldr requires the offset to be a multiple of 4, so we
+ // cannot fold -kHeapObjectTag into it.
+ __ sub(r5, value, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
+
+ __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs and Infinities have all-one exponents so they sign extend to -1.
+ __ cmp(r6, Operand(-1));
+ __ mov(r5, Operand(0), LeaveCC, eq);
+
+ // Not infinity or NaN: simply convert to int.
+ if (IsElementTypeSigned(array_type)) {
+ __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne);
+ } else {
+ __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne);
+ }
+ __ vmov(r5, s0, ne);
+
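+ // r5 now holds the truncated integer (zero for NaN and infinity); store
+ // it with the width that matches the element type.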
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ strb(r5, MemOperand(r3, r4, LSL, 0));
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ strh(r5, MemOperand(r3, r4, LSL, 1));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ // Entry registers are intact, r0 holds the value which is the return value.
+ __ Ret();
+ } else {
+ // VFP3 is not available, so do manual conversions.
+ __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
+ __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+
+ if (array_type == kExternalFloatArray) {
+ Label done, nan_or_infinity_or_zero;
+ static const int kMantissaInHiWordShift =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaInLoWordShift =
+ kBitsPerInt - kMantissaInHiWordShift;
+
+ // Test for the special exponent values: zeros and subnormals (all-zero
+ // exponent) as well as NaNs and infinities (all-one exponent). These
+ // cases are assembled separately at nan_or_infinity_or_zero below.
+ __ mov(r7, Operand(HeapNumber::kExponentMask));
+ __ and_(r9, r5, Operand(r7), SetCC);
+ __ b(eq, &nan_or_infinity_or_zero);
+
+ __ teq(r9, Operand(r7));
+ __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
+ __ b(eq, &nan_or_infinity_or_zero);
+
+ // Rebias exponent.
+ __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
+ __ add(r9,
+ r9,
+ Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
+
+ __ cmp(r9, Operand(kBinary32MaxExponent));
+ __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
+ __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
+ __ b(gt, &done);
+
+ __ cmp(r9, Operand(kBinary32MinExponent));
+ __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
+ __ b(lt, &done);
+
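+ // Normal case: combine the sign, the mantissa bits from both words and
+ // the rebiased exponent into a binary32 value in r5.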
+ __ and_(r7, r5, Operand(HeapNumber::kSignMask));
+ __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+ __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
+ __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
+ __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
+
+ __ bind(&done);
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
+ // Entry registers are intact, r0 holds the value which is the return
+ // value.
+ __ Ret();
+
+ __ bind(&nan_or_infinity_or_zero);
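+ // r9 already holds the binary32 exponent field (zero for zeros and
+ // subnormals, all ones for NaNs and infinities); merge in the sign and
+ // the top mantissa bits.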
+ __ and_(r7, r5, Operand(HeapNumber::kSignMask));
+ __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+ __ orr(r9, r9, r7);
+ __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
+ __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
+ __ b(&done);
+ } else {
+ bool is_signed_type = IsElementTypeSigned(array_type);
+ int meaningful_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
+ int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
+
+ Label done, sign;
+
+ // Test for all special exponent values: zeros, subnormal numbers, NaNs
+ // and infinities. All these should be converted to 0.
+ __ mov(r7, Operand(HeapNumber::kExponentMask));
+ __ and_(r9, r5, Operand(r7), SetCC);
+ __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+ __ b(eq, &done);
+
+ __ teq(r9, Operand(r7));
+ __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+ __ b(eq, &done);
+
+ // Unbias exponent.
+ __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
+ __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
+ // If exponent is negative then result is 0.
+ __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
+ __ b(mi, &done);
+
+ // If the exponent is too big then the result is the minimum value.
+ __ cmp(r9, Operand(meaningful_bits - 1));
+ __ mov(r5, Operand(min_value), LeaveCC, ge);
+ __ b(ge, &done);
+
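+ // Extract the sign, restore the implicit leading mantissa bit and shift
+ // the mantissa into an integer according to the unbiased exponent.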
+ __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
+ __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+ __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
+
+ __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+ __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
+ __ b(pl, &sign);
+
+ __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
+ __ mov(r5, Operand(r5, LSL, r9));
+ __ rsb(r9, r9, Operand(meaningful_bits));
+ __ orr(r5, r5, Operand(r6, LSR, r9));
+
+ __ bind(&sign);
+ __ teq(r7, Operand(0, RelocInfo::NONE));
+ __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+
+ __ bind(&done);
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ strb(r5, MemOperand(r3, r4, LSL, 0));
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ strh(r5, MemOperand(r3, r4, LSL, 1));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ str(r5, MemOperand(r3, r4, LSL, 2));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+
+ // Slow case: call runtime.
+ __ bind(&slow);
+
+ // Entry registers are intact.
+ // ---------- S t a t e --------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+
+ // Push receiver, key and value for runtime call.
+ __ Push(r2, r1, r0);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+
+ return GetCode(flags);
+}
+
+
#undef __
} } // namespace v8::internal
#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
-#include "stub-cache.h"
namespace v8 {
namespace internal {
}
-static bool CallWithoutIC(Handle<JSFunction> target, int arity) {
+static bool CanCallWithoutIC(Handle<JSFunction> target, int arity) {
SharedFunctionInfo* info = target->shared();
- if (target->NeedsArgumentsAdaption()) {
- // If the number of formal parameters of the target function
- // does not match the number of arguments we're passing, we
- // don't want to deal with it.
- return info->formal_parameter_count() == arity;
- } else {
- // If the target doesn't need arguments adaption, we can call
- // it directly, but we avoid to do so if it has a custom call
- // generator, because that is likely to generate better code.
- return !info->HasBuiltinFunctionId() ||
- !CallStubCompiler::HasCustomCallGenerator(info->builtin_function_id());
- }
+ // If the number of formal parameters of the target function does
+ // not match the number of arguments we're passing, we don't want to
+ // deal with it. Otherwise, we can call it directly.
+ return !target->NeedsArgumentsAdaption() ||
+ info->formal_parameter_count() == arity;
}
type = Handle<Map>(holder()->map());
} else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
target_ = Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
- return CallWithoutIC(target_, arguments()->length());
+ return CanCallWithoutIC(target_, arguments()->length());
} else {
return false;
}
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
- if (!Heap::InNewSpace(*candidate)
- && CallWithoutIC(candidate, arguments()->length())) {
+ if (!Heap::InNewSpace(*candidate) &&
+ CanCallWithoutIC(candidate, arguments()->length())) {
target_ = candidate;
return true;
}
int start_position,
int end_position,
bool is_expression,
- bool contains_loops)
+ bool contains_loops,
+ bool strict_mode)
: name_(name),
scope_(scope),
body_(body),
end_position_(end_position),
is_expression_(is_expression),
contains_loops_(contains_loops),
+ strict_mode_(strict_mode),
function_token_position_(RelocInfo::kNoPosition),
inferred_name_(Heap::empty_string()),
try_full_codegen_(false),
int end_position() const { return end_position_; }
bool is_expression() const { return is_expression_; }
bool contains_loops() const { return contains_loops_; }
+ bool strict_mode() const { return strict_mode_; }
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
int end_position_;
bool is_expression_;
bool contains_loops_;
+ bool strict_mode_;
int function_token_position_;
Handle<String> inferred_name_;
bool try_full_codegen_;
}
-static void Generate_KeyedLoadIC_ExternalByteArray(MacroAssembler* masm) {
- KeyedLoadIC::GenerateExternalArray(masm, kExternalByteArray);
-}
-
-
-static void Generate_KeyedLoadIC_ExternalUnsignedByteArray(
- MacroAssembler* masm) {
- KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedByteArray);
-}
-
-
-static void Generate_KeyedLoadIC_ExternalShortArray(MacroAssembler* masm) {
- KeyedLoadIC::GenerateExternalArray(masm, kExternalShortArray);
-}
-
-
-static void Generate_KeyedLoadIC_ExternalUnsignedShortArray(
- MacroAssembler* masm) {
- KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedShortArray);
-}
-
-
-static void Generate_KeyedLoadIC_ExternalIntArray(MacroAssembler* masm) {
- KeyedLoadIC::GenerateExternalArray(masm, kExternalIntArray);
-}
-
-
-static void Generate_KeyedLoadIC_ExternalUnsignedIntArray(
- MacroAssembler* masm) {
- KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedIntArray);
-}
-
-
-static void Generate_KeyedLoadIC_ExternalFloatArray(MacroAssembler* masm) {
- KeyedLoadIC::GenerateExternalArray(masm, kExternalFloatArray);
-}
-
-
static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
KeyedLoadIC::GeneratePreMonomorphic(masm);
}
}
-static void Generate_KeyedStoreIC_ExternalByteArray(MacroAssembler* masm) {
- KeyedStoreIC::GenerateExternalArray(masm, kExternalByteArray);
-}
-
-
-static void Generate_KeyedStoreIC_ExternalUnsignedByteArray(
- MacroAssembler* masm) {
- KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedByteArray);
-}
-
-
-static void Generate_KeyedStoreIC_ExternalShortArray(MacroAssembler* masm) {
- KeyedStoreIC::GenerateExternalArray(masm, kExternalShortArray);
-}
-
-
-static void Generate_KeyedStoreIC_ExternalUnsignedShortArray(
- MacroAssembler* masm) {
- KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedShortArray);
-}
-
-
-static void Generate_KeyedStoreIC_ExternalIntArray(MacroAssembler* masm) {
- KeyedStoreIC::GenerateExternalArray(masm, kExternalIntArray);
-}
-
-
-static void Generate_KeyedStoreIC_ExternalUnsignedIntArray(
- MacroAssembler* masm) {
- KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedIntArray);
-}
-
-
-static void Generate_KeyedStoreIC_ExternalFloatArray(MacroAssembler* masm) {
- KeyedStoreIC::GenerateExternalArray(masm, kExternalFloatArray);
-}
-
-
static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
KeyedStoreIC::GenerateMiss(masm);
}
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_ExternalByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_ExternalUnsignedByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_ExternalShortArray, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_ExternalUnsignedShortArray, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_ExternalIntArray, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_ExternalUnsignedIntArray, KEYED_LOAD_IC, MEGAMORPHIC) \
- V(KeyedLoadIC_ExternalFloatArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC) \
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC) \
- V(KeyedStoreIC_ExternalByteArray, KEYED_STORE_IC, MEGAMORPHIC) \
- V(KeyedStoreIC_ExternalUnsignedByteArray, KEYED_STORE_IC, MEGAMORPHIC) \
- V(KeyedStoreIC_ExternalShortArray, KEYED_STORE_IC, MEGAMORPHIC) \
- V(KeyedStoreIC_ExternalUnsignedShortArray, KEYED_STORE_IC, MEGAMORPHIC) \
- V(KeyedStoreIC_ExternalIntArray, KEYED_STORE_IC, MEGAMORPHIC) \
- V(KeyedStoreIC_ExternalUnsignedIntArray, KEYED_STORE_IC, MEGAMORPHIC) \
- V(KeyedStoreIC_ExternalFloatArray, KEYED_STORE_IC, MEGAMORPHIC) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED) \
class FastNewContextStub : public CodeStub {
public:
- static const int kMaximumSlots = 64;
+ // We want no more than 64 different stubs.
+ static const int kMaximumSlots = Context::MIN_CONTEXT_SLOTS + 63;
explicit FastNewContextStub(int slots) : slots_(slots) {
- ASSERT(slots_ > 0 && slots <= kMaximumSlots);
+ ASSERT(slots_ >= Context::MIN_CONTEXT_SLOTS && slots_ <= kMaximumSlots);
}
void Generate(MacroAssembler* masm);
private:
- int slots_;
+ virtual const char* GetName() { return "FastNewContextStub"; }
+ virtual Major MajorKey() { return FastNewContext; }
+ virtual int MinorKey() { return slots_; }
- const char* GetName() { return "FastNewContextStub"; }
- Major MajorKey() { return FastNewContext; }
- int MinorKey() { return slots_; }
+ int slots_;
};
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // May be an empty handle.
#ifdef ENABLE_GDB_JIT_INTERFACE
- if (!code.is_null()) {
+ if (FLAG_gdbjit && !code.is_null()) {
GDBJITLineInfo* lineinfo =
masm.positions_recorder()->DetachGDBJITLineInfo();
#include "full-codegen.h"
#include "gdb-jit.h"
#include "hydrogen.h"
-#include "lithium-allocator.h"
+#include "lithium.h"
#include "liveedit.h"
#include "oprofile-agent.h"
#include "parser.h"
{
'variables': {
- 'icu_src_dir%': '',
+ # TODO(cira): Find out how to pass this value for arbitrary embedder.
+ # Chromium sets it in common.gypi and does force include of that file for
+ # all sub projects.
+ 'icu_src_dir%': '../../../../third_party/icu',
},
'targets': [
{
// parser.cc
DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
+DEFINE_bool(strict_mode, true, "allow strict mode directives")
// rewriter.cc
DEFINE_bool(optimize_ast, true, "optimize the ast")
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // may be an empty handle.
#ifdef ENABLE_GDB_JIT_INTERFACE
- if (!code.is_null()) {
+ if (FLAG_gdbjit && !code.is_null()) {
GDBJITLineInfo* lineinfo =
masm.positions_recorder()->DetachGDBJITLineInfo();
V(zero_symbol, "0") \
V(global_eval_symbol, "GlobalEval") \
V(identity_hash_symbol, "v8::IdentityHash") \
- V(closure_symbol, "(closure)")
+ V(closure_symbol, "(closure)") \
+ V(use_strict, "use strict") \
+ V(KeyedLoadExternalArray_symbol, "KeyedLoadExternalArray") \
+ V(KeyedStoreExternalArray_symbol, "KeyedStoreExternalArray")
// Forward declarations.
}
-void HBranch::PrintDataTo(StringStream* stream) const {
- int first_id = FirstSuccessor()->block_id();
- int second_id = SecondSuccessor()->block_id();
- stream->Add("on ");
- value()->PrintNameTo(stream);
- stream->Add(" (B%d, B%d)", first_id, second_id);
-}
-
-
-void HCompareMapAndBranch::PrintDataTo(StringStream* stream) const {
- stream->Add("on ");
- value()->PrintNameTo(stream);
- stream->Add(" (%p)", *map());
-}
-
-
-void HGoto::PrintDataTo(StringStream* stream) const {
- stream->Add("B%d", FirstSuccessor()->block_id());
+void HControlInstruction::PrintDataTo(StringStream* stream) const {
+ if (FirstSuccessor() != NULL) {
+ int first_id = FirstSuccessor()->block_id();
+ if (SecondSuccessor() == NULL) {
+ stream->Add(" B%d", first_id);
+ } else {
+ int second_id = SecondSuccessor()->block_id();
+ stream->Add(" goto (B%d, B%d)", first_id, second_id);
+ }
+ }
}
-void HReturn::PrintDataTo(StringStream* stream) const {
+void HUnaryControlInstruction::PrintDataTo(StringStream* stream) const {
value()->PrintNameTo(stream);
+ HControlInstruction::PrintDataTo(stream);
}
-void HThrow::PrintDataTo(StringStream* stream) const {
+void HCompareMap::PrintDataTo(StringStream* stream) const {
value()->PrintNameTo(stream);
+ stream->Add(" (%p)", *map());
+ HControlInstruction::PrintDataTo(stream);
}
}
+HType HBitwiseBinaryOperation::CalculateInferredType() const {
+ return HType::TaggedNumber();
+}
+
+
HType HArithmeticBinaryOperation::CalculateInferredType() const {
return HType::TaggedNumber();
}
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// HStoreNamed
// HStoreNamedField
// HStoreNamedGeneric
+// HStringCharCodeAt
// HBlockEntry
// HCall
// HCallConstantFunction
// HDeoptimize
// HGoto
// HUnaryControlInstruction
-// HBranch
-// HCompareMapAndBranch
+// HCompareMap
// HReturn
+// HTest
// HThrow
// HEnterInlined
// HFunctionLiteral
// HLoadNamedGeneric
// HLoadFunctionPrototype
// HPushArgument
+// HStringLength
// HTypeof
// HUnaryMathOperation
// HUnaryPredicate
V(BitXor) \
V(BlockEntry) \
V(BoundsCheck) \
- V(Branch) \
V(CallConstantFunction) \
V(CallFunction) \
V(CallGlobal) \
V(CheckSmi) \
V(Compare) \
V(CompareJSObjectEq) \
- V(CompareMapAndBranch) \
+ V(CompareMap) \
V(Constant) \
V(DeleteProperty) \
V(Deoptimize) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
+ V(StringCharCodeAt) \
+ V(StringLength) \
V(Sub) \
+ V(Test) \
V(Throw) \
V(Typeof) \
V(TypeofIs) \
class HControlInstruction: public HInstruction {
public:
- virtual HBasicBlock* FirstSuccessor() const { return NULL; }
- virtual HBasicBlock* SecondSuccessor() const { return NULL; }
+ HControlInstruction(HBasicBlock* first, HBasicBlock* second)
+ : first_successor_(first), second_successor_(second) {
+ }
+
+ HBasicBlock* FirstSuccessor() const { return first_successor_; }
+ HBasicBlock* SecondSuccessor() const { return second_successor_; }
+
+ virtual void PrintDataTo(StringStream* stream) const;
DECLARE_INSTRUCTION(ControlInstruction)
+
+ private:
+ HBasicBlock* first_successor_;
+ HBasicBlock* second_successor_;
};
class HDeoptimize: public HControlInstruction {
public:
+ HDeoptimize() : HControlInstruction(NULL, NULL) { }
+
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
};
class HGoto: public HControlInstruction {
public:
- explicit HGoto(HBasicBlock* destination)
- : destination_(destination),
- include_stack_check_(false) {}
+ explicit HGoto(HBasicBlock* target)
+ : HControlInstruction(target, NULL), include_stack_check_(false) {
+ }
- virtual HBasicBlock* FirstSuccessor() const { return destination_; }
void set_include_stack_check(bool include_stack_check) {
include_stack_check_ = include_stack_check;
}
bool include_stack_check() const { return include_stack_check_; }
- virtual void PrintDataTo(StringStream* stream) const;
-
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
private:
- HBasicBlock* destination_;
bool include_stack_check_;
};
class HUnaryControlInstruction: public HControlInstruction {
public:
- explicit HUnaryControlInstruction(HValue* value) {
+ explicit HUnaryControlInstruction(HValue* value,
+ HBasicBlock* true_target,
+ HBasicBlock* false_target)
+ : HControlInstruction(true_target, false_target) {
SetOperandAt(0, value);
}
return Representation::Tagged();
}
+ virtual void PrintDataTo(StringStream* stream) const;
+
HValue* value() const { return OperandAt(0); }
virtual int OperandCount() const { return 1; }
virtual HValue* OperandAt(int index) const { return operands_[index]; }
};
-class HBranch: public HUnaryControlInstruction {
+class HTest: public HUnaryControlInstruction {
public:
- HBranch(HBasicBlock* true_destination,
- HBasicBlock* false_destination,
- HValue* boolean_value)
- : HUnaryControlInstruction(boolean_value),
- true_destination_(true_destination),
- false_destination_(false_destination) {
- ASSERT(true_destination != NULL && false_destination != NULL);
+ HTest(HValue* value, HBasicBlock* true_target, HBasicBlock* false_target)
+ : HUnaryControlInstruction(value, true_target, false_target) {
+ ASSERT(true_target != NULL && false_target != NULL);
}
virtual Representation RequiredInputRepresentation(int index) const {
return Representation::None();
}
- virtual HBasicBlock* FirstSuccessor() const { return true_destination_; }
- virtual HBasicBlock* SecondSuccessor() const { return false_destination_; }
-
- virtual void PrintDataTo(StringStream* stream) const;
-
- DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
-
- private:
- HBasicBlock* true_destination_;
- HBasicBlock* false_destination_;
+ DECLARE_CONCRETE_INSTRUCTION(Test, "test")
};
-class HCompareMapAndBranch: public HUnaryControlInstruction {
+class HCompareMap: public HUnaryControlInstruction {
public:
- HCompareMapAndBranch(HValue* result,
- Handle<Map> map,
- HBasicBlock* true_destination,
- HBasicBlock* false_destination)
- : HUnaryControlInstruction(result),
- map_(map),
- true_destination_(true_destination),
- false_destination_(false_destination) {
- ASSERT(true_destination != NULL);
- ASSERT(false_destination != NULL);
+ HCompareMap(HValue* value,
+ Handle<Map> map,
+ HBasicBlock* true_target,
+ HBasicBlock* false_target)
+ : HUnaryControlInstruction(value, true_target, false_target),
+ map_(map) {
+ ASSERT(true_target != NULL);
+ ASSERT(false_target != NULL);
ASSERT(!map.is_null());
}
- virtual HBasicBlock* FirstSuccessor() const { return true_destination_; }
- virtual HBasicBlock* SecondSuccessor() const { return false_destination_; }
-
- HBasicBlock* true_destination() const { return true_destination_; }
- HBasicBlock* false_destination() const { return false_destination_; }
-
virtual void PrintDataTo(StringStream* stream) const;
Handle<Map> map() const { return map_; }
- DECLARE_CONCRETE_INSTRUCTION(CompareMapAndBranch, "compare_map_and_branch")
+ DECLARE_CONCRETE_INSTRUCTION(CompareMap, "compare_map")
private:
Handle<Map> map_;
- HBasicBlock* true_destination_;
- HBasicBlock* false_destination_;
};
class HReturn: public HUnaryControlInstruction {
public:
- explicit HReturn(HValue* result) : HUnaryControlInstruction(result) { }
-
- virtual void PrintDataTo(StringStream* stream) const;
+ explicit HReturn(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) {
+ }
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
};
class HThrow: public HUnaryControlInstruction {
public:
- explicit HThrow(HValue* value) : HUnaryControlInstruction(value) { }
-
- virtual void PrintDataTo(StringStream* stream) const;
+ explicit HThrow(HValue* value)
+ : HUnaryControlInstruction(value, NULL, NULL) { }
DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
};
ASSERT(first <= last);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
+ if ((FIRST_STRING_TYPE < first && last <= LAST_STRING_TYPE) ||
+ (FIRST_STRING_TYPE <= first && last < LAST_STRING_TYPE)) {
+ // A particular string instance type can change because of GC or
+ // externalization, but the value still remains a string.
+ SetFlag(kDependsOnMaps);
+ }
}
virtual bool IsCheckInstruction() const { return true; }
public:
HBitwiseBinaryOperation(HValue* left, HValue* right)
: HBinaryOperation(left, right) {
- // Default to truncating, Integer32, UseGVN.
- set_representation(Representation::Integer32());
- SetFlag(kTruncatingToInt32);
- SetFlag(kUseGVN);
+ set_representation(Representation::Tagged());
+ SetFlag(kFlexibleRepresentation);
+ SetFlagMask(AllSideEffects());
}
virtual Representation RequiredInputRepresentation(int index) const {
- return Representation::Integer32();
+ return representation();
+ }
+
+ virtual void RepresentationChanged(Representation to) {
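+ // Once inference settles on Integer32 the operation loses its generic
+ // side effects and becomes a truncating, GVN-able int32 operation.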
+ if (!to.IsTagged()) {
+ ASSERT(to.IsInteger32());
+ ClearFlagMask(AllSideEffects());
+ SetFlag(kTruncatingToInt32);
+ SetFlag(kUseGVN);
+ }
}
+ HType CalculateInferredType() const;
+
DECLARE_INSTRUCTION(BitwiseBinaryOperation)
};
};
+class HStringCharCodeAt: public HBinaryOperation {
+ public:
+ HStringCharCodeAt(HValue* string, HValue* index)
+ : HBinaryOperation(string, index) {
+ set_representation(Representation::Integer32());
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ // The index is supposed to be Integer32.
+ return (index == 1) ? Representation::Integer32()
+ : Representation::Tagged();
+ }
+
+ virtual bool DataEquals(HValue* other) const { return true; }
+
+ HValue* string() const { return OperandAt(0); }
+ HValue* index() const { return OperandAt(1); }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string_char_code_at")
+
+ protected:
+ virtual Range* InferRange() {
+ return new Range(0, String::kMaxUC16CharCode);
+ }
+};
+
+
+class HStringLength: public HUnaryOperation {
+ public:
+ explicit HStringLength(HValue* string) : HUnaryOperation(string) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) const {
+ return Representation::Tagged();
+ }
+
+ virtual HType CalculateInferredType() const {
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ return HType::Smi();
+ }
+
+ virtual bool DataEquals(HValue* other) const { return true; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringLength, "string_length")
+
+ protected:
+ virtual Range* InferRange() {
+ return new Range(0, String::kMaxLength);
+ }
+};
+
+
class HMaterializedLiteral: public HInstruction {
public:
HMaterializedLiteral(int index, int depth)
#include "lithium-allocator.h"
#include "parser.h"
#include "scopes.h"
+#include "stub-cache.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/lithium-codegen-ia32.h"
void HSubgraph::AppendOptional(HSubgraph* graph,
bool on_true_branch,
- HValue* boolean_value) {
+ HValue* value) {
ASSERT(HasExit() && graph->HasExit());
HBasicBlock* other_block = graph_->CreateBasicBlock();
HBasicBlock* join_block = graph_->CreateBasicBlock();
- HBasicBlock* true_branch = other_block;
- HBasicBlock* false_branch = graph->entry_block();
- if (on_true_branch) {
- true_branch = graph->entry_block();
- false_branch = other_block;
- }
-
- exit_block_->Finish(new HBranch(true_branch, false_branch, boolean_value));
+ HTest* test = on_true_branch
+ ? new HTest(value, graph->entry_block(), other_block)
+ : new HTest(value, other_block, graph->entry_block());
+ exit_block_->Finish(test);
other_block->Goto(join_block);
graph->exit_block()->Goto(join_block);
exit_block_ = join_block;
private:
void TraceRange(const char* msg, ...);
void Analyze(HBasicBlock* block);
- void InferControlFlowRange(HBranch* branch, HBasicBlock* dest);
+ void InferControlFlowRange(HTest* test, HBasicBlock* dest);
void InferControlFlowRange(Token::Value op, HValue* value, HValue* other);
void InferPhiRange(HPhi* phi);
void InferRange(HValue* value);
// Infer range based on control flow.
if (block->predecessors()->length() == 1) {
HBasicBlock* pred = block->predecessors()->first();
- if (pred->end()->IsBranch()) {
- InferControlFlowRange(HBranch::cast(pred->end()), block);
+ if (pred->end()->IsTest()) {
+ InferControlFlowRange(HTest::cast(pred->end()), block);
}
}
}
-void HRangeAnalysis::InferControlFlowRange(HBranch* branch, HBasicBlock* dest) {
- ASSERT(branch->FirstSuccessor() == dest || branch->SecondSuccessor() == dest);
- ASSERT(branch->FirstSuccessor() != dest || branch->SecondSuccessor() != dest);
-
- if (branch->value()->IsCompare()) {
- HCompare* compare = HCompare::cast(branch->value());
+void HRangeAnalysis::InferControlFlowRange(HTest* test, HBasicBlock* dest) {
+ ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
+ if (test->value()->IsCompare()) {
+ HCompare* compare = HCompare::cast(test->value());
Token::Value op = compare->token();
- if (branch->SecondSuccessor() == dest) {
+ if (test->SecondSuccessor() == dest) {
op = Token::NegateCompareOp(op);
}
Token::Value inverted_op = Token::InvertCompareOp(op);
HGraphBuilder* builder = owner();
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
- HBranch* branch = new HBranch(empty_true, empty_false, value);
- builder->CurrentBlock()->Finish(branch);
+ HTest* test = new HTest(value, empty_true, empty_false);
+ builder->CurrentBlock()->Finish(test);
HValue* const no_return_value = NULL;
HBasicBlock* true_target = if_true();
prev_graph->exit_block()->Finish(new HGoto(subgraph->entry_block()));
} else {
HBasicBlock* empty = graph()->CreateBasicBlock();
- prev_graph->exit_block()->Finish(new HBranch(empty,
- subgraph->entry_block(),
- prev_compare_inst));
+ prev_graph->exit_block()->Finish(new HTest(prev_compare_inst,
+ empty,
+ subgraph->entry_block()));
}
// Build instructions for current subgraph.
if (prev_graph != current_subgraph_) {
last_false_block = graph()->CreateBasicBlock();
HBasicBlock* empty = graph()->CreateBasicBlock();
- prev_graph->exit_block()->Finish(new HBranch(empty,
- last_false_block,
- prev_compare_inst));
+ prev_graph->exit_block()->Finish(new HTest(prev_compare_inst,
+ empty,
+ last_false_block));
}
// If we have a non-smi compare clause, we deoptimize after trying
HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
HBasicBlock* osr_entry = graph()->CreateBasicBlock();
HValue* true_value = graph()->GetConstantTrue();
- HBranch* branch = new HBranch(non_osr_entry, osr_entry, true_value);
- exit_block()->Finish(branch);
+ HTest* test = new HTest(true_value, non_osr_entry, osr_entry);
+ exit_block()->Finish(test);
HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
non_osr_entry->Goto(loop_predecessor);
(i == (maps->length() - 1))
? subgraphs->last()
: map_compare_subgraphs.last();
- current_subgraph_->exit_block()->Finish(
- new HCompareMapAndBranch(receiver,
- maps->at(i),
- subgraphs->at(i)->entry_block(),
- else_subgraph->entry_block()));
+ HCompareMap* compare = new HCompareMap(receiver,
+ maps->at(i),
+ subgraphs->at(i)->entry_block(),
+ else_subgraph->entry_block());
+ current_subgraph_->exit_block()->Finish(compare);
map_compare_subgraphs.Add(subgraph);
}
AddInstruction(new HCheckNonSmi(receiver));
HSubgraph* else_subgraph =
(maps->length() == 1) ? subgraphs->at(1) : map_compare_subgraphs.last();
- current_subgraph_->exit_block()->Finish(
- new HCompareMapAndBranch(receiver,
- Handle<Map>(maps->first()),
- subgraphs->first()->entry_block(),
- else_subgraph->entry_block()));
+ HCompareMap* compare = new HCompareMap(receiver,
+ Handle<Map>(maps->first()),
+ subgraphs->first()->entry_block(),
+ else_subgraph->entry_block());
+ current_subgraph_->exit_block()->Finish(compare);
// Join all the call subgraphs in a new basic block and make
// this basic block the current basic block.
// TODO(3168478): refactor to avoid this.
HBasicBlock* empty_true = graph()->CreateBasicBlock();
HBasicBlock* empty_false = graph()->CreateBasicBlock();
- HBranch* branch =
- new HBranch(empty_true, empty_false, return_value);
- body->exit_block()->Finish(branch);
+ HTest* test = new HTest(return_value, empty_true, empty_false);
+ body->exit_block()->Finish(test);
HValue* const no_return_value = NULL;
empty_true->AddLeaveInlined(no_return_value, test_context->if_true());
}
-bool HGraphBuilder::TryMathFunctionInline(Call* expr) {
+bool HGraphBuilder::TryInlineBuiltinFunction(Call* expr,
+ HValue* receiver,
+ Handle<Map> receiver_map,
+ CheckType check_type) {
+ ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
// Try to inline calls like Math.* as operations in the calling function.
- if (!expr->target()->shared()->IsBuiltinMathFunction()) return false;
+ if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
switch (id) {
+ case kStringCharCodeAt:
+ if (argument_count == 2 && check_type == STRING_CHECK) {
+ HValue* index = Pop();
+ HValue* string = Pop();
+ ASSERT(!expr->holder().is_null());
+ AddInstruction(new HCheckPrototypeMaps(
+ oracle()->GetPrototypeForPrimitiveCheck(STRING_CHECK),
+ expr->holder()));
+ HStringCharCodeAt* result = BuildStringCharCodeAt(string, index);
+ ast_context()->ReturnInstruction(result, expr->id());
+ return true;
+ }
+ break;
case kMathRound:
case kMathFloor:
case kMathAbs:
case kMathLog:
case kMathSin:
case kMathCos:
- if (argument_count == 2) {
+ if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
+ AddCheckConstantFunction(expr, receiver, receiver_map, true);
HValue* argument = Pop();
Drop(1); // Receiver.
HUnaryMathOperation* op = new HUnaryMathOperation(argument, id);
}
break;
case kMathPow:
- if (argument_count == 3) {
+ if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
+ AddCheckConstantFunction(expr, receiver, receiver_map, true);
HValue* right = Pop();
HValue* left = Pop();
Pop(); // Pop receiver.
double exponent = HConstant::cast(right)->DoubleValue();
if (exponent == 0.5) {
result = new HUnaryMathOperation(left, kMathPowHalf);
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
} else if (exponent == -0.5) {
HConstant* double_one =
new HConstant(Handle<Object>(Smi::FromInt(1)),
// an environment simulation here.
ASSERT(!square_root->HasSideEffects());
result = new HDiv(double_one, square_root);
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
} else if (exponent == 2.0) {
result = new HMul(left, left);
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
}
} else if (right->IsConstant() &&
- HConstant::cast(right)->HasInteger32Value() &&
- HConstant::cast(right)->Integer32Value() == 2) {
+ HConstant::cast(right)->HasInteger32Value() &&
+ HConstant::cast(right)->Integer32Value() == 2) {
result = new HMul(left, left);
- ast_context()->ReturnInstruction(result, expr->id());
- return true;
}
- result = new HPower(left, right);
+ if (result == NULL) {
+ result = new HPower(left, right);
+ }
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
}
+static bool HasCustomCallGenerator(Handle<JSFunction> function) {
+ SharedFunctionInfo* info = function->shared();
+ return info->HasBuiltinFunctionId() &&
+ CallStubCompiler::HasCustomCallGenerator(info->builtin_function_id());
+}
+
+
void HGraphBuilder::VisitCall(Call* expr) {
Expression* callee = expr->expression();
int argument_count = expr->arguments()->length() + 1; // Plus receiver.
expr->RecordTypeFeedback(oracle());
ZoneMapList* types = expr->GetReceiverTypes();
- if (expr->IsMonomorphic() && expr->check_type() == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr, receiver, types->first(), true);
-
- if (TryMathFunctionInline(expr)) {
- return;
- } else if (TryInline(expr)) {
- if (subgraph()->HasExit()) {
- HValue* return_value = Pop();
- // If we inlined a function in a test context then we need to emit
- // a simulate here to shadow the ones at the end of the
- // predecessor blocks. Those environments contain the return
- // value on top and do not correspond to any actual state of the
- // unoptimized code.
- if (ast_context()->IsEffect()) AddSimulate(expr->id());
- ast_context()->ReturnValue(return_value);
- }
+ if (expr->IsMonomorphic()) {
+ Handle<Map> receiver_map =
+ (types == NULL) ? Handle<Map>::null() : types->first();
+ if (TryInlineBuiltinFunction(expr,
+ receiver,
+ receiver_map,
+ expr->check_type())) {
return;
- } else {
- // Check for bailout, as the TryInline call in the if condition above
- // might return false due to bailout during hydrogen processing.
- CHECK_BAILOUT;
- call = new HCallConstantFunction(expr->target(), argument_count);
}
+ if (HasCustomCallGenerator(expr->target()) ||
+ expr->check_type() != RECEIVER_MAP_CHECK) {
+ // When the target has a custom call IC generator, use the IC,
+ // because it is likely to generate better code. Also use the
+ // IC when a primitive receiver check is required.
+ call = new HCallNamed(name, argument_count);
+ } else {
+ AddCheckConstantFunction(expr, receiver, receiver_map, true);
+
+ if (TryInline(expr)) {
+ if (subgraph()->HasExit()) {
+ HValue* return_value = Pop();
+ // If we inlined a function in a test context then we need to emit
+ // a simulate here to shadow the ones at the end of the
+ // predecessor blocks. Those environments contain the return
+ // value on top and do not correspond to any actual state of the
+ // unoptimized code.
+ if (ast_context()->IsEffect()) AddSimulate(expr->id());
+ ast_context()->ReturnValue(return_value);
+ }
+ return;
+ } else {
+ // Check for bailout, as the TryInline call in the if condition above
+ // might return false due to bailout during hydrogen processing.
+ CHECK_BAILOUT;
+ call = new HCallConstantFunction(expr->target(), argument_count);
+ }
+ }
} else if (types != NULL && types->length() > 1) {
ASSERT(expr->check_type() == RECEIVER_MAP_CHECK);
HandlePolymorphicCallNamed(expr, receiver, types, name);
}
+HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* string,
+ HValue* index) {
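+ // Guard the raw character load: the receiver must be a (non-smi) string
+ // and the index must be below the string length.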
+ AddInstruction(new HCheckNonSmi(string));
+ AddInstruction(new HCheckInstanceType(
+ string, FIRST_STRING_TYPE, LAST_STRING_TYPE));
+ HStringLength* length = new HStringLength(string);
+ AddInstruction(length);
+ AddInstruction(new HBoundsCheck(index, length));
+ return new HStringCharCodeAt(string, index);
+}
+
+
HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
HValue* left,
HValue* right) {
if (FLAG_trace_representation) {
PrintF("Info: %s/%s\n", info.ToString(), ToRepresentation(info).Mnemonic());
}
- AssumeRepresentation(instr, ToRepresentation(info));
+ Representation rep = ToRepresentation(info);
+ // We only generate either int32 or generic tagged bitwise operations.
+ if (instr->IsBitwiseBinaryOperation() && rep.IsDouble()) {
+ rep = Representation::Integer32();
+ }
+ AssumeRepresentation(instr, rep);
return instr;
}
graph_->GetMaximumValueID());
}
value->ChangeRepresentation(r);
- // The representation of the value is dictated by type feedback.
+ // The representation of the value is dictated by type feedback and
+ // will not be changed later.
value->ClearFlag(HValue::kFlexibleRepresentation);
} else if (FLAG_trace_representation) {
PrintF("No representation assumed\n");
// Fast support for charCodeAt(n).
void HGraphBuilder::GenerateStringCharCodeAt(int argument_count, int ast_id) {
- BAILOUT("inlined runtime function: StringCharCodeAt");
+ ASSERT(argument_count == 2);
+ HValue* index = Pop();
+ HValue* string = Pop();
+ HStringCharCodeAt* result = BuildStringCharCodeAt(string, index);
+ ast_context()->ReturnInstruction(result, ast_id);
}
bool TryArgumentsAccess(Property* expr);
bool TryCallApply(Call* expr);
bool TryInline(Call* expr);
- bool TryMathFunctionInline(Call* expr);
+ bool TryInlineBuiltinFunction(Call* expr,
+ HValue* receiver,
+ Handle<Map> receiver_map,
+ CheckType check_type);
void TraceInline(Handle<JSFunction> target, bool result);
void HandleGlobalVariableAssignment(Variable* var,
ZoneMapList* types,
Handle<String> name);
+ HStringCharCodeAt* BuildStringCharCodeAt(HValue* string,
+ HValue* index);
HInstruction* BuildBinaryOperation(BinaryOperation* expr,
HValue* left,
HValue* right);
if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
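+ // The relocated word is embedded in generated code, so flush the
+ // instruction cache for the patched bytes.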
+ CPU::FlushICache(p, sizeof(uint32_t));
} else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
// Special handling of js_return when a break point is set (call
// instruction has been inserted).
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
+ CPU::FlushICache(p, sizeof(uint32_t));
} else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
// Special handling of a debug break slot when a break point is set (call
// instruction has been inserted).
int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
*p -= delta; // Relocate entry.
+ CPU::FlushICache(p, sizeof(uint32_t));
} else if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p += delta; // Relocate entry.
+ CPU::FlushICache(p, sizeof(uint32_t));
}
}
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
+ CPU::FlushICache(pc_, sizeof(Address));
}
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
+ CPU::FlushICache(pc_, sizeof(Address));
}
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitPointer(target_object_address());
+ CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
+ CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitPointer(target_object_address());
+ CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
StaticVisitor::VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
+ CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
+ __ AllocateInNewSpace((slots_ * kPointerSize) + FixedArray::kHeaderSize,
eax, ebx, ecx, &gc, TAG_OBJECT);
// Get the function from the stack.
// Setup the object header.
__ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
__ mov(FieldOperand(eax, Context::kLengthOffset),
- Immediate(Smi::FromInt(length)));
+ Immediate(Smi::FromInt(slots_)));
// Setup the fixed slots.
__ Set(ebx, Immediate(0)); // Set to NULL.
// Initialize the rest of the slots to undefined.
__ mov(ebx, Factory::undefined_value());
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) {
__ mov(Operand(eax, Context::SlotOffset(i)), ebx);
}
frame_->AllocateStackSlots();
// Allocate the local context if needed.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = scope()->num_heap_slots();
if (heap_slots > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = scope()->num_heap_slots();
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in edi.
}
-void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- eax : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, failed_allocation;
-
- // Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
-
- // Check that the key is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
-
- // Get the map of the receiver.
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
- __ j(not_zero, &slow, not_taken);
-
- __ CmpInstanceType(ecx, JS_OBJECT_TYPE);
- __ j(not_equal, &slow, not_taken);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- Handle<Map> map(Heap::MapForExternalArrayType(array_type));
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(map));
- __ j(not_equal, &slow, not_taken);
-
- // eax: key, known to be a smi.
- // edx: receiver, known to be a JSObject.
- // ebx: elements object, known to be an external array.
- // Check that the index is in range.
- __ mov(ecx, eax);
- __ SmiUntag(ecx); // Untag the index.
- __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
- // ebx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- __ movsx_b(ecx, Operand(ebx, ecx, times_1, 0));
- break;
- case kExternalUnsignedByteArray:
- __ movzx_b(ecx, Operand(ebx, ecx, times_1, 0));
- break;
- case kExternalShortArray:
- __ movsx_w(ecx, Operand(ebx, ecx, times_2, 0));
- break;
- case kExternalUnsignedShortArray:
- __ movzx_w(ecx, Operand(ebx, ecx, times_2, 0));
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ mov(ecx, Operand(ebx, ecx, times_4, 0));
- break;
- case kExternalFloatArray:
- __ fld_s(Operand(ebx, ecx, times_4, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // ecx: value
- // For floating-point array type:
- // FP(0): value
-
- if (array_type == kExternalIntArray ||
- array_type == kExternalUnsignedIntArray) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- if (array_type == kExternalIntArray) {
- __ cmp(ecx, 0xC0000000);
- __ j(sign, &box_int);
- } else {
- ASSERT_EQ(array_type, kExternalUnsignedIntArray);
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- __ test(ecx, Immediate(0xC0000000));
- __ j(not_zero, &box_int);
- }
-
- __ mov(eax, ecx);
- __ SmiTag(eax);
- __ ret(0);
-
- __ bind(&box_int);
-
- // Allocate a HeapNumber for the int and perform int-to-double
- // conversion.
- if (array_type == kExternalIntArray) {
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- } else {
- ASSERT(array_type == kExternalUnsignedIntArray);
- // Need to zero-extend the value.
- // There's no fild variant for unsigned values, so zero-extend
- // to a 64-bit int manually.
- __ push(Immediate(0));
- __ push(ecx);
- __ fild_d(Operand(esp, 0));
- __ pop(ecx);
- __ pop(ecx);
- }
- // FP(0): value
- __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
- // Set the value.
- __ mov(eax, ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- } else if (array_type == kExternalFloatArray) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
- // Set the value.
- __ mov(eax, ecx);
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- } else {
- __ mov(eax, ecx);
- __ SmiTag(eax);
- __ ret(0);
- }
-
- // If we fail allocation of the HeapNumber, we still have a value on
- // top of the FPU stack. Remove it.
- __ bind(&failed_allocation);
- __ ffree();
- __ fincstp();
- // Fall through to slow case.
-
- // Slow case: Jump to runtime.
- __ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
- GenerateRuntimeGetProperty(masm);
-}
-
-
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : key
}
-void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label slow, check_heap_number;
-
- // Check that the object isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &slow);
- // Get the map from the receiver.
- __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- 1 << Map::kIsAccessCheckNeeded);
- __ j(not_zero, &slow);
- // Check that the key is a smi.
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow);
- // Get the instance type from the map of the receiver.
- __ CmpInstanceType(edi, JS_OBJECT_TYPE);
- __ j(not_equal, &slow);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- // eax: value
- // edx: receiver, a JSObject
- // ecx: key, a smi
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ CheckMap(edi, Handle<Map>(Heap::MapForExternalArrayType(array_type)),
- &slow, true);
-
- // Check that the index is in range.
- __ mov(ebx, ecx);
- __ SmiUntag(ebx);
- __ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- // ebx: untagged index
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_equal, &check_heap_number);
- // smi case
- __ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed.
- __ SmiUntag(ecx);
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // ecx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ mov(Operand(edi, ebx, times_4, 0), ecx);
- break;
- case kExternalFloatArray:
- // Need to perform int-to-float conversion.
- __ push(ecx);
- __ fild_s(Operand(esp, 0));
- __ pop(ecx);
- __ fstp_s(Operand(edi, ebx, times_4, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0); // Return the original value.
-
- __ bind(&check_heap_number);
- // eax: value
- // edx: receiver
- // ecx: key
- // edi: elements array
- // ebx: untagged index
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- __ j(not_equal, &slow);
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
- // ebx: untagged index
- // edi: base pointer of external storage
- // top of FPU stack: value
- if (array_type == kExternalFloatArray) {
- __ fstp_s(Operand(edi, ebx, times_4, 0));
- __ ret(0);
- } else {
- // Need to perform float-to-int conversion.
- // Test the top of the FP stack for NaN.
- Label is_nan;
- __ fucomi(0);
- __ j(parity_even, &is_nan);
-
- if (array_type != kExternalUnsignedIntArray) {
- __ push(ecx); // Make room on stack
- __ fistp_s(Operand(esp, 0));
- __ pop(ecx);
- } else {
- // fistp stores values as signed integers.
- // To represent the entire range, we need to store as a 64-bit
- // int and discard the high 32 bits.
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
- __ fistp_d(Operand(esp, 0));
- __ pop(ecx);
- __ add(Operand(esp), Immediate(kPointerSize));
- }
- // ecx: untagged integer value
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray: {
- // We also need to explicitly check for +/-Infinity. These are
- // converted to MIN_INT, but we need to be careful not to
- // confuse with legal uses of MIN_INT.
- Label not_infinity;
- // This test would apparently detect both NaN and Infinity,
- // but we've already checked for NaN using the FPU hardware
- // above.
- __ mov_w(edx, FieldOperand(eax, HeapNumber::kValueOffset + 6));
- __ and_(edx, 0x7FF0);
- __ cmp(edx, 0x7FF0);
- __ j(not_equal, &not_infinity);
- __ mov(ecx, 0);
- __ bind(&not_infinity);
- __ mov(Operand(edi, ebx, times_4, 0), ecx);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0); // Return original value.
-
- __ bind(&is_nan);
- __ ffree();
- __ fincstp();
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ mov_b(Operand(edi, ebx, times_1, 0), 0);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ Set(ecx, Immediate(0));
- __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ mov(Operand(edi, ebx, times_4, 0), Immediate(0));
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0); // Return the original value.
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
- GenerateRuntimeSetProperty(masm);
-}
-
-
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
NearLabel done;
Condition cc = TokenToCondition(instr->op(), instr->is_double());
- __ mov(ToRegister(result), Handle<Object>(Heap::true_value()));
+ __ mov(ToRegister(result), Factory::true_value());
__ j(cc, &done);
__ bind(&unordered);
- __ mov(ToRegister(result), Handle<Object>(Heap::false_value()));
+ __ mov(ToRegister(result), Factory::false_value());
__ bind(&done);
}
Register result = ToRegister(instr->result());
__ cmp(left, Operand(right));
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
NearLabel done;
__ j(equal, &done);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ bind(&done);
}
__ cmp(reg, Factory::null_value());
if (instr->is_strict()) {
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
NearLabel done;
__ j(equal, &done);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ bind(&done);
} else {
NearLabel true_value, false_value, done;
__ test(scratch, Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, &true_value);
__ bind(&false_value);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ jmp(&done);
__ bind(&true_value);
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
__ bind(&done);
}
}
__ j(true_cond, &is_true);
__ bind(&is_false);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ jmp(&done);
__ bind(&is_true);
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
__ bind(&done);
}
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
__ test(input, Immediate(kSmiTagMask));
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
NearLabel done;
__ j(zero, &done);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ bind(&done);
}
}
-
static Condition BranchCondition(HHasInstanceType* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
__ j(zero, &is_false);
__ CmpObjectType(input, TestType(instr->hydrogen()), result);
__ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
__ jmp(&done);
__ bind(&is_false);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ bind(&done);
}
Register result = ToRegister(instr->result());
ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
__ test(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
NearLabel done;
__ j(not_zero, &done);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ bind(&done);
}
__ j(not_equal, &is_false);
__ bind(&is_true);
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
__ jmp(&done);
__ bind(&is_false);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ bind(&done);
}
Label negative;
__ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
- // Check the sign of the argument. If the argument is positive,
- // just return it.
+ // Check the sign of the argument. If the argument is positive, just
+ // return it. We do not need to patch the stack since |input| and
+ // |result| are the same register and |input| will be restored
+ // unchanged by popping safepoint registers.
__ test(tmp, Immediate(HeapNumber::kSignMask));
__ j(not_zero, &negative);
- __ mov(tmp, input_reg);
__ jmp(&done);
__ bind(&negative);
__ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
__ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
__ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
-
- __ bind(&done);
__ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);
+ __ bind(&done);
__ PopSafepointRegisters();
}
+void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+ Register input_reg = ToRegister(instr->InputAt(0));
+ __ test(input_reg, Operand(input_reg));
+ Label is_positive;
+ __ j(not_sign, &is_positive);
+ __ neg(input_reg);
+ __ test(input_reg, Operand(input_reg));
+ DeoptimizeIf(negative, instr->environment());
+ __ bind(&is_positive);
+}
+
+
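A note on EmitIntegerMathAbs above: the second test/DeoptimizeIf pair exists because negating the most negative 32-bit integer wraps back to itself, so that one value has no representable int32 absolute value and the optimized code has to bail out. A rough sketch of the same check in plain C++, illustrative only and returning false where the generated code deoptimizes:

  #include <stdint.h>

  // Returns false where the generated code deoptimizes: negating INT32_MIN
  // wraps back to a negative value, so it has no int32 absolute value.
  static bool Int32Abs(int32_t in, int32_t* out) {
    if (in < 0) {
      in = static_cast<int32_t>(0u - static_cast<uint32_t>(in));  // like neg
      if (in < 0) return false;  // only INT32_MIN reaches this
    }
    *out = in;
    return true;
  }
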
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
__ subsd(scratch, input_reg);
__ pand(input_reg, scratch);
} else if (r.IsInteger32()) {
- Register input_reg = ToRegister(instr->InputAt(0));
- __ test(input_reg, Operand(input_reg));
- Label is_positive;
- __ j(not_sign, &is_positive);
- __ neg(input_reg);
- __ test(input_reg, Operand(input_reg));
- DeoptimizeIf(negative, instr->environment());
- __ bind(&is_positive);
+ EmitIntegerMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
new DeferredMathAbsTaggedHeapNumber(this, instr);
- Label not_smi;
Register input_reg = ToRegister(instr->InputAt(0));
// Smi check.
__ test(input_reg, Immediate(kSmiTagMask));
__ j(not_zero, deferred->entry());
- __ test(input_reg, Operand(input_reg));
- Label is_positive;
- __ j(not_sign, &is_positive);
- __ neg(input_reg);
-
- __ test(input_reg, Operand(input_reg));
- DeoptimizeIf(negative, instr->environment());
-
- __ bind(&is_positive);
+ EmitIntegerMathAbs(instr);
__ bind(deferred->exit());
}
}
}
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+ class DeferredStringCharCodeAt: public LDeferredCode {
+ public:
+ DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+ private:
+ LStringCharCodeAt* instr_;
+ };
+
+ Register string = ToRegister(instr->string());
+ Register index = no_reg;
+ int const_index = -1;
+ if (instr->index()->IsConstantOperand()) {
+ const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (!Smi::IsValid(const_index)) {
+      // Guaranteed to be out of bounds because of the assert above, so the
+      // bounds check dominating this instruction must already have
+      // deoptimized.
+ if (FLAG_debug_code) {
+ __ Abort("StringCharCodeAt: out of bounds index.");
+ }
+ // No code needs to be generated.
+ return;
+ }
+ } else {
+ index = ToRegister(instr->index());
+ }
+ Register result = ToRegister(instr->result());
+
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(this, instr);
+
+ NearLabel flat_string, ascii_string, done;
+
+ // Fetch the instance type of the receiver into result register.
+ __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for non-flat strings.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test(result, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);
+
+ // Handle non-flat strings.
+ __ test(result, Immediate(kIsConsStringMask));
+ __ j(zero, deferred->entry());
+
+ // ConsString.
+  // Check whether the right-hand side is the empty string (i.e. whether
+  // this is really a flat string in a cons string). If that is not the
+  // case, we would rather go to the runtime system now to flatten the
+  // string.
+ __ cmp(FieldOperand(string, ConsString::kSecondOffset),
+ Immediate(Factory::empty_string()));
+ __ j(not_equal, deferred->entry());
+ // Get the first of the two strings and load its instance type.
+ __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
+ __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test(result, Immediate(kStringRepresentationMask));
+ __ j(not_zero, deferred->entry());
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ STATIC_ASSERT(kAsciiStringTag != 0);
+ __ test(result, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register.
+ STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ if (instr->index()->IsConstantOperand()) {
+ __ movzx_w(result,
+ FieldOperand(string,
+ SeqTwoByteString::kHeaderSize + 2 * const_index));
+ } else {
+ __ movzx_w(result, FieldOperand(string,
+ index,
+ times_2,
+ SeqTwoByteString::kHeaderSize));
+ }
+ __ jmp(&done);
+
+ // ASCII string.
+ // Load the byte into the result register.
+ __ bind(&ascii_string);
+ if (instr->index()->IsConstantOperand()) {
+ __ movzx_b(result, FieldOperand(string,
+ SeqAsciiString::kHeaderSize + const_index));
+ } else {
+ __ movzx_b(result, FieldOperand(string,
+ index,
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ }
+ __ bind(&done);
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Set(result, Immediate(0));
+
+ __ PushSafepointRegisters();
+ __ push(string);
+ // Push the index as a smi. This is safe because of the checks in
+ // DoStringCharCodeAt above.
+ STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+ if (instr->index()->IsConstantOperand()) {
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+ __ push(Immediate(Smi::FromInt(const_index)));
+ } else {
+ Register index = ToRegister(instr->index());
+ __ SmiTag(index);
+ __ push(index);
+ }
+ __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
+ if (FLAG_debug_code) {
+ __ AbortIfNotSmi(eax);
+ }
+ __ SmiUntag(eax);
+ __ mov(Operand(esp, EspIndexForPushAll(result) * kPointerSize), eax);
+ __ PopSafepointRegisters();
+}
+
+
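A side note on the SmiTag/SmiUntag pair used in DoDeferredStringCharCodeAt: on ia32 a smi keeps the integer in the upper 31 bits with a zero tag bit, so tagging is a left shift by one and untagging an arithmetic right shift by one. A minimal sketch, assuming the ia32 layout (kSmiTag == 0, kSmiTagSize == 1, kSmiShiftSize == 0):

  #include <stdint.h>

  // String indices are non-negative, so both shifts are well defined; the
  // STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue) above guarantees any
  // valid index survives this round trip.
  static inline int32_t SmiTagIndex(int32_t index) { return index << 1; }
  static inline int32_t SmiUntagIndex(int32_t smi)  { return smi >> 1; }
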
+void LCodeGen::DoStringLength(LStringLength* instr) {
+ Register string = ToRegister(instr->string());
+ Register result = ToRegister(instr->result());
+ __ mov(result, FieldOperand(string, String::kLengthOffset));
+}
+
+
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* input = instr->InputAt(0);
ASSERT(input->IsRegister() || input->IsStackSlot());
InstanceType last = instr->hydrogen()->last();
__ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
- __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
- static_cast<int8_t>(first));
  // If there is only one type in the interval, check for equality.
if (first == last) {
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+ static_cast<int8_t>(first));
DeoptimizeIf(not_equal, instr->environment());
- } else {
+ } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
+ // String has a dedicated bit in instance type.
+ __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), kIsNotStringMask);
+ DeoptimizeIf(not_zero, instr->environment());
+ } else {
+ __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+ static_cast<int8_t>(first));
DeoptimizeIf(below, instr->environment());
// Omit check for the last type.
if (last != LAST_TYPE) {
instr->type_literal());
__ j(final_branch_condition, &true_label);
__ bind(&false_label);
- __ mov(result, Handle<Object>(Heap::false_value()));
+ __ mov(result, Factory::false_value());
__ jmp(&done);
__ bind(&true_label);
- __ mov(result, Handle<Object>(Heap::true_value()));
+ __ mov(result, Factory::true_value());
__ bind(&done);
}
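Regarding the string-interval change a little further up (the test_b against kIsNotStringMask replacing a two-sided range compare): every string instance type has the is-not-string bit clear, so a single bit test covers the whole FIRST_STRING_TYPE..LAST_STRING_TYPE interval. A sketch of the equivalent predicate; the mask value is assumed here for illustration:

  // Sketch only; 0x80 is the assumed value of kIsNotStringMask.
  static inline bool IsStringInstanceType(unsigned char instance_type) {
    return (instance_type & 0x80) == 0;
  }
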
final_branch_condition = below;
} else if (type_name->Equals(Heap::boolean_symbol())) {
- __ cmp(input, Handle<Object>(Heap::true_value()));
+ __ cmp(input, Factory::true_value());
__ j(equal, true_label);
- __ cmp(input, Handle<Object>(Heap::false_value()));
+ __ cmp(input, Factory::false_value());
final_branch_condition = equal;
} else if (type_name->Equals(Heap::undefined_symbol())) {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
+ void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
int ToInteger32(LConstantOperand* op) const;
// Specific math operations - used from DoUnaryMathOperation.
+ void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
#include "v8.h"
-#include "lithium-allocator.h"
+#include "lithium.h"
namespace v8 {
namespace internal {
case Token::MUL: return "mul-t";
case Token::MOD: return "mod-t";
case Token::DIV: return "div-t";
+ case Token::BIT_AND: return "bit-and-t";
+ case Token::BIT_OR: return "bit-or-t";
+ case Token::BIT_XOR: return "bit-xor-t";
+ case Token::SHL: return "sal-t";
+ case Token::SAR: return "sar-t";
+ case Token::SHR: return "shr-t";
default:
UNREACHABLE();
return NULL;
LInstruction* LChunkBuilder::DoBit(Token::Value op,
HBitwiseBinaryOperation* instr) {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
+ LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ return DefineSameAsFirst(new LBitI(op, left, right));
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+ }
}
LInstruction* LChunkBuilder::DoShift(Token::Value op,
HBitwiseBinaryOperation* instr) {
+ if (instr->representation().IsTagged()) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+
+ LOperand* left = UseFixed(instr->left(), edx);
+ LOperand* right = UseFixed(instr->right(), eax);
+ LArithmeticT* result = new LArithmeticT(op, left, right);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+ }
+
ASSERT(instr->representation().IsInteger32());
ASSERT(instr->OperandAt(0)->representation().IsInteger32());
ASSERT(instr->OperandAt(1)->representation().IsInteger32());
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsBranch() && !instr->IsGoto()) {
- // TODO(fschneider): Handle branch instructions uniformly like
- // other instructions. This requires us to generate the right
- // branch instruction already at the HIR level.
+ if (current->IsTest() && !instr->IsGoto()) {
ASSERT(instr->IsControl());
- HBranch* branch = HBranch::cast(current);
- instr->set_hydrogen_value(branch->value());
- HBasicBlock* first = branch->FirstSuccessor();
- HBasicBlock* second = branch->SecondSuccessor();
+ HTest* test = HTest::cast(current);
+ instr->set_hydrogen_value(test->value());
+ HBasicBlock* first = test->FirstSuccessor();
+ HBasicBlock* second = test->SecondSuccessor();
ASSERT(first != NULL && second != NULL);
instr->SetBranchTargets(first->block_id(), second->block_id());
} else {
}
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+LInstruction* LChunkBuilder::DoTest(HTest* instr) {
HValue* v = instr->value();
if (v->EmitAtUses()) {
if (v->IsClassOfTest()) {
}
-LInstruction* LChunkBuilder::DoCompareMapAndBranch(
- HCompareMapAndBranch* instr) {
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
return new LCmpMapAndBranch(value);
}
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegisterOrConstant(instr->index());
+ LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
+ LOperand* string = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LStringLength(string));
+}
+
+
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
return MarkAsCall(DefineFixed(new LArrayLiteral, eax), instr);
}
// LStoreNamed
// LStoreNamedField
// LStoreNamedGeneric
+// LStringCharCodeAt
// LBitNotI
// LCallNew
// LCheckFunction
// LReturn
// LSmiTag
// LStoreGlobal
+// LStringLength
// LTaggedToI
// LThrow
// LTypeof
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
+ V(StringCharCodeAt) \
+ V(StringLength) \
V(SubI) \
V(TaggedToI) \
V(Throw) \
};
-template<typename T, int N>
+template<typename ElementType, int NumElements>
class OperandContainer {
public:
OperandContainer() {
- for (int i = 0; i < N; i++) elems_[i] = NULL;
+ for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
}
- int length() { return N; }
- T& operator[](int i) {
+ int length() { return NumElements; }
+ ElementType& operator[](int i) {
ASSERT(i < length());
return elems_[i];
}
void PrintOperandsTo(StringStream* stream);
private:
- T elems_[N];
+ ElementType elems_[NumElements];
};
-template<typename T>
-class OperandContainer<T, 0> {
+template<typename ElementType>
+class OperandContainer<ElementType, 0> {
public:
int length() { return 0; }
void PrintOperandsTo(StringStream* stream) { }
};
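As an aside on the OperandContainer rename above: the <ElementType, 0> specialization is what lets instructions with no temps (or no results) avoid declaring a zero-length array, which the primary template would otherwise do. A self-contained sketch of the same pattern, with simplified names that are not from the real header:

  #include <cassert>

  template <typename ElementType, int NumElements>
  class Container {
   public:
    Container() {
      for (int i = 0; i < NumElements; i++) elems_[i] = ElementType();
    }
    int length() const { return NumElements; }
    ElementType& operator[](int i) { assert(i < length()); return elems_[i]; }
   private:
    ElementType elems_[NumElements];  // ill-formed if NumElements were 0
  };

  // Zero-element case: no array member at all.
  template <typename ElementType>
  class Container<ElementType, 0> {
   public:
    int length() const { return 0; }
  };

With this in place, an instruction template such as LTemplateInstruction<1, 2, 0> can hold results, inputs, and temps in three containers without special-casing the empty ones.
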
-template<int R, int I, int T = 0>
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
class LTemplateInstruction: public LInstruction {
public:
// Allow 0 or 1 output operands.
};
-template<int I, int T = 0>
+template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
DECLARE_INSTRUCTION(ControlInstruction)
};
-class LArgumentsLength: public LTemplateInstruction<1, 1> {
+class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
};
-class LCmpID: public LTemplateInstruction<1, 2> {
+class LCmpID: public LTemplateInstruction<1, 2, 0> {
public:
LCmpID(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LCmpIDAndBranch: public LControlInstruction<2> {
+class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 1> {
+class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
public:
explicit LUnaryMathOperation(LOperand* value) {
inputs_[0] = value;
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2> {
+class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
LCmpJSObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2> {
+class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LIsNull: public LTemplateInstruction<1, 1> {
+class LIsNull: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsNull(LOperand* value) {
inputs_[0] = value;
};
-class LIsSmi: public LTemplateInstruction<1, 1> {
+class LIsSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsSmi(LOperand* value) {
inputs_[0] = value;
};
-class LIsSmiAndBranch: public LControlInstruction<1> {
+class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
};
-class LHasInstanceType: public LTemplateInstruction<1, 1> {
+class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
inputs_[0] = value;
};
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> {
+class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1> {
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
};
-class LCmpT: public LTemplateInstruction<1, 2> {
+class LCmpT: public LTemplateInstruction<1, 2, 0> {
public:
LCmpT(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LCmpTAndBranch: public LControlInstruction<2> {
+class LCmpTAndBranch: public LControlInstruction<2, 0> {
public:
LCmpTAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LInstanceOf: public LTemplateInstruction<1, 2> {
+class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LInstanceOfAndBranch: public LControlInstruction<2> {
+class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
public:
LInstanceOfAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LBitI: public LTemplateInstruction<1, 2> {
+class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
LBitI(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
};
-class LShiftI: public LTemplateInstruction<1, 2> {
+class LShiftI: public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
};
-class LSubI: public LTemplateInstruction<1, 2> {
+class LSubI: public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LBranch: public LControlInstruction<1> {
+class LBranch: public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
};
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1> {
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch)
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
virtual bool IsControl() const { return true; }
Handle<Map> map() const { return hydrogen()->map(); }
int true_block_id() const {
- return hydrogen()->true_destination()->block_id();
+ return hydrogen()->FirstSuccessor()->block_id();
}
int false_block_id() const {
- return hydrogen()->false_destination()->block_id();
+ return hydrogen()->SecondSuccessor()->block_id();
}
};
-class LJSArrayLength: public LTemplateInstruction<1, 1> {
+class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LJSArrayLength(LOperand* value) {
inputs_[0] = value;
};
-class LFixedArrayLength: public LTemplateInstruction<1, 1> {
+class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LFixedArrayLength(LOperand* value) {
inputs_[0] = value;
};
-class LThrow: public LTemplateInstruction<0, 1> {
+class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
};
-class LBitNotI: public LTemplateInstruction<1, 1> {
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LBitNotI(LOperand* value) {
inputs_[0] = value;
};
-class LAddI: public LTemplateInstruction<1, 2> {
+class LAddI: public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LPower: public LTemplateInstruction<1, 2> {
+class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LArithmeticD: public LTemplateInstruction<1, 2> {
+class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
};
-class LArithmeticT: public LTemplateInstruction<1, 2> {
+class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
};
-class LReturn: public LTemplateInstruction<0, 1> {
+class LReturn: public LTemplateInstruction<0, 1, 0> {
public:
explicit LReturn(LOperand* value) {
inputs_[0] = value;
};
-class LLoadNamedField: public LTemplateInstruction<1, 1> {
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
};
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1> {
+class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
inputs_[0] = object;
};
-class LLoadElements: public LTemplateInstruction<1, 1> {
+class LLoadElements: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadElements(LOperand* object) {
inputs_[0] = object;
};
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2> {
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
};
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2> {
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
};
-class LStoreGlobal: public LTemplateInstruction<0, 1> {
+class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
public:
explicit LStoreGlobal(LOperand* value) {
inputs_[0] = value;
};
-class LPushArgument: public LTemplateInstruction<0, 1> {
+class LPushArgument: public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
};
-class LCallNew: public LTemplateInstruction<1, 1> {
+class LCallNew: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNew(LOperand* constructor) {
inputs_[0] = constructor;
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1> {
+class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
};
-class LNumberTagI: public LTemplateInstruction<1, 1> {
+class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LNumberTagD(LOperand* value, LOperand* temp) {
+ LNumberTagD(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
};
-class LSmiTag: public LTemplateInstruction<1, 1> {
+class LSmiTag: public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
};
-class LNumberUntagD: public LTemplateInstruction<1, 1> {
+class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
};
-class LSmiUntag: public LTemplateInstruction<1, 1> {
+class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
};
-class LCheckFunction: public LTemplateInstruction<0, 1> {
+class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LStringCharCodeAt(LOperand* string, LOperand* index) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+ DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+};
+
+
+class LStringLength: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LStringLength(LOperand* string) {
+ inputs_[0] = string;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
+ DECLARE_HYDROGEN_ACCESSOR(StringLength)
+
+ LOperand* string() { return inputs_[0]; }
+};
+
+
+class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckFunction(LOperand* value) {
inputs_[0] = value;
};
-class LCheckMap: public LTemplateInstruction<0, 1> {
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMap(LOperand* value) {
inputs_[0] = value;
};
-class LCheckSmi: public LTemplateInstruction<0, 1> {
+class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
public:
LCheckSmi(LOperand* value, Condition condition)
: condition_(condition) {
};
-class LTypeof: public LTemplateInstruction<1, 1> {
+class LTypeof: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
inputs_[0] = value;
};
-class LTypeofIs: public LTemplateInstruction<1, 1> {
+class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeofIs(LOperand* value) {
inputs_[0] = value;
};
-class LTypeofIsAndBranch: public LControlInstruction<1> {
+class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
};
-class LDeleteProperty: public LTemplateInstruction<1, 2> {
+class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
public:
LDeleteProperty(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
}
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, failed_allocation;
+
+ // Check that the object isn't a smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+
+ // Check that the key is a smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow, not_taken);
+
+ // Get the map of the receiver.
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to check this explicitly since this generic stub does not perform
+ // map checks.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ __ j(not_zero, &slow, not_taken);
+
+ __ CmpInstanceType(ecx, JS_OBJECT_TYPE);
+ __ j(not_equal, &slow, not_taken);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+ Handle<Map> map(Heap::MapForExternalArrayType(array_type));
+ __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+ Immediate(map));
+ __ j(not_equal, &slow, not_taken);
+
+ // eax: key, known to be a smi.
+ // edx: receiver, known to be a JSObject.
+ // ebx: elements object, known to be an external array.
+ // Check that the index is in range.
+ __ mov(ecx, eax);
+ __ SmiUntag(ecx); // Untag the index.
+ __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ j(above_equal, &slow);
+
+ __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
+ // ebx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ __ movsx_b(ecx, Operand(ebx, ecx, times_1, 0));
+ break;
+ case kExternalUnsignedByteArray:
+ __ movzx_b(ecx, Operand(ebx, ecx, times_1, 0));
+ break;
+ case kExternalShortArray:
+ __ movsx_w(ecx, Operand(ebx, ecx, times_2, 0));
+ break;
+ case kExternalUnsignedShortArray:
+ __ movzx_w(ecx, Operand(ebx, ecx, times_2, 0));
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ mov(ecx, Operand(ebx, ecx, times_4, 0));
+ break;
+ case kExternalFloatArray:
+ __ fld_s(Operand(ebx, ecx, times_4, 0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // For integer array types:
+ // ecx: value
+ // For floating-point array type:
+ // FP(0): value
+
+ if (array_type == kExternalIntArray ||
+ array_type == kExternalUnsignedIntArray) {
+ // For the Int and UnsignedInt array types, we need to see whether
+ // the value can be represented in a Smi. If not, we need to convert
+ // it to a HeapNumber.
+ Label box_int;
+ if (array_type == kExternalIntArray) {
+ __ cmp(ecx, 0xC0000000);
+ __ j(sign, &box_int);
+ } else {
+ ASSERT_EQ(array_type, kExternalUnsignedIntArray);
+ // The test is different for unsigned int values. Since we need
+ // the value to be in the range of a positive smi, we can't
+ // handle either of the top two bits being set in the value.
+ __ test(ecx, Immediate(0xC0000000));
+ __ j(not_zero, &box_int);
+ }
+
+ __ mov(eax, ecx);
+ __ SmiTag(eax);
+ __ ret(0);
+
+ __ bind(&box_int);
+
+ // Allocate a HeapNumber for the int and perform int-to-double
+ // conversion.
+ if (array_type == kExternalIntArray) {
+ __ push(ecx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ecx);
+ } else {
+ ASSERT(array_type == kExternalUnsignedIntArray);
+ // Need to zero-extend the value.
+ // There's no fild variant for unsigned values, so zero-extend
+ // to a 64-bit int manually.
+ __ push(Immediate(0));
+ __ push(ecx);
+ __ fild_d(Operand(esp, 0));
+ __ pop(ecx);
+ __ pop(ecx);
+ }
+ // FP(0): value
+ __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
+ // Set the value.
+ __ mov(eax, ecx);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(0);
+ } else if (array_type == kExternalFloatArray) {
+ // For the floating-point array type, we need to always allocate a
+ // HeapNumber.
+ __ AllocateHeapNumber(ecx, ebx, edi, &failed_allocation);
+ // Set the value.
+ __ mov(eax, ecx);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(0);
+ } else {
+ __ mov(eax, ecx);
+ __ SmiTag(eax);
+ __ ret(0);
+ }
+
+ // If we fail allocation of the HeapNumber, we still have a value on
+ // top of the FPU stack. Remove it.
+ __ bind(&failed_allocation);
+ __ ffree();
+ __ fincstp();
+ // Fall through to slow case.
+
+ // Slow case: Jump to runtime.
+ __ bind(&slow);
+ __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+ // ----------- S t a t e -------------
+ // -- eax : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx); // receiver
+ __ push(eax); // name
+ __ push(ebx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+ // Return the generated code.
+ return GetCode(flags);
+}
+
+
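A side note on the range checks in CompileKeyedLoadStub above: an ia32 smi payload is 31 bits, so a loaded element can only stay a smi when it lies in [-2^30, 2^30 - 1]; the cmp against 0xC0000000 in the signed case and the "neither of the top two bits set" test in the unsigned case both encode that bound. A plain C++ sketch of the two predicates, illustrative only:

  #include <stdint.h>

  static inline bool Int32FitsInSmi(int32_t value) {
    return value >= -(1 << 30) && value <= (1 << 30) - 1;
  }
  static inline bool Uint32FitsInSmi(uint32_t value) {
    return (value & 0xC0000000u) == 0;  // neither of the top two bits set
  }
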
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label slow, check_heap_number;
+
+ // Check that the object isn't a smi.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &slow);
+ // Get the map from the receiver.
+ __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to do this because this generic stub does not perform map checks.
+ __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
+ 1 << Map::kIsAccessCheckNeeded);
+ __ j(not_zero, &slow);
+ // Check that the key is a smi.
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow);
+ // Get the instance type from the map of the receiver.
+ __ CmpInstanceType(edi, JS_OBJECT_TYPE);
+ __ j(not_equal, &slow);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ // eax: value
+ // edx: receiver, a JSObject
+ // ecx: key, a smi
+ __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+ __ CheckMap(edi, Handle<Map>(Heap::MapForExternalArrayType(array_type)),
+ &slow, true);
+
+ // Check that the index is in range.
+ __ mov(ebx, ecx);
+ __ SmiUntag(ebx);
+ __ cmp(ebx, FieldOperand(edi, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ j(above_equal, &slow);
+
+ // Handle both smis and HeapNumbers in the fast path. Go to the
+ // runtime for all other kinds of values.
+ // eax: value
+ // edx: receiver
+ // ecx: key
+ // edi: elements array
+ // ebx: untagged index
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_equal, &check_heap_number);
+ // smi case
+ __ mov(ecx, eax); // Preserve the value in eax. Key is no longer needed.
+ __ SmiUntag(ecx);
+ __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
+ // ecx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ mov(Operand(edi, ebx, times_4, 0), ecx);
+ break;
+ case kExternalFloatArray:
+ // Need to perform int-to-float conversion.
+ __ push(ecx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ecx);
+ __ fstp_s(Operand(edi, ebx, times_4, 0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ ret(0); // Return the original value.
+
+ __ bind(&check_heap_number);
+ // eax: value
+ // edx: receiver
+ // ecx: key
+ // edi: elements array
+ // ebx: untagged index
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ __ j(not_equal, &slow);
+
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+ __ mov(edi, FieldOperand(edi, ExternalArray::kExternalPointerOffset));
+ // ebx: untagged index
+ // edi: base pointer of external storage
+ if (array_type == kExternalFloatArray) {
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ fstp_s(Operand(edi, ebx, times_4, 0));
+ __ ret(0);
+ } else {
+ // Perform float-to-int conversion with truncation (round-to-zero)
+ // behavior.
+
+    // For the moment we make the slow call to the runtime on processors
+    // that don't support SSE2. The code in IntegerConvert
+    // (code-stubs-ia32.cc) is roughly what is needed here, though the
+    // conversion failure case does not need to be handled.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ if (array_type != kExternalIntArray &&
+ array_type != kExternalUnsignedIntArray) {
+ ASSERT(CpuFeatures::IsSupported(SSE2));
+ CpuFeatures::Scope scope(SSE2);
+ __ cvttsd2si(ecx, FieldOperand(eax, HeapNumber::kValueOffset));
+ // ecx: untagged integer value
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ mov_b(Operand(edi, ebx, times_1, 0), ecx);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ mov_w(Operand(edi, ebx, times_2, 0), ecx);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ if (CpuFeatures::IsSupported(SSE3)) {
+ CpuFeatures::Scope scope(SSE3);
+ // fisttp stores values as signed integers. To represent the
+ // entire range of int and unsigned int arrays, store as a
+ // 64-bit int and discard the high 32 bits.
+ // If the value is NaN or +/-infinity, the result is 0x80000000,
+ // which is automatically zero when taken mod 2^n, n < 32.
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ fisttp_d(Operand(esp, 0));
+ __ pop(ecx);
+ __ add(Operand(esp), Immediate(kPointerSize));
+ } else {
+ ASSERT(CpuFeatures::IsSupported(SSE2));
+ CpuFeatures::Scope scope(SSE2);
+ // We can easily implement the correct rounding behavior for the
+ // range [0, 2^31-1]. For the time being, to keep this code simple,
+ // make the slow runtime call for values outside this range.
+ // Note: we could do better for signed int arrays.
+ __ movd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+ // We will need the key if we have to make the slow runtime call.
+ __ push(ecx);
+ __ LoadPowerOf2(xmm1, ecx, 31);
+ __ pop(ecx);
+ __ ucomisd(xmm1, xmm0);
+ __ j(above_equal, &slow);
+ __ cvttsd2si(ecx, Operand(xmm0));
+ }
+ // ecx: untagged integer value
+ __ mov(Operand(edi, ebx, times_4, 0), ecx);
+ }
+ __ ret(0); // Return original value.
+ }
+ }
+
+ // Slow case: call runtime.
+ __ bind(&slow);
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : key
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ __ pop(ebx);
+ __ push(edx);
+ __ push(ecx);
+ __ push(eax);
+ __ push(ebx);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+
+ return GetCode(flags);
+}
+
+
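On the heap-number store path in CompileKeyedStoreStub above: for the integer external array types the stub converts with truncation (round-to-zero) and, per the WebGL note, aims to store NaN and +/-Infinity as zero. A rough portable approximation of that policy; it does not reproduce the exact out-of-range handling of the fisttp/cvttsd2si paths:

  static int32_t TruncateForIntArray(double value) {
    // NaN, +/-Infinity and values outside the int32 range map to 0 here.
    if (!(value > -2147483649.0 && value < 2147483648.0)) return 0;
    return static_cast<int32_t>(value);  // cvttsd2si-style truncation
  }
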
#undef __
} } // namespace v8::internal
}
-Code* KeyedLoadIC::external_array_stub(JSObject::ElementsKind elements_kind) {
- switch (elements_kind) {
- case JSObject::EXTERNAL_BYTE_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedLoadIC_ExternalByteArray);
- case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedByteArray);
- case JSObject::EXTERNAL_SHORT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedLoadIC_ExternalShortArray);
- case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- return Builtins::builtin(
- Builtins::KeyedLoadIC_ExternalUnsignedShortArray);
- case JSObject::EXTERNAL_INT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedLoadIC_ExternalIntArray);
- case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedIntArray);
- case JSObject::EXTERNAL_FLOAT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedLoadIC_ExternalFloatArray);
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
-Code* KeyedStoreIC::external_array_stub(JSObject::ElementsKind elements_kind) {
- switch (elements_kind) {
- case JSObject::EXTERNAL_BYTE_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedStoreIC_ExternalByteArray);
- case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- return Builtins::builtin(
- Builtins::KeyedStoreIC_ExternalUnsignedByteArray);
- case JSObject::EXTERNAL_SHORT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedStoreIC_ExternalShortArray);
- case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- return Builtins::builtin(
- Builtins::KeyedStoreIC_ExternalUnsignedShortArray);
- case JSObject::EXTERNAL_INT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedStoreIC_ExternalIntArray);
- case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedStoreIC_ExternalUnsignedIntArray);
- case JSObject::EXTERNAL_FLOAT_ELEMENTS:
- return Builtins::builtin(Builtins::KeyedStoreIC_ExternalFloatArray);
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
static bool HasInterceptorGetter(JSObject* object) {
return !object->GetNamedInterceptor()->getter()->IsUndefined();
}
} else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
- stub = external_array_stub(receiver->GetElementsKind());
+ MaybeObject* probe =
+ StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, false);
+ stub =
+ probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
} else if (receiver->HasIndexedInterceptor()) {
stub = indexed_interceptor_stub();
} else if (state == UNINITIALIZED &&
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
- stub = external_array_stub(receiver->GetElementsKind());
+ MaybeObject* probe =
+ StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, true);
+ stub =
+ probe->IsFailure() ? NULL : Code::cast(probe->ToObjectUnchecked());
} else if (state == UNINITIALIZED &&
key->IsSmi() &&
receiver->map()->has_fast_elements()) {
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
- // Generators for external array types. See objects.h.
- // These are similar to the generic IC; they optimize the case of
- // operating upon external array types but fall back to the runtime
- // for all other types.
- static void GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type);
static void GenerateIndexedInterceptor(MacroAssembler* masm);
// Clear the use of the inlined version.
static Code* string_stub() {
return Builtins::builtin(Builtins::KeyedLoadIC_String);
}
- static Code* external_array_stub(JSObject::ElementsKind elements_kind);
static Code* indexed_interceptor_stub() {
return Builtins::builtin(Builtins::KeyedLoadIC_IndexedInterceptor);
static void GenerateRuntimeSetProperty(MacroAssembler* masm);
static void GenerateGeneric(MacroAssembler* masm);
- // Generators for external array types. See objects.h.
- // These are similar to the generic IC; they optimize the case of
- // operating upon external array types but fall back to the runtime
- // for all other types.
- static void GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type);
-
// Clear the inlined version so the IC is always hit.
static void ClearInlinedVersion(Address address);
static Code* generic_stub() {
return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
}
- static Code* external_array_stub(JSObject::ElementsKind elements_kind);
static void Clear(Address address, Code* target);
}
-void LOperand::PrintTo(StringStream* stream) {
- LUnallocated* unalloc = NULL;
- switch (kind()) {
- case INVALID:
- break;
- case UNALLOCATED:
- unalloc = LUnallocated::cast(this);
- stream->Add("v%d", unalloc->virtual_register());
- switch (unalloc->policy()) {
- case LUnallocated::NONE:
- break;
- case LUnallocated::FIXED_REGISTER: {
- const char* register_name =
- Register::AllocationIndexToString(unalloc->fixed_index());
- stream->Add("(=%s)", register_name);
- break;
- }
- case LUnallocated::FIXED_DOUBLE_REGISTER: {
- const char* double_register_name =
- DoubleRegister::AllocationIndexToString(unalloc->fixed_index());
- stream->Add("(=%s)", double_register_name);
- break;
- }
- case LUnallocated::FIXED_SLOT:
- stream->Add("(=%dS)", unalloc->fixed_index());
- break;
- case LUnallocated::MUST_HAVE_REGISTER:
- stream->Add("(R)");
- break;
- case LUnallocated::WRITABLE_REGISTER:
- stream->Add("(WR)");
- break;
- case LUnallocated::SAME_AS_FIRST_INPUT:
- stream->Add("(1)");
- break;
- case LUnallocated::ANY:
- stream->Add("(-)");
- break;
- case LUnallocated::IGNORE:
- stream->Add("(0)");
- break;
- }
- break;
- case CONSTANT_OPERAND:
- stream->Add("[constant:%d]", index());
- break;
- case STACK_SLOT:
- stream->Add("[stack:%d]", index());
- break;
- case DOUBLE_STACK_SLOT:
- stream->Add("[double_stack:%d]", index());
- break;
- case REGISTER:
- stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
- break;
- case DOUBLE_REGISTER:
- stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
- break;
- case ARGUMENT:
- stream->Add("[arg:%d]", index());
- break;
+UsePosition::UsePosition(LifetimePosition pos, LOperand* operand)
+ : operand_(operand),
+ hint_(NULL),
+ pos_(pos),
+ next_(NULL),
+ requires_reg_(false),
+ register_beneficial_(true) {
+ if (operand_ != NULL && operand_->IsUnallocated()) {
+ LUnallocated* unalloc = LUnallocated::cast(operand_);
+ requires_reg_ = unalloc->HasRegisterPolicy();
+ register_beneficial_ = !unalloc->HasAnyPolicy();
}
+ ASSERT(pos_.IsValid());
}
-int LOperand::VirtualRegister() {
- LUnallocated* unalloc = LUnallocated::cast(this);
- return unalloc->virtual_register();
+
+bool UsePosition::HasHint() const {
+ return hint_ != NULL && !hint_->IsUnallocated();
}
#endif
+LiveRange::LiveRange(int id)
+ : id_(id),
+ spilled_(false),
+ assigned_register_(kInvalidAssignment),
+ assigned_register_kind_(NONE),
+ last_interval_(NULL),
+ first_interval_(NULL),
+ first_pos_(NULL),
+ parent_(NULL),
+ next_(NULL),
+ current_interval_(NULL),
+ last_processed_use_(NULL),
+ spill_start_index_(kMaxInt) {
+ spill_operand_ = new LUnallocated(LUnallocated::IGNORE);
+}
+
+
+void LiveRange::set_assigned_register(int reg, RegisterKind register_kind) {
+ ASSERT(!HasRegisterAssigned() && !IsSpilled());
+ assigned_register_ = reg;
+ assigned_register_kind_ = register_kind;
+ ConvertOperands();
+}
+
+
+void LiveRange::MakeSpilled() {
+ ASSERT(!IsSpilled());
+ ASSERT(TopLevel()->HasAllocatedSpillOperand());
+ spilled_ = true;
+ assigned_register_ = kInvalidAssignment;
+ ConvertOperands();
+}
+
+
+bool LiveRange::HasAllocatedSpillOperand() const {
+ return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
+}
+
+
+void LiveRange::SetSpillOperand(LOperand* operand) {
+ ASSERT(!operand->IsUnallocated());
+ ASSERT(spill_operand_ != NULL);
+ ASSERT(spill_operand_->IsUnallocated());
+ spill_operand_->ConvertTo(operand->kind(), operand->index());
+}
+
+
UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
UsePosition* use_pos = last_processed_use_;
if (use_pos == NULL) use_pos = first_pos();
}
-void LAllocator::AddGapMove(int pos, LiveRange* prev, LiveRange* next) {
- UsePosition* prev_pos = prev->AddUsePosition(
- LifetimePosition::FromInstructionIndex(pos));
- UsePosition* next_pos = next->AddUsePosition(
- LifetimePosition::FromInstructionIndex(pos));
- LOperand* prev_operand = prev_pos->operand();
- LOperand* next_operand = next_pos->operand();
- LGap* gap = chunk_->GetGapAt(pos);
- gap->GetOrCreateParallelMove(LGap::START)->
- AddMove(prev_operand, next_operand);
- next_pos->set_hint(prev_operand);
-}
-
-
LiveRange* LAllocator::SplitAt(LiveRange* range, LifetimePosition pos) {
ASSERT(!range->IsFixed());
TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
class LArgument;
class LChunk;
+class LOperand;
+class LUnallocated;
class LConstantOperand;
class LGap;
class LParallelMove;
};
-class LOperand: public ZoneObject {
- public:
- enum Kind {
- INVALID,
- UNALLOCATED,
- CONSTANT_OPERAND,
- STACK_SLOT,
- DOUBLE_STACK_SLOT,
- REGISTER,
- DOUBLE_REGISTER,
- ARGUMENT
- };
-
- LOperand() : value_(KindField::encode(INVALID)) { }
-
- Kind kind() const { return KindField::decode(value_); }
- int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
- bool IsConstantOperand() const { return kind() == CONSTANT_OPERAND; }
- bool IsStackSlot() const { return kind() == STACK_SLOT; }
- bool IsDoubleStackSlot() const { return kind() == DOUBLE_STACK_SLOT; }
- bool IsRegister() const { return kind() == REGISTER; }
- bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
- bool IsArgument() const { return kind() == ARGUMENT; }
- bool IsUnallocated() const { return kind() == UNALLOCATED; }
- bool Equals(LOperand* other) const { return value_ == other->value_; }
- int VirtualRegister();
-
- void PrintTo(StringStream* stream);
- void ConvertTo(Kind kind, int index) {
- value_ = KindField::encode(kind);
- value_ |= index << kKindFieldWidth;
- ASSERT(this->index() == index);
- }
-
- protected:
- static const int kKindFieldWidth = 3;
- class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
-
- LOperand(Kind kind, int index) { ConvertTo(kind, index); }
-
- unsigned value_;
-};
-
-
-class LUnallocated: public LOperand {
- public:
- enum Policy {
- NONE,
- ANY,
- FIXED_REGISTER,
- FIXED_DOUBLE_REGISTER,
- FIXED_SLOT,
- MUST_HAVE_REGISTER,
- WRITABLE_REGISTER,
- SAME_AS_FIRST_INPUT,
- IGNORE
- };
-
- // Lifetime of operand inside the instruction.
- enum Lifetime {
-    // A USED_AT_START operand is guaranteed to be live only at
-    // instruction start. The register allocator is free to assign the
-    // same register to some other operand used inside the instruction
-    // (i.e. a temporary or an output).
-    USED_AT_START,
-
-    // A USED_AT_END operand is treated as live until the end of the
-    // instruction. This means that the register allocator will not reuse
-    // its register for any other operand inside the instruction.
- USED_AT_END
- };
-
- explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, 0, USED_AT_END);
- }
-
- LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, fixed_index, USED_AT_END);
- }
-
- LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) {
- Initialize(policy, 0, lifetime);
- }
-
- // The superclass has a KindField. Some policies have a signed fixed
- // index in the upper bits.
- static const int kPolicyWidth = 4;
- static const int kLifetimeWidth = 1;
- static const int kVirtualRegisterWidth = 17;
-
- static const int kPolicyShift = kKindFieldWidth;
- static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
- static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
- static const int kFixedIndexShift =
- kVirtualRegisterShift + kVirtualRegisterWidth;
-
- class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };
-
- class LifetimeField
- : public BitField<Lifetime, kLifetimeShift, kLifetimeWidth> {
- };
-
- class VirtualRegisterField
- : public BitField<unsigned,
- kVirtualRegisterShift,
- kVirtualRegisterWidth> {
- };
-
- static const int kMaxVirtualRegisters = 1 << (kVirtualRegisterWidth + 1);
- static const int kMaxFixedIndices = 128;
-
- bool HasIgnorePolicy() const { return policy() == IGNORE; }
- bool HasNoPolicy() const { return policy() == NONE; }
- bool HasAnyPolicy() const {
- return policy() == ANY;
- }
- bool HasFixedPolicy() const {
- return policy() == FIXED_REGISTER ||
- policy() == FIXED_DOUBLE_REGISTER ||
- policy() == FIXED_SLOT;
- }
- bool HasRegisterPolicy() const {
- return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
- }
- bool HasSameAsInputPolicy() const {
- return policy() == SAME_AS_FIRST_INPUT;
- }
- Policy policy() const { return PolicyField::decode(value_); }
- void set_policy(Policy policy) {
- value_ &= ~PolicyField::mask();
- value_ |= PolicyField::encode(policy);
- }
- int fixed_index() const {
- return static_cast<int>(value_) >> kFixedIndexShift;
- }
-
- unsigned virtual_register() const {
- return VirtualRegisterField::decode(value_);
- }
-
- void set_virtual_register(unsigned id) {
- value_ &= ~VirtualRegisterField::mask();
- value_ |= VirtualRegisterField::encode(id);
- }
-
- LUnallocated* CopyUnconstrained() {
- LUnallocated* result = new LUnallocated(ANY);
- result->set_virtual_register(virtual_register());
- return result;
- }
-
- static LUnallocated* cast(LOperand* op) {
- ASSERT(op->IsUnallocated());
- return reinterpret_cast<LUnallocated*>(op);
- }
-
- bool IsUsedAtStart() {
- return LifetimeField::decode(value_) == USED_AT_START;
- }
-
- private:
- void Initialize(Policy policy, int fixed_index, Lifetime lifetime) {
- value_ |= PolicyField::encode(policy);
- value_ |= LifetimeField::encode(lifetime);
- value_ |= fixed_index << kFixedIndexShift;
- ASSERT(this->fixed_index() == fixed_index);
- }
-};
-
-
-class LMoveOperands BASE_EMBEDDED {
- public:
- LMoveOperands(LOperand* source, LOperand* destination)
- : source_(source), destination_(destination) {
- }
-
- LOperand* source() const { return source_; }
- void set_source(LOperand* operand) { source_ = operand; }
-
- LOperand* destination() const { return destination_; }
- void set_destination(LOperand* operand) { destination_ = operand; }
-
- // The gap resolver marks moves as "in-progress" by clearing the
- // destination (but not the source).
- bool IsPending() const {
- return destination_ == NULL && source_ != NULL;
- }
-
-  // True if this move is not eliminated and its source is the given operand.
- bool Blocks(LOperand* operand) const {
- return !IsEliminated() && source()->Equals(operand);
- }
-
- // A move is redundant if it's been eliminated, if its source and
- // destination are the same, or if its destination is unneeded.
- bool IsRedundant() const {
- return IsEliminated() || source_->Equals(destination_) || IsIgnored();
- }
-
- bool IsIgnored() const {
- return destination_ != NULL &&
- destination_->IsUnallocated() &&
- LUnallocated::cast(destination_)->HasIgnorePolicy();
- }
-
-  // We clear both operands to indicate a move that has been eliminated.
- void Eliminate() { source_ = destination_ = NULL; }
- bool IsEliminated() const {
- ASSERT(source_ != NULL || destination_ == NULL);
- return source_ == NULL;
- }
-
- private:
- LOperand* source_;
- LOperand* destination_;
-};
-
-
-class LConstantOperand: public LOperand {
- public:
- static LConstantOperand* Create(int index) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new LConstantOperand(index);
- }
-
- static LConstantOperand* cast(LOperand* op) {
- ASSERT(op->IsConstantOperand());
- return reinterpret_cast<LConstantOperand*>(op);
- }
-
- static void SetupCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LConstantOperand cache[];
-
- LConstantOperand() : LOperand() { }
- explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
-};
-
-
-class LArgument: public LOperand {
- public:
- explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
-
- static LArgument* cast(LOperand* op) {
- ASSERT(op->IsArgument());
- return reinterpret_cast<LArgument*>(op);
- }
-};
-
-
-class LStackSlot: public LOperand {
- public:
- static LStackSlot* Create(int index) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new LStackSlot(index);
- }
-
- static LStackSlot* cast(LOperand* op) {
- ASSERT(op->IsStackSlot());
- return reinterpret_cast<LStackSlot*>(op);
- }
-
- static void SetupCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LStackSlot cache[];
-
- LStackSlot() : LOperand() { }
- explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
-};
-
-
-class LDoubleStackSlot: public LOperand {
- public:
- static LDoubleStackSlot* Create(int index) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new LDoubleStackSlot(index);
- }
-
- static LDoubleStackSlot* cast(LOperand* op) {
-    ASSERT(op->IsDoubleStackSlot());
- return reinterpret_cast<LDoubleStackSlot*>(op);
- }
-
- static void SetupCache();
-
- private:
- static const int kNumCachedOperands = 128;
- static LDoubleStackSlot cache[];
-
- LDoubleStackSlot() : LOperand() { }
- explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
-};
-
-
-class LRegister: public LOperand {
- public:
- static LRegister* Create(int index) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new LRegister(index);
- }
-
- static LRegister* cast(LOperand* op) {
- ASSERT(op->IsRegister());
- return reinterpret_cast<LRegister*>(op);
- }
-
- static void SetupCache();
-
- private:
- static const int kNumCachedOperands = 16;
- static LRegister cache[];
-
- LRegister() : LOperand() { }
- explicit LRegister(int index) : LOperand(REGISTER, index) { }
-};
-
-
-class LDoubleRegister: public LOperand {
- public:
- static LDoubleRegister* Create(int index) {
- ASSERT(index >= 0);
- if (index < kNumCachedOperands) return &cache[index];
- return new LDoubleRegister(index);
- }
-
- static LDoubleRegister* cast(LOperand* op) {
- ASSERT(op->IsDoubleRegister());
- return reinterpret_cast<LDoubleRegister*>(op);
- }
-
- static void SetupCache();
-
- private:
- static const int kNumCachedOperands = 16;
- static LDoubleRegister cache[];
-
- LDoubleRegister() : LOperand() { }
- explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
-};
-
-
// A register-allocator view of a Lithium instruction. It contains the id of
// the output operand and a list of input operand uses.
class InstructionSummary: public ZoneObject {
// Representation of a use position.
class UsePosition: public ZoneObject {
public:
- UsePosition(LifetimePosition pos, LOperand* operand)
- : operand_(operand),
- hint_(NULL),
- pos_(pos),
- next_(NULL),
- requires_reg_(false),
- register_beneficial_(true) {
- if (operand_ != NULL && operand_->IsUnallocated()) {
- LUnallocated* unalloc = LUnallocated::cast(operand_);
- requires_reg_ = unalloc->HasRegisterPolicy();
- register_beneficial_ = !unalloc->HasAnyPolicy();
- }
- ASSERT(pos_.IsValid());
- }
+ UsePosition(LifetimePosition pos, LOperand* operand);
LOperand* operand() const { return operand_; }
bool HasOperand() const { return operand_ != NULL; }
LOperand* hint() const { return hint_; }
void set_hint(LOperand* hint) { hint_ = hint; }
- bool HasHint() const { return hint_ != NULL && !hint_->IsUnallocated(); }
+ bool HasHint() const;
bool RequiresRegister() const;
bool RegisterIsBeneficial() const;
public:
static const int kInvalidAssignment = 0x7fffffff;
- explicit LiveRange(int id)
- : id_(id),
- spilled_(false),
- assigned_register_(kInvalidAssignment),
- assigned_register_kind_(NONE),
- last_interval_(NULL),
- first_interval_(NULL),
- first_pos_(NULL),
- parent_(NULL),
- next_(NULL),
- current_interval_(NULL),
- last_processed_use_(NULL),
- spill_start_index_(kMaxInt) {
- spill_operand_ = new LUnallocated(LUnallocated::IGNORE);
- }
+ explicit LiveRange(int id);
UseInterval* first_interval() const { return first_interval_; }
UsePosition* first_pos() const { return first_pos_; }
LOperand* CreateAssignedOperand();
int assigned_register() const { return assigned_register_; }
int spill_start_index() const { return spill_start_index_; }
- void set_assigned_register(int reg, RegisterKind register_kind) {
- ASSERT(!HasRegisterAssigned() && !IsSpilled());
- assigned_register_ = reg;
- assigned_register_kind_ = register_kind;
- ConvertOperands();
- }
- void MakeSpilled() {
- ASSERT(!IsSpilled());
- ASSERT(TopLevel()->HasAllocatedSpillOperand());
- spilled_ = true;
- assigned_register_ = kInvalidAssignment;
- ConvertOperands();
- }
+ void set_assigned_register(int reg, RegisterKind register_kind);
+ void MakeSpilled();
// Returns use position in this live range that follows both start
// and last processed use position.
return last_interval_->end();
}
- bool HasAllocatedSpillOperand() const {
- return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
- }
-
+ bool HasAllocatedSpillOperand() const;
LOperand* GetSpillOperand() const { return spill_operand_; }
- void SetSpillOperand(LOperand* operand) {
- ASSERT(!operand->IsUnallocated());
- ASSERT(spill_operand_ != NULL);
- ASSERT(spill_operand_->IsUnallocated());
- spill_operand_->ConvertTo(operand->kind(), operand->index());
- }
+ void SetSpillOperand(LOperand* operand);
void SetSpillStartIndex(int start) {
spill_start_index_ = Min(start, spill_start_index_);
void Spill(LiveRange* range);
bool IsBlockBoundary(LifetimePosition pos);
- void AddGapMove(int pos, LiveRange* prev, LiveRange* next);
// Helper methods for resolving control flow.
void ResolveControlFlow(LiveRange* range,
namespace v8 {
namespace internal {
+
+void LOperand::PrintTo(StringStream* stream) {
+ LUnallocated* unalloc = NULL;
+ switch (kind()) {
+ case INVALID:
+ break;
+ case UNALLOCATED:
+ unalloc = LUnallocated::cast(this);
+ stream->Add("v%d", unalloc->virtual_register());
+ switch (unalloc->policy()) {
+ case LUnallocated::NONE:
+ break;
+ case LUnallocated::FIXED_REGISTER: {
+ const char* register_name =
+ Register::AllocationIndexToString(unalloc->fixed_index());
+ stream->Add("(=%s)", register_name);
+ break;
+ }
+ case LUnallocated::FIXED_DOUBLE_REGISTER: {
+ const char* double_register_name =
+ DoubleRegister::AllocationIndexToString(unalloc->fixed_index());
+ stream->Add("(=%s)", double_register_name);
+ break;
+ }
+ case LUnallocated::FIXED_SLOT:
+ stream->Add("(=%dS)", unalloc->fixed_index());
+ break;
+ case LUnallocated::MUST_HAVE_REGISTER:
+ stream->Add("(R)");
+ break;
+ case LUnallocated::WRITABLE_REGISTER:
+ stream->Add("(WR)");
+ break;
+ case LUnallocated::SAME_AS_FIRST_INPUT:
+ stream->Add("(1)");
+ break;
+ case LUnallocated::ANY:
+ stream->Add("(-)");
+ break;
+ case LUnallocated::IGNORE:
+ stream->Add("(0)");
+ break;
+ }
+ break;
+ case CONSTANT_OPERAND:
+ stream->Add("[constant:%d]", index());
+ break;
+ case STACK_SLOT:
+ stream->Add("[stack:%d]", index());
+ break;
+ case DOUBLE_STACK_SLOT:
+ stream->Add("[double_stack:%d]", index());
+ break;
+ case REGISTER:
+ stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
+ break;
+ case DOUBLE_REGISTER:
+ stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
+ break;
+ case ARGUMENT:
+ stream->Add("[arg:%d]", index());
+ break;
+ }
+}
+
+
+int LOperand::VirtualRegister() {
+ LUnallocated* unalloc = LUnallocated::cast(this);
+ return unalloc->virtual_register();
+}
+
+
bool LParallelMove::IsRedundant() const {
for (int i = 0; i < move_operands_.length(); ++i) {
if (!move_operands_[i].IsRedundant()) return false;
#define V8_LITHIUM_H_
#include "hydrogen.h"
-#include "lithium-allocator.h"
#include "safepoint-table.h"
namespace v8 {
namespace internal {
+class LOperand: public ZoneObject {
+ public:
+ enum Kind {
+ INVALID,
+ UNALLOCATED,
+ CONSTANT_OPERAND,
+ STACK_SLOT,
+ DOUBLE_STACK_SLOT,
+ REGISTER,
+ DOUBLE_REGISTER,
+ ARGUMENT
+ };
+
+ LOperand() : value_(KindField::encode(INVALID)) { }
+
+ Kind kind() const { return KindField::decode(value_); }
+ int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
+ bool IsConstantOperand() const { return kind() == CONSTANT_OPERAND; }
+ bool IsStackSlot() const { return kind() == STACK_SLOT; }
+ bool IsDoubleStackSlot() const { return kind() == DOUBLE_STACK_SLOT; }
+ bool IsRegister() const { return kind() == REGISTER; }
+ bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
+ bool IsArgument() const { return kind() == ARGUMENT; }
+ bool IsUnallocated() const { return kind() == UNALLOCATED; }
+ bool Equals(LOperand* other) const { return value_ == other->value_; }
+ int VirtualRegister();
+
+ void PrintTo(StringStream* stream);
+ void ConvertTo(Kind kind, int index) {
+ value_ = KindField::encode(kind);
+ value_ |= index << kKindFieldWidth;
+ ASSERT(this->index() == index);
+ }
+
+ protected:
+ static const int kKindFieldWidth = 3;
+ class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
+
+ LOperand(Kind kind, int index) { ConvertTo(kind, index); }
+
+ unsigned value_;
+};
+
+
+class LUnallocated: public LOperand {
+ public:
+ enum Policy {
+ NONE,
+ ANY,
+ FIXED_REGISTER,
+ FIXED_DOUBLE_REGISTER,
+ FIXED_SLOT,
+ MUST_HAVE_REGISTER,
+ WRITABLE_REGISTER,
+ SAME_AS_FIRST_INPUT,
+ IGNORE
+ };
+
+ // Lifetime of operand inside the instruction.
+ enum Lifetime {
+ // A USED_AT_START operand is guaranteed to be live only at
+ // instruction start. The register allocator is free to assign the same
+ // register to some other operand used inside the instruction (i.e. a
+ // temporary or an output).
+ USED_AT_START,
+
+ // A USED_AT_END operand is treated as live until the end of the
+ // instruction. This means that the register allocator will not reuse its
+ // register for any other operand inside the instruction.
+ USED_AT_END
+ };
+
+ explicit LUnallocated(Policy policy) : LOperand(UNALLOCATED, 0) {
+ Initialize(policy, 0, USED_AT_END);
+ }
+
+ LUnallocated(Policy policy, int fixed_index) : LOperand(UNALLOCATED, 0) {
+ Initialize(policy, fixed_index, USED_AT_END);
+ }
+
+ LUnallocated(Policy policy, Lifetime lifetime) : LOperand(UNALLOCATED, 0) {
+ Initialize(policy, 0, lifetime);
+ }
+
+ // The superclass has a KindField. Some policies have a signed fixed
+ // index in the upper bits.
+ static const int kPolicyWidth = 4;
+ static const int kLifetimeWidth = 1;
+ static const int kVirtualRegisterWidth = 17;
+
+ static const int kPolicyShift = kKindFieldWidth;
+ static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
+ static const int kVirtualRegisterShift = kLifetimeShift + kLifetimeWidth;
+ static const int kFixedIndexShift =
+ kVirtualRegisterShift + kVirtualRegisterWidth;
+
+ class PolicyField : public BitField<Policy, kPolicyShift, kPolicyWidth> { };
+
+ class LifetimeField
+ : public BitField<Lifetime, kLifetimeShift, kLifetimeWidth> {
+ };
+
+ class VirtualRegisterField
+ : public BitField<unsigned,
+ kVirtualRegisterShift,
+ kVirtualRegisterWidth> {
+ };
+
+ static const int kMaxVirtualRegisters = 1 << (kVirtualRegisterWidth + 1);
+ static const int kMaxFixedIndices = 128;
+
+ bool HasIgnorePolicy() const { return policy() == IGNORE; }
+ bool HasNoPolicy() const { return policy() == NONE; }
+ bool HasAnyPolicy() const {
+ return policy() == ANY;
+ }
+ bool HasFixedPolicy() const {
+ return policy() == FIXED_REGISTER ||
+ policy() == FIXED_DOUBLE_REGISTER ||
+ policy() == FIXED_SLOT;
+ }
+ bool HasRegisterPolicy() const {
+ return policy() == WRITABLE_REGISTER || policy() == MUST_HAVE_REGISTER;
+ }
+ bool HasSameAsInputPolicy() const {
+ return policy() == SAME_AS_FIRST_INPUT;
+ }
+ Policy policy() const { return PolicyField::decode(value_); }
+ void set_policy(Policy policy) {
+ value_ &= ~PolicyField::mask();
+ value_ |= PolicyField::encode(policy);
+ }
+ int fixed_index() const {
+ return static_cast<int>(value_) >> kFixedIndexShift;
+ }
+
+ unsigned virtual_register() const {
+ return VirtualRegisterField::decode(value_);
+ }
+
+ void set_virtual_register(unsigned id) {
+ value_ &= ~VirtualRegisterField::mask();
+ value_ |= VirtualRegisterField::encode(id);
+ }
+
+ LUnallocated* CopyUnconstrained() {
+ LUnallocated* result = new LUnallocated(ANY);
+ result->set_virtual_register(virtual_register());
+ return result;
+ }
+
+ static LUnallocated* cast(LOperand* op) {
+ ASSERT(op->IsUnallocated());
+ return reinterpret_cast<LUnallocated*>(op);
+ }
+
+ bool IsUsedAtStart() {
+ return LifetimeField::decode(value_) == USED_AT_START;
+ }
+
+ private:
+ void Initialize(Policy policy, int fixed_index, Lifetime lifetime) {
+ value_ |= PolicyField::encode(policy);
+ value_ |= LifetimeField::encode(lifetime);
+ value_ |= fixed_index << kFixedIndexShift;
+ ASSERT(this->fixed_index() == fixed_index);
+ }
+};
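For illustration, a minimal standalone sketch of the operand encoding above, written with plain shifts instead of V8's BitField templates; the constants restate the widths declared in the class and the chosen field values are arbitrary:

  #include <cstdint>
  #include <cstdio>

  int main() {
    // Bits 0-2: kind, 3-6: policy, 7: lifetime, 8-24: virtual register,
    // 25-31: signed fixed index (read back with an arithmetic shift).
    const int kPolicyShift = 3;
    const int kLifetimeShift = 7;
    const int kVirtualRegisterShift = 8;
    const int kFixedIndexShift = 25;

    // kind = UNALLOCATED (1), policy = FIXED_REGISTER (2),
    // lifetime = USED_AT_END (1), virtual register 42, fixed index 5.
    uint32_t value = 1u |
                     (2u << kPolicyShift) |
                     (1u << kLifetimeShift) |
                     (42u << kVirtualRegisterShift) |
                     (5u << kFixedIndexShift);

    unsigned vreg = (value >> kVirtualRegisterShift) & ((1u << 17) - 1);
    int fixed_index = static_cast<int32_t>(value) >> kFixedIndexShift;
    std::printf("vreg=%u fixed_index=%d\n", vreg, fixed_index);  // 42 and 5
    return 0;
  }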
+
+
+class LMoveOperands BASE_EMBEDDED {
+ public:
+ LMoveOperands(LOperand* source, LOperand* destination)
+ : source_(source), destination_(destination) {
+ }
+
+ LOperand* source() const { return source_; }
+ void set_source(LOperand* operand) { source_ = operand; }
+
+ LOperand* destination() const { return destination_; }
+ void set_destination(LOperand* operand) { destination_ = operand; }
+
+ // The gap resolver marks moves as "in-progress" by clearing the
+ // destination (but not the source).
+ bool IsPending() const {
+ return destination_ == NULL && source_ != NULL;
+ }
+
+ // True if this move still reads the given operand as its source, so the
+ // operand must not be overwritten before this move is performed.
+ bool Blocks(LOperand* operand) const {
+ return !IsEliminated() && source()->Equals(operand);
+ }
+
+ // A move is redundant if it's been eliminated, if its source and
+ // destination are the same, or if its destination is unneeded.
+ bool IsRedundant() const {
+ return IsEliminated() || source_->Equals(destination_) || IsIgnored();
+ }
+
+ bool IsIgnored() const {
+ return destination_ != NULL &&
+ destination_->IsUnallocated() &&
+ LUnallocated::cast(destination_)->HasIgnorePolicy();
+ }
+
+ // We clear both operands to indicate a move that has been eliminated.
+ void Eliminate() { source_ = destination_ = NULL; }
+ bool IsEliminated() const {
+ ASSERT(source_ != NULL || destination_ == NULL);
+ return source_ == NULL;
+ }
+
+ private:
+ LOperand* source_;
+ LOperand* destination_;
+};
+
+
+class LConstantOperand: public LOperand {
+ public:
+ static LConstantOperand* Create(int index) {
+ ASSERT(index >= 0);
+ if (index < kNumCachedOperands) return &cache[index];
+ return new LConstantOperand(index);
+ }
+
+ static LConstantOperand* cast(LOperand* op) {
+ ASSERT(op->IsConstantOperand());
+ return reinterpret_cast<LConstantOperand*>(op);
+ }
+
+ static void SetupCache();
+
+ private:
+ static const int kNumCachedOperands = 128;
+ static LConstantOperand cache[];
+
+ LConstantOperand() : LOperand() { }
+ explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
+};
+
+
+class LArgument: public LOperand {
+ public:
+ explicit LArgument(int index) : LOperand(ARGUMENT, index) { }
+
+ static LArgument* cast(LOperand* op) {
+ ASSERT(op->IsArgument());
+ return reinterpret_cast<LArgument*>(op);
+ }
+};
+
+
+class LStackSlot: public LOperand {
+ public:
+ static LStackSlot* Create(int index) {
+ ASSERT(index >= 0);
+ if (index < kNumCachedOperands) return &cache[index];
+ return new LStackSlot(index);
+ }
+
+ static LStackSlot* cast(LOperand* op) {
+ ASSERT(op->IsStackSlot());
+ return reinterpret_cast<LStackSlot*>(op);
+ }
+
+ static void SetupCache();
+
+ private:
+ static const int kNumCachedOperands = 128;
+ static LStackSlot cache[];
+
+ LStackSlot() : LOperand() { }
+ explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
+};
+
+
+class LDoubleStackSlot: public LOperand {
+ public:
+ static LDoubleStackSlot* Create(int index) {
+ ASSERT(index >= 0);
+ if (index < kNumCachedOperands) return &cache[index];
+ return new LDoubleStackSlot(index);
+ }
+
+ static LDoubleStackSlot* cast(LOperand* op) {
+ ASSERT(op->IsDoubleStackSlot());
+ return reinterpret_cast<LDoubleStackSlot*>(op);
+ }
+
+ static void SetupCache();
+
+ private:
+ static const int kNumCachedOperands = 128;
+ static LDoubleStackSlot cache[];
+
+ LDoubleStackSlot() : LOperand() { }
+ explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
+};
+
+
+class LRegister: public LOperand {
+ public:
+ static LRegister* Create(int index) {
+ ASSERT(index >= 0);
+ if (index < kNumCachedOperands) return &cache[index];
+ return new LRegister(index);
+ }
+
+ static LRegister* cast(LOperand* op) {
+ ASSERT(op->IsRegister());
+ return reinterpret_cast<LRegister*>(op);
+ }
+
+ static void SetupCache();
+
+ private:
+ static const int kNumCachedOperands = 16;
+ static LRegister cache[];
+
+ LRegister() : LOperand() { }
+ explicit LRegister(int index) : LOperand(REGISTER, index) { }
+};
+
+
+class LDoubleRegister: public LOperand {
+ public:
+ static LDoubleRegister* Create(int index) {
+ ASSERT(index >= 0);
+ if (index < kNumCachedOperands) return &cache[index];
+ return new LDoubleRegister(index);
+ }
+
+ static LDoubleRegister* cast(LOperand* op) {
+ ASSERT(op->IsDoubleRegister());
+ return reinterpret_cast<LDoubleRegister*>(op);
+ }
+
+ static void SetupCache();
+
+ private:
+ static const int kNumCachedOperands = 16;
+ static LDoubleRegister cache[];
+
+ LDoubleRegister() : LOperand() { }
+ explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
+};
+
+
class LParallelMove : public ZoneObject {
public:
LParallelMove() : move_operands_(4) { }
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LIVEOBJECTLIST_INL_H_
+#define V8_LIVEOBJECTLIST_INL_H_
+
+#include "v8.h"
+
+#include "liveobjectlist.h"
+
+#endif // V8_LIVEOBJECTLIST_INL_H_
+
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifdef LIVE_OBJECT_LIST
+
+#include <ctype.h>
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "checks.h"
+#include "global-handles.h"
+#include "heap.h"
+#include "inspector.h"
+#include "list-inl.h"
+#include "liveobjectlist.h"
+#include "string-stream.h"
+#include "top.h"
+#include "v8utils.h"
+
+namespace v8 {
+namespace internal {
+
+
+
+} } // namespace v8::internal
+
+#endif // LIVE_OBJECT_LIST
+
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LIVEOBJECTLIST_H_
+#define V8_LIVEOBJECTLIST_H_
+
+#include "v8.h"
+
+#include "checks.h"
+#include "heap.h"
+#include "objects.h"
+#include "globals.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef LIVE_OBJECT_LIST
+
+
+// Temporary stubbed out LiveObjectList implementation.
+class LiveObjectList {
+ public:
+ inline static void GCEpilogue() {}
+ inline static void GCPrologue() {}
+ inline static void IterateElements(ObjectVisitor* v) {}
+ inline static void ProcessNonLive(HeapObject *obj) {}
+ inline static void UpdateReferencesForScavengeGC() {}
+
+ static MaybeObject* Capture() { return Heap::undefined_value(); }
+ static bool Delete(int id) { return false; }
+ static MaybeObject* Dump(int id1,
+ int id2,
+ int start_idx,
+ int dump_limit,
+ Handle<JSObject> filter_obj) {
+ return Heap::undefined_value();
+ }
+ static MaybeObject* Info(int start_idx, int dump_limit) {
+ return Heap::undefined_value();
+ }
+ static MaybeObject* Summarize(int id1,
+ int id2,
+ Handle<JSObject> filter_obj) {
+ return Heap::undefined_value();
+ }
+
+ static void Reset() {}
+ static Object* GetObj(int obj_id) { return Heap::undefined_value(); }
+ static Object* GetObjId(Handle<String> address) {
+ return Heap::undefined_value();
+ }
+ static MaybeObject* GetObjRetainers(int obj_id,
+ Handle<JSObject> instance_filter,
+ bool verbose,
+ int start,
+ int count,
+ Handle<JSObject> filter_obj) {
+ return Heap::undefined_value();
+ }
+
+ static Object* GetPath(int obj_id1,
+ int obj_id2,
+ Handle<JSObject> instance_filter) {
+ return Heap::undefined_value();
+ }
+ static Object* PrintObj(int obj_id) { return Heap::undefined_value(); }
+};
+
+
+#else // !LIVE_OBJECT_LIST
+
+
+class LiveObjectList {
+ public:
+ static void GCEpilogue() {}
+ static void GCPrologue() {}
+ static void IterateElements(ObjectVisitor* v) {}
+ static void ProcessNonLive(HeapObject *obj) {}
+ static void UpdateReferencesForScavengeGC() {}
+};
+
+
+#endif // LIVE_OBJECT_LIST
+
+} } // namespace v8::internal
+
+#endif // V8_LIVEOBJECTLIST_H_
+
}
+// When formatting internally created error messages, do not
+// invoke overwritten error toString methods but explicitly use
+// the error to string method. This is to avoid leaking error
+// objects between script tags in a browser setting.
+function ToStringCheckErrorObject(obj) {
+ if (obj instanceof $Error) {
+ return %_CallFunction(obj, errorToString);
+ } else {
+ return ToString(obj);
+ }
+}
+
+
function ToDetailString(obj) {
if (obj != null && IS_OBJECT(obj) && obj.toString === $Object.prototype.toString) {
var constructor = obj.constructor;
- if (!constructor) return ToString(obj);
+ if (!constructor) return ToStringCheckErrorObject(obj);
var constructorName = constructor.name;
- if (!constructorName) return ToString(obj);
+ if (!constructorName) return ToStringCheckErrorObject(obj);
return "#<" + GetInstanceName(constructorName) + ">";
- } else if (obj instanceof $Error) {
- // When formatting internally created error messages, do not
- // invoke overwritten error toString methods but explicitly use
- // the error to string method. This is to avoid leaking error
- // objects between script tags in a browser setting.
- return %_CallFunction(obj, errorToString);
} else {
- return ToString(obj);
+ return ToStringCheckErrorObject(obj);
}
}
array_indexof_not_defined: "Array.getIndexOf: Argument undefined",
object_not_extensible: "Can't add property %0, object is not extensible",
illegal_access: "Illegal access",
- invalid_preparser_data: "Invalid preparser data for function %0"
+ invalid_preparser_data: "Invalid preparser data for function %0",
+ strict_mode_with: "Strict mode code may not include a with statement",
+ strict_catch_variable: "Catch variable may not be eval or arguments in strict mode",
+ strict_param_name: "Parameter name eval or arguments is not allowed in strict mode",
+ strict_param_dupe: "Strict mode function may not have duplicate parameter names",
+ strict_var_name: "Variable name may not be eval or arguments in strict mode",
+ strict_function_name: "Function name may not be eval or arguments in strict mode",
};
}
var format = kMessages[message.type];
// Setup extra properties of the Error.prototype object.
$Error.prototype.message = '';
+// Global list of error objects visited during errorToString. This is
+// used to detect cycles in error toString formatting.
+var visited_errors = new $Array();
+var cyclic_error_marker = new $Object();
+
+function errorToStringDetectCycle() {
+ if (!%PushIfAbsent(visited_errors, this)) throw cyclic_error_marker;
+ try {
+ var type = this.type;
+ if (type && !this.hasOwnProperty("message")) {
+ var formatted = FormatMessage({ type: type, args: this.arguments });
+ return this.name + ": " + formatted;
+ }
+ var message = this.hasOwnProperty("message") ? (": " + this.message) : "";
+ return this.name + message;
+ } finally {
+ visited_errors.pop();
+ }
+}
+
function errorToString() {
- var type = this.type;
- if (type && !this.hasOwnProperty("message")) {
- return this.name + ": " + FormatMessage({ type: type, args: this.arguments });
+ // This helper function is needed because access to properties on
+ // the builtins object does not work inside of a catch clause.
+ function isCyclicErrorMarker(o) { return o === cyclic_error_marker; }
+
+ try {
+ return %_CallFunction(this, errorToStringDetectCycle);
+ } catch(e) {
+ // If this error message was encountered already return the empty
+ // string for it instead of recursively formatting it.
+ if (isCyclicErrorMarker(e)) return '';
+ else throw e;
}
- var message = this.hasOwnProperty("message") ? (": " + this.message) : "";
- return this.name + message;
}
%FunctionSetName(errorToString, 'toString');
%SetProperty($Error.prototype, 'toString', errorToString, DONT_ENUM);
-
// Boilerplate for exceptions for stack overflows. Used from
// Top::StackOverflow().
const kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
}
-void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- UNIMPLEMENTED_MIPS();
-}
-
-
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
-void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- UNIMPLEMENTED_MIPS();
-}
-
-
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
}
+Object* ExternalArrayStubCompiler::CompileKeyedLoadStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
+Object* ExternalArrayStubCompiler::CompileKeyedStoreStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ UNIMPLEMENTED_MIPS();
+ return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+}
+
+
#undef __
} } // namespace v8::internal
}
-bool SharedFunctionInfo::IsBuiltinMathFunction() {
- return HasBuiltinFunctionId() &&
- builtin_function_id() >= kFirstMathFunctionId;
-}
-
-
BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
ASSERT(HasBuiltinFunctionId());
return static_cast<BuiltinFunctionId>(Smi::cast(function_data())->value());
case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
case PROXY_TYPE: return "PROXY";
+ case LAST_STRING_TYPE: return "LAST_STRING_TYPE";
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
enum InstanceType {
// String types.
+ // FIRST_STRING_TYPE
SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag,
ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
+ // LAST_STRING_TYPE
EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag,
PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
JS_BUILTINS_OBJECT_TYPE,
JS_GLOBAL_PROXY_TYPE,
JS_ARRAY_TYPE,
- JS_REGEXP_TYPE, // LAST_JS_OBJECT_TYPE
+
+ JS_REGEXP_TYPE, // LAST_JS_OBJECT_TYPE, FIRST_FUNCTION_CLASS_TYPE
JS_FUNCTION_TYPE,
LAST_TYPE = JS_FUNCTION_TYPE,
INVALID_TYPE = FIRST_TYPE - 1,
FIRST_NONSTRING_TYPE = MAP_TYPE,
+ FIRST_STRING_TYPE = FIRST_TYPE,
+ LAST_STRING_TYPE = FIRST_NONSTRING_TYPE - 1,
// Boundaries for testing for an external array.
FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE,
LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_FLOAT_ARRAY_TYPE,
// function objects are not counted as objects, even though they are
// implemented as such; only values whose typeof is "object" are included.
FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
- LAST_JS_OBJECT_TYPE = JS_REGEXP_TYPE
+ LAST_JS_OBJECT_TYPE = JS_REGEXP_TYPE,
+ // RegExp objects have [[Class]] "function" because they are callable.
+ // All types from this type and above are objects with [[Class]] "function".
+ FIRST_FUNCTION_CLASS_TYPE = JS_REGEXP_TYPE
};
inline bool IsApiFunction();
inline FunctionTemplateInfo* get_api_func_data();
inline bool HasBuiltinFunctionId();
- inline bool IsBuiltinMathFunction();
inline BuiltinFunctionId builtin_function_id();
// [script info]: Script from which the function originates.
void AddLoop() { loop_count_++; }
bool ContainsLoops() const { return loop_count_ > 0; }
+ bool StrictMode() { return strict_mode_; }
+ void EnableStrictMode() {
+ strict_mode_ = FLAG_strict_mode;
+ }
+
private:
// Captures the number of literals that need materialization in the
// function. Includes regexp literals, and boilerplate for object
// Captures the number of loops inside the scope.
int loop_count_;
+ // Parsing strict mode code.
+ bool strict_mode_;
+
// Bookkeeping
TemporaryScope** variable_;
TemporaryScope* parent_;
loop_count_(0),
variable_(variable),
parent_(*variable) {
+ // Inherit the strict mode from the parent scope.
+ strict_mode_ = (parent_ != NULL) && parent_->strict_mode_;
*variable = this;
}
int prev_level_;
};
-
// ----------------------------------------------------------------------------
// The CHECK_OK macro is a convenient macro to enforce error
// handling for functions that may fail (by returning !*ok).
0,
source->length(),
false,
- temp_scope.ContainsLoops());
+ temp_scope.ContainsLoops(),
+ temp_scope.StrictMode());
} else if (stack_overflow_) {
Top::StackOverflow();
}
ASSERT(processor != NULL);
InitializationBlockFinder block_finder;
ThisNamedPropertyAssigmentFinder this_property_assignment_finder;
+ bool directive_prologue = true; // Parsing directive prologue.
+
while (peek() != end_token) {
+ if (directive_prologue && peek() != Token::STRING) {
+ directive_prologue = false;
+ }
+
+ Scanner::Location token_loc = scanner().peek_location();
Statement* stat = ParseStatement(NULL, CHECK_OK);
- if (stat == NULL || stat->IsEmpty()) continue;
+
+ if (stat == NULL || stat->IsEmpty()) {
+ directive_prologue = false; // End of directive prologue.
+ continue;
+ }
+
+ if (directive_prologue) {
+ // Possibly a directive.
+ ExpressionStatement* e_stat;
+ Literal* literal;
+ // Still processing the directive prologue?
+ if ((e_stat = stat->AsExpressionStatement()) != NULL &&
+ (literal = e_stat->expression()->AsLiteral()) != NULL &&
+ literal->handle()->IsString()) {
+ Handle<String> directive = Handle<String>::cast(literal->handle());
+
+ // Check "use strict" directive (ES5 14.1).
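+ // The length comparison below (the literal's length plus the two quote
+ // characters) rejects string literals that contain escape sequences or
+ // line continuations, as required by ES5 14.1 for directives.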
+ if (!temp_scope_->StrictMode() &&
+ directive->Equals(Heap::use_strict()) &&
+ token_loc.end_pos - token_loc.beg_pos ==
+ Heap::use_strict()->length() + 2) {
+ temp_scope_->EnableStrictMode();
+ // "use strict" is the only directive for now.
+ directive_prologue = false;
+ }
+ } else {
+ // End of the directive prologue.
+ directive_prologue = false;
+ }
+ }
+
// We find and mark the initialization blocks on top level code only.
// This is because the optimization prevents reuse of the map transitions,
// so it should be used only for code that will only be run once.
return result;
}
+static bool IsEvalOrArguments(Handle<String> string) {
+ return string.is_identical_to(Factory::eval_symbol()) ||
+ string.is_identical_to(Factory::arguments_symbol());
+}
// If the variable declaration declares exactly one non-const
// variable, then *var is set to that variable. In all other cases,
Handle<String> name = ParseIdentifier(CHECK_OK);
if (fni_ != NULL) fni_->PushVariableName(name);
+ // Strict mode variables may not be named eval or arguments
+ if (temp_scope_->StrictMode() && IsEvalOrArguments(name)) {
+ ReportMessage("strict_var_name", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+
// Declare variable.
// Note that we *always* must treat the initial value via a separate init
// assignment for variables and constants because the value must be assigned
// 'with' '(' Expression ')' Statement
Expect(Token::WITH, CHECK_OK);
+
+ if (temp_scope_->StrictMode()) {
+ ReportMessage("strict_mode_with", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+
Expect(Token::LPAREN, CHECK_OK);
Expression* expr = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
Handle<String> name = ParseIdentifier(CHECK_OK);
+
+ if (temp_scope_->StrictMode() && IsEvalOrArguments(name)) {
+ ReportMessage("strict_catch_variable", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+
Expect(Token::RPAREN, CHECK_OK);
if (peek() == Token::LBRACE) {
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
int start_pos = scanner().location().beg_pos;
+
bool done = (peek() == Token::RPAREN);
while (!done) {
Handle<String> param_name = ParseIdentifier(CHECK_OK);
- top_scope_->AddParameter(top_scope_->DeclareLocal(param_name,
- Variable::VAR));
+ Variable* parameter = top_scope_->DeclareLocal(param_name, Variable::VAR);
+ top_scope_->AddParameter(parameter);
num_parameters++;
done = (peek() == Token::RPAREN);
if (!done) Expect(Token::COMMA, CHECK_OK);
end_pos = scanner().location().end_pos;
}
+ // Validate strict mode.
+ if (temp_scope_->StrictMode()) {
+ if (IsEvalOrArguments(name)) {
+ int position = function_token_position != RelocInfo::kNoPosition
+ ? function_token_position
+ : (start_pos > 0 ? start_pos - 1 : start_pos);
+ ReportMessageAt(Scanner::Location(position, start_pos),
+ "strict_function_name", Vector<const char*>::empty());
+ *ok = false;
+ return NULL;
+ }
+ // TODO(mmaly): Check for octal escape sequence here.
+ }
+
FunctionLiteral* function_literal =
new FunctionLiteral(name,
top_scope_,
start_pos,
end_pos,
function_name->length() > 0,
- temp_scope.ContainsLoops());
+ temp_scope.ContainsLoops(),
+ temp_scope.StrictMode());
function_literal->set_function_token_position(function_token_position);
if (fni_ != NULL && !is_named) fni_->AddFunction(function_literal);
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
};
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
};
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
};
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
}
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
UNIMPLEMENTED();
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
};
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
}
+bool OS::Remove(const char* path) {
+ return (remove(path) == 0);
+}
+
+
const char* OS::LogFileOpenMode = "w";
: file_(file), memory_(memory), size_(size) { }
virtual ~PosixMemoryMappedFile();
virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
private:
FILE* file_;
void* memory_;
};
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ FILE* file = fopen(name, "r+");
+ if (file == NULL) return NULL;
+
+ fseek(file, 0, SEEK_END);
+ int size = ftell(file);
+
+ void* memory =
+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+ return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
FILE* file = fopen(name, "w+");
}
+bool OS::Remove(const char* path) {
+ return (DeleteFileA(path) != 0);
+}
+
+
// Open log file in binary mode to avoid /n -> /r/n conversion.
const char* OS::LogFileOpenMode = "wb";
class Win32MemoryMappedFile : public OS::MemoryMappedFile {
public:
- Win32MemoryMappedFile(HANDLE file, HANDLE file_mapping, void* memory)
- : file_(file), file_mapping_(file_mapping), memory_(memory) { }
+ Win32MemoryMappedFile(HANDLE file,
+ HANDLE file_mapping,
+ void* memory,
+ int size)
+ : file_(file),
+ file_mapping_(file_mapping),
+ memory_(memory),
+ size_(size) { }
virtual ~Win32MemoryMappedFile();
virtual void* memory() { return memory_; }
+ virtual int size() { return size_; }
private:
HANDLE file_;
HANDLE file_mapping_;
void* memory_;
+ int size_;
};
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+ // Open a physical file
+ HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
+ if (file == INVALID_HANDLE_VALUE) return NULL;
+
+ int size = static_cast<int>(GetFileSize(file, NULL));
+
+ // Create a file mapping for the physical file
+ HANDLE file_mapping = CreateFileMapping(file, NULL,
+ PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
+ if (file_mapping == NULL) return NULL;
+
+ // Map a view of the file into memory
+ void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
+ return new Win32MemoryMappedFile(file, file_mapping, memory, size);
+}
+
+
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
void* initial) {
// Open a physical file
// Map a view of the file into memory
void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
if (memory) memmove(memory, initial, size);
- return new Win32MemoryMappedFile(file, file_mapping, memory);
+ return new Win32MemoryMappedFile(file, file_mapping, memory, size);
}
static int GetLastError();
static FILE* FOpen(const char* path, const char* mode);
+ static bool Remove(const char* path);
// Log file open mode is platform-dependent due to line ends issues.
static const char* LogFileOpenMode;
class MemoryMappedFile {
public:
+ static MemoryMappedFile* open(const char* name);
static MemoryMappedFile* create(const char* name, int size, void* initial);
virtual ~MemoryMappedFile() { }
virtual void* memory() = 0;
+ virtual int size() = 0;
};
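As a rough usage sketch of the new open() and size() members (the file name is made up, and the concrete platform subclasses are assumed to unmap and close the file in their destructors, as they already do for create()):

  OS::MemoryMappedFile* mapped = OS::MemoryMappedFile::open("snapshot.bin");
  if (mapped != NULL) {
    const char* data = reinterpret_cast<const char*>(mapped->memory());
    int size = mapped->size();
    // ... read data[0 .. size) ...
    delete mapped;  // unmaps the view and closes the underlying file
  }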
// Safe formatting print. Ensures that str is always null-terminated.
// Note that we must do a lookup anyway, because if we find one,
// we must mark that variable as potentially accessed from this
// inner scope (the property may not be in the 'with' object).
+ if (var != NULL) var->set_is_used(true);
var = NonLocal(proxy->name(), Variable::DYNAMIC);
} else {
// visible name.
if ((var->is_this() || var->name()->length() > 0) &&
(var->is_accessed_from_inner_scope() ||
- scope_calls_eval_ || inner_scope_calls_eval_ ||
- scope_contains_with_)) {
+ scope_calls_eval_ ||
+ inner_scope_calls_eval_)) {
var->set_is_used(true);
}
// Global variables do not need to be allocated.
}
+namespace {
+
+ExternalArrayType ElementsKindToExternalArrayType(JSObject::ElementsKind kind) {
+ switch (kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ return kExternalByteArray;
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ return kExternalUnsignedByteArray;
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ return kExternalShortArray;
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ return kExternalUnsignedShortArray;
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ return kExternalIntArray;
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ return kExternalUnsignedIntArray;
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ return kExternalFloatArray;
+ default:
+ UNREACHABLE();
+ return static_cast<ExternalArrayType>(0);
+ }
+}
+
+} // anonymous namespace
+
+
+MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray(
+ JSObject* receiver,
+ bool is_store) {
+ Code::Flags flags =
+ Code::ComputeMonomorphicFlags(
+ is_store ? Code::KEYED_STORE_IC : Code::KEYED_LOAD_IC,
+ NORMAL);
+ ExternalArrayType array_type =
+ ElementsKindToExternalArrayType(receiver->GetElementsKind());
+ String* name =
+ is_store ? Heap::KeyedStoreExternalArray_symbol()
+ : Heap::KeyedLoadExternalArray_symbol();
+ // Use the global maps for the particular external array types,
+ // rather than the receiver's map, when looking up the cached code,
+ // so that we actually canonicalize these stubs.
+ Map* map = Heap::MapForExternalArrayType(array_type);
+ Object* code = map->FindInCodeCache(name, flags);
+ if (code->IsUndefined()) {
+ ExternalArrayStubCompiler compiler;
+ { MaybeObject* maybe_code =
+ is_store ? compiler.CompileKeyedStoreStub(array_type, flags) :
+ compiler.CompileKeyedLoadStub(array_type, flags);
+ if (!maybe_code->ToObject(&code)) return maybe_code;
+ }
+ if (is_store) {
+ PROFILE(
+ CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), 0));
+ } else {
+ PROFILE(
+ CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), 0));
+ }
+ Object* result;
+ { MaybeObject* maybe_result =
+ map->UpdateCodeCache(name, Code::cast(code));
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ }
+ return code;
+}
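To illustrate the canonicalization note above, a small standalone analogue using std::map in place of the map's code cache; the names here are invented, and the point is only that the key is the external array type rather than the receiver's own map, so every array of a given element kind shares one stub:

  #include <map>
  #include <string>

  typedef int HypotheticalArrayType;  // stand-in for ExternalArrayType
  static std::map<HypotheticalArrayType, std::string> hypothetical_stub_cache;

  const std::string& GetOrCompileStub(HypotheticalArrayType type) {
    std::map<HypotheticalArrayType, std::string>::iterator it =
        hypothetical_stub_cache.find(type);
    if (it == hypothetical_stub_cache.end()) {
      // Compile once per element kind; all receivers with that kind reuse it.
      it = hypothetical_stub_cache.insert(
          std::make_pair(type, std::string("compiled stub"))).first;
    }
    return it->second;
  }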
+
+
MaybeObject* StubCache::ComputeStoreNormal() {
return Builtins::builtin(Builtins::StoreIC_Normal);
}
}
+MaybeObject* ExternalArrayStubCompiler::GetCode(Code::Flags flags) {
+ Object* result;
+ { MaybeObject* maybe_result = GetCodeWithFlags(flags, "ExternalArrayStub");
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ }
+ Code* code = Code::cast(result);
+ USE(code);
+ PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, "ExternalArrayStub"));
+ return result;
+}
+
+
} } // namespace v8::internal
MUST_USE_RESULT static MaybeObject* ComputeKeyedStoreSpecialized(
JSObject* receiver);
+ MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadOrStoreExternalArray(
+ JSObject* receiver,
+ bool is_store);
+
// ---
MUST_USE_RESULT static MaybeObject* ComputeCallField(int argc,
CallHandlerInfo* api_call_info_;
};
+class ExternalArrayStubCompiler: public StubCompiler {
+ public:
+ explicit ExternalArrayStubCompiler() {}
+
+ MUST_USE_RESULT MaybeObject* CompileKeyedLoadStub(
+ ExternalArrayType array_type, Code::Flags flags);
+
+ MUST_USE_RESULT MaybeObject* CompileKeyedStoreStub(
+ ExternalArrayType array_type, Code::Flags flags);
+
+ private:
+ MaybeObject* GetCode(Code::Flags flags);
+};
+
} } // namespace v8::internal
#endif // V8_STUB_CACHE_H_
}
+MemoryMappedExternalResource::MemoryMappedExternalResource(const char* filename)
+ : filename_(NULL),
+ data_(NULL),
+ length_(0),
+ remove_file_on_cleanup_(false) {
+ Init(filename);
+}
+
+
+MemoryMappedExternalResource::
+ MemoryMappedExternalResource(const char* filename,
+ bool remove_file_on_cleanup)
+ : filename_(NULL),
+ data_(NULL),
+ length_(0),
+ remove_file_on_cleanup_(remove_file_on_cleanup) {
+ Init(filename);
+}
+
+
+MemoryMappedExternalResource::~MemoryMappedExternalResource() {
+ // Release the resources if we had successfully acquired them:
+ if (file_ != NULL) {
+ delete file_;
+ if (remove_file_on_cleanup_) {
+ OS::Remove(filename_);
+ }
+ DeleteArray<char>(filename_);
+ }
+}
+
+
+void MemoryMappedExternalResource::Init(const char* filename) {
+ file_ = OS::MemoryMappedFile::open(filename);
+ if (file_ != NULL) {
+ filename_ = StrDup(filename);
+ data_ = reinterpret_cast<char*>(file_->memory());
+ length_ = file_->size();
+ }
+}
+
+
+bool MemoryMappedExternalResource::EnsureIsAscii(bool abort_if_failed) const {
+ bool is_ascii = true;
+
+ int line_no = 1;
+ const char* start_of_line = data_;
+ const char* end = data_ + length_;
+ for (const char* p = data_; p < end; p++) {
+ char c = *p;
+ if ((c & 0x80) != 0) {
+ // Non-ascii detected:
+ is_ascii = false;
+
+ // Report the error and abort if appropriate:
+ if (abort_if_failed) {
+ int char_no = static_cast<int>(p - start_of_line) - 1;
+
+ ASSERT(filename_ != NULL);
+ PrintF("\n\n\n"
+ "Abort: Non-Ascii character 0x%.2x in file %s line %d char %d",
+ c, filename_, line_no, char_no);
+
+ // Allow for some context up to kNumberOfLeadingContextChars chars
+ // before the offending non-ascii char to help the user see where
+ // the offending char is.
+ const int kNumberOfLeadingContextChars = 10;
+ const char* err_context = p - kNumberOfLeadingContextChars;
+ if (err_context < data_) {
+ err_context = data_;
+ }
+ // Compute the length of the error context and print it.
+ int err_context_length = static_cast<int>(p - err_context);
+ if (err_context_length != 0) {
+ PrintF(" after \"%.*s\"", err_context_length, err_context);
+ }
+ PrintF(".\n\n\n");
+ OS::Abort();
+ }
+
+ break; // Non-ascii detected. No need to continue scanning.
+ }
+ if (c == '\n') {
+ start_of_line = p;
+ line_no++;
+ }
+ }
+
+ return is_ascii;
+}
+
+
} } // namespace v8::internal
}
}
+
+// A resource for using mmapped files to back external strings that are read
+// from files.
+class MemoryMappedExternalResource: public
+ v8::String::ExternalAsciiStringResource {
+ public:
+ explicit MemoryMappedExternalResource(const char* filename);
+ MemoryMappedExternalResource(const char* filename,
+ bool remove_file_on_cleanup);
+ virtual ~MemoryMappedExternalResource();
+
+ virtual const char* data() const { return data_; }
+ virtual size_t length() const { return length_; }
+
+ bool exists() const { return file_ != NULL; }
+ bool is_empty() const { return length_ == 0; }
+
+ bool EnsureIsAscii(bool abort_if_failed) const;
+ bool EnsureIsAscii() const { return EnsureIsAscii(true); }
+ bool IsAscii() const { return EnsureIsAscii(false); }
+
+ private:
+ void Init(const char* filename);
+
+ char* filename_;
+ OS::MemoryMappedFile* file_;
+
+ const char* data_;
+ size_t length_;
+ bool remove_file_on_cleanup_;
+};
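A hedged usage sketch of the class above (the script name is invented): exists() guards against a failed mapping, IsAscii() is the non-aborting check, and passing true for remove_file_on_cleanup deletes the backing file when the resource is destroyed:

  MemoryMappedExternalResource* resource =
      new MemoryMappedExternalResource("bench.js", true);
  if (resource->exists() && !resource->is_empty() && resource->IsAscii()) {
    // resource->data() / resource->length() can now back an external
    // ASCII string for the duration of the resource's lifetime.
  }
  delete resource;  // unmaps, and removes bench.js because of the flag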
+
+
} } // namespace v8::internal
#endif // V8_V8UTILS_H_
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 0
-#define BUILD_NUMBER 9
+#define BUILD_NUMBER 10
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
if (IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
Memory::Address_at(pc_) += static_cast<int32_t>(delta);
+ CPU::FlushICache(pc_, sizeof(Address));
} else if (IsCodeTarget(rmode_)) {
Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
+ CPU::FlushICache(pc_, sizeof(int32_t));
}
}
Assembler::set_target_address_at(pc_, target);
} else {
Memory::Address_at(pc_) = target;
+ CPU::FlushICache(pc_, sizeof(Address));
}
}
void RelocInfo::set_target_object(Object* target) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
*reinterpret_cast<Object**>(pc_) = target;
+ CPU::FlushICache(pc_, sizeof(Address));
}
ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
Memory::Address_at(pc_) = address;
+ CPU::FlushICache(pc_, sizeof(Address));
}
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
target;
+ CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
+ sizeof(Address));
}
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitPointer(target_object_address());
+ CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
+ CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitPointer(target_object_address());
+ CPU::FlushICache(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
+ CPU::FlushICache(pc_, sizeof(Address));
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
}
+void Assembler::cvttss2si(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF3);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvttsd2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
}
+void Assembler::cvttsd2si(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2C);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
arithmetic_op_32(0x1b, dst, src);
}
+ void sbbq(Register dst, Register src) {
+ arithmetic_op(0x1b, dst, src);
+ }
+
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
void movss(const Operand& dst, XMMRegister src);
void cvttss2si(Register dst, const Operand& src);
+ void cvttss2si(Register dst, XMMRegister src);
void cvttsd2si(Register dst, const Operand& src);
+ void cvttsd2si(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, XMMRegister src);
void cvtlsi2sd(XMMRegister dst, const Operand& src);
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
- int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
+ __ AllocateInNewSpace((slots_ * kPointerSize) + FixedArray::kHeaderSize,
rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack.
// Setup the object header.
__ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
+ __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(slots_));
// Setup the fixed slots.
__ Set(rbx, 0); // Set to NULL.
// Initialize the rest of the slots to undefined.
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) {
__ movq(Operand(rax, Context::SlotOffset(i)), rbx);
}
}
+Register InstanceofStub::left() { return rax; }
+
+
+Register InstanceofStub::right() { return rdx; }
+
+
int CompareStub::MinorKey() {
// Encode the three parameters in a unique 16 bit value. To avoid duplicate
// stubs the never NaN NaN condition is only taken into account if the
}
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ ASSERT(state_ == CompareIC::SMIS);
+ NearLabel miss;
+ __ JumpIfNotBothSmi(rdx, rax, &miss);
+
+ if (GetCondition() == equal) {
+ // For equality we do not care about the sign of the result.
+ __ SmiSub(rax, rax, rdx);
+ } else {
+ NearLabel done;
+ __ SmiSub(rdx, rdx, rax);
+ __ j(no_overflow, &done);
+ // Correct sign of result in case of overflow.
+ __ SmiNot(rdx, rdx);
+ __ bind(&done);
+ __ movq(rax, rdx);
+ }
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
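A standalone sketch of the overflow fix in GenerateSmis, under the assumption (true on x64) that a smi n is held in a register as n << 32; __builtin_sub_overflow is a GCC/Clang builtin standing in for the generated overflow check:

  #include <cstdint>
  #include <cstdio>

  int64_t CompareSmis(int32_t left, int32_t right) {
    int64_t tagged_left = static_cast<int64_t>(left) << 32;
    int64_t tagged_right = static_cast<int64_t>(right) << 32;
    int64_t diff;
    if (__builtin_sub_overflow(tagged_left, tagged_right, &diff)) {
      diff = ~diff;  // overflow flipped the sign bit; only the sign matters
    }
    return diff;  // negative, zero or positive like left - right
  }

  int main() {
    std::printf("%d %d %d\n",
                CompareSmis(1, 2) < 0,
                CompareSmis(5, 5) == 0,
                CompareSmis(INT32_MAX, INT32_MIN) > 0);  // prints: 1 1 1
    return 0;
  }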
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+
+ NearLabel generic_stub;
+ NearLabel unordered;
+ NearLabel miss;
+ Condition either_smi = masm->CheckEitherSmi(rax, rdx);
+ __ j(either_smi, &generic_stub);
+
+ __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
+ __ j(not_equal, &miss);
+ __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+ __ j(not_equal, &miss);
+
+ // Load left and right operand
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+
+ // Compare operands
+ __ ucomisd(xmm0, xmm1);
+
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered);
+
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ // Performing mov, because xor would destroy the flag register.
+ __ movl(rax, Immediate(0));
+ __ movl(rcx, Immediate(0));
+ __ setcc(above, rax); // Add one to zero if carry clear and not equal.
+ __ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
+ __ ret(0);
+
+ __ bind(&unordered);
+
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
+ __ bind(&generic_stub);
+ __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
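The setcc/sbbq pair above computes the -1/0/1 result without branching; a plain C++ restatement of the same logic (NaN is diverted to the generic stub before this point, so it is ignored here):

  int CompareDoubles(double left, double right) {
    int above = (left > right) ? 1 : 0;  // setcc(above, rax), rax was zeroed
    int below = (left < right) ? 1 : 0;  // the carry flag after ucomisd
    return above - below;                // sbbq(rax, rcx) with rcx == 0
  }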
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ ASSERT(state_ == CompareIC::OBJECTS);
+ NearLabel miss;
+ Condition either_smi = masm->CheckEitherSmi(rdx, rax);
+ __ j(either_smi, &miss);
+
+ __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
+ __ j(not_equal, &miss, not_taken);
+ __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
+ __ j(not_equal, &miss, not_taken);
+
+ ASSERT(GetCondition() == equal);
+ __ subq(rax, rdx);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ // Save the registers.
+ __ pop(rcx);
+ __ push(rdx);
+ __ push(rax);
+ __ push(rcx);
+
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
+ __ EnterInternalFrame();
+ __ push(rdx);
+ __ push(rax);
+ __ Push(Smi::FromInt(op_));
+ __ CallExternalReference(miss, 3);
+ __ LeaveInternalFrame();
+
+ // Compute the entry point of the rewritten stub.
+ __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
+
+ // Restore registers.
+ __ pop(rcx);
+ __ pop(rax);
+ __ pop(rdx);
+ __ push(rcx);
+
+ // Do a tail call to the rewritten stub.
+ __ jmp(rdi);
}
#undef __
frame_->AllocateStackSlots();
// Allocate the local context if needed.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = scope()->num_heap_slots();
if (heap_slots > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
} else if (opcode == 0x2C) {
// CVTTSS2SI:
// Convert with truncation scalar single-precision FP to dword integer.
- // Assert that mod is not 3, so source is memory, not an XMM register.
- ASSERT_NE(0xC0, *current & 0xC0);
- current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
+ int mod, regop, rm;
+ get_modrm(*current, &mod, ®op, &rm);
+ AppendToBuffer("cvttss2si%c %s,",
+ operand_size_code(), NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
} else if (opcode == 0x5A) {
// CVTSS2SD:
// Convert scalar single-precision FP to scalar double-precision FP.
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = scope()->num_heap_slots();
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
}
-void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow;
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow);
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(rax, &slow);
-
- // Check that the object is a JS object.
- __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
- __ j(not_equal, &slow);
- // Check that the receiver does not require access checks. We need
- // to check this explicitly since this generic stub does not perform
- // map checks. The map is already in rdx.
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- // rax: index (as a smi)
- // rdx: JSObject
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::RootIndexForExternalArrayType(array_type));
- __ j(not_equal, &slow);
-
- // Check that the index is in range.
- __ SmiToInteger32(rcx, rax);
- __ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // rax: index (as a smi)
- // rdx: receiver (JSObject)
- // rcx: untagged index
- // rbx: elements array
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case kExternalUnsignedByteArray:
- __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case kExternalShortArray:
- __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case kExternalUnsignedShortArray:
- __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case kExternalIntArray:
- __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case kExternalUnsignedIntArray:
- __ movl(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case kExternalFloatArray:
- __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // rax: index
- // rdx: receiver
- // For integer array types:
- // rcx: value
- // For floating-point array type:
- // xmm0: value as double.
-
- ASSERT(kSmiValueSize == 32);
- if (array_type == kExternalUnsignedIntArray) {
- // For the UnsignedInt array type, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- NearLabel box_int;
-
- __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
-
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
-
- __ bind(&box_int);
-
- // Allocate a HeapNumber for the int and perform int-to-double
- // conversion.
- // The value is zero-extended since we loaded the value from memory
- // with movl.
- __ cvtqsi2sd(xmm0, rcx);
-
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else if (array_type == kExternalFloatArray) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else {
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
- }
-
- // Slow case: Jump to runtime.
- __ bind(&slow);
- __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
- GenerateRuntimeGetProperty(masm);
-}
-
-
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
}
-void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
- ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow;
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow);
- // Get the map from the receiver.
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks. We need
- // to do this because this generic stub does not perform map checks.
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow);
- // Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &slow);
-
- // Check that the object is a JS object.
- __ CmpInstanceType(rbx, JS_OBJECT_TYPE);
- __ j(not_equal, &slow);
-
- // Check that the elements array is the appropriate type of
- // ExternalArray.
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::RootIndexForExternalArrayType(array_type));
- __ j(not_equal, &slow);
-
- // Check that the index is in range.
- __ SmiToInteger32(rdi, rcx); // Untag the index.
- __ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &slow);
-
- // Handle both smis and HeapNumbers in the fast path. Go to the
- // runtime for all other kinds of values.
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- NearLabel check_heap_number;
- __ JumpIfNotSmi(rax, &check_heap_number);
- // No more branches to slow case on this path. Key and receiver not needed.
- __ SmiToInteger32(rdx, rax);
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
- case kExternalFloatArray:
- // Need to perform int-to-float conversion.
- __ cvtlsi2ss(xmm0, rdx);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- break;
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0);
-
- __ bind(&check_heap_number);
- // rax: value
- // rcx: key (a smi)
- // rdx: receiver (a JSObject)
- // rbx: elements array
- // rdi: untagged key
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
- __ j(not_equal, &slow);
- // No more branches to slow case on this path.
-
- // The WebGL specification leaves the behavior of storing NaN and
- // +/-Infinity into integer arrays basically undefined. For more
- // reproducible behavior, convert these to zero.
- __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rdi: untagged index
- // rbx: base pointer of external storage
- // top of FPU stack: value
- if (array_type == kExternalFloatArray) {
- __ cvtsd2ss(xmm0, xmm0);
- __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
- __ ret(0);
- } else {
- // Need to perform float-to-int conversion.
- // Test the value for NaN.
-
- // Convert to int32 and store the low byte/word.
- // If the value is NaN or +/-infinity, the result is 0x80000000,
- // which is automatically zero when taken mod 2^n, n < 32.
- // rdx: value (converted to an untagged integer)
- // rdi: untagged index
- // rbx: base pointer of external storage
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
- __ cvtsd2si(rdx, xmm0);
- __ movb(Operand(rbx, rdi, times_1, 0), rdx);
- break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
- __ cvtsd2si(rdx, xmm0);
- __ movw(Operand(rbx, rdi, times_2, 0), rdx);
- break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray: {
- // Convert to int64, so that NaN and infinities become
- // 0x8000000000000000, which is zero mod 2^32.
- __ cvtsd2siq(rdx, xmm0);
- __ movl(Operand(rbx, rdi, times_4, 0), rdx);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- __ ret(0);
- }
-
- // Slow case: call runtime.
- __ bind(&slow);
- GenerateRuntimeSetProperty(masm);
-}
-
-
// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
- Abort("Unimplemented: %s", "EmitBranch");
+ int next_block = GetNextEmittedBlock(current_block_);
+ right_block = chunk_->LookupDestination(right_block);
+ left_block = chunk_->LookupDestination(left_block);
+
+ if (right_block == left_block) {
+ EmitGoto(left_block);
+ } else if (left_block == next_block) {
+ __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+ } else if (right_block == next_block) {
+ __ j(cc, chunk_->GetAssemblyLabel(left_block));
+ } else {
+ __ j(cc, chunk_->GetAssemblyLabel(left_block));
+ if (cc != always) {
+ __ jmp(chunk_->GetAssemblyLabel(right_block));
+ }
+ }
}
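The comparisons against next_block above exist so that no jump is emitted to the block that will be laid out next anyway. A small sketch of the same selection logic (block ids only, printf standing in for the assembler; the cc != always guard on the final jmp is omitted):

  #include <cstdio>

  void EmitBranchSketch(int left, int right, int next_emitted) {
    if (left == right) {
      std::printf("jmp B%d\n", left);        // unconditional goto
    } else if (left == next_emitted) {
      std::printf("j !cc -> B%d\n", right);  // fall through to left
    } else if (right == next_emitted) {
      std::printf("j cc -> B%d\n", left);    // fall through to right
    } else {
      std::printf("j cc -> B%d\n", left);    // neither falls through:
      std::printf("jmp B%d\n", right);       // conditional jump plus jmp
    }
  }

  int main() {
    EmitBranchSketch(5, 6, 6);  // prints only "j cc -> B5"
    return 0;
  }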
void LCodeGen::DoBranch(LBranch* instr) {
- Abort("Unimplemented: %s", "DoBranch");
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Representation r = instr->hydrogen()->representation();
+ if (r.IsInteger32()) {
+ Register reg = ToRegister(instr->InputAt(0));
+ __ testl(reg, reg);
+ EmitBranch(true_block, false_block, not_zero);
+ } else if (r.IsDouble()) {
+ XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(reg, xmm0);
+ EmitBranch(true_block, false_block, not_equal);
+ } else {
+ ASSERT(r.IsTagged());
+ Register reg = ToRegister(instr->InputAt(0));
+ HType type = instr->hydrogen()->type();
+ if (type.IsBoolean()) {
+ __ Cmp(reg, Factory::true_value());
+ EmitBranch(true_block, false_block, equal);
+ } else if (type.IsSmi()) {
+ __ SmiCompare(reg, Smi::FromInt(0));
+ EmitBranch(true_block, false_block, not_equal);
+ } else {
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+ __ j(equal, false_label);
+ __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+ __ j(equal, true_label);
+ __ CompareRoot(reg, Heap::kFalseValueRootIndex);
+ __ j(equal, false_label);
+ __ SmiCompare(reg, Smi::FromInt(0));
+ __ j(equal, false_label);
+ __ JumpIfSmi(reg, true_label);
+
+ // Test for double values. Plus/minus zero and NaN are false.
+ NearLabel call_stub;
+ __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_stub);
+
+ // HeapNumber => false iff +0, -0, or NaN. These three cases set the
+ // zero flag when compared to zero using ucomisd.
+ __ xorpd(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ __ j(zero, false_label);
+ __ jmp(true_label);
+
+ // The conversion stub doesn't cause garbage collections so it's
+ // safe to not record a safepoint after the call.
+ __ bind(&call_stub);
+ ToBooleanStub stub;
+ __ Pushad();
+ __ push(reg);
+ __ CallStub(&stub);
+ __ testq(rax, rax);
+ __ Popad();
+ EmitBranch(true_block, false_block, not_zero);
+ }
+ }
}
}
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
Condition cond = no_condition;
switch (op) {
case Token::EQ:
void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
- Abort("Unimplemented: %s", "EmitCmpI");
+ if (right->IsConstantOperand()) {
+ int32_t value = ToInteger32(LConstantOperand::cast(right));
+ if (left->IsRegister()) {
+ __ cmpl(ToRegister(left), Immediate(value));
+ } else {
+ __ cmpl(ToOperand(left), Immediate(value));
+ }
+ } else if (right->IsRegister()) {
+ __ cmpq(ToRegister(left), ToRegister(right));
+ } else {
+ __ cmpq(ToRegister(left), ToOperand(right));
+ }
}
void LCodeGen::DoCmpID(LCmpID* instr) {
- Abort("Unimplemented: %s", "DoCmpID");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ LOperand* result = instr->result();
+
+ NearLabel unordered;
+ if (instr->is_double()) {
+ // Don't base result on EFLAGS when a NaN is involved. Instead
+ // jump to the unordered case, which produces a false value.
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ j(parity_even, &unordered);
+ } else {
+ EmitCmpI(left, right);
+ }
+
+ NearLabel done;
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
+ __ j(cc, &done);
+
+ __ bind(&unordered);
+ __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
- Abort("Unimplemented: %s", "DoCmpIDAndBranch");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ if (instr->is_double()) {
+ // Don't base result on EFLAGS when a NaN is involved. Instead
+ // jump to the false block.
+ __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+ __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
+ } else {
+ EmitCmpI(left, right);
+ }
+
+ Condition cc = TokenToCondition(instr->op(), instr->is_double());
+ EmitBranch(true_block, false_block, cc);
}
void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
- Abort("Unimplemented: %s", "DoCmpJSObjectAndBranch");
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ __ cmpq(left, right);
+ EmitBranch(true_block, false_block, equal);
}
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
- Abort("Unimplemented: %s", "DoIsNullAndBranch");
+ Register reg = ToRegister(instr->InputAt(0));
+
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ if (instr->hydrogen()->representation().IsSpecialization() ||
+ instr->hydrogen()->type().IsSmi()) {
+ // If the expression is known to be untagged or a smi, then it is
+ // definitely not null and cannot be an undetectable object.
+ // Jump directly to the false block.
+ EmitGoto(false_block);
+ return;
+ }
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+
+ __ Cmp(reg, Factory::null_value());
+ if (instr->is_strict()) {
+ EmitBranch(true_block, false_block, equal);
+ } else {
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+ __ j(equal, true_label);
+ __ Cmp(reg, Factory::undefined_value());
+ __ j(equal, true_label);
+ __ JumpIfSmi(reg, false_label);
+ // Check for undetectable objects by looking in the bit field in
+ // the map. The object has already been smi checked.
+ Register scratch = ToRegister(instr->TempAt(0));
+ __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ EmitBranch(true_block, false_block, not_zero);
+ }
}
Register temp2,
Label* is_not_object,
Label* is_object) {
- Abort("Unimplemented: %s", "EmitIsObject");
+ ASSERT(!input.is(temp1));
+ ASSERT(!input.is(temp2));
+ ASSERT(!temp1.is(temp2));
+
+ __ JumpIfSmi(input, is_not_object);
+
+ __ Cmp(input, Factory::null_value());
+ __ j(equal, is_object);
+
+ __ movq(temp1, FieldOperand(input, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined.
+ __ testb(FieldOperand(temp1, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, is_not_object);
+
+ __ movzxbl(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
+ __ cmpb(temp2, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ j(below, is_not_object);
+ __ cmpb(temp2, Immediate(LAST_JS_OBJECT_TYPE));
return below_equal;
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Abort("Unimplemented: %s", "DoIsObjectAndBranch");
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+ Register temp2 = ToRegister(instr->TempAt(1));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
+
+ EmitBranch(true_block, false_block, true_cond);
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
- Abort("Unimplemented: %s", "DoIsSmiAndBranch");
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Condition is_smi;
+ if (instr->InputAt(0)->IsRegister()) {
+ Register input = ToRegister(instr->InputAt(0));
+ is_smi = masm()->CheckSmi(input);
+ } else {
+ Operand input = ToOperand(instr->InputAt(0));
+ is_smi = masm()->CheckSmi(input);
+ }
+ EmitBranch(true_block, false_block, is_smi);
+}
+
+
+static InstanceType TestType(HHasInstanceType* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == FIRST_TYPE) return to;
+ ASSERT(from == to || to == LAST_TYPE);
+ return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceType* instr) {
+ InstanceType from = instr->from();
+ InstanceType to = instr->to();
+ if (from == to) return equal;
+ if (to == LAST_TYPE) return above_equal;
+ if (from == FIRST_TYPE) return below_equal;
+ UNREACHABLE();
+ return equal;
}
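TestType and BranchCondition together collapse an instance-type interval into a single compare; the ASSERT only admits degenerate intervals or intervals anchored at FIRST_TYPE or LAST_TYPE. An illustration of that encoding with made-up type constants:

  #include <cassert>

  enum { FIRST_TYPE = 0, SOME_TYPE = 5, LAST_TYPE = 9 };

  // One comparison suffices when the range touches an end of the type space.
  bool InRange(int type, int from, int to) {
    if (from == to) return type == from;        // equal
    if (to == LAST_TYPE) return type >= from;   // above_equal against |from|
    if (from == FIRST_TYPE) return type <= to;  // below_equal against |to|
    return false;  // ranges like this are ruled out by the ASSERT above
  }

  int main() {
    assert(InRange(7, SOME_TYPE, LAST_TYPE));
    assert(!InRange(3, SOME_TYPE, LAST_TYPE));
    assert(InRange(3, FIRST_TYPE, SOME_TYPE));
    return 0;
  }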
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
- Abort("Unimplemented: %s", "DoHasInstanceTypeAndBranch");
+ Register input = ToRegister(instr->InputAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ __ JumpIfSmi(input, false_label);
+
+ __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
+ EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
}
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
- Abort("Unimplemented: %s", "DoHasCachedArrayIndexAndBranch");
+ Register input = ToRegister(instr->InputAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ __ testl(FieldOperand(input, String::kHashFieldOffset),
+ Immediate(String::kContainsCachedArrayIndexMask));
+ EmitBranch(true_block, false_block, not_equal);
}
-// Branches to a label or falls through with the answer in the z flag. Trashes
-// the temp registers, but not the input. Only input and temp2 may alias.
+// Branches to a label or falls through with the answer in the z flag.
+// Trashes the temp register and possibly input (if it and temp are aliased).
void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
- Handle<String>class_name,
+ Handle<String> class_name,
Register input,
- Register temp,
- Register temp2) {
- Abort("Unimplemented: %s", "EmitClassOfTest");
+ Register temp) {
+ __ JumpIfSmi(input, is_false);
+ __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
+ __ j(below, is_false);
+
+ // Map is now in temp.
+ // Functions have class 'Function'.
+ __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
+ if (class_name->IsEqualTo(CStrVector("Function"))) {
+ __ j(equal, is_true);
+ } else {
+ __ j(equal, is_false);
+ }
+
+ // Check if the constructor in the map is a function.
+ __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
+
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+
+ // Objects with a non-function constructor have class 'Object'.
+ __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
+ if (class_name->IsEqualTo(CStrVector("Object"))) {
+ __ j(not_equal, is_true);
+ } else {
+ __ j(not_equal, is_false);
+ }
+
+ // temp now contains the constructor function. Grab the
+ // instance class name from there.
+ __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(temp, FieldOperand(temp,
+ SharedFunctionInfo::kInstanceClassNameOffset));
+ // The class name we are testing against is a symbol because it's a literal.
+ // The name in the constructor is a symbol because of the way the context is
+ // booted. This routine isn't expected to work for random API-created
+ // classes and it doesn't have to because you can't access it with natives
+ // syntax. Since both sides are symbols it is sufficient to use an identity
+ // comparison.
+ ASSERT(class_name->IsSymbol());
+ __ Cmp(temp, class_name);
+ // End with the answer in the z flag.
}
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
- Abort("Unimplemented: %s", "DoClassOfTestAndBranch");
+ Register input = ToRegister(instr->InputAt(0));
+ Register temp = ToRegister(instr->TempAt(0));
+ Handle<String> class_name = instr->hydrogen()->class_name();
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ EmitClassOfTest(true_label, false_label, class_name, input, temp);
+
+ EmitBranch(true_block, false_block, equal);
}
void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- Abort("Unimplemented: %s", "DoInstanceOfAndBranch");
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ InstanceofStub stub(InstanceofStub::kArgsInRegisters);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ testq(rax, rax);
+ EmitBranch(true_block, false_block, zero);
}
void LCodeGen::DoCmpT(LCmpT* instr) {
- Abort("Unimplemented: %s", "DoCmpT");
+ Token::Value op = instr->op();
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ Condition condition = TokenToCondition(op, false);
+ if (op == Token::GT || op == Token::LTE) {
+ condition = ReverseCondition(condition);
+ }
+ NearLabel true_value, done;
+ __ testq(rax, rax);
+ __ j(condition, &true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+ __ bind(&true_value);
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+ __ bind(&done);
}
void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Abort("Unimplemented: %s", "DoCmpTAndBranch");
+ Token::Value op = instr->op();
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ // The compare stub expects compare condition and the input operands
+ // reversed for GT and LTE.
+ Condition condition = TokenToCondition(op, false);
+ if (op == Token::GT || op == Token::LTE) {
+ condition = ReverseCondition(condition);
+ }
+ __ testq(rax, rax);
+ EmitBranch(true_block, false_block, condition);
}
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
- Abort("Unimplemented: %s", "DoTypeofIsAndBranch");
+ Register input = ToRegister(instr->InputAt(0));
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition final_branch_condition = EmitTypeofIs(true_label,
+ false_label,
+ input,
+ instr->type_literal());
+
+ EmitBranch(true_block, false_block, final_branch_condition);
}
Label* false_label,
Register input,
Handle<String> type_name) {
- Abort("Unimplemented: %s", "EmitTypeofIs");
- return no_condition;
+ Condition final_branch_condition = no_condition;
+ if (type_name->Equals(Heap::number_symbol())) {
+ __ JumpIfSmi(input, true_label);
+ __ Cmp(FieldOperand(input, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ final_branch_condition = equal;
+
+ } else if (type_name->Equals(Heap::string_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ testb(FieldOperand(input, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, false_label);
+ __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
+ final_branch_condition = below;
+
+ } else if (type_name->Equals(Heap::boolean_symbol())) {
+ __ CompareRoot(input, Heap::kTrueValueRootIndex);
+ __ j(equal, true_label);
+ __ CompareRoot(input, Heap::kFalseValueRootIndex);
+ final_branch_condition = equal;
+
+ } else if (type_name->Equals(Heap::undefined_symbol())) {
+ __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
+ __ j(equal, true_label);
+ __ JumpIfSmi(input, false_label);
+ // Check for undetectable objects => true.
+ __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ testb(FieldOperand(input, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ final_branch_condition = not_zero;
+
+ } else if (type_name->Equals(Heap::function_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ CmpObjectType(input, FIRST_FUNCTION_CLASS_TYPE, input);
+ final_branch_condition = above_equal;
+
+ } else if (type_name->Equals(Heap::object_symbol())) {
+ __ JumpIfSmi(input, false_label);
+ __ Cmp(input, Factory::null_value());
+ __ j(equal, true_label);
+ // Check for undetectable objects => false. Load the map first: the
+ // bit field and instance type checks below operate on the map.
+ __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ testb(FieldOperand(input, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, false_label);
+ // Check for JS objects that are not RegExp or Function => true.
+ __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
+ __ j(below, false_label);
+ __ CmpInstanceType(input, FIRST_FUNCTION_CLASS_TYPE);
+ final_branch_condition = below_equal;
+
+ } else {
+ final_branch_condition = never;
+ __ jmp(false_label);
+ }
+
+ return final_branch_condition;
}
Label* if_false,
Handle<String> class_name,
Register input,
- Register temporary,
- Register temporary2);
+ Register temporary);
int StackSlotCount() const { return chunk()->spill_slot_count(); }
int ParameterCount() const { return scope()->num_parameters(); }
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsBranch() && !instr->IsGoto()) {
- // TODO(fschneider): Handle branch instructions uniformly like
- // other instructions. This requires us to generate the right
- // branch instruction already at the HIR level.
+ if (current->IsTest() && !instr->IsGoto()) {
ASSERT(instr->IsControl());
- HBranch* branch = HBranch::cast(current);
- instr->set_hydrogen_value(branch->value());
- HBasicBlock* first = branch->FirstSuccessor();
- HBasicBlock* second = branch->SecondSuccessor();
+ HTest* test = HTest::cast(current);
+ instr->set_hydrogen_value(test->value());
+ HBasicBlock* first = test->FirstSuccessor();
+ HBasicBlock* second = test->SecondSuccessor();
ASSERT(first != NULL && second != NULL);
instr->SetBranchTargets(first->block_id(), second->block_id());
} else {
}
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
- Abort("Unimplemented: %s", "DoBranch");
- return NULL;
+LInstruction* LChunkBuilder::DoTest(HTest* instr) {
+ HValue* v = instr->value();
+ if (v->EmitAtUses()) {
+ if (v->IsClassOfTest()) {
+ HClassOfTest* compare = HClassOfTest::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
+ TempRegister());
+ } else if (v->IsCompare()) {
+ HCompare* compare = HCompare::cast(v);
+ Token::Value op = compare->token();
+ HValue* left = compare->left();
+ HValue* right = compare->right();
+ Representation r = compare->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(left->representation().IsInteger32());
+ ASSERT(right->representation().IsInteger32());
+
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseOrConstantAtStart(right));
+ } else if (r.IsDouble()) {
+ ASSERT(left->representation().IsDouble());
+ ASSERT(right->representation().IsDouble());
+
+ return new LCmpIDAndBranch(UseRegisterAtStart(left),
+ UseRegisterAtStart(right));
+ } else {
+ ASSERT(left->representation().IsTagged());
+ ASSERT(right->representation().IsTagged());
+ bool reversed = op == Token::GT || op == Token::LTE;
+ LOperand* left_operand = UseFixed(left, reversed ? rax : rdx);
+ LOperand* right_operand = UseFixed(right, reversed ? rdx : rax);
+ LCmpTAndBranch* result = new LCmpTAndBranch(left_operand,
+ right_operand);
+ return MarkAsCall(result, instr);
+ }
+ } else if (v->IsIsSmi()) {
+ HIsSmi* compare = HIsSmi::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ return new LIsSmiAndBranch(Use(compare->value()));
+ } else if (v->IsHasInstanceType()) {
+ HHasInstanceType* compare = HHasInstanceType::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ return new LHasInstanceTypeAndBranch(
+ UseRegisterAtStart(compare->value()));
+ } else if (v->IsHasCachedArrayIndex()) {
+ HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ return new LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(compare->value()));
+ } else if (v->IsIsNull()) {
+ HIsNull* compare = HIsNull::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ // We only need a temp register for non-strict compare.
+ LOperand* temp = compare->is_strict() ? NULL : TempRegister();
+ return new LIsNullAndBranch(UseRegisterAtStart(compare->value()),
+ temp);
+ } else if (v->IsIsObject()) {
+ HIsObject* compare = HIsObject::cast(v);
+ ASSERT(compare->value()->representation().IsTagged());
+
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
+ temp1,
+ temp2);
+ } else if (v->IsCompareJSObjectEq()) {
+ HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
+ return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
+ UseRegisterAtStart(compare->right()));
+ } else if (v->IsInstanceOf()) {
+ HInstanceOf* instance_of = HInstanceOf::cast(v);
+ LInstanceOfAndBranch* result =
+ new LInstanceOfAndBranch(
+ UseFixed(instance_of->left(), InstanceofStub::left()),
+ UseFixed(instance_of->right(), InstanceofStub::right()));
+ return MarkAsCall(result, instr);
+ } else if (v->IsTypeofIs()) {
+ HTypeofIs* typeof_is = HTypeofIs::cast(v);
+ return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
+ } else {
+ if (v->IsConstant()) {
+ if (HConstant::cast(v)->handle()->IsTrue()) {
+ return new LGoto(instr->FirstSuccessor()->block_id());
+ } else if (HConstant::cast(v)->handle()->IsFalse()) {
+ return new LGoto(instr->SecondSuccessor()->block_id());
+ }
+ }
+ Abort("Undefined compare before branch");
+ return NULL;
+ }
+ }
+ return new LBranch(UseRegisterAtStart(v));
}
-LInstruction* LChunkBuilder::DoCompareMapAndBranch(
- HCompareMapAndBranch* instr) {
- Abort("Unimplemented: %s", "DoCompareMapAndBranch");
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+ Abort("Unimplemented: %s", "DoCompareMap");
return NULL;
}
LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
- Abort("Unimplemented: %s", "DoCompare");
- return NULL;
+ Token::Value op = instr->token();
+ Representation r = instr->GetInputRepresentation();
+ if (r.IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ return DefineAsRegister(new LCmpID(left, right));
+ } else if (r.IsDouble()) {
+ ASSERT(instr->left()->representation().IsDouble());
+ ASSERT(instr->right()->representation().IsDouble());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ return DefineAsRegister(new LCmpID(left, right));
+ } else {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ bool reversed = (op == Token::GT || op == Token::LTE);
+ LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
+ LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
+ LCmpT* result = new LCmpT(left, right);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+ }
}
}
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+ Abort("Unimplemented: %s", "DoStringCharCodeAt");
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
+ Abort("Unimplemented: %s", "DoStringLength");
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
Abort("Unimplemented: %s", "DoArrayLiteral");
return NULL;
};
-template<typename T, int N>
+template<typename ElementType, int NumElements>
class OperandContainer {
public:
OperandContainer() {
- for (int i = 0; i < N; i++) elems_[i] = NULL;
+ for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
}
- int length() { return N; }
- T& operator[](int i) {
+ int length() { return NumElements; }
+ ElementType& operator[](int i) {
ASSERT(i < length());
return elems_[i];
}
void PrintOperandsTo(StringStream* stream);
private:
- T elems_[N];
+ ElementType elems_[NumElements];
};
-template<typename T>
-class OperandContainer<T, 0> {
+template<typename ElementType>
+class OperandContainer<ElementType, 0> {
public:
int length() { return 0; }
void PrintOperandsTo(StringStream* stream) { }
};
-template<int R, int I, int T = 0>
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
class LTemplateInstruction: public LInstruction {
public:
// Allow 0 or 1 output operands.
};
-template<int I, int T = 0>
+template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
DECLARE_INSTRUCTION(ControlInstruction)
};
-class LArgumentsLength: public LTemplateInstruction<1, 1> {
+class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LArgumentsLength(LOperand* elements) {
inputs_[0] = elements;
};
-class LCmpID: public LTemplateInstruction<1, 2> {
+class LCmpID: public LTemplateInstruction<1, 2, 0> {
public:
LCmpID(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LCmpIDAndBranch: public LControlInstruction<2> {
+class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 1> {
+class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
public:
explicit LUnaryMathOperation(LOperand* value) {
inputs_[0] = value;
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2> {
+class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
LCmpJSObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2> {
+class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LIsNull: public LTemplateInstruction<1, 1> {
+class LIsNull: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsNull(LOperand* value) {
inputs_[0] = value;
};
-class LIsSmi: public LTemplateInstruction<1, 1> {
+class LIsSmi: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsSmi(LOperand* value) {
inputs_[0] = value;
};
-class LIsSmiAndBranch: public LControlInstruction<1> {
+class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
inputs_[0] = value;
};
-class LHasInstanceType: public LTemplateInstruction<1, 1> {
+class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
inputs_[0] = value;
};
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
+class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
- LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+ explicit LHasInstanceTypeAndBranch(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
};
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1> {
+class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
};
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1> {
+class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
inputs_[0] = value;
};
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
public:
- LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
- temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
};
-class LCmpT: public LTemplateInstruction<1, 2> {
+class LCmpT: public LTemplateInstruction<1, 2, 0> {
public:
LCmpT(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LCmpTAndBranch: public LControlInstruction<2> {
+class LCmpTAndBranch: public LControlInstruction<2, 0> {
public:
LCmpTAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LInstanceOf: public LTemplateInstruction<1, 2> {
+class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LInstanceOfAndBranch: public LControlInstruction<2> {
+class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
public:
LInstanceOfAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LBitI: public LTemplateInstruction<1, 2> {
+class LBitI: public LTemplateInstruction<1, 2, 0> {
public:
LBitI(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
};
-class LShiftI: public LTemplateInstruction<1, 2> {
+class LShiftI: public LTemplateInstruction<1, 2, 0> {
public:
LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
: op_(op), can_deopt_(can_deopt) {
};
-class LSubI: public LTemplateInstruction<1, 2> {
+class LSubI: public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LBranch: public LControlInstruction<1> {
+class LBranch: public LControlInstruction<1, 0> {
public:
explicit LBranch(LOperand* value) {
inputs_[0] = value;
};
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1> {
+class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCmpMapAndBranch(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMapAndBranch)
+ DECLARE_HYDROGEN_ACCESSOR(CompareMap)
virtual bool IsControl() const { return true; }
Handle<Map> map() const { return hydrogen()->map(); }
int true_block_id() const {
- return hydrogen()->true_destination()->block_id();
+ return hydrogen()->FirstSuccessor()->block_id();
}
int false_block_id() const {
- return hydrogen()->false_destination()->block_id();
+ return hydrogen()->SecondSuccessor()->block_id();
}
};
-class LJSArrayLength: public LTemplateInstruction<1, 1> {
+class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LJSArrayLength(LOperand* value) {
inputs_[0] = value;
};
-class LFixedArrayLength: public LTemplateInstruction<1, 1> {
+class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LFixedArrayLength(LOperand* value) {
inputs_[0] = value;
};
-class LThrow: public LTemplateInstruction<0, 1> {
+class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
inputs_[0] = value;
};
-class LBitNotI: public LTemplateInstruction<1, 1> {
+class LBitNotI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LBitNotI(LOperand* value) {
inputs_[0] = value;
};
-class LAddI: public LTemplateInstruction<1, 2> {
+class LAddI: public LTemplateInstruction<1, 2, 0> {
public:
LAddI(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LPower: public LTemplateInstruction<1, 2> {
+class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
};
-class LArithmeticD: public LTemplateInstruction<1, 2> {
+class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
};
-class LArithmeticT: public LTemplateInstruction<1, 2> {
+class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
: op_(op) {
};
-class LReturn: public LTemplateInstruction<0, 1> {
+class LReturn: public LTemplateInstruction<0, 1, 0> {
public:
explicit LReturn(LOperand* value) {
inputs_[0] = value;
};
-class LLoadNamedField: public LTemplateInstruction<1, 1> {
+class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedField(LOperand* object) {
inputs_[0] = object;
};
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1> {
+class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
inputs_[0] = object;
};
-class LLoadElements: public LTemplateInstruction<1, 1> {
+class LLoadElements: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadElements(LOperand* object) {
inputs_[0] = object;
};
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2> {
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
};
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2> {
+class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
};
-class LStoreGlobal: public LTemplateInstruction<0, 1> {
+class LStoreGlobal: public LTemplateInstruction<0, 1, 0> {
public:
explicit LStoreGlobal(LOperand* value) {
inputs_[0] = value;
};
-class LPushArgument: public LTemplateInstruction<0, 1> {
+class LPushArgument: public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
inputs_[0] = value;
};
-class LCallKeyed: public LTemplateInstruction<1, 0, 1> {
+class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LCallKeyed(LOperand* temp) {
- temps_[0] = temp;
+ explicit LCallKeyed(LOperand* key) {
+ inputs_[0] = key;
}
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
};
-class LCallNew: public LTemplateInstruction<1, 1> {
+class LCallNew: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallNew(LOperand* constructor) {
inputs_[0] = constructor;
};
-class LInteger32ToDouble: public LTemplateInstruction<1, 1> {
+class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
public:
explicit LInteger32ToDouble(LOperand* value) {
inputs_[0] = value;
};
-class LNumberTagI: public LTemplateInstruction<1, 1> {
+class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
inputs_[0] = value;
};
-class LSmiTag: public LTemplateInstruction<1, 1> {
+class LSmiTag: public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
inputs_[0] = value;
};
-class LNumberUntagD: public LTemplateInstruction<1, 1> {
+class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberUntagD(LOperand* value) {
inputs_[0] = value;
};
-class LSmiUntag: public LTemplateInstruction<1, 1> {
+class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
public:
LSmiUntag(LOperand* value, bool needs_check)
: needs_check_(needs_check) {
};
-class LCheckFunction: public LTemplateInstruction<0, 1> {
+class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckFunction(LOperand* value) {
inputs_[0] = value;
};
-class LCheckMap: public LTemplateInstruction<0, 1> {
+class LCheckMap: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckMap(LOperand* value) {
inputs_[0] = value;
};
-class LCheckSmi: public LTemplateInstruction<0, 1> {
+class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
public:
LCheckSmi(LOperand* value, Condition condition)
: condition_(condition) {
};
-class LTypeof: public LTemplateInstruction<1, 1> {
+class LTypeof: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
inputs_[0] = value;
};
-class LTypeofIs: public LTemplateInstruction<1, 1> {
+class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeofIs(LOperand* value) {
inputs_[0] = value;
};
-class LTypeofIsAndBranch: public LControlInstruction<1> {
+class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
inputs_[0] = value;
};
-class LDeleteProperty: public LTemplateInstruction<1, 2> {
+class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
public:
LDeleteProperty(LOperand* obj, LOperand* key) {
inputs_[0] = obj;
MUST_USE_RESULT LOperand* UseRegister(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
- // A value in a register that may be trashed.
+ // An input operand in a register that may be trashed.
MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
- // An operand value in a register or stack slot.
+ // An input operand in a register or stack slot.
MUST_USE_RESULT LOperand* Use(HValue* value);
MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
- // An operand value in a register, stack slot or a constant operand.
+ // An input operand in a register, stack slot or a constant operand.
MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
- // An operand value in a register or a constant operand.
+ // An input operand in a register or a constant operand.
MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+ // An input operand in a register, stack slot or a constant operand.
+ // Will not be moved to a register even if one is freely available.
+ MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
// Temporary operand that must be in a register.
MUST_USE_RESULT LUnallocated* TempRegister();
MUST_USE_RESULT LOperand* FixedTemp(Register reg);
MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
- // An operand value in register, stack slot or a constant operand.
- // Will not be moved to a register even if one is freely available.
- LOperand* UseAny(HValue* value);
-
// Methods for setting up define-use relationships.
// Return the same instruction that they are passed.
template<int I, int T>
}
+Condition MacroAssembler::CheckSmi(const Operand& src) {
+ ASSERT_EQ(0, kSmiTag);
+ testb(src, Immediate(kSmiTagMask));
+ return zero;
+}
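The operand form of CheckSmi can test a single byte because the smi/heap-object distinction lives entirely in the low tag bit. A tiny sketch of the layout assumed here (x64: smi payload in the upper 32 bits, heap-object pointers tagged with 1):

  #include <cassert>
  #include <cstdint>

  const uint64_t kSmiTagMask = 1;  // kSmiTag == 0, kHeapObjectTag == 1

  bool IsSmiWord(uint64_t word) { return (word & kSmiTagMask) == 0; }

  int main() {
    assert(IsSmiWord(uint64_t(42) << 32));        // tagged smi
    assert(!IsSmiWord((uint64_t(1) << 20) | 1));  // heap-object-style pointer
    return 0;
  }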
+
+
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
// Make mask 0x8000000000000001 and test that both bits are zero.
}
+void MacroAssembler::Pushad() {
+ push(rax);
+ push(rcx);
+ push(rdx);
+ push(rbx);
+ // Not pushing rsp or rbp.
+ push(rsi);
+ push(rdi);
+ push(r8);
+ push(r9);
+ // r10 is kScratchRegister.
+ push(r11);
+ push(r12);
+ // r13 is kRootRegister.
+ push(r14);
+ // r15 is kSmiConstantRegister.
+}
+
+
+void MacroAssembler::Popad() {
+ pop(r14);
+ pop(r12);
+ pop(r11);
+ pop(r9);
+ pop(r8);
+ pop(rdi);
+ pop(rsi);
+ pop(rbx);
+ pop(rdx);
+ pop(rcx);
+ pop(rax);
+}
+
+
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
// Adjust this code if not the case.
// Is the value a tagged smi.
Condition CheckSmi(Register src);
+ Condition CheckSmi(const Operand& src);
// Is the value a non-negative tagged smi.
Condition CheckNonNegativeSmi(Register src);
void Call(ExternalReference ext);
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
+ // Pseudo-instructions (not single x64 instructions).
+ // Push/pop all general purpose registers.
+ // Does not push rsp/rbp nor any of the assembler's special purpose registers
+ // (kScratchRegister, kSmiConstantRegister, kRootRegister).
+ void Pushad();
+ void Popad();
+
// Compare object type for heap object.
// Always use unsigned comparisons: above and below, not less and greater.
// Incoming register is heap_object and outgoing register is map.
}
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label slow;
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(rdx, &slow);
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rax, &slow);
+
+ // Check that the object is a JS object.
+ __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
+ __ j(not_equal, &slow);
+ // Check that the receiver does not require access checks. We need
+ // to check this explicitly since this generic stub does not perform
+ // map checks. The map is already in rdx.
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &slow);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ // rax: index (as a smi)
+ // rdx: JSObject
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::RootIndexForExternalArrayType(array_type));
+ __ j(not_equal, &slow);
+
+ // Check that the index is in range.
+ __ SmiToInteger32(rcx, rax);
+ __ cmpl(rcx, FieldOperand(rbx, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ j(above_equal, &slow);
+
+ // rax: index (as a smi)
+ // rdx: receiver (JSObject)
+ // rcx: untagged index
+ // rbx: elements array
+ __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+ // rbx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
+ break;
+ case kExternalUnsignedByteArray:
+ __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
+ break;
+ case kExternalShortArray:
+ __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
+ break;
+ case kExternalUnsignedShortArray:
+ __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
+ break;
+ case kExternalIntArray:
+ __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
+ break;
+ case kExternalUnsignedIntArray:
+ __ movl(rcx, Operand(rbx, rcx, times_4, 0));
+ break;
+ case kExternalFloatArray:
+ __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // rax: index
+ // rdx: receiver
+ // For integer array types:
+ // rcx: value
+ // For floating-point array type:
+ // xmm0: value as double.
+
+ ASSERT(kSmiValueSize == 32);
+ if (array_type == kExternalUnsignedIntArray) {
+ // For the UnsignedInt array type, we need to see whether
+ // the value can be represented in a Smi. If not, we need to convert
+ // it to a HeapNumber.
+ NearLabel box_int;
+
+ __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
+
+ __ Integer32ToSmi(rax, rcx);
+ __ ret(0);
+
+ __ bind(&box_int);
+
+ // Allocate a HeapNumber for the int and perform int-to-double
+ // conversion.
+ // The value is zero-extended since we loaded the value from memory
+ // with movl.
+ __ cvtqsi2sd(xmm0, rcx);
+
+ __ AllocateHeapNumber(rcx, rbx, &slow);
+ // Set the value.
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rcx);
+ __ ret(0);
+ } else if (array_type == kExternalFloatArray) {
+ // For the floating-point array type, we need to always allocate a
+ // HeapNumber.
+ __ AllocateHeapNumber(rcx, rbx, &slow);
+ // Set the value.
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
+ __ movq(rax, rcx);
+ __ ret(0);
+ } else {
+ __ Integer32ToSmi(rax, rcx);
+ __ ret(0);
+ }
+
+ // Slow case: Jump to runtime.
+ __ bind(&slow);
+ __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ __ pop(rbx);
+ __ push(rdx); // receiver
+ __ push(rax); // key
+ __ push(rbx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+
+ // Return the generated code.
+ return GetCode(flags);
+}
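Even with 32-bit smi payloads, the kExternalUnsignedIntArray path above still needs the HeapNumber fallback: smi payloads are signed, so unsigned values of 2^31 and up do not fit. A quick sanity check of that boundary in plain C++:

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t v = 0x80000000u;                      // loaded with movl, zero-extended
    assert(v > static_cast<uint32_t>(INT32_MAX));  // too large for a signed 32-bit smi
    double boxed = static_cast<double>(v);         // value the HeapNumber path stores
    assert(boxed == 2147483648.0);
    return 0;
  }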
+
+
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
+ ExternalArrayType array_type, Code::Flags flags) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ Label slow;
+
+ // Check that the object isn't a smi.
+ __ JumpIfSmi(rdx, &slow);
+ // Get the map from the receiver.
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to do this because this generic stub does not perform map checks.
+ __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &slow);
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(rcx, &slow);
+
+ // Check that the object is a JS object.
+ __ CmpInstanceType(rbx, JS_OBJECT_TYPE);
+ __ j(not_equal, &slow);
+
+ // Check that the elements array is the appropriate type of
+ // ExternalArray.
+ // rax: value
+ // rcx: key (a smi)
+ // rdx: receiver (a JSObject)
+ __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::RootIndexForExternalArrayType(array_type));
+ __ j(not_equal, &slow);
+
+ // Check that the index is in range.
+ __ SmiToInteger32(rdi, rcx); // Untag the index.
+ __ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
+ // Unsigned comparison catches both negative and too-large values.
+ __ j(above_equal, &slow);
+
+ // Handle both smis and HeapNumbers in the fast path. Go to the
+ // runtime for all other kinds of values.
+ // rax: value
+ // rcx: key (a smi)
+ // rdx: receiver (a JSObject)
+ // rbx: elements array
+ // rdi: untagged key
+ NearLabel check_heap_number;
+ __ JumpIfNotSmi(rax, &check_heap_number);
+ // No more branches to slow case on this path. Key and receiver not needed.
+ __ SmiToInteger32(rdx, rax);
+ __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+ // rbx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ movb(Operand(rbx, rdi, times_1, 0), rdx);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ movw(Operand(rbx, rdi, times_2, 0), rdx);
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray:
+ __ movl(Operand(rbx, rdi, times_4, 0), rdx);
+ break;
+ case kExternalFloatArray:
+ // Need to perform int-to-float conversion.
+ __ cvtlsi2ss(xmm0, rdx);
+ __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ ret(0);
+
+ __ bind(&check_heap_number);
+ // rax: value
+ // rcx: key (a smi)
+ // rdx: receiver (a JSObject)
+ // rbx: elements array
+ // rdi: untagged key
+ __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
+ __ j(not_equal, &slow);
+ // No more branches to slow case on this path.
+
+ // The WebGL specification leaves the behavior of storing NaN and
+ // +/-Infinity into integer arrays basically undefined. For more
+ // reproducible behavior, convert these to zero.
+ __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+ // rdi: untagged index
+ // rbx: base pointer of external storage
+ // xmm0: value (double)
+ if (array_type == kExternalFloatArray) {
+ __ cvtsd2ss(xmm0, xmm0);
+ __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
+ __ ret(0);
+ } else {
+ // Perform float-to-int conversion with truncation (round-to-zero)
+ // behavior.
+
+ // Convert to int32 and store the low byte/word.
+ // If the value is NaN or +/-infinity, the result is 0x80000000,
+ // which is automatically zero when taken mod 2^n, n < 32.
+ // rdx: value (converted to an untagged integer)
+ // rdi: untagged index
+ // rbx: base pointer of external storage
+ switch (array_type) {
+ case kExternalByteArray:
+ case kExternalUnsignedByteArray:
+ __ cvttsd2si(rdx, xmm0);
+ __ movb(Operand(rbx, rdi, times_1, 0), rdx);
+ break;
+ case kExternalShortArray:
+ case kExternalUnsignedShortArray:
+ __ cvttsd2si(rdx, xmm0);
+ __ movw(Operand(rbx, rdi, times_2, 0), rdx);
+ break;
+ case kExternalIntArray:
+ case kExternalUnsignedIntArray: {
+ // Convert to int64, so that NaN and infinities become
+ // 0x8000000000000000, which is zero mod 2^32.
+ __ cvttsd2siq(rdx, xmm0);
+ __ movl(Operand(rbx, rdi, times_4, 0), rdx);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ __ ret(0);
+ }
+
+ // Slow case: call runtime.
+ __ bind(&slow);
+
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ __ pop(rbx);
+ __ push(rdx); // receiver
+ __ push(rcx); // key
+ __ push(rax); // value
+ __ push(rbx); // return address
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+
+ return GetCode(flags);
+}
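
To make the modular-arithmetic claim in the store stub's comments concrete: cvttsd2si yields the "integer indefinite" value 0x80000000 for NaN, infinities and out-of-range doubles (which is zero modulo 2^8 and 2^16), while cvttsd2siq yields 0x8000000000000000 (zero modulo 2^32), so the byte, word or dword actually stored is 0. A small standalone check (illustrative only, not part of the patch):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kIndefinite32 = 0x80000000u;            // cvttsd2si result for NaN/Inf
      const uint64_t kIndefinite64 = 0x8000000000000000ull;  // cvttsd2siq result for NaN/Inf
      std::printf("stored byte:  %u\n", kIndefinite32 & 0xffu);           // 0
      std::printf("stored word:  %u\n", kIndefinite32 & 0xffffu);         // 0
      std::printf("stored dword: %u\n",
                  static_cast<uint32_t>(kIndefinite64 & 0xffffffffull));  // 0
      return 0;
    }
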
+
#undef __
} } // namespace v8::internal
test-serialize/ContextDeserialization: SKIP
test-debug/BreakPointReturn: SKIP
test-debug/DebugStepLinearMixedICs: SKIP
-
+test-debug/DebugConditional: SKIP
##############################################################################
[ $arch == arm ]
TestExternalPointerWrapping();
#if defined(V8_HOST_ARCH_X64)
+ // Check a value whose x64 Smi encoding has a leading 1 bit.
+ expected_ptr = reinterpret_cast<void*>(0x400000000);
+ TestExternalPointerWrapping();
+
expected_ptr = reinterpret_cast<void*>(0xdeadbeefdeadbeef);
TestExternalPointerWrapping();
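
The new 0x400000000 test value is chosen so that the pointer's Smi encoding has its top bit set, which only round-trips if the decode shift is a logical (unsigned) one. A hedged sketch of the difference (the shift amount is hypothetical, and the signed behavior shown is the usual two's-complement arithmetic shift):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kShift = 29;  // hypothetical pointer-to-smi shift amount
      // Hypothetical encoded form of the pointer 0x400000000: top bit set.
      const uint64_t encoded = 0x8000000000000000ull;
      uint64_t logical = encoded >> kShift;  // recovers 0x400000000
      // Going through a signed type instead uses an arithmetic shift on
      // typical targets, smearing the sign bit into the decoded pointer.
      int64_t as_signed = static_cast<int64_t>(encoded);
      uint64_t arithmetic = static_cast<uint64_t>(as_signed >> kShift);
      std::printf("logical:    %#llx\n", (unsigned long long)logical);     // 0x400000000
      std::printf("arithmetic: %#llx\n", (unsigned long long)arithmetic);  // 0xfffffff800000000
      return 0;
    }
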
CompileRun("ReferenceError.prototype.toString ="
" function() { return 'Whoops' }");
CompileRun("asdf;");
+ CompileRun("ReferenceError.prototype.constructor.name = void 0;");
+ CompileRun("asdf;");
+ CompileRun("ReferenceError.prototype.constructor = void 0;");
+ CompileRun("asdf;");
v8::Handle<Value> string = CompileRun("try { asdf; } catch(e) { e + ''; }");
CHECK(string->Equals(v8_str("Whoops")));
v8::V8::RemoveMessageListeners(check_message);
CHECK_EQ(0, result->Int32Value());
CHECK_EQ(0,
i::Smi::cast(jsobj->GetElement(5)->ToObjectChecked())->value());
+
+ // Check truncation behavior of integral arrays.
+ const char* unsigned_data =
+ "var source_data = [0.6, 10.6];"
+ "var expected_results = [0, 10];";
+ const char* signed_data =
+ "var source_data = [0.6, 10.6, -0.6, -10.6];"
+ "var expected_results = [0, 10, 0, -10];";
+ bool is_unsigned =
+ (array_type == v8::kExternalUnsignedByteArray ||
+ array_type == v8::kExternalUnsignedShortArray ||
+ array_type == v8::kExternalUnsignedIntArray);
+
+ i::OS::SNPrintF(test_buf,
+ "%s"
+ "var all_passed = true;"
+ "for (var i = 0; i < source_data.length; i++) {"
+ " for (var j = 0; j < 8; j++) {"
+ " ext_array[j] = source_data[i];"
+ " }"
+ " all_passed = all_passed &&"
+ " (ext_array[5] == expected_results[i]);"
+ "}"
+ "all_passed;",
+ (is_unsigned ? unsigned_data : signed_data));
+ result = CompileRun(test_buf.start());
+ CHECK_EQ(true, result->BooleanValue());
}
result = CompileRun("ext_array[3] = 33;"
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test printing of cyclic errors: cyclic name/message/stack values are
+// rendered as the empty string, for compatibility with Safari and Firefox.
+
+var e = new Error();
+assertEquals('Error', e + '');
+
+e = new Error();
+e.name = e;
+e.message = e;
+e.stack = e;
+e.arguments = e;
+assertEquals(': ', e + '');
+
+e = new Error();
+e.name = [ e ];
+e.message = [ e ];
+e.stack = [ e ];
+e.arguments = [ e ];
+assertEquals(': ', e + '');
function checkFrame0(name, value) {
- assertTrue(name == 'a' || name == 'b');
+ assertTrue(name == 'a' || name == 'b', 'frame0 name');
if (name == 'a') {
assertEquals(1, value);
- }
- if (name == 'b') {
+ } else if (name == 'b') {
assertEquals(2, value);
}
}
function checkFrame1(name, value) {
- assertTrue(name == '.arguments' || name == 'a');
+ assertTrue(name == '.arguments' || name == 'arguments' || name == 'a',
+ 'frame1 name');
if (name == 'a') {
assertEquals(3, value);
}
function checkFrame2(name, value) {
- assertTrue(name == '.arguments' || name == 'a' ||
- name == 'arguments' || name == 'b');
+ assertTrue(name == 'a' || name == 'b', 'frame2 name');
if (name == 'a') {
assertEquals(5, value);
- }
- if (name == 'b') {
+ } else if (name == 'b') {
assertEquals(0, value);
}
}
checkFrame0(frame0.localName(0), frame0.localValue(0).value());
checkFrame0(frame0.localName(1), frame0.localValue(1).value());
- // Frame 1 has normal variable a (and the .arguments variable).
+ // Frame 1 has normal variables a and arguments (and the .arguments
+ // variable).
var frame1 = exec_state.frame(1);
checkFrame1(frame1.localName(0), frame1.localValue(0).value());
checkFrame1(frame1.localName(1), frame1.localValue(1).value());
+ checkFrame1(frame1.localName(2), frame1.localValue(2).value());
- // Frame 2 has normal variables a and b (and both the .arguments and
- // arguments variable).
+ // Frame 2 has normal variables a and b.
var frame2 = exec_state.frame(2);
checkFrame2(frame2.localName(0), frame2.localValue(0).value());
checkFrame2(frame2.localName(1), frame2.localValue(1).value());
- checkFrame2(frame2.localName(2), frame2.localValue(2).value());
- checkFrame2(frame2.localName(3), frame2.localValue(3).value());
// Evaluating a and b on frames 0, 1 and 2 produces 1, 2, 3, 4, 5 and 6.
assertEquals(1, exec_state.frame(0).evaluate('a').value());
--- /dev/null
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function CheckStrictMode(code, exception) {
+ assertDoesNotThrow(code);
+ assertThrows("'use strict';\n" + code, exception);
+ assertThrows('"use strict";\n' + code, exception);
+ assertDoesNotThrow("\
+ function outer() {\
+ function inner() {\n"
+ + code +
+ "\n}\
+ }");
+ assertThrows("\
+ function outer() {\
+ 'use strict';\
+ function inner() {\n"
+ + code +
+ "\n}\
+ }", exception);
+}
+
+// Incorrect 'use strict' directive.
+function UseStrictEscape() {
+ "use\\x20strict";
+ with ({}) {};
+}
+
+// 'use strict' in non-directive position.
+function UseStrictNonDirective() {
+ void(0);
+ "use strict";
+ with ({}) {};
+}
+
+// Multiple directives, including "use strict".
+assertThrows('\
+"directive 1";\
+"another directive";\
+"use strict";\
+"directive after strict";\
+"and one more";\
+with({}) {}', SyntaxError);
+
+// 'with' disallowed in strict mode.
+CheckStrictMode("with({}) {}", SyntaxError);
+
+// Function named 'eval'.
+CheckStrictMode("function eval() {}", SyntaxError)
+
+// Function named 'arguments'.
+CheckStrictMode("function arguments() {}", SyntaxError)
+
+// Function parameter named 'eval'.
+//CheckStrictMode("function foo(a, b, eval, c, d) {}", SyntaxError)
+
+// Function parameter named 'arguments'.
+//CheckStrictMode("function foo(a, b, arguments, c, d) {}", SyntaxError)
+
+// Property accessor parameter named 'eval'.
+//CheckStrictMode("var o = { set foo(eval) {} }", SyntaxError)
+
+// Property accessor parameter named 'arguments'.
+//CheckStrictMode("var o = { set foo(arguments) {} }", SyntaxError)
+
+// Duplicate function parameter name.
+//CheckStrictMode("function foo(a, b, c, d, b) {}", SyntaxError)
+
+// catch(eval)
+CheckStrictMode("try{}catch(eval){};", SyntaxError)
+
+// catch(arguments)
+CheckStrictMode("try{}catch(arguments){};", SyntaxError)
+
+// var eval
+CheckStrictMode("var eval;", SyntaxError)
+
+// var arguments
+CheckStrictMode("var arguments;", SyntaxError)
+
+// Strict mode applies to the function in which the directive is used.
+//assertThrows('\
+//function foo(eval) {\
+// "use strict";\
+//}', SyntaxError);
+
+// Strict mode doesn't affect the outer scope of strict code.
+function NotStrict(eval) {
+ function Strict() {
+ "use strict";
+ }
+ with ({}) {};
+}
TestStringType(Flat16, true);
TestStringType(NotAString16, true);
+
+function ConsNotSmiIndex() {
+ var str = Cons();
+ assertTrue(isNaN(str.charCodeAt(0x7fffffff)));
+}
+
+for (var i = 0; i < 100000; i++) {
+ ConsNotSmiIndex();
+}
+
+
for (var i = 0; i != 10; i++) {
assertEquals(101, Cons16().charCodeAt(1.1));
assertEquals('e', Cons16().charAt(1.1));
js1_5/extensions/regress-363258: PASS || FAIL
+# Test that assumes a specific running time for a regexp; flaky in debug mode.
+ecma_3/RegExp/regress-85721: PASS || FAIL if $mode == debug
+
##################### INCOMPATIBLE TESTS #####################
'gcc_version%': 'unknown',
'v8_target_arch%': '<(target_arch)',
'v8_use_snapshot%': 'true',
+ 'v8_use_liveobjectlist%': 'false',
},
'conditions': [
['use_system_v8==0', {
}],
],
}],
+ ['v8_use_liveobjectlist=="true"', {
+ 'defines': [
+ 'ENABLE_DEBUGGER_SUPPORT',
+ 'INSPECTOR',
+ 'OBJECT_PRINT',
+ 'LIVEOBJECTLIST',
+ ],
+ }],
],
'configurations': {
'Debug': {
'../../src/ic-inl.h',
'../../src/ic.cc',
'../../src/ic.h',
+ '../../src/inspector.cc',
+ '../../src/inspector.h',
'../../src/interpreter-irregexp.cc',
'../../src/interpreter-irregexp.h',
'../../src/jump-target-inl.h',
'../../src/lithium-allocator.h',
'../../src/liveedit.cc',
'../../src/liveedit.h',
+ '../../src/liveobjectlist-inl.h',
+ '../../src/liveobjectlist.cc',
+ '../../src/liveobjectlist.h',
'../../src/log-inl.h',
'../../src/log-utils.cc',
'../../src/log-utils.h',
89B91BFB12D4F1BB002FF4BC /* libv8-x64.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 895692AA12D4ED240072C313 /* libv8-x64.a */; };
89B933AF0FAA0F9600201304 /* version.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF32F0FAA0ED200136CF6 /* version.cc */; };
89B933B00FAA0F9D00201304 /* version.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF32F0FAA0ED200136CF6 /* version.cc */; };
+ 89D7DDD512E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD312E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc */; };
+ 89D7DDDA12E8DE09001E2B82 /* gdb-jit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */; };
+ 89D7DDDB12E8DE09001E2B82 /* inspector.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD812E8DE09001E2B82 /* inspector.cc */; };
+ 89D7DDDC12E8DE09001E2B82 /* gdb-jit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */; };
+ 89D7DDDD12E8DE09001E2B82 /* inspector.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD812E8DE09001E2B82 /* inspector.cc */; };
+ 89D7DDDE12E8DE09001E2B82 /* gdb-jit.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */; };
+ 89D7DDDF12E8DE09001E2B82 /* inspector.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89D7DDD812E8DE09001E2B82 /* inspector.cc */; };
89F23C3F0E78D5B2006B2466 /* accessors.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F60E719B8F00D62E90 /* accessors.cc */; };
89F23C400E78D5B2006B2466 /* allocation.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0F80E719B8F00D62E90 /* allocation.cc */; };
89F23C410E78D5B2006B2466 /* api.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF0FA0E719B8F00D62E90 /* api.cc */; };
89B91B9A12D4EF95002FF4BC /* virtual-frame-x64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "virtual-frame-x64.h"; path = "x64/virtual-frame-x64.h"; sourceTree = "<group>"; };
89B91BBE12D4F02A002FF4BC /* v8_shell-x64 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "v8_shell-x64"; sourceTree = BUILT_PRODUCTS_DIR; };
89B91BCE12D4F02A002FF4BC /* d8-x64 */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "d8-x64"; sourceTree = BUILT_PRODUCTS_DIR; };
+ 89D7DDD312E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-gap-resolver-ia32.cc"; path = "ia32/lithium-gap-resolver-ia32.cc"; sourceTree = "<group>"; };
+ 89D7DDD412E8DDCF001E2B82 /* lithium-gap-resolver-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "lithium-gap-resolver-ia32.h"; path = "ia32/lithium-gap-resolver-ia32.h"; sourceTree = "<group>"; };
+ 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "gdb-jit.cc"; sourceTree = "<group>"; };
+ 89D7DDD712E8DE09001E2B82 /* gdb-jit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gdb-jit.h"; sourceTree = "<group>"; };
+ 89D7DDD812E8DE09001E2B82 /* inspector.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = inspector.cc; sourceTree = "<group>"; };
+ 89D7DDD912E8DE09001E2B82 /* inspector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inspector.h; sourceTree = "<group>"; };
89F23C870E78D5B2006B2466 /* libv8-arm.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libv8-arm.a"; sourceTree = BUILT_PRODUCTS_DIR; };
89F23C950E78D5B6006B2466 /* v8_shell-arm */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "v8_shell-arm"; sourceTree = BUILT_PRODUCTS_DIR; };
89F3605A12DCDF6400ACF8A6 /* lithium-codegen-x64.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "lithium-codegen-x64.cc"; path = "x64/lithium-codegen-x64.cc"; sourceTree = "<group>"; };
897FF1270E719B8F00D62E90 /* dateparser.h */,
8956B6CD0F5D86570033B5A2 /* debug-agent.cc */,
8956B6CE0F5D86570033B5A2 /* debug-agent.h */,
- 898BD20C0EF6CC850068B00A /* debug-arm.cc */,
897FF1280E719B8F00D62E90 /* debug.cc */,
897FF1290E719B8F00D62E90 /* debug.h */,
893E248B12B14B3D0083370F /* deoptimizer.cc */,
9F92FAA80F8F28AD0089F02C /* func-name-inferrer.h */,
893E24DA12B14B9F0083370F /* gc-extension.cc */,
893E24DB12B14B9F0083370F /* gc-extension.h */,
+ 89D7DDD612E8DE09001E2B82 /* gdb-jit.cc */,
+ 89D7DDD712E8DE09001E2B82 /* gdb-jit.h */,
897FF13E0E719B8F00D62E90 /* global-handles.cc */,
897FF13F0E719B8F00D62E90 /* global-handles.h */,
897FF1400E719B8F00D62E90 /* globals.h */,
897FF14B0E719B8F00D62E90 /* ic-inl.h */,
897FF14C0E719B8F00D62E90 /* ic.cc */,
897FF14D0E719B8F00D62E90 /* ic.h */,
+ 89D7DDD812E8DE09001E2B82 /* inspector.cc */,
+ 89D7DDD912E8DE09001E2B82 /* inspector.h */,
89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */,
89A15C670EE4665300B48DEB /* interpreter-irregexp.h */,
897FF14E0E719B8F00D62E90 /* jsregexp.cc */,
89B91C0312D4F275002FF4BC /* ia32 */ = {
isa = PBXGroup;
children = (
+ 89D7DDD312E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc */,
+ 89D7DDD412E8DDCF001E2B82 /* lithium-gap-resolver-ia32.h */,
897FF1000E719B8F00D62E90 /* assembler-ia32-inl.h */,
897FF1010E719B8F00D62E90 /* assembler-ia32.cc */,
897FF1020E719B8F00D62E90 /* assembler-ia32.h */,
896448BC0E9D530500E7C516 /* codegen-arm.h */,
895FA748107FFE73006F39D4 /* constants-arm.cc */,
897FF11B0E719B8F00D62E90 /* constants-arm.h */,
+ 898BD20C0EF6CC850068B00A /* debug-arm.cc */,
893E24C612B14B510083370F /* deoptimizer-arm.cc */,
9FA38BCB1175B30400C4CD55 /* full-codegen-arm.cc */,
9FA38BCC1175B30400C4CD55 /* jump-target-arm.cc */,
8938A2A312D63B630080CDDE /* lithium-x64.cc in Sources */,
894A59E912D777E80000766D /* lithium.cc in Sources */,
89F3605B12DCDF6400ACF8A6 /* lithium-codegen-x64.cc in Sources */,
+ 89D7DDDE12E8DE09001E2B82 /* gdb-jit.cc in Sources */,
+ 89D7DDDF12E8DE09001E2B82 /* inspector.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
893E24DD12B14B9F0083370F /* gc-extension.cc in Sources */,
8946827512C26EB700C914BC /* objects-printer.cc in Sources */,
894A59EB12D777E80000766D /* lithium.cc in Sources */,
+ 89D7DDD512E8DDCF001E2B82 /* lithium-gap-resolver-ia32.cc in Sources */,
+ 89D7DDDA12E8DE09001E2B82 /* gdb-jit.cc in Sources */,
+ 89D7DDDB12E8DE09001E2B82 /* inspector.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
893E24DF12B14B9F0083370F /* gc-extension.cc in Sources */,
8946827612C26EB700C914BC /* objects-printer.cc in Sources */,
894A59EA12D777E80000766D /* lithium.cc in Sources */,
+ 89D7DDDC12E8DE09001E2B82 /* gdb-jit.cc in Sources */,
+ 89D7DDDD12E8DE09001E2B82 /* inspector.cc in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
V8_ENABLE_CHECKS,
OBJECT_PRINT,
ENABLE_VMSTATE_TRACKING,
+ ENABLE_DEBUGGER_SUPPORT,
);
GCC_SYMBOLS_PRIVATE_EXTERN = YES;
GCC_TREAT_WARNINGS_AS_ERRORS = YES;
GCC_PREPROCESSOR_DEFINITIONS = (
"$(GCC_PREPROCESSOR_DEFINITIONS)",
NDEBUG,
+ ENABLE_DEBUGGER_SUPPORT,
);
GCC_SYMBOLS_PRIVATE_EXTERN = YES;
GCC_TREAT_WARNINGS_AS_ERRORS = NO;