+2010-06-23: Version 2.2.19
+
+        Fixed a bug that caused the build to break when profilingsupport=off
+        (issue 738).
+
+ Added expose-externalize-string flag for testing extensions.
+
+        Resolved linker issues that caused a number of unresolved-symbol
+        problems when using V8 as a DLL.
+
+        Fixed the cctest build failure when ENABLE_DEBUGGER_SUPPORT is not
+        defined.
+
+ Performance improvements on all platforms.
+
+
2010-06-16: Version 2.2.18
Added API functions to retrieve information on indexed properties
enum Type {
CONTEXT_VARIABLE = 0, // A variable from a function context.
ELEMENT = 1, // An element of an array.
- PROPERTY = 2 // A named object property.
+ PROPERTY = 2, // A named object property.
+ INTERNAL = 3 // A link that can't be accessed from JS,
+ // thus, its name isn't a real property name.
};
/** Returns edge type (see HeapGraphEdge::Type). */
static const int kFullStringRepresentationMask = 0x07;
static const int kExternalTwoByteRepresentationTag = 0x02;
- // These constants are compiler dependent so their values must be
- // defined within the implementation.
- V8EXPORT static int kJSObjectType;
- V8EXPORT static int kFirstNonstringType;
- V8EXPORT static int kProxyType;
+ static const int kJSObjectType = 0x9f;
+ static const int kFirstNonstringType = 0x80;
+ static const int kProxyType = 0x85;
static inline bool HasHeapObjectTag(internal::Object* value) {
return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
static FatalErrorCallback exception_behavior = NULL;
-int i::Internals::kJSObjectType = JS_OBJECT_TYPE;
-int i::Internals::kFirstNonstringType = FIRST_NONSTRING_TYPE;
-int i::Internals::kProxyType = PROXY_TYPE;
static void DefaultFatalErrorHandler(const char* location,
const char* message) {
reinterpret_cast<const i::HeapGraphEdge*>(this);
switch (edge->type()) {
case i::HeapGraphEdge::CONTEXT_VARIABLE:
+ case i::HeapGraphEdge::INTERNAL:
case i::HeapGraphEdge::PROPERTY:
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
edge->name())));
namespace v8 {
namespace internal {
-Condition NegateCondition(Condition cc) {
- ASSERT(cc != al);
- return static_cast<Condition>(cc ^ ne);
-}
-
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
+const Instr kMovLeaveCCMask = 0xdff * B16;
+const Instr kMovLeaveCCPattern = 0x1a0 * B16;
+const Instr kMovwMask = 0xff * B20;
+const Instr kMovwPattern = 0x30 * B20;
+const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
}
+void Assembler::CodeTargetAlign() {
+ // Preferred alignment of jump targets on some ARM chips.
+ Align(8);
+}
+
+
bool Assembler::IsNop(Instr instr, int type) {
// Check for mov rx, rx.
ASSERT(0 <= type && type <= 14); // mov pc, pc is not a nop.
}
+static Instr EncodeMovwImmediate(uint32_t immediate) {
+ ASSERT(immediate < 0x10000);
+ return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+}
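+// Worked example (illustrative): EncodeMovwImmediate(0xabcd) returns
+// ((0xabcd & 0xf000) << 4) | (0xabcd & 0xfff) == 0xa0bcd, placing 0xa in
+// the imm4 field (bits 19..16) and 0xbcd in the imm12 field (bits 11..0)
+// of the movw/movt encoding.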
+
+
// Low-level code emission routines depending on the addressing mode.
// If this returns true then you have to use the rotate_imm and immed_8
// that it returns, because it may have already changed the instruction
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
*instr ^= kMovMvnFlip;
return true;
+ } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ if (imm32 < 0x10000) {
+ *instr ^= kMovwLeaveCCFlip;
+ *instr |= EncodeMovwImmediate(imm32);
+ *rotate_imm = *immed_8 = 0; // Not used for movw.
+ return true;
+ }
+ }
}
} else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
-static bool MustUseIp(RelocInfo::Mode rmode) {
+static bool MustUseConstantPool(RelocInfo::Mode rmode) {
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
bool Operand::is_single_instruction() const {
if (rm_.is_valid()) return true;
- if (MustUseIp(rmode_)) return false;
+ if (MustUseConstantPool(rmode_)) return false;
uint32_t dummy1, dummy2;
return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
}
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (MustUseIp(x.rmode_) ||
+ if (MustUseConstantPool(x.rmode_) ||
!fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to register ip and change the original instruction to use ip.
// However, if the original instruction is a 'mov rd, x' (not setting the
// condition code), then replace it with a 'ldr rd, [pc]'.
- RecordRelocInfo(x.rmode_, x.imm32_);
CHECK(!rn.is(ip)); // rn should never be ip, or it will be trashed
Condition cond = static_cast<Condition>(instr & CondMask);
if ((instr & ~CondMask) == 13*B21) { // mov, S not set
- ldr(rd, MemOperand(pc, 0), cond);
+ if (MustUseConstantPool(x.rmode_) ||
+ !CpuFeatures::IsSupported(ARMv7)) {
+ RecordRelocInfo(x.rmode_, x.imm32_);
+ ldr(rd, MemOperand(pc, 0), cond);
+ } else {
+ // Will probably use movw, will certainly not use constant pool.
+ mov(rd, Operand(x.imm32_ & 0xffff), LeaveCC, cond);
+ movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
+ }
} else {
- ldr(ip, MemOperand(pc, 0), cond);
+ // If this is not a mov or mvn instruction we may still be able to avoid
+ // a constant pool entry by using mvn or movw.
+ if (!MustUseConstantPool(x.rmode_) &&
+ (instr & kMovMvnMask) != kMovMvnPattern) {
+ mov(ip, x, LeaveCC, cond);
+ } else {
+ RecordRelocInfo(x.rmode_, x.imm32_);
+ ldr(ip, MemOperand(pc, 0), cond);
+ }
addrmod1(instr, rn, rd, Operand(ip));
}
return;
}
+void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
+ ASSERT(immediate < 0x10000);
+ mov(reg, Operand(immediate), LeaveCC, cond);
+}
+
+
+void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
+ emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
+}
+
+
void Assembler::bic(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 14*B21 | s, src1, dst, src2);
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (MustUseIp(src.rmode_) ||
+ if (MustUseConstantPool(src.rmode_) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
RecordRelocInfo(src.rmode_, src.imm32_);
// Returns the equivalent of !cc.
-INLINE(Condition NegateCondition(Condition cc));
+inline Condition NegateCondition(Condition cc) {
+ ASSERT(cc != al);
+ return static_cast<Condition>(cc ^ ne);
+}
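+// Note: flipping the low condition-code bit (ne == 1 << 28 in the ARM
+// encoding) maps each condition to its logical negation, e.g. eq <-> ne,
+// lt <-> ge and hi <-> ls; al has no negation, hence the ASSERT above.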
// Corresponds to transposing the operands of a comparison.
extern const Instr kMovMvnPattern;
extern const Instr kMovMvnFlip;
+extern const Instr kMovLeaveCCMask;
+extern const Instr kMovLeaveCCPattern;
+extern const Instr kMovwMask;
+extern const Instr kMovwPattern;
+extern const Instr kMovwLeaveCCFlip;
+
extern const Instr kCmpCmnMask;
extern const Instr kCmpCmnPattern;
extern const Instr kCmpCmnFlip;
// possible to align the pc offset to a multiple
// of m. m must be a power of 2 (>= 4).
void Align(int m);
+ // Aligns code to something that's optimal for a jump target for the platform.
+ void CodeTargetAlign();
// Branch instructions
void b(int branch_offset, Condition cond = al);
mov(dst, Operand(src), s, cond);
}
+ // ARMv7 instructions for loading a 32 bit immediate in two instructions.
+ // This may actually emit a different mov instruction, but on an ARMv7 it
+ // is guaranteed to only emit one instruction.
+ void movw(Register reg, uint32_t immediate, Condition cond = al);
+ // The constant for movt should be in the range 0-0xffff.
+ void movt(Register reg, uint32_t immediate, Condition cond = al);
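+  // Illustrative usage: on ARMv7 a movw/movt pair loads a full 32-bit
+  // value without touching the constant pool:
+  //   movw(r0, 0xbeef);  // r0 = 0x0000beef
+  //   movt(r0, 0xdead);  // r0 = 0xdeadbeef (movt only writes the top half)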
+
void bic(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
// Load the offset into r3.
int slot_offset =
FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r3, Operand(slot_offset));
- __ RecordWrite(r2, r3, r1);
+ __ RecordWrite(r2, Operand(slot_offset), r3, r1);
}
}
}
exit.Branch(eq);
// scratch is loaded with context when calling SlotOperand above.
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r3, Operand(offset));
// r1 could be identical with tos, but that doesn't matter.
- __ RecordWrite(scratch, r3, r1);
+ __ RecordWrite(scratch, Operand(offset), r3, r1);
}
// If we definitely did not jump over the assignment, we do not need
// to bind the exit label. Doing so can defeat peephole
__ str(r0, FieldMemOperand(r1, offset));
// Update the write barrier for the array address.
- __ mov(r3, Operand(offset));
- __ RecordWrite(r1, r3, r2);
+ __ RecordWrite(r1, Operand(offset), r3, r2);
}
ASSERT_EQ(original_height + 1, frame_->height());
}
// Store the value.
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier.
- __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
- __ RecordWrite(r1, r2, r3);
+ __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
// Leave.
leave.Bind();
frame_->EmitPush(r0);
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
- __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
void RecordWriteStub::Generate(MacroAssembler* masm) {
- __ RecordWriteHelper(object_, offset_, scratch_);
+ __ RecordWriteHelper(object_, Operand(offset_), offset_, scratch_);
__ Ret();
}
bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
+ Register heap_number_map = r6;
if (ShouldGenerateSmiCode()) {
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
// Smi-smi case (overflow).
// Since both are Smis there is no heap number to overwrite, so allocate.
- // The new heap number is in r5. r6 and r7 are scratch.
- __ AllocateHeapNumber(r5, r6, r7, lhs.is(r0) ? &slow_reverse : &slow);
+ // The new heap number is in r5. r3 and r7 are scratch.
+ __ AllocateHeapNumber(
+ r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
// If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
// using registers d7 and d6 for the double values.
__ vmov(s13, r7);
__ vcvt_f64_s32(d6, s13);
} else {
- // Write Smi from rhs to r3 and r2 in double format. r6 is scratch.
+      // Write Smi from rhs to r3 and r2 in double format. r9 is scratch.
__ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
+ ConvertToDoubleStub stub1(r3, r2, r7, r9);
__ push(lr);
__ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
- // Write Smi from lhs to r1 and r0 in double format. r6 is scratch.
+ // Write Smi from lhs to r1 and r0 in double format. r9 is scratch.
__ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
+ ConvertToDoubleStub stub2(r1, r0, r7, r9);
__ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
}
// We branch here if at least one of r0 and r1 is not a Smi.
__ bind(not_smi);
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// After this point we have the left hand side in r1 and the right hand side
// in r0.
default:
break;
}
+ // Restore heap number map register.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
}
if (mode_ == NO_OVERWRITE) {
// In the case where there is no chance of an overwritable float we may as
// well do the allocation immediately while r0 and r1 are untouched.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
}
// Move r0 to a double in r2-r3.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
if (mode_ == OVERWRITE_RIGHT) {
__ mov(r5, Operand(r0)); // Overwrite this heap number.
__ bind(&r0_is_smi);
if (mode_ == OVERWRITE_RIGHT) {
// We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
if (use_fp_registers) {
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
- ConvertToDoubleStub stub3(r3, r2, r7, r6);
+ ConvertToDoubleStub stub3(r3, r2, r7, r4);
__ push(lr);
__ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &r1_is_not_smi);
GenerateTypeTransition(masm);
+ // Restore heap number map register.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&r1_is_smi);
}
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
__ bind(&r1_is_not_smi);
- __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
if (mode_ == OVERWRITE_LEFT) {
__ mov(r5, Operand(r1)); // Overwrite this heap number.
__ bind(&r1_is_smi);
if (mode_ == OVERWRITE_LEFT) {
// We can't overwrite a Smi so get address of new heap number into r5.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
if (use_fp_registers) {
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
- ConvertToDoubleStub stub4(r1, r0, r7, r6);
+ ConvertToDoubleStub stub4(r1, r0, r7, r9);
__ push(lr);
__ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
__ pop(lr);
}
}
-
if (lhs.is(r0)) {
__ b(&slow);
__ bind(&slow_reverse);
__ Swap(r0, r1, ip);
}
+ heap_number_map = no_reg; // Don't use this any more from here on.
+
// We jump to here if something goes wrong (one param is not a number of any
// sort or new-space allocation fails).
__ bind(&slow);
Label rhs_is_smi, lhs_is_smi;
Label done_checking_rhs, done_checking_lhs;
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
__ tst(lhs, Operand(kSmiTagMask));
__ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
GetInt32(masm, lhs, r3, r5, r4, &slow);
__ jmp(&done_checking_lhs);
__ tst(rhs, Operand(kSmiTagMask));
__ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number.
- __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
+ __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
+ __ cmp(r4, heap_number_map);
__ b(ne, &slow);
GetInt32(masm, rhs, r2, r5, r4, &slow);
__ jmp(&done_checking_rhs);
break;
}
case NO_OVERWRITE: {
- // Get a new heap number in r5. r6 and r7 are scratch.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ // Get a new heap number in r5. r4 and r7 are scratch.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
}
default: break;
}
if (mode_ != NO_OVERWRITE) {
__ bind(&have_to_allocate);
- // Get a new heap number in r5. r6 and r7 are scratch.
- __ AllocateHeapNumber(r5, r6, r7, &slow);
+ // Get a new heap number in r5. r4 and r7 are scratch.
+ __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
__ jmp(&got_a_heap_number);
}
}
OS::SNPrintF(Vector<char>(name_, len),
- "GenericBinaryOpStub_%s_%s%s",
+ "GenericBinaryOpStub_%s_%s%s_%s",
op_name,
overwrite_name,
- specialized_on_rhs_ ? "_ConstantRhs" : 0);
+ specialized_on_rhs_ ? "_ConstantRhs" : "",
+ BinaryOpIC::GetName(runtime_operands_type_));
return name_;
}
}
__ Ret();
__ bind(&smi_is_unsuitable);
+ } else if (op_ == Token::MOD &&
+ runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+ runtime_operands_type_ != BinaryOpIC::STRINGS) {
+    // Generate a bit of smi code for modulus even though the default for
+    // modulus is not to do so: since the ARM processor has no coprocessor
+    // support for modulus, checking for the smi fast case makes sense.
+ Label slow;
+ ASSERT(!ShouldGenerateSmiCode());
+ ASSERT(kSmiTag == 0); // Adjust code below.
+ // Check for two positive smis.
+ __ orr(smi_test_reg, lhs, Operand(rhs));
+ __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
+ __ b(ne, &slow);
+ // Check that rhs is a power of two and not zero.
+ __ sub(scratch, rhs, Operand(1), SetCC);
+ __ b(mi, &slow);
+ __ tst(rhs, scratch);
+ __ b(ne, &slow);
+ // Calculate power of two modulus.
+ __ and_(result, lhs, Operand(scratch));
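+    // Worked example (smi-tagged, i.e. value << 1): lhs = 13 is 26 and
+    // rhs = 4 is 8, so scratch = 7; rhs & scratch == 0 confirms the power
+    // of two, and lhs & scratch == 2, the smi encoding of 13 % 4 == 1.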
+ __ Ret();
+ __ bind(&slow);
}
HandleBinaryOpSlowCases(
masm,
void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
Label slow, done;
+ Register heap_number_map = r6;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
if (op_ == Token::SUB) {
// Check whether the value is a smi.
Label try_float;
__ b(&done);
__ bind(&try_float);
- __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r1, heap_number_map);
__ b(ne, &slow);
// r0 is a heap number. Get a new heap number in r1.
if (overwrite_) {
__ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
} else {
- __ AllocateHeapNumber(r1, r2, r3, &slow);
+ __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
}
} else if (op_ == Token::BIT_NOT) {
// Check if the operand is a heap number.
- __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r1, heap_number_map);
__ b(ne, &slow);
// Convert the heap number in r0 to an untagged integer in r1.
// Allocate a fresh heap number, but don't overwrite r0 until
// we're sure we can do it without going through the slow case
// that needs the value in r0.
- __ AllocateHeapNumber(r2, r3, r4, &slow);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ mov(r0, Operand(r2));
}
RegExpImpl::kLastCaptureCountOffset));
// Store last subject and last input.
__ mov(r3, last_match_info_elements); // Moved up to reduce latency.
- __ mov(r2, Operand(RegExpImpl::kLastSubjectOffset)); // Ditto.
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastSubjectOffset));
- __ RecordWrite(r3, r2, r7);
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
__ mov(r3, last_match_info_elements);
- __ mov(r2, Operand(RegExpImpl::kLastInputOffset));
- __ RecordWrite(r3, r2, r7);
+ __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
}
- Label non_ascii, allocated;
+ Label non_ascii, allocated, ascii_data;
ASSERT_EQ(0, kTwoByteStringTag);
__ tst(r4, Operand(kStringEncodingMask));
__ tst(r5, Operand(kStringEncodingMask), ne);
__ b(eq, &non_ascii);
// Allocate an ASCII cons string.
+ __ bind(&ascii_data);
__ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
__ bind(&allocated);
// Fill the fields of the cons string.
__ Ret();
__ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // r4: first instance type.
+ // r5: second instance type.
+ __ tst(r4, Operand(kAsciiDataHintMask));
+ __ tst(r5, Operand(kAsciiDataHintMask), ne);
+ __ b(ne, &ascii_data);
+ __ eor(r4, r4, Operand(r5));
+ ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ b(eq, &ascii_data);
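+  // The eor/and/cmp sequence above catches the remaining mixed case: r4 ^ r5
+  // has both kAsciiStringTag and kAsciiDataHintTag set exactly when one
+  // string is ascii and the other is two-byte with the ascii-data hint.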
+
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
__ jmp(&allocated);
}
void Generate(MacroAssembler* masm);
- void HandleNonSmiBitwiseOp(MacroAssembler* masm, Register lhs, Register rhs);
+ void HandleNonSmiBitwiseOp(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
void HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
Register lhs,
// with immediate
inline int RotateField() const { return Bits(11, 8); }
inline int Immed8Field() const { return Bits(7, 0); }
+ inline int Immed4Field() const { return Bits(19, 16); }
+ inline int ImmedMovwMovtField() const {
+    return (Immed4Field() << 12) | Offset12Field();
+  }
// Fields used in Load/Store instructions
inline int PUField() const { return Bits(24, 23); }
void PrintSRegister(int reg);
void PrintDRegister(int reg);
int FormatVFPRegister(Instr* instr, const char* format);
+ void PrintMovwMovt(Instr* instr);
int FormatVFPinstruction(Instr* instr, const char* format);
void PrintCondition(Instr* instr);
void PrintShiftRm(Instr* instr);
}
+// Print the movw or movt instruction.
+void Decoder::PrintMovwMovt(Instr* instr) {
+ int imm = instr->ImmedMovwMovtField();
+ int rd = instr->RdField();
+ PrintRegister(rd);
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", #%d", imm);
+}
+
+
// FormatOption takes a formatting string and interprets it based on
// the current instructions. The format string points to the first
// character of the option string (the option escape has already been
return 1;
}
case 'm': {
- if (format[1] == 'e') { // 'memop: load/store instructions
+ if (format[1] == 'w') {
+ // 'mw: movt/movw instructions.
+ PrintMovwMovt(instr);
+ return 2;
+ }
+ if (format[1] == 'e') { // 'memop: load/store instructions.
ASSERT(STRING_STARTS_WITH(format, "memop"));
if (instr->HasL()) {
Print("ldr");
if (instr->HasS()) {
Format(instr, "tst'cond 'rn, 'shift_op");
} else {
- Unknown(instr); // not used by V8
+ Format(instr, "movw'cond 'mw");
}
break;
}
if (instr->HasS()) {
Format(instr, "cmp'cond 'rn, 'shift_op");
} else {
- Unknown(instr); // not used by V8
+ Format(instr, "movt'cond 'mw");
}
break;
}
}
if (needs_write_barrier) {
- __ mov(scratch1(), Operand(offset));
- __ RecordWrite(scratch0(), scratch1(), scratch2());
+ __ RecordWrite(scratch0(), Operand(offset), scratch1(), scratch2());
}
if (destination().is(accumulator1())) {
__ mov(r1, Operand(Context::SlotOffset(slot->index())));
__ str(r0, MemOperand(cp, r1));
// Update the write barrier. This clobbers all involved
- // registers, so we have use a third register to avoid
+ // registers, so we have to use two more registers to avoid
// clobbering cp.
__ mov(r2, Operand(cp));
- __ RecordWrite(r2, r1, r0);
+ __ RecordWrite(r2, Operand(r1), r3, r0);
}
}
}
__ str(src, location);
// Emit the write barrier code if the location is in the heap.
if (dst->type() == Slot::CONTEXT) {
- __ mov(scratch2, Operand(Context::SlotOffset(dst->index())));
- __ RecordWrite(scratch1, scratch2, src);
+ __ RecordWrite(scratch1,
+ Operand(Context::SlotOffset(dst->index())),
+ scratch2,
+ src);
}
}
__ str(result_register(),
CodeGenerator::ContextOperand(cp, slot->index()));
int offset = Context::SlotOffset(slot->index());
- __ mov(r2, Operand(offset));
// We know that we have written a function, which is not a smi.
__ mov(r1, Operand(cp));
- __ RecordWrite(r1, r2, result_register());
+ __ RecordWrite(r1, Operand(offset), r2, result_register());
}
break;
// Update the write barrier for the array store with r0 as the scratch
// register.
- __ mov(r2, Operand(offset));
- __ RecordWrite(r1, r2, result_register());
+ __ RecordWrite(r1, Operand(offset), r2, result_register());
}
if (result_saved) {
// RecordWrite may destroy all its register arguments.
__ mov(r3, result_register());
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r2, Operand(offset));
- __ RecordWrite(r1, r2, r3);
+ __ RecordWrite(r1, Operand(offset), r2, r3);
break;
}
Label slow_allocate_heapnumber;
Label heapnumber_allocated;
- __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r4, r1, r2, r6, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
__ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
- __ RecordWrite(r1, r2, r3);
+ __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
__ bind(&done);
Apply(context_, r0);
__ bind(&box_int);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Use r0 for result as key is not needed any more.
- __ AllocateHeapNumber(r0, r3, r4, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r0, r3, r4, r6, &slow);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Allocate a HeapNumber for the result and perform int-to-double
// conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
- __ AllocateHeapNumber(r2, r3, r4, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_u32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
// Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
// clobbers all registers - also when jumping due to exhausted young
// space.
- __ AllocateHeapNumber(r4, r5, r6, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
__ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
__ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
- __ AllocateHeapNumber(r2, r3, r4, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
__ vcvt_f64_f32(d0, s0);
__ sub(r1, r2, Operand(kHeapObjectTag));
__ vstr(d0, r1, HeapNumber::kValueOffset);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
- __ AllocateHeapNumber(r3, r4, r5, &slow);
+ __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
// VFP is not available, do manual single to double conversion.
// r2: floating point value (binary32)
__ Ret(eq);
// Update write barrier for the elements array address.
__ sub(r4, r5, Operand(elements));
- __ RecordWrite(elements, r4, r5);
+ __ RecordWrite(elements, Operand(r4), r5, r6);
__ Ret();
}
}
+void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
+ ASSERT(lsb < 32);
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+ bic(dst, dst, Operand(mask));
+ } else {
+ bfc(dst, lsb, width, cond);
+ }
+}
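+// Illustrative note: for lsb == 4 and width == 8 the pre-ARMv7 fallback
+// computes mask = (1 << 12) - 1 - ((1 << 4) - 1) == 0xff0, so the bic
+// clears exactly bits 4..11, mirroring what the ARMv7 bfc instruction does.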
+
+
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
// Empty the const pool.
CheckConstPool(true, true);
void MacroAssembler::RecordWriteHelper(Register object,
- Register offset,
- Register scratch) {
+ Operand offset,
+ Register scratch0,
+ Register scratch1) {
if (FLAG_debug_code) {
// Check that the object is not in new space.
Label not_in_new_space;
- InNewSpace(object, scratch, ne, ¬_in_new_space);
+ InNewSpace(object, scratch1, ne, ¬_in_new_space);
Abort("new-space object passed to RecordWriteHelper");
bind(¬_in_new_space);
}
- mov(ip, Operand(Page::kPageAlignmentMask)); // Load mask only once.
-
- // Calculate region number.
- add(offset, object, Operand(offset)); // Add offset into the object.
- and_(offset, offset, Operand(ip)); // Offset into page of the object.
- mov(offset, Operand(offset, LSR, Page::kRegionSizeLog2));
+ // Add offset into the object.
+ add(scratch0, object, offset);
// Calculate page address.
- bic(object, object, Operand(ip));
+ Bfc(object, 0, kPageSizeBits);
+
+ // Calculate region number.
+ Ubfx(scratch0, scratch0, Page::kRegionSizeLog2,
+ kPageSizeBits - Page::kRegionSizeLog2);
// Mark region dirty.
- ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+ ldr(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
- orr(scratch, scratch, Operand(ip, LSL, offset));
- str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+ orr(scratch1, scratch1, Operand(ip, LSL, scratch0));
+ str(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
}
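// Equivalent region-marking arithmetic in plain C++ (illustrative sketch;
// 'object' is the untagged object address, dirty_flags() is hypothetical):
//   uintptr_t addr = object + offset;              // slot being recorded
//   uintptr_t page = object & ~((uintptr_t(1) << kPageSizeBits) - 1);
//   int region = (addr >> Page::kRegionSizeLog2) &
//                ((1 << (kPageSizeBits - Page::kRegionSizeLog2)) - 1);
//   dirty_flags(page) |= 1u << region;             // mark the region dirty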
// Will clobber 4 registers: object, scratch0, scratch1, ip. The
// register 'object' contains a heap object pointer. The heap object
// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object, Register offset,
- Register scratch) {
+void MacroAssembler::RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are cp.
- ASSERT(!object.is(cp) && !offset.is(cp) && !scratch.is(cp));
+ ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
Label done;
// First, test that the object is not in the new space. We cannot set
// region marks for new space pages.
- InNewSpace(object, scratch, eq, &done);
+ InNewSpace(object, scratch0, eq, &done);
// Record the actual write.
- RecordWriteHelper(object, offset, scratch);
+ RecordWriteHelper(object, offset, scratch0, scratch1);
bind(&done);
// turned on to provoke errors.
if (FLAG_debug_code) {
mov(object, Operand(BitCast<int32_t>(kZapValue)));
- mov(offset, Operand(BitCast<int32_t>(kZapValue)));
- mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
+ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
}
}
}
+void MacroAssembler::AssertRegisterIsRoot(Register reg,
+ Heap::RootListIndex index) {
+ if (FLAG_debug_code) {
+ LoadRoot(ip, index);
+ cmp(reg, ip);
+ Check(eq, "Register did not match expected root");
+ }
+}
+
+
void MacroAssembler::Check(Condition cc, const char* msg) {
Label L;
b(cc, &L);
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
+ Register heap_number_map,
Label* gc_required) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
gc_required,
TAG_OBJECT);
- // Get heap number map and store it in the allocated object.
- LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
- str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ // Store heap number map in the allocated object.
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
Condition cond = al);
void Sbfx(Register dst, Register src, int lsb, int width,
Condition cond = al);
+ void Bfc(Register dst, int lsb, int width, Condition cond = al);
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
- void RecordWriteHelper(Register object, Register offset, Register scratch);
+ void RecordWriteHelper(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1);
// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
- // The 'scratch' register is used in the implementation and all 3 registers
+ // The 'scratch' registers are used in the implementation and all 3 registers
// are clobbered by the operation, as well as the ip register.
- void RecordWrite(Register object, Register offset, Register scratch);
+ void RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
void AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
+ Register heap_number_map,
Label* gc_required);
// ---------------------------------------------------------------------------
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg);
+ void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg);
SetNZFlags(alu_out);
SetCFlag(shifter_carry_out);
} else {
- UNIMPLEMENTED();
+ // Format(instr, "movw'cond 'rd, 'imm").
+ alu_out = instr->ImmedMovwMovtField();
+ set_register(rd, alu_out);
}
break;
}
SetCFlag(!BorrowFrom(rn_val, shifter_operand));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
} else {
- UNIMPLEMENTED();
+ // Format(instr, "movt'cond 'rd, 'imm").
+ alu_out = (get_register(rd) & 0xffff) |
+ (instr->ImmedMovwMovtField() << 16);
+ set_register(rd, alu_out);
}
break;
}
__ b(eq, &exit);
// Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, Operand(offset));
- __ RecordWrite(receiver_reg, name_reg, scratch);
+ // Pass the now unused name_reg as a scratch register.
+ __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, Operand(offset));
- __ RecordWrite(scratch, name_reg, receiver_reg);
+ __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
}
// Return the value (register r0).
}
if (FLAG_expose_gc) InstallExtension("v8/gc");
+ if (FLAG_expose_externalize_string) InstallExtension("v8/externalize");
if (extensions == NULL) return true;
// Install required extensions
static inline void CheckEqualsHelper(const char* file,
int line,
const char* expected_source,
- void* expected,
+ const void* expected,
const char* value_source,
- void* value) {
+ const void* value) {
if (expected != value) {
V8_Fatal(file, line,
"CHECK_EQ(%s, %s) failed\n# Expected: %p\n# Found: %p",
static inline void CheckNonEqualsHelper(const char* file,
int line,
const char* expected_source,
- void* expected,
+ const void* expected,
const char* value_source,
- void* value) {
+ const void* value) {
if (expected == value) {
V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %p",
expected_source, value_source, value);
}
-
ScriptBreakPoint.prototype.hit_count = function() {
return this.hit_count_;
};
// Create a break point object and set the break point.
break_point = MakeBreakPoint(pos, this.line(), this.column(), this);
break_point.setIgnoreCount(this.ignoreCount());
- %SetScriptBreakPoint(script, pos, break_point);
+ pos = %SetScriptBreakPoint(script, pos, break_point);
+ if (!IS_UNDEFINED(pos)) {
+ this.actual_location = script.locationFromPosition(pos);
+ }
return break_point;
};
void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
- int source_position,
- Handle<Object> break_point_object) {
+ Handle<Object> break_point_object,
+ int* source_position) {
HandleScope scope;
if (!EnsureDebugInfo(shared)) {
// Find the break point and change it.
BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromPosition(source_position);
+ it.FindBreakLocationFromPosition(*source_position);
it.SetBreakPoint(break_point_object);
+ *source_position = it.position();
+
// At least one active break point now.
ASSERT(debug_info->GetBreakPointCount() > 0);
}
static Object* Break(Arguments args);
static void SetBreakPoint(Handle<SharedFunctionInfo> shared,
- int source_position,
- Handle<Object> break_point_object);
+ Handle<Object> break_point_object,
+ int* source_position);
static void ClearBreakPoint(Handle<Object> break_point_object);
static void ClearAllBreakPoints();
static void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
// --- G C E x t e n s i o n ---
-const char* GCExtension::kSource = "native function gc();";
+const char* const GCExtension::kSource = "native function gc();";
v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
}
-static GCExtension kGCExtension;
-v8::DeclareExtension kGCExtensionDeclaration(&kGCExtension);
+static GCExtension gc_extension;
+static v8::DeclareExtension gc_extension_declaration(&gc_extension);
+
+
+// --- E x t e r n a l i z e S t r i n g E x t e n s i o n ---
+
+
+template <typename Char, typename Base>
+class SimpleStringResource : public Base {
+ public:
+ // Takes ownership of |data|.
+ SimpleStringResource(Char* data, size_t length)
+ : data_(data),
+ length_(length) {}
+
+  // data_ was allocated with new[], so it must be released with delete[].
+  virtual ~SimpleStringResource() { delete[] data_; }
+
+ virtual const Char* data() const { return data_; }
+
+ virtual size_t length() const { return length_; }
+
+ private:
+ Char* const data_;
+ const size_t length_;
+};
+
+
+typedef SimpleStringResource<char, v8::String::ExternalAsciiStringResource>
+ SimpleAsciiStringResource;
+typedef SimpleStringResource<uc16, v8::String::ExternalStringResource>
+ SimpleTwoByteStringResource;
+
+
+const char* const ExternalizeStringExtension::kSource =
+ "native function externalizeString();"
+ "native function isAsciiString();";
+
+
+v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction(
+ v8::Handle<v8::String> str) {
+ if (strcmp(*v8::String::AsciiValue(str), "externalizeString") == 0) {
+ return v8::FunctionTemplate::New(ExternalizeStringExtension::Externalize);
+ } else {
+ ASSERT(strcmp(*v8::String::AsciiValue(str), "isAsciiString") == 0);
+ return v8::FunctionTemplate::New(ExternalizeStringExtension::IsAscii);
+ }
+}
+
+
+v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
+ const v8::Arguments& args) {
+ if (args.Length() < 1 || !args[0]->IsString()) {
+ return v8::ThrowException(v8::String::New(
+ "First parameter to externalizeString() must be a string."));
+ }
+ bool force_two_byte = false;
+ if (args.Length() >= 2) {
+ if (args[1]->IsBoolean()) {
+ force_two_byte = args[1]->BooleanValue();
+ } else {
+ return v8::ThrowException(v8::String::New(
+ "Second parameter to externalizeString() must be a boolean."));
+ }
+ }
+ bool result = false;
+ Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>());
+ if (string->IsExternalString()) {
+ return v8::ThrowException(v8::String::New(
+ "externalizeString() can't externalize twice."));
+ }
+ if (string->IsAsciiRepresentation() && !force_two_byte) {
+ char* data = new char[string->length()];
+ String::WriteToFlat(*string, data, 0, string->length());
+ SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
+ data, string->length());
+ result = string->MakeExternal(resource);
+ if (result && !string->IsSymbol()) {
+ i::ExternalStringTable::AddString(*string);
+ }
+ } else {
+ uc16* data = new uc16[string->length()];
+ String::WriteToFlat(*string, data, 0, string->length());
+ SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource(
+ data, string->length());
+ result = string->MakeExternal(resource);
+ if (result && !string->IsSymbol()) {
+ i::ExternalStringTable::AddString(*string);
+ }
+ }
+ if (!result) {
+ return v8::ThrowException(v8::String::New("externalizeString() failed."));
+ }
+ return v8::Undefined();
+}
+
+
+v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(
+ const v8::Arguments& args) {
+ if (args.Length() != 1 || !args[0]->IsString()) {
+ return v8::ThrowException(v8::String::New(
+ "isAsciiString() requires a single string argument."));
+ }
+ return Utils::OpenHandle(*args[0].As<v8::String>())->IsAsciiRepresentation() ?
+ v8::True() : v8::False();
+}
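+// Illustrative JS-level usage (run with --expose-externalize-string; the
+// string must be long enough for MakeExternal to accept it):
+//   var s = new Array(100).join("x");  // a long, flat ascii string
+//   externalizeString(s);              // copies the data to a new resource
+//   isAsciiString(s);                  // -> true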
+
+
+static ExternalizeStringExtension externalize_extension;
+static v8::DeclareExtension externalize_extension_declaration(
+ &externalize_extension);
} } // namespace v8::internal
v8::Handle<v8::String> name);
static v8::Handle<v8::Value> GC(const v8::Arguments& args);
private:
- static const char* kSource;
+ static const char* const kSource;
};
+class ExternalizeStringExtension : public v8::Extension {
+ public:
+ ExternalizeStringExtension() : v8::Extension("v8/externalize", kSource) {}
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name);
+ static v8::Handle<v8::Value> Externalize(const v8::Arguments& args);
+ static v8::Handle<v8::Value> IsAscii(const v8::Arguments& args);
+ private:
+ static const char* const kSource;
+};
+
} } // namespace v8::internal
#endif // V8_EXECUTION_H_
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
DEFINE_bool(expose_gc, false, "expose gc extension")
+DEFINE_bool(expose_externalize_string, false,
+ "expose externalize string extension")
DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
DEFINE_bool(disable_native_files, false, "disable builtin natives files")
"print more details following each garbage collection")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
-DEFINE_bool(flush_code, false,
+DEFINE_bool(flush_code, true,
"flush code that we expect not to use again before full gc")
// v8.cc
delete snapshots_;
}
+#endif // ENABLE_LOGGING_AND_PROFILING
void HeapProfiler::Setup() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
if (singleton_ == NULL) {
singleton_ = new HeapProfiler();
}
+#endif
}
void HeapProfiler::TearDown() {
+#ifdef ENABLE_LOGGING_AND_PROFILING
delete singleton_;
singleton_ = NULL;
+#endif
}
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name) {
ASSERT(singleton_ != NULL);
return singleton_->TakeSnapshotImpl(name);
HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name) {
+ Heap::CollectAllGarbage(false);
HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
HeapSnapshotGenerator generator(result);
generator.GenerateSnapshot();
class HeapSnapshot;
class HeapSnapshotsCollection;
+#endif
+
// The HeapProfiler writes data to the log files, which can be postprocessed
// to generate .hp files for use by the GHC/Valgrind tool hp2ps.
class HeapProfiler {
public:
static void Setup();
static void TearDown();
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
static HeapSnapshot* TakeSnapshot(const char* name);
static HeapSnapshot* TakeSnapshot(String* name);
static int GetSnapshotsCount();
unsigned next_snapshot_uid_;
static HeapProfiler* singleton_;
+#endif // ENABLE_LOGGING_AND_PROFILING
};
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
// JSObjectsCluster describes a group of JS objects that are
// considered equivalent in terms of a particular profile.
class JSObjectsCluster BASE_EMBEDDED {
return Failure::OutOfMemoryException();
}
+ bool is_ascii_data_in_two_byte_string = false;
+ if (!is_ascii) {
+ // At least one of the strings uses two-byte representation so we
+ // can't use the fast case code for short ascii strings below, but
+ // we can try to save memory if all chars actually fit in ascii.
+ is_ascii_data_in_two_byte_string =
+ first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
+ if (is_ascii_data_in_two_byte_string) {
+ Counters::string_add_runtime_ext_to_ascii.Increment();
+ }
+ }
+
// If the resulting string is small make a flat string.
if (length < String::kMinNonFlatLength) {
ASSERT(first->IsFlat());
for (int i = 0; i < second_length; i++) *dest++ = src[i];
return result;
} else {
- // For short external two-byte strings we check whether they can
- // be represented using ascii.
- if (!first_is_ascii) {
- first_is_ascii = first->IsExternalTwoByteStringWithAsciiChars();
- }
- if (first_is_ascii && !second_is_ascii) {
- second_is_ascii = second->IsExternalTwoByteStringWithAsciiChars();
- }
- if (first_is_ascii && second_is_ascii) {
+ if (is_ascii_data_in_two_byte_string) {
Object* result = AllocateRawAsciiString(length);
if (result->IsFailure()) return result;
// Copy the characters into the new object.
char* dest = SeqAsciiString::cast(result)->GetChars();
String::WriteToFlat(first, dest, 0, first_length);
String::WriteToFlat(second, dest + first_length, 0, second_length);
- Counters::string_add_runtime_ext_to_ascii.Increment();
return result;
}
}
}
- Map* map = is_ascii ? cons_ascii_string_map() : cons_string_map();
+ Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
+ cons_ascii_string_map() : cons_string_map();
Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
return Failure::OutOfMemoryException();
}
- Map* map = Heap::external_string_map();
+ // For small strings we check whether the resource contains only
+ // ascii characters. If yes, we use a different string map.
+ bool is_ascii = true;
+ if (length >= static_cast<size_t>(String::kMinNonFlatLength)) {
+ is_ascii = false;
+ } else {
+ const uc16* data = resource->data();
+ for (size_t i = 0; i < length; i++) {
+ if (data[i] > String::kMaxAsciiCharCode) {
+ is_ascii = false;
+ break;
+ }
+ }
+ }
+
+ Map* map = is_ascii ?
+ Heap::external_string_with_ascii_data_map() : Heap::external_string_map();
Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
ThreadManager::IterateArchivedThreads(&threadvisitor);
if (threadvisitor.FoundCode()) return;
+  // Check whether there are heap-allocated locals in the scopeinfo. If
+  // there are, we are potentially using eval and need the scopeinfo
+  // for variable resolution.
+ if (ScopeInfo<>::HasHeapAllocatedLocals(function_info->code()))
+ return;
+
HandleScope scope;
// Compute the lazy compilable version of the code.
function_info->set_code(*ComputeLazyCompile(function_info->length()));
if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
if (map == external_string_map()) return external_symbol_map();
if (map == external_ascii_string_map()) return external_ascii_symbol_map();
+ if (map == external_string_with_ascii_data_map()) {
+ return external_symbol_with_ascii_data_map();
+ }
// No match found.
return NULL;
V(Map, cons_symbol_map, ConsSymbolMap) \
V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \
V(Map, external_symbol_map, ExternalSymbolMap) \
+ V(Map, external_symbol_with_ascii_data_map, ExternalSymbolWithAsciiDataMap) \
V(Map, external_ascii_symbol_map, ExternalAsciiSymbolMap) \
V(Map, cons_string_map, ConsStringMap) \
V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
V(Map, external_string_map, ExternalStringMap) \
+ V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \
V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
V(Map, undetectable_string_map, UndetectableStringMap) \
V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
namespace v8 {
namespace internal {
-Condition NegateCondition(Condition cc) {
- return static_cast<Condition>(cc ^ 1);
-}
-
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
}
+void Assembler::CodeTargetAlign() {
+ Align(16); // Preferred alignment of jump targets on ia32.
+}
+
+
void Assembler::cpuid() {
ASSERT(CpuFeatures::IsEnabled(CPUID));
EnsureSpace ensure_space(this);
}
-void Assembler::comisd(XMMRegister dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- EMIT(0x66);
- EMIT(0x0F);
- EMIT(0x2F);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
// Negation of the default no_condition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc);
+inline Condition NegateCondition(Condition cc) {
+ return static_cast<Condition>(cc ^ 1);
+}
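+// Note: the ia32 condition codes come in complementary even/odd pairs
+// (e.g. equal == 4, not_equal == 5), so flipping the low bit negates.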
+
// Corresponds to transposing the operands of a comparison.
inline Condition ReverseCondition(Condition cc) {
};
}
+
enum Hint {
no_hint = 0,
not_taken = 0x2e,
taken = 0x3e
};
+
// The result of negating a hint is as if the corresponding condition
// were negated by NegateCondition. That is, no_hint is mapped to
// itself and not_taken and taken are mapped to each other.
// possible to align the pc offset to a multiple
// of m. m must be a power of 2.
void Align(int m);
+ // Aligns code to something that's optimal for a jump target for the platform.
+ void CodeTargetAlign();
// Stack
void pushad();
void xorpd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
- void comisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
void movmskpd(Register dst, XMMRegister src);
RegisterFile empty_regs;
SetFrame(clone, &empty_regs);
__ bind(&allocation_failed);
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ // Pop the value from the floating point stack.
+ __ fstp(0);
+ }
unsafe_bailout_->Jump();
done.Bind(value);
¬_numbers);
LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side,
¬_numbers);
- __ comisd(xmm0, xmm1);
+ __ ucomisd(xmm0, xmm1);
} else {
Label check_right, compare;
// Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
__ addsd(xmm2, xmm3);
// xmm2 now has 0.5.
- __ comisd(xmm2, xmm1);
+ __ ucomisd(xmm2, xmm1);
call_runtime.Branch(not_equal);
// Calculates square root.
__ movsd(xmm1, xmm0);
CpuFeatures::Scope fscope(SSE2);
__ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
__ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ comisd(xmm0, xmm1);
+ __ ucomisd(xmm0, xmm1);
} else {
__ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
__ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
CpuFeatures::Scope use_cmov(CMOV);
FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ comisd(xmm0, xmm1);
+ __ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered, not_taken);
// If result is not supposed to be flat allocate a cons string object. If both
// strings are ascii the result is an ascii cons string.
- Label non_ascii, allocated;
+ Label non_ascii, allocated, ascii_data;
__ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
ASSERT(kStringEncodingMask == kAsciiStringTag);
__ test(ecx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii);
+ __ bind(&ascii_data);
// Allocate an ascii cons string.
__ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
__ bind(&allocated);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // ecx: first instance type AND second instance type.
+ // edi: second instance type.
+ __ test(ecx, Immediate(kAsciiDataHintMask));
+ __ j(not_zero, &ascii_data);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ xor_(edi, Operand(ecx));
+ ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
+ __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
+ __ j(equal, &ascii_data);
// Allocate a two byte cons string.
__ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
__ jmp(&allocated);
__ push(other);
__ push(receiver); // receiver
__ push(reg); // holder
- __ mov(other, Immediate(callback_handle));
- __ push(FieldOperand(other, AccessorInfo::kDataOffset)); // data
+ // Push data from AccessorInfo.
+ if (Heap::InNewSpace(callback_handle->data())) {
+ __ mov(other, Immediate(callback_handle));
+ __ push(FieldOperand(other, AccessorInfo::kDataOffset));
+ } else {
+ __ push(Immediate(Handle<Object>(callback_handle->data())));
+ }
__ push(name_reg); // name
// Save a pointer to where we pushed the arguments pointer.
// This will be passed as the const AccessorInfo& to the C++ callback.
if (PatchInlinedLoad(address(), map, offset)) {
set_target(megamorphic_stub());
return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[LoadIC : inline patch %s]\n", *name->ToCString());
+ }
+ } else {
+ if (FLAG_trace_ic) {
+ PrintF("[LoadIC : no inline patch %s (patching failed)]\n",
+ *name->ToCString());
+ }
+ }
+ } else {
+ if (FLAG_trace_ic) {
+ PrintF("[LoadIC : no inline patch %s (not inobject)]\n",
+ *name->ToCString());
+ }
+ }
+ } else {
+ if (FLAG_use_ic && state == PREMONOMORPHIC) {
+ if (FLAG_trace_ic) {
+ PrintF("[LoadIC : no inline patch %s (not inlinable)]\n",
+ *name->ToCString());
+#endif
}
}
}
if ((mask & char_mask) == char_mask) need_mask = false;
mask &= char_mask;
} else {
- // For 2-character preloads in ASCII mode we also use a 16 bit load with
- // zero extend.
+ // For 2-character preloads in ASCII mode or 1-character preloads in
+ // TWO_BYTE mode we also use a 16 bit load with zero extend.
if (details->characters() == 2 && compiler->ascii()) {
+ if ((mask & 0x7f7f) == 0x7f7f) need_mask = false;
+ } else if (details->characters() == 1 && !compiler->ascii()) {
if ((mask & 0xffff) == 0xffff) need_mask = false;
} else {
if (mask == 0xffffffff) need_mask = false;
case CONS_SYMBOL_TYPE: return "CONS_SYMBOL";
case CONS_ASCII_SYMBOL_TYPE: return "CONS_ASCII_SYMBOL";
case EXTERNAL_ASCII_SYMBOL_TYPE:
+ case EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
case EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
case ASCII_STRING_TYPE: return "ASCII_STRING";
case STRING_TYPE: return "TWO_BYTE_STRING";
case CONS_STRING_TYPE:
case CONS_ASCII_STRING_TYPE: return "CONS_STRING";
case EXTERNAL_ASCII_STRING_TYPE:
+ case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
bool String::IsAsciiRepresentation() {
uint32_t type = map()->instance_type();
- if ((type & kStringRepresentationMask) == kConsStringTag &&
- ConsString::cast(this)->second()->length() == 0) {
- return ConsString::cast(this)->first()->IsAsciiRepresentation();
- }
return (type & kStringEncodingMask) == kAsciiStringTag;
}
bool String::IsTwoByteRepresentation() {
uint32_t type = map()->instance_type();
- if ((type & kStringRepresentationMask) == kConsStringTag &&
- ConsString::cast(this)->second()->length() == 0) {
- return ConsString::cast(this)->first()->IsTwoByteRepresentation();
- }
return (type & kStringEncodingMask) == kTwoByteStringTag;
}
-bool String::IsExternalTwoByteStringWithAsciiChars() {
- if (!IsExternalTwoByteString()) return false;
- const uc16* data = ExternalTwoByteString::cast(this)->resource()->data();
- for (int i = 0, len = length(); i < len; i++) {
- if (data[i] > kMaxAsciiCharCode) return false;
- }
- return true;
+bool String::HasOnlyAsciiChars() {
+ uint32_t type = map()->instance_type();
+ return (type & kStringEncodingMask) == kAsciiStringTag ||
+ (type & kAsciiDataHintMask) == kAsciiDataHintTag;
}
bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
+  // Externalizing twice leaks the external resource, so it's
+  // prohibited by the API.
+ ASSERT(!this->IsExternalString());
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
// Assert that the resource and the string are equivalent.
return false;
}
ASSERT(size >= ExternalString::kSize);
+ bool is_ascii = this->IsAsciiRepresentation();
bool is_symbol = this->IsSymbol();
int length = this->length();
int hash_field = this->hash_field();
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
- this->set_map(Heap::external_string_map());
+ this->set_map(is_ascii ?
+ Heap::external_string_with_ascii_data_map() :
+ Heap::external_string_map());
ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
self->set_length(length);
self->set_hash_field(hash_field);
if (is_symbol) {
self->Hash(); // Force regeneration of the hash value.
// Now morph this external string into a external symbol.
- this->set_map(Heap::external_symbol_map());
+ this->set_map(is_ascii ?
+ Heap::external_symbol_with_ascii_data_map() :
+ Heap::external_symbol_map());
}
// Fill the remainder of the string with dead wood.
template<typename Shape, typename Key>
Object* Dictionary<Shape, Key>::AtPut(Key key, Object* value) {
- int entry = FindEntry(key);
+ int entry = this->FindEntry(key);
// If the entry is present, set the value.
if (entry != Dictionary<Shape, Key>::kNotFound) {
Object* value,
PropertyDetails details) {
// Validate that the key is absent.
- SLOW_ASSERT((FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
+ SLOW_ASSERT((this->FindEntry(key) == Dictionary<Shape, Key>::kNotFound));
// Check whether the dictionary should be extended.
Object* obj = EnsureCapacity(1, key);
if (obj->IsFailure()) return obj;
Object* value,
PropertyDetails details) {
UpdateMaxNumberKey(key);
- SLOW_ASSERT(FindEntry(key) == kNotFound);
+ SLOW_ASSERT(this->FindEntry(key) == kNotFound);
return Add(key, value, details);
}
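The added this-> qualifications are not cosmetic: under C++ two-phase name
lookup, unqualified names inside a class template are not searched in
dependent base classes, so FindEntry (inherited from the hash-table base)
must be reached through this->. A self-contained illustration with
hypothetical types, not from the patch:

    template <typename Shape>
    struct Table {
      int FindEntry(int key) { return key; }
    };

    template <typename Shape>
    struct Dict : Table<Shape> {
      int At(int key) {
        // return FindEntry(key);     // ill-formed: unqualified lookup
        //                            // skips the dependent base.
        return this->FindEntry(key);  // dependent; found at instantiation.
      }
    };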
ExternalTwoByteString::kSize, \
external_symbol, \
ExternalSymbol) \
+ V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE, \
+ ExternalTwoByteString::kSize, \
+ external_symbol_with_ascii_data, \
+ ExternalSymbolWithAsciiData) \
V(EXTERNAL_ASCII_SYMBOL_TYPE, \
ExternalAsciiString::kSize, \
external_ascii_symbol, \
ExternalTwoByteString::kSize, \
external_string, \
ExternalString) \
+ V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \
+ ExternalTwoByteString::kSize, \
+ external_string_with_ascii_data, \
+ ExternalStringWithAsciiData) \
V(EXTERNAL_ASCII_STRING_TYPE, \
ExternalAsciiString::kSize, \
external_ascii_string, \
};
const uint32_t kIsConsStringMask = 0x1;
+// If bit 7 is clear, then bit 3 indicates whether this two-byte
+// string actually contains ascii data.
+const uint32_t kAsciiDataHintMask = 0x08;
+const uint32_t kAsciiDataHintTag = 0x08;
+
// A ConsString with an empty string as the right side is a candidate
// for being shortcut by the garbage collector unless it is a
enum InstanceType {
// String types.
- SYMBOL_TYPE = kSymbolTag | kSeqStringTag,
+ SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag,
ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
- CONS_SYMBOL_TYPE = kSymbolTag | kConsStringTag,
+ CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
CONS_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kConsStringTag,
- EXTERNAL_SYMBOL_TYPE = kSymbolTag | kExternalStringTag,
+ EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kExternalStringTag,
+ EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
+ kTwoByteStringTag | kSymbolTag | kExternalStringTag | kAsciiDataHintTag,
EXTERNAL_ASCII_SYMBOL_TYPE =
kAsciiStringTag | kSymbolTag | kExternalStringTag,
- STRING_TYPE = kSeqStringTag,
+ STRING_TYPE = kTwoByteStringTag | kSeqStringTag,
ASCII_STRING_TYPE = kAsciiStringTag | kSeqStringTag,
- CONS_STRING_TYPE = kConsStringTag,
+ CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag,
CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag,
- EXTERNAL_STRING_TYPE = kExternalStringTag,
+ EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
+ EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
+ kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag,
PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
TYPE_SWITCH_INFO_TYPE,
SCRIPT_TYPE,
CODE_CACHE_TYPE,
-#ifdef ENABLE_DEBUGGER_SUPPORT
+ // The following two instance types are only used when
+ // ENABLE_DEBUGGER_SUPPORT is defined. However, as include/v8.h contains
+ // some of the instance type constants, always defining them keeps their
+ // numbers stable regardless of whether ENABLE_DEBUGGER_SUPPORT is defined.
DEBUG_INFO_TYPE,
BREAK_POINT_INFO_TYPE,
-#endif
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
};
+STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType);
+STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
+STATIC_CHECK(PROXY_TYPE == Internals::kProxyType);
+
+
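These checks pin the enum to the values now hard-coded in include/v8.h, so
any future drift fails the build instead of silently corrupting the inlined
API fast paths. A sketch of the usual mechanism, assuming STATIC_CHECK is a
conventional compile-time assertion (the real macro lives in V8's checks.h):

    // Hypothetical equivalent: the array type is ill-formed when cond is
    // false, producing a compile error at the check site.
    #define STATIC_CHECK_SKETCH(cond) \
      typedef char static_check_failed[(cond) ? 1 : -1]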
enum CompareResult {
LESS = -1,
EQUAL = 0,
inline bool IsAsciiRepresentation();
inline bool IsTwoByteRepresentation();
- // Check whether this string is an external two-byte string that in
- // fact contains only ascii characters.
+ // Returns whether this string contains only ascii characters, i.e.
+ // every character can be ascii encoded. This might be the case even
+ // if the string is two-byte. Such strings may appear when the embedder
+ // prefers two-byte external representations even for ascii data.
//
- // Such strings may appear when the embedder prefers two-byte
- // representations even for ascii data.
- inline bool IsExternalTwoByteStringWithAsciiChars();
+ // NOTE: this should be considered only a hint. False negatives are
+ // possible.
+ inline bool HasOnlyAsciiChars();
// Get and set individual two byte chars in the string.
inline void Set(int index, uint16_t value);
HeapEntry* from,
HeapEntry* to)
: type_(type), name_(name), from_(from), to_(to) {
- ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY);
+ ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY || type_ == INTERNAL);
}
}
-void HeapEntry::SetClosureReference(const char* name, HeapEntry* entry) {
- HeapGraphEdge* edge =
- new HeapGraphEdge(HeapGraphEdge::CONTEXT_VARIABLE, name, this, entry);
+void HeapEntry::AddEdge(HeapGraphEdge* edge) {
children_.Add(edge);
- entry->retainers_.Add(edge);
+ edge->to()->retainers_.Add(edge);
+}
+
+
+void HeapEntry::SetClosureReference(const char* name, HeapEntry* entry) {
+ AddEdge(
+ new HeapGraphEdge(HeapGraphEdge::CONTEXT_VARIABLE, name, this, entry));
}
void HeapEntry::SetElementReference(int index, HeapEntry* entry) {
- HeapGraphEdge* edge = new HeapGraphEdge(index, this, entry);
- children_.Add(edge);
- entry->retainers_.Add(edge);
+ AddEdge(new HeapGraphEdge(index, this, entry));
+}
+
+
+void HeapEntry::SetInternalReference(const char* name, HeapEntry* entry) {
+ AddEdge(new HeapGraphEdge(HeapGraphEdge::INTERNAL, name, this, entry));
}
void HeapEntry::SetPropertyReference(const char* name, HeapEntry* entry) {
- HeapGraphEdge* edge =
- new HeapGraphEdge(HeapGraphEdge::PROPERTY, name, this, entry);
- children_.Add(edge);
- entry->retainers_.Add(edge);
+ AddEdge(new HeapGraphEdge(HeapGraphEdge::PROPERTY, name, this, entry));
}
void HeapEntry::Print(int max_depth, int indent) {
- OS::Print("%6d %6d %6d", self_size_, TotalSize(), NonSharedTotalSize());
+ OS::Print("%6d %6d %6d ", self_size_, TotalSize(), NonSharedTotalSize());
if (type_ != STRING) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
} else {
case HeapGraphEdge::ELEMENT:
OS::Print(" %*c %d: ", indent, ' ', edge->index());
break;
+ case HeapGraphEdge::INTERNAL:
+ OS::Print(" %*c $%s: ", indent, ' ', edge->name());
+ break;
case HeapGraphEdge::PROPERTY:
OS::Print(" %*c %s: ", indent, ' ', edge->name());
break;
case HeapGraphEdge::ELEMENT:
OS::Print("[%d] ", edge->index());
break;
+ case HeapGraphEdge::INTERNAL:
+ OS::Print("[$%s] ", edge->name());
+ break;
case HeapGraphEdge::PROPERTY:
OS::Print("[%s] ", edge->name());
break;
}
+void HeapSnapshot::SetInternalReference(HeapEntry* parent,
+ const char* reference_name,
+ Object* child) {
+ HeapEntry* child_entry = GetEntry(child);
+ if (child_entry != NULL) {
+ parent->SetInternalReference(reference_name, child_entry);
+ }
+}
+
+
void HeapSnapshot::SetPropertyReference(HeapEntry* parent,
String* reference_name,
Object* child) {
snapshot_->SetClosureReference(entry, local_name, context->get(idx));
}
}
+ snapshot_->SetInternalReference(entry, "code", func->shared());
}
}
enum Type {
CONTEXT_VARIABLE = v8::HeapGraphEdge::CONTEXT_VARIABLE,
ELEMENT = v8::HeapGraphEdge::ELEMENT,
- PROPERTY = v8::HeapGraphEdge::PROPERTY
+ PROPERTY = v8::HeapGraphEdge::PROPERTY,
+ INTERNAL = v8::HeapGraphEdge::INTERNAL
};
HeapGraphEdge(Type type, const char* name, HeapEntry* from, HeapEntry* to);
return index_;
}
const char* name() const {
- ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY);
+ ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY || type_ == INTERNAL);
return name_;
}
HeapEntry* from() const { return from_; }
void PaintReachableFromOthers() { painted_ = kPaintReachableFromOthers; }
void SetClosureReference(const char* name, HeapEntry* entry);
void SetElementReference(int index, HeapEntry* entry);
+ void SetInternalReference(const char* name, HeapEntry* entry);
void SetPropertyReference(const char* name, HeapEntry* entry);
void SetAutoIndexReference(HeapEntry* entry);
void Print(int max_depth, int indent);
private:
+ void AddEdge(HeapGraphEdge* edge);
int CalculateTotalSize();
int CalculateNonSharedTotalSize();
void FindRetainingPaths(HeapEntry* node, CachedHeapGraphPath* prev_path);
void SetClosureReference(
HeapEntry* parent, String* reference_name, Object* child);
void SetElementReference(HeapEntry* parent, int index, Object* child);
+ void SetInternalReference(
+ HeapEntry* parent, const char* reference_name, Object* child);
void SetPropertyReference(
HeapEntry* parent, String* reference_name, Object* child);
}
-static inline SeqAsciiString* TryGetSeqAsciiString(String* s) {
- if (!s->IsFlat() || !s->IsAsciiRepresentation()) return NULL;
- if (s->IsConsString()) {
- ASSERT(ConsString::cast(s)->second()->length() == 0);
- return SeqAsciiString::cast(ConsString::cast(s)->first());
- }
- return SeqAsciiString::cast(s);
-}
-
-
namespace {
struct ToLowerTraits {
unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
NoHandleAllocation ha;
CONVERT_CHECKED(String, s, args[0]);
- s->TryFlatten();
+ s = s->TryFlattenGetString();
const int length = s->length();
// Assume that the string is not empty; we need this assumption later
// character is also ascii. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
- SeqAsciiString* seq_ascii = TryGetSeqAsciiString(s);
- if (seq_ascii != NULL) {
+ if (s->IsSeqAsciiString()) {
Object* o = Heap::AllocateRawAsciiString(length);
if (o->IsFailure()) return o;
SeqAsciiString* result = SeqAsciiString::cast(o);
bool has_changed_character = ConvertTraits::ConvertAscii(
- result->GetChars(), seq_ascii->GetChars(), length);
+ result->GetChars(), SeqAsciiString::cast(s)->GetChars(), length);
return has_changed_character ? result : s;
}
if (first->IsString()) return first;
}
- bool ascii = special->IsAsciiRepresentation();
+ bool ascii = special->HasOnlyAsciiChars();
int position = 0;
for (int i = 0; i < array_length; i++) {
int increment = 0;
String* element = String::cast(elt);
int element_length = element->length();
increment = element_length;
- if (ascii && !element->IsAsciiRepresentation()) {
+ if (ascii && !element->HasOnlyAsciiChars()) {
ascii = false;
}
} else {
Handle<Object> break_point_object_arg = args.at<Object>(2);
// Set break point.
- Debug::SetBreakPoint(shared, source_position, break_point_object_arg);
+ Debug::SetBreakPoint(shared, break_point_object_arg, &source_position);
return Heap::undefined_value();
}
// The current candidate for the source position:
int target_start_position = RelocInfo::kNoPosition;
Handle<SharedFunctionInfo> target;
- // The current candidate for the last function in script:
- Handle<SharedFunctionInfo> last;
while (!done) {
HeapIterator iterator;
for (HeapObject* obj = iterator.next();
}
}
}
-
- // Keep track of the last function in the script.
- if (last.is_null() ||
- shared->end_position() > last->start_position()) {
- last = shared;
- }
}
}
}
- // Make sure some candidate is selected.
if (target.is_null()) {
- if (!last.is_null()) {
- // Position after the last function - use last.
- target = last;
- } else {
- // Unable to find function - possibly script without any function.
- return Heap::undefined_value();
- }
+ return Heap::undefined_value();
}
// If the candidate found is compiled we are done. NOTE: when lazy
}
-// Change the state of a break point in a script. NOTE: Regarding performance
-// see the NOTE for GetScriptFromScriptData.
+// Changes the state of a break point in a script and returns the source
+// position where the break point was set. NOTE: Regarding performance see
+// the NOTE for GetScriptFromScriptData.
// args[0]: script to set break point in
// args[1]: number: break source position (within the script source)
// args[2]: number: break point object
} else {
position = source_position - shared->start_position();
}
- Debug::SetBreakPoint(shared, position, break_point_object_arg);
+ Debug::SetBreakPoint(shared, break_point_object_arg, &position);
+ position += shared->start_position();
+ return Smi::FromInt(position);
}
return Heap::undefined_value();
}
// ----------------------------------------------------------------------------
// Keyword Matcher
+
KeywordMatcher::FirstState KeywordMatcher::first_states_[] = {
{ "break", KEYWORD_PREFIX, Token::BREAK },
{ NULL, C, Token::ILLEGAL },
// Scanner
Scanner::Scanner(ParserMode pre)
- : stack_overflow_(false), is_pre_parsing_(pre == PREPARSE) { }
+ : is_pre_parsing_(pre == PREPARSE), stack_overflow_(false) { }
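The reorder matters because C++ initializes members in declaration order, not
in initializer-list order; a mismatched list draws -Wreorder and can mask
use-before-initialization bugs. A minimal illustration with a hypothetical
struct, not from the patch:

    struct Example {
      int a;
      int b;
      // a is initialized first even though b is listed first, so a(b)
      // reads b while b is still uninitialized.
      Example() : b(1), a(b) {}
    };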
void Scanner::Initialize(Handle<String> source,
// *: Actually "future reserved keywords". These are the only ones we
// recognized, the remaining are allowed as identifiers.
public:
- KeywordMatcher() : state_(INITIAL), token_(Token::IDENTIFIER) {}
+ KeywordMatcher()
+ : state_(INITIAL),
+ token_(Token::IDENTIFIER),
+ keyword_(NULL),
+ counter_(0),
+ keyword_token_(Token::ILLEGAL) {}
Token::Value token() { return token_; }
// State map for first keyword character range.
static FirstState first_states_[kFirstCharRangeLength];
- // Current state.
- State state_;
- // Token for currently added characters.
- Token::Value token_;
-
- // Matching a specific keyword string (there is only one possible valid
- // keyword with the current prefix).
- const char* keyword_;
- int counter_;
- Token::Value keyword_token_;
-
// If input equals keyword's character at position, continue matching keyword
// from that position.
inline bool MatchKeywordStart(uc32 input,
char match,
State new_state,
Token::Value keyword_token) {
- if (input == match) { // Matched "do".
- state_ = new_state;
- token_ = keyword_token;
- return true;
+ if (input != match) {
+ return false;
}
- return false;
+ state_ = new_state;
+ token_ = keyword_token;
+ return true;
}
void Step(uc32 input);
+
+ // Current state.
+ State state_;
+ // Token for currently added characters.
+ Token::Value token_;
+
+ // Matching a specific keyword string (there is only one possible valid
+ // keyword with the current prefix).
+ const char* keyword_;
+ int counter_;
+ Token::Value keyword_token_;
};
static const int kNoEndPosition = 1;
private:
- void Init(Handle<String> source,
- unibrow::CharacterStream* stream,
- int start_position, int end_position,
- ParserLanguage language);
-
-
- // Different UTF16 buffers used to pull characters from. Based on input one of
- // these will be initialized as the actual data source.
- CharacterStreamUTF16Buffer char_stream_buffer_;
- ExternalStringUTF16Buffer<ExternalTwoByteString, uint16_t>
- two_byte_string_buffer_;
- ExternalStringUTF16Buffer<ExternalAsciiString, char> ascii_string_buffer_;
-
- // Source. Will point to one of the buffers declared above.
- UTF16Buffer* source_;
-
- // Used to convert the source string into a character stream when a stream
- // is not passed to the scanner.
- SafeStringInputBuffer safe_string_input_buffer_;
-
- // Buffer to hold literal values (identifiers, strings, numbers)
- // using 0-terminated UTF-8 encoding.
- UTF8Buffer literal_buffer_1_;
- UTF8Buffer literal_buffer_2_;
-
- bool stack_overflow_;
- static StaticResource<Utf8Decoder> utf8_decoder_;
-
- // One Unicode character look-ahead; c0_ < 0 at the end of the input.
- uc32 c0_;
-
// The current and look-ahead token.
struct TokenDesc {
Token::Value token;
UTF8Buffer* literal_buffer;
};
- TokenDesc current_; // desc for current token (as returned by Next())
- TokenDesc next_; // desc for next token (one token look-ahead)
- bool has_line_terminator_before_next_;
- bool is_pre_parsing_;
- bool is_parsing_json_;
+ void Init(Handle<String> source,
+ unibrow::CharacterStream* stream,
+ int start_position, int end_position,
+ ParserLanguage language);
// Literal buffer support
void StartLiteral();
return SkipJavaScriptWhiteSpace();
}
}
+
bool SkipJavaScriptWhiteSpace();
bool SkipJsonWhiteSpace();
Token::Value SkipSingleLineComment();
// the integer part is zero), and may include an exponent part (e.g., "e-10").
// Hexadecimal and octal numbers are not allowed.
Token::Value ScanJsonNumber();
+
// A JSON string (production JSONString) is subset of valid JavaScript string
// literals. The string must only be double-quoted (not single-quoted), and
// the only allowed backslash-escapes are ", /, \, b, f, n, r, t and
// four-digit hex escapes (uXXXX). Any other use of backslashes is invalid.
Token::Value ScanJsonString();
+
// Used to recognize one of the literals "true", "false", or "null". These
// are the only valid JSON identifiers (productions JSONBooleanLiteral,
// JSONNullLiteral).
// Decodes a unicode escape-sequence which is part of an identifier.
// If the escape sequence cannot be decoded the result is kBadRune.
uc32 ScanIdentifierUnicodeEscape();
+
+ TokenDesc current_; // desc for current token (as returned by Next())
+ TokenDesc next_; // desc for next token (one token look-ahead)
+ bool has_line_terminator_before_next_;
+ bool is_pre_parsing_;
+ bool is_parsing_json_;
+
+ // Different UTF16 buffers used to pull characters from. Based on input one of
+ // these will be initialized as the actual data source.
+ CharacterStreamUTF16Buffer char_stream_buffer_;
+ ExternalStringUTF16Buffer<ExternalTwoByteString, uint16_t>
+ two_byte_string_buffer_;
+ ExternalStringUTF16Buffer<ExternalAsciiString, char> ascii_string_buffer_;
+
+ // Source. Will point to one of the buffers declared above.
+ UTF16Buffer* source_;
+
+ // Used to convert the source string into a character stream when a stream
+ // is not passed to the scanner.
+ SafeStringInputBuffer safe_string_input_buffer_;
+
+ // Buffer to hold literal values (identifiers, strings, numbers)
+ // using 0-terminated UTF-8 encoding.
+ UTF8Buffer literal_buffer_1_;
+ UTF8Buffer literal_buffer_2_;
+
+ bool stack_overflow_;
+ static StaticResource<Utf8Decoder> utf8_decoder_;
+
+ // One Unicode character look-ahead; c0_ < 0 at the end of the input.
+ uc32 c0_;
};
} } // namespace v8::internal
template<class Allocator>
+bool ScopeInfo<Allocator>::HasHeapAllocatedLocals(Code* code) {
+ if (code->sinfo_size() > 0) {
+ Object** p = ContextEntriesAddr(code);
+ int n;  // Number of context slots.
+ ReadInt(p, &n);
+ return n > 0;
+ }
+ return false;
+}
+
+
+template<class Allocator>
int ScopeInfo<Allocator>::StackSlotIndex(Code* code, String* name) {
ASSERT(name->IsSymbol());
if (code->sinfo_size() > 0) {
// Return the number of context slots for code.
static int NumberOfContextSlots(Code* code);
+ // Return true if this has context slots in addition to MIN_CONTEXT_SLOTS.
+ static bool HasHeapAllocatedLocals(Code* code);
+
// Lookup support for scope info embedded in Code objects. Returns
// the stack slot index for a given slot name if the slot is
// present; otherwise returns a value < 0. The name must be a symbol
Code* code = Code::cast(result);
USE(code);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
+ USE(kind);
PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_DEBUG_BREAK_TAG),
code, code->arguments_count()));
}
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
-#define BUILD_NUMBER 18
+#define BUILD_NUMBER 19
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
namespace v8 {
namespace internal {
-inline Condition NegateCondition(Condition cc) {
- return static_cast<Condition>(cc ^ 1);
-}
-
// -----------------------------------------------------------------------------
// Implementation of Assembler
-
void Assembler::emitl(uint32_t x) {
Memory::uint32_at(pc_) = x;
pc_ += sizeof(uint32_t);
}
+void Assembler::CodeTargetAlign() {
+ Align(16); // Preferred alignment of jump targets on x64.
+}
+
+
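A hypothetical call site, not from the patch: a code generator would align
just before binding a label that hot backward branches target, trading a few
padding bytes for better instruction fetch.

    masm->CodeTargetAlign();  // advance pc to a 16-byte boundary on x64
    masm->bind(&loop_top);    // the back-edge target is now aligned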
void Assembler::bind_to(Label* L, int pos) {
ASSERT(!L->is_bound()); // Label may only be bound once.
last_pc_ = NULL;
}
+void Assembler::incl(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ emit(0xFF);
+ emit_modrm(0, dst);
+}
+
+
void Assembler::int3() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
}
-void Assembler::comisd(XMMRegister dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- emit(0x66);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x2f);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
// Negation of the default no_condition (-1) results in a non-default
// no_condition value (-2). As long as tests for no_condition check
// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc);
+inline Condition NegateCondition(Condition cc) {
+ return static_cast<Condition>(cc ^ 1);
+}
+
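The xor works because x86 encodes each condition and its negation as an
adjacent pair differing only in bit 0; a few pairs for illustration:

    // equal (0x4) ^ 1 == not_equal     (0x5)
    // below (0x2) ^ 1 == above_equal   (0x3)
    // less  (0xC) ^ 1 == greater_equal (0xD)
    ASSERT(NegateCondition(equal) == not_equal);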
// Corresponds to transposing the operands of a comparison.
inline Condition ReverseCondition(Condition cc) {
};
}
+
enum Hint {
no_hint = 0,
not_taken = 0x2e,
// possible to align the pc offset to a multiple
// of m. m must be a power of 2.
void Align(int m);
+ // Aligns code to the platform's preferred alignment for jump targets.
+ void CodeTargetAlign();
// Stack
void pushfq();
void incq(Register dst);
void incq(const Operand& dst);
+ void incl(Register dst);
void incl(const Operand& dst);
void lea(Register dst, const Operand& src);
void xorpd(XMMRegister dst, XMMRegister src);
void sqrtsd(XMMRegister dst, XMMRegister src);
- void comisd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
// The first argument is the reg field, the second argument is the r/m field.
class FloatingPointHelper : public AllStatic {
public:
- // Code pattern for loading a floating point value. Input value must
- // be either a smi or a heap number object (fp value). Requirements:
- // operand on TOS+1. Returns operand as floating point number on FPU
- // stack.
- static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
-
- // Code pattern for loading a floating point value. Input value must
- // be either a smi or a heap number object (fp value). Requirements:
- // operand in src register. Returns operand as floating point number
- // in XMM register. May destroy src register.
- static void LoadFloatOperand(MacroAssembler* masm,
- Register src,
- XMMRegister dst);
-
- // Code pattern for loading a possible number into a XMM register.
- // If the contents of src is not a number, control branches to
- // the Label not_number. If contents of src is a smi or a heap number
- // object (fp value), it is loaded into the XMM register as a double.
- // The register src is not changed, and src may not be kScratchRegister.
- static void LoadFloatOperand(MacroAssembler* masm,
- Register src,
- XMMRegister dst,
- Label *not_number);
-
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 in rdx, operand_2 in rax; Returns operands as
- // floating point numbers in XMM registers.
- static void LoadFloatOperands(MacroAssembler* masm,
- XMMRegister dst1,
- XMMRegister dst2);
-
- // Similar to LoadFloatOperands, assumes that the operands are smis.
- static void LoadFloatOperandsFromSmis(MacroAssembler* masm,
- XMMRegister dst1,
- XMMRegister dst2);
-
- // Code pattern for loading floating point values onto the fp stack.
- // Input values must be either smi or heap number objects (fp values).
- // Requirements:
- // Register version: operands in registers lhs and rhs.
- // Stack version: operands on TOS+1 and TOS+2.
- // Returns operands as floating point numbers on fp stack.
- static void LoadFloatOperands(MacroAssembler* masm,
- Register lhs,
- Register rhs);
-
- // Test if operands are smi or number objects (fp). Requirements:
- // operand_1 in rax, operand_2 in rdx; falls through on float or smi
- // operands, jumps to the non_float label otherwise.
- static void CheckNumberOperands(MacroAssembler* masm,
- Label* non_float);
+ // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
+ // If the operands are not both numbers, jump to not_numbers.
+ // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
+ // NumberOperands assumes both are smis or heap numbers.
+ static void LoadSSE2SmiOperands(MacroAssembler* masm);
+ static void LoadSSE2NumberOperands(MacroAssembler* masm);
+ static void LoadSSE2UnknownOperands(MacroAssembler* masm,
+ Label* not_numbers);
// Takes the operands in rdx and rax and loads them as integers in rax
// and rcx.
static void LoadAsIntegers(MacroAssembler* masm,
- Label* operand_conversion_failure);
+ Label* operand_conversion_failure,
+ Register heap_number_map);
+ // As above, but we know the operands to be numbers. In that case,
+ // conversion can't fail.
+ static void LoadNumbersAsIntegers(MacroAssembler* masm);
};
// Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
__ addsd(xmm2, xmm3);
// xmm2 now has 0.5.
- __ comisd(xmm2, xmm1);
+ __ ucomisd(xmm2, xmm1);
call_runtime.Branch(not_equal);
// Calculates square root.
__ cmpq(ArrayElement(cache_, dst_), key_);
__ j(not_equal, &first_loop);
- __ Integer32ToSmi(scratch_, dst_);
- __ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
+ __ Integer32ToSmiField(
+ FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
__ movq(dst_, ArrayElement(cache_, dst_, 1));
__ jmp(exit_label());
__ cmpq(ArrayElement(cache_, dst_), key_);
__ j(not_equal, &second_loop);
- __ Integer32ToSmi(scratch_, dst_);
- __ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
+ __ Integer32ToSmiField(
+ FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
__ movq(dst_, ArrayElement(cache_, dst_, 1));
__ jmp(exit_label());
// cache miss this optimization would hardly matter much.
// Check if we could add new entry to cache.
- __ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
- __ SmiCompare(rbx, r9);
+ __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ SmiToInteger32(r9,
+ FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
+ __ cmpl(rbx, r9);
__ j(greater, &add_new_entry);
// Check if we could evict entry after finger.
- __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
- __ SmiToInteger32(rdx, rdx);
- __ SmiToInteger32(rbx, rbx);
- __ addq(rdx, kEntrySizeImm);
+ __ SmiToInteger32(rdx,
+ FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+ __ addl(rdx, kEntrySizeImm);
Label forward;
- __ cmpq(rbx, rdx);
+ __ cmpl(rbx, rdx);
__ j(greater, &forward);
// Need to wrap over the cache.
__ movl(rdx, kEntriesIndexImm);
__ bind(&forward);
- __ Integer32ToSmi(r9, rdx);
+ __ movl(r9, rdx);
__ jmp(&update_cache);
__ bind(&add_new_entry);
- // r9 holds cache size as smi.
- __ SmiToInteger32(rdx, r9);
- __ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize));
- __ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
+ // r9 holds cache size as int32.
+ __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
+ __ Integer32ToSmiField(
+ FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
// Update the cache itself.
- // rdx holds the index as int.
- // r9 holds the index as smi.
+ // r9 holds the index as int32.
__ bind(&update_cache);
__ pop(rbx); // restore the key
- __ movq(FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
+ __ Integer32ToSmiField(
+ FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
// Store key.
- __ movq(ArrayElement(rcx, rdx), rbx);
+ __ movq(ArrayElement(rcx, r9), rbx);
__ RecordWrite(rcx, 0, rbx, r9);
// Store value.
__ pop(rcx); // restore the cache.
- __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
- __ SmiAddConstant(rdx, rdx, Smi::FromInt(1));
- __ movq(r9, rdx);
- __ SmiToInteger32(rdx, rdx);
+ __ SmiToInteger32(rdx,
+ FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+ __ incl(rdx);
+ // Back up rax, because the RecordWrite macro clobbers its arguments.
__ movq(rbx, rax);
- __ movq(ArrayElement(rcx, rdx), rbx);
- __ RecordWrite(rcx, 0, rbx, r9);
+ __ movq(ArrayElement(rcx, rdx), rax);
+ __ RecordWrite(rcx, 0, rbx, rdx);
if (!dst_.is(rax)) {
__ movq(dst_, rax);
¬_numbers);
LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
¬_numbers);
- __ comisd(xmm0, xmm1);
+ __ ucomisd(xmm0, xmm1);
// Bail out if a NaN is involved.
not_numbers.Branch(parity_even, left_side, right_side);
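The patch switches these comparisons from comisd to ucomisd. Both set
ZF/PF/CF identically, with the unordered (NaN) outcome setting all three, so
j(parity_even) still catches NaNs; the difference is that ucomisd raises the
invalid-operation exception only for signaling NaNs, while comisd raises it
for quiet NaNs as well. Since this code branches on the parity flag rather
than relying on the exception, the quieter instruction suffices:

    //   result     ZF PF CF
    //   unordered   1  1  1   <- NaN operand; parity_even branch taken
    //   greater     0  0  0
    //   less        0  0  1
    //   equal       1  0  0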
// rcx: RegExp data (FixedArray)
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
- __ movq(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
- __ SmiCompare(rbx, Smi::FromInt(JSRegExp::IRREGEXP));
+ __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
+ __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
__ j(not_equal, &runtime);
// rcx: RegExp data (FixedArray)
// Check that the number of captures fit in the static offsets vector buffer.
- __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ __ SmiToInteger32(rdx,
+ FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
- __ addq(rdx, Immediate(2)); // rdx was number_of_captures * 2.
+ __ leal(rdx, Operand(rdx, rdx, times_1, 2));
// Check that the static offsets vector buffer is large enough.
- __ cmpq(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+ __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
__ j(above, &runtime);
// rcx: RegExp data (FixedArray)
__ JumpIfSmi(rax, &runtime);
Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
__ j(NegateCondition(is_string), &runtime);
- // Get the length of the string to rbx.
- __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
- // rbx: Length of subject string as smi
- // rcx: RegExp data (FixedArray)
- // rdx: Number of capture registers
+ // rax: Subject string.
+ // rcx: RegExp data (FixedArray).
+ // rdx: Number of capture registers.
// Check that the third argument is a positive smi less than the string
// length. A negative value will be greater (unsigned comparison).
- __ movq(rax, Operand(rsp, kPreviousIndexOffset));
- __ JumpIfNotSmi(rax, &runtime);
- __ SmiCompare(rax, rbx);
+ __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(rbx, &runtime);
+ __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
__ j(above_equal, &runtime);
// rcx: RegExp data (FixedArray)
// Check that the last match info has space for the capture registers and the
// additional information. Ensure no overflow in add.
ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- __ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ SmiToInteger32(rax, rax);
+ __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
__ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
__ cmpl(rdx, rax);
__ j(greater, &runtime);
// r12: code
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
- __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
- __ SmiToInteger64(rbx, rbx); // Previous index from smi.
+ __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
// rax: subject string
// rbx: previous index
__ bind(&success);
__ movq(rax, Operand(rsp, kJSRegExpOffset));
__ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
- __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+ __ SmiToInteger32(rax,
+ FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
- __ addq(rdx, Immediate(2)); // rdx was number_of_captures * 2.
+ __ leal(rdx, Operand(rax, rax, times_1, 2));
// rdx: Number of capture registers
// Load last_match_info which is still known to be a fast case JSArray.
rdx,
times_pointer_size,
RegExpImpl::kFirstCaptureOffset),
- rdi);
+ rdi);
__ jmp(&next_capture);
__ bind(&done);
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
- __ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide smi tagged length by two.
- __ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1);
+ __ SmiToInteger32(
+ mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ __ shrl(mask, Immediate(1));
__ subq(mask, Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
CpuFeatures::Scope fscope(SSE2);
__ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
__ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
- __ comisd(xmm0, xmm1);
+ __ ucomisd(xmm0, xmm1);
__ j(parity_even, not_found); // Bail out if NaN is involved.
__ j(not_equal, not_found); // The cache did not contain this value.
__ jmp(&load_result_from_cache);
}
__ bind(&is_smi);
- __ movq(scratch, object);
- __ SmiToInteger32(scratch, scratch);
+ __ SmiToInteger32(scratch, object);
GenerateConvertHashCodeToIndex(masm, scratch, mask);
Register index = scratch;
if (include_number_compare_) {
Label non_number_comparison;
Label unordered;
- FloatingPointHelper::LoadFloatOperand(masm, rdx, xmm0,
- &non_number_comparison);
- FloatingPointHelper::LoadFloatOperand(masm, rax, xmm1,
- &non_number_comparison);
-
- __ comisd(xmm0, xmm1);
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+ __ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
__ j(parity_even, &unordered);
__ j(equal, &adaptor_frame);
// Get the length from the frame.
- __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
- __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+ __ SmiToInteger32(rcx,
+ Operand(rdx,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ // Space on stack must already hold a smi.
+ __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
// Do not clobber the length index for the indexing operation since
// it is used to compute the size for allocation later.
- SmiIndex index = masm->SmiToIndex(rbx, rcx, kPointerSizeLog2);
- __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
+ __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
__ movq(Operand(rsp, 2 * kPointerSize), rdx);
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ testq(rcx, rcx);
+ __ testl(rcx, rcx);
__ j(zero, &add_arguments_object);
- index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
- __ lea(rcx, Operand(index.reg, index.scale, FixedArray::kHeaderSize));
+ __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ addq(rcx, Immediate(Heap::kArgumentsObjectSize));
+ __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
__ movq(rdi, Operand(rdi, offset));
// Copy the JS object part.
- for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
- __ movq(kScratchRegister, FieldOperand(rdi, i));
- __ movq(FieldOperand(rax, i), kScratchRegister);
- }
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
+ __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
+ __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
+ __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
+ __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
+ __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
// Setup the callee in-object property.
ASSERT(Heap::arguments_callee_index == 0);
// If there are no actual arguments, we're done.
Label done;
- __ testq(rcx, rcx);
+ __ SmiTest(rcx);
__ j(zero, &done);
// Get the parameters pointer from the stack and untag the length.
__ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
__ addq(rdi, Immediate(kPointerSize));
__ subq(rdx, Immediate(kPointerSize));
- __ decq(rcx);
+ __ decl(rcx);
__ j(not_zero, &loop);
// Return and remove the on-stack parameters.
}
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register number) {
- Label load_smi, done;
-
- __ JumpIfSmi(number, &load_smi);
- __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi);
- __ SmiToInteger32(number, number);
- __ push(number);
- __ fild_s(Operand(rsp, 0));
- __ pop(number);
-
- __ bind(&done);
+void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
}
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register src,
- XMMRegister dst) {
- Label load_smi, done;
-
- __ JumpIfSmi(src, &load_smi);
- __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
+void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
+ Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
+ // Load operand in rdx into xmm0.
+ __ JumpIfSmi(rdx, &load_smi_rdx);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Load operand in rax into xmm1.
+ __ JumpIfSmi(rax, &load_smi_rax);
+ __ bind(&load_nonsmi_rax);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&done);
- __ bind(&load_smi);
- __ SmiToInteger32(src, src);
- __ cvtlsi2sd(dst, src);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register src,
- XMMRegister dst,
- Label* not_number) {
- Label load_smi, done;
- ASSERT(!src.is(kScratchRegister));
- __ JumpIfSmi(src, &load_smi);
- __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- __ cmpq(FieldOperand(src, HeapObject::kMapOffset), kScratchRegister);
- __ j(not_equal, not_number);
- __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
- __ jmp(&done);
+ __ bind(&load_smi_rdx);
+ __ SmiToInteger32(kScratchRegister, rdx);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ JumpIfNotSmi(rax, &load_nonsmi_rax);
- __ bind(&load_smi);
- __ SmiToInteger32(kScratchRegister, src);
- __ cvtlsi2sd(dst, kScratchRegister);
+ __ bind(&load_smi_rax);
+ __ SmiToInteger32(kScratchRegister, rax);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
__ bind(&done);
}
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- XMMRegister dst1,
- XMMRegister dst2) {
- __ movq(kScratchRegister, rdx);
- LoadFloatOperand(masm, kScratchRegister, dst1);
- __ movq(kScratchRegister, rax);
- LoadFloatOperand(masm, kScratchRegister, dst2);
-}
+void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
+ Label* not_numbers) {
+ Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
+ // Load operand in rdx into xmm0, or branch to not_numbers.
+ __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
+ __ JumpIfSmi(rdx, &load_smi_rdx);
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, not_numbers); // Argument in rdx is not a number.
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Load operand in rax into xmm1, or branch to not_numbers.
+ __ JumpIfSmi(rax, &load_smi_rax);
+ __ bind(&load_nonsmi_rax);
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ j(not_equal, not_numbers);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
-void FloatingPointHelper::LoadFloatOperandsFromSmis(MacroAssembler* masm,
- XMMRegister dst1,
- XMMRegister dst2) {
+ __ bind(&load_smi_rdx);
__ SmiToInteger32(kScratchRegister, rdx);
- __ cvtlsi2sd(dst1, kScratchRegister);
+ __ cvtlsi2sd(xmm0, kScratchRegister);
+ __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+ __ bind(&load_smi_rax);
__ SmiToInteger32(kScratchRegister, rax);
- __ cvtlsi2sd(dst2, kScratchRegister);
+ __ cvtlsi2sd(xmm1, kScratchRegister);
+ __ bind(&done);
}
// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
- Label* conversion_failure) {
+ Label* conversion_failure,
+ Register heap_number_map) {
// Check float operands.
Label arg1_is_object, check_undefined_arg1;
Label arg2_is_object, check_undefined_arg2;
__ jmp(&load_arg2);
__ bind(&arg1_is_object);
- __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg1);
// Get the untagged integer version of the edx heap number in rcx.
IntegerConvert(masm, rdx, rdx);
__ jmp(&done);
__ bind(&arg2_is_object);
- __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
__ j(not_equal, &check_undefined_arg2);
// Get the untagged integer version of the eax heap number in ecx.
IntegerConvert(masm, rcx, rax);
}
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register lhs,
- Register rhs) {
- Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
- __ JumpIfSmi(lhs, &load_smi_lhs);
- __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
- __ bind(&done_load_lhs);
-
- __ JumpIfSmi(rhs, &load_smi_rhs);
- __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_lhs);
- __ SmiToInteger64(kScratchRegister, lhs);
- __ push(kScratchRegister);
- __ fild_d(Operand(rsp, 0));
- __ pop(kScratchRegister);
- __ jmp(&done_load_lhs);
-
- __ bind(&load_smi_rhs);
- __ SmiToInteger64(kScratchRegister, rhs);
- __ push(kScratchRegister);
- __ fild_d(Operand(rsp, 0));
- __ pop(kScratchRegister);
-
- __ bind(&done);
-}
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
+ if (FLAG_debug_code) {
+ // Both arguments cannot be smis; that case is handled by smi-only code.
+ Label ok;
+ __ JumpIfNotBothSmi(rax, rdx, &ok);
+ __ Abort("Both arguments smi but not handled by smi-code.");
+ __ bind(&ok);
+ }
+ // Check float operands.
+ Label done;
+ Label rax_is_object;
+ Label rdx_is_object;
+ __ JumpIfNotSmi(rdx, &rdx_is_object);
+ __ SmiToInteger32(rdx, rdx);
-void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
- Label* non_float) {
- Label test_other, done;
- // Test if both operands are numbers (heap_numbers or smis).
- // If not, jump to label non_float.
- __ JumpIfSmi(rdx, &test_other); // argument in rdx is OK
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
- __ j(not_equal, non_float); // The argument in rdx is not a number.
+ __ bind(&rax_is_object);
+ IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
+ __ jmp(&done);
- __ bind(&test_other);
- __ JumpIfSmi(rax, &done); // argument in rax is OK
- __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
- __ j(not_equal, non_float); // The argument in rax is not a number.
+ __ bind(&rdx_is_object);
+ IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
+ __ JumpIfNotSmi(rax, &rax_is_object);
+ __ SmiToInteger32(rcx, rax);
- // Fall-through: Both operands are numbers.
__ bind(&done);
+ __ movl(rax, rdx);
}
}
// left is rdx, right is rax.
__ AllocateHeapNumber(rbx, rcx, slow);
- FloatingPointHelper::LoadFloatOperandsFromSmis(masm, xmm4, xmm5);
+ FloatingPointHelper::LoadSSE2SmiOperands(masm);
switch (op_) {
- case Token::ADD: __ addsd(xmm4, xmm5); break;
- case Token::SUB: __ subsd(xmm4, xmm5); break;
- case Token::MUL: __ mulsd(xmm4, xmm5); break;
- case Token::DIV: __ divsd(xmm4, xmm5); break;
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm4);
+ __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
__ movq(rax, rbx);
GenerateReturn(masm);
}
Label not_floats;
// rax: y
// rdx: x
- if (static_operands_type_.IsNumber() && FLAG_debug_code) {
- // Assert at runtime that inputs are only numbers.
- __ AbortIfNotNumber(rdx);
- __ AbortIfNotNumber(rax);
- } else {
- FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
- }
- // Fast-case: Both operands are numbers.
- // xmm4 and xmm5 are volatile XMM registers.
- FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
+ ASSERT(!static_operands_type_.IsSmi());
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx);
+ __ AbortIfNotNumber(rax);
+ }
+ FloatingPointHelper::LoadSSE2NumberOperands(masm);
+ } else {
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
+ }
switch (op_) {
- case Token::ADD: __ addsd(xmm4, xmm5); break;
- case Token::SUB: __ subsd(xmm4, xmm5); break;
- case Token::MUL: __ mulsd(xmm4, xmm5); break;
- case Token::DIV: __ divsd(xmm4, xmm5); break;
+ case Token::ADD: __ addsd(xmm0, xmm1); break;
+ case Token::SUB: __ subsd(xmm0, xmm1); break;
+ case Token::MUL: __ mulsd(xmm0, xmm1); break;
+ case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
// Allocate a heap number, if needed.
break;
default: UNREACHABLE();
}
- __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
GenerateReturn(masm);
__ bind(¬_floats);
if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
case Token::SAR:
case Token::SHL:
case Token::SHR: {
- Label skip_allocation, non_smi_result;
- FloatingPointHelper::LoadAsIntegers(masm, &call_runtime);
+ Label skip_allocation, non_smi_shr_result;
+ Register heap_number_map = r9;
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ if (static_operands_type_.IsNumber()) {
+ if (FLAG_debug_code) {
+ // Assert at runtime that inputs are only numbers.
+ __ AbortIfNotNumber(rdx);
+ __ AbortIfNotNumber(rax);
+ }
+ FloatingPointHelper::LoadNumbersAsIntegers(masm);
+ } else {
+ FloatingPointHelper::LoadAsIntegers(masm,
+ &call_runtime,
+ heap_number_map);
+ }
switch (op_) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
case Token::BIT_XOR: __ xorl(rax, rcx); break;
case Token::SAR: __ sarl_cl(rax); break;
case Token::SHL: __ shll_cl(rax); break;
- case Token::SHR: __ shrl_cl(rax); break;
+ case Token::SHR: {
+ __ shrl_cl(rax);
+ // Check if result is negative. This can only happen for a shift
+ // by zero.
+ __ testl(rax, rax);
+ __ j(negative, &non_smi_shr_result);
+ break;
+ }
default: UNREACHABLE();
}
- if (op_ == Token::SHR) {
- // Check if result is negative. This can only happen for a shift
- // by zero, which also doesn't update the sign flag.
- __ testl(rax, rax);
- __ j(negative, &non_smi_result);
- }
- __ JumpIfNotValidSmiValue(rax, &non_smi_result);
- // Tag smi result, if possible, and return.
+
+ STATIC_ASSERT(kSmiValueSize == 32);
+ // Tag smi result and return.
__ Integer32ToSmi(rax, rax);
GenerateReturn(masm);
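Dropping the old JumpIfNotValidSmiValue range check is safe because x64 smis
carry a full 32-bit payload, which is exactly what the STATIC_ASSERT above
pins down. A sketch of the assumed encoding:

    // x64 smi layout (assumption: value in the upper word, tag bits low):
    //   smi(v) == static_cast<int64_t>(v) << 32
    // Every int32 result of a bit-op is therefore a valid smi; only SHR
    // can produce a non-int32 value (a uint32 >= 2^31), handled below.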
- // All ops except SHR return a signed int32 that we load in
- // a HeapNumber.
- if (op_ != Token::SHR && non_smi_result.is_linked()) {
- __ bind(&non_smi_result);
+ // All bit-ops except SHR return a signed int32 that can be
+ // returned immediately as a smi.
+ // We might need to allocate a HeapNumber if we shift a negative
+ // number right by zero (i.e., convert to UInt32).
+ if (op_ == Token::SHR) {
+ ASSERT(non_smi_shr_result.is_linked());
+ __ bind(&non_smi_shr_result);
// Allocate a heap number if needed.
- __ movsxlq(rbx, rax); // rbx: sign extended 32-bit result
+ __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
switch (mode_) {
case OVERWRITE_LEFT:
case OVERWRITE_RIGHT:
__ JumpIfNotSmi(rax, &skip_allocation);
// Fall through!
case NO_OVERWRITE:
- __ AllocateHeapNumber(rax, rcx, &call_runtime);
+ // Allocate heap number in new space.
+ // Not using the AllocateHeapNumber macro in order to reuse the
+ // already loaded heap_number_map.
+ __ AllocateInNewSpace(HeapNumber::kSize,
+ rax,
+ rcx,
+ no_reg,
+ &call_runtime,
+ TAG_OBJECT);
+ // Set the map.
+ if (FLAG_debug_code) {
+ __ AbortIfNotRootValue(heap_number_map,
+ Heap::kHeapNumberMapRootIndex,
+ "HeapNumberMap register clobbered.");
+ }
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+ heap_number_map);
__ bind(&skip_allocation);
break;
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
- __ movq(Operand(rsp, 1 * kPointerSize), rbx);
- __ fild_s(Operand(rsp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+ __ cvtqsi2sd(xmm0, rbx);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
GenerateReturn(masm);
}
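The cvtqsi2sd above replaces the old fild_s/fstp_d round trip through the
FPU stack. rbx holds the uint32 result zero-extended to 64 bits, so the
signed 64-bit convert is exact:

    // Sketch: values lie in [0, 2^32), are non-negative as an int64, and
    // fit well inside a double's 53-bit mantissa -- no rounding, no sign
    // misinterpretation.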
- // SHR should return uint32 - go to runtime for non-smi/negative result.
- if (op_ == Token::SHR) {
- __ bind(&non_smi_result);
- }
break;
}
default: UNREACHABLE(); break;
Label not_strings, both_strings, not_string1, string1, string1_smi2;
// If this stub has already generated FP-specific code then the arguments
- // are already in rdx, rax
+ // are already in rdx and rax.
if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
GenerateLoadArguments(masm);
}
__ push(rax);
// Push this stub's key.
- __ movq(rax, Immediate(MinorKey()));
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(Smi::FromInt(MinorKey()));
// Although the operation and the type info are encoded into the key,
// the encoding is opaque, so push them too.
- __ movq(rax, Immediate(op_));
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(Smi::FromInt(op_));
- __ movq(rax, Immediate(runtime_operands_type_));
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(Smi::FromInt(runtime_operands_type_));
__ push(rcx);
// If result is not supposed to be flat, allocate a cons string object. If
// both strings are ascii the result is an ascii cons string.
// rax: first string
- // ebx: length of resulting flat string
+ // rbx: length of resulting flat string
// rdx: second string
// r8: instance type of first string
// r9: instance type of second string
- Label non_ascii, allocated;
+ Label non_ascii, allocated, ascii_data;
__ movl(rcx, r8);
__ and_(rcx, r9);
ASSERT(kStringEncodingMask == kAsciiStringTag);
__ testl(rcx, Immediate(kAsciiStringTag));
__ j(zero, &non_ascii);
+ __ bind(&ascii_data);
// Allocate an ascii cons string.
__ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
__ bind(&allocated);
__ IncrementCounter(&Counters::string_add_native, 1);
__ ret(2 * kPointerSize);
__ bind(&non_ascii);
+ // At least one of the strings is two-byte. Check whether it happens
+ // to contain only ascii characters.
+ // rcx: first instance type AND second instance type.
+ // r8: first instance type.
+ // r9: second instance type.
+ __ testb(rcx, Immediate(kAsciiDataHintMask));
+ __ j(not_zero, &ascii_data);
+ __ xor_(r8, r9);
+ ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+ __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+ __ j(equal, &ascii_data);
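The three-instruction xor sequence tests the remaining mixed case: one
sequential ascii string combined with a two-byte string carrying the
ascii-data hint. A worked example, assuming kAsciiStringTag == 0x04 and
kAsciiDataHintTag == 0x08 as defined in this patch:

    //   r8 (ascii string):           xxxx0100   encoding bit set
    //   r9 (two-byte + ascii hint):  xxxx1000   hint bit set
    //   r8 ^ r9                    = xxxx1100
    //   masked with 0x0c           = 0x0c       -> equal, take ascii_data
    // With any other combination the xor leaves at most one of the two
    // bits set, the compare fails, and a two-byte cons string is made.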
// Allocate a two byte cons string.
__ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
__ jmp(&allocated);
// Handle creating a flat result. First check that both strings are not
// external strings.
// rax: first string
- // ebx: length of resulting flat string as smi
+ // rbx: length of resulting flat string as smi
// rdx: second string
// r8: instance type of first string
// r9: instance type of second string
__ j(equal, &string_add_runtime);
// Now check if both strings are ascii strings.
// rax: first string
- // ebx: length of resulting flat string
+ // rbx: length of resulting flat string
// rdx: second string
// r8: instance type of first string
// r9: instance type of second string
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
- Label slow, fast, array, extra, check_pixel_array;
+ Label slow, slow_with_tagged_index, fast, array, extra, check_pixel_array;
// Check that the object isn't a smi.
- __ JumpIfSmi(rdx, &slow);
+ __ JumpIfSmi(rdx, &slow_with_tagged_index);
// Get the map from the receiver.
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
// to do this because this generic stub does not perform map checks.
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow);
+ __ j(not_zero, &slow_with_tagged_index);
// Check that the key is a smi.
- __ JumpIfNotSmi(rcx, &slow);
+ __ JumpIfNotSmi(rcx, &slow_with_tagged_index);
+ __ SmiToInteger32(rcx, rcx);
__ CmpInstanceType(rbx, JS_ARRAY_TYPE);
__ j(equal, &array);
// Object case: Check key against length in the elements array.
// rax: value
// rdx: JSObject
- // rcx: index (as a smi)
+ // rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
- __ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
// rax: value
// rbx: FixedArray
- // rcx: index (as a smi)
- __ j(below, &fast);
+ // rcx: index
+ __ j(above, &fast);
// Slow case: call runtime.
__ bind(&slow);
+ __ Integer32ToSmi(rcx, rcx);
+ __ bind(&slow_with_tagged_index);
GenerateRuntimeSetProperty(masm);
+ // Never returns here.
// Check whether the elements is a pixel array.
// rax: value
// rdx: receiver
// rbx: receiver's elements array
- // rcx: index (as a smi), zero-extended.
+ // rcx: index, zero-extended.
__ bind(&check_pixel_array);
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kPixelArrayMapRootIndex);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
__ JumpIfNotSmi(rax, &slow);
- __ SmiToInteger32(rdi, rcx);
- __ cmpl(rdi, FieldOperand(rbx, PixelArray::kLengthOffset));
+ __ cmpl(rcx, FieldOperand(rbx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
// No more bailouts to slow case on this path, so key not needed.
- __ SmiToInteger32(rcx, rax);
+ __ SmiToInteger32(rdi, rax);
{ // Clamp the value to [0..255].
Label done;
- __ testl(rcx, Immediate(0xFFFFFF00));
+ __ testl(rdi, Immediate(0xFFFFFF00));
__ j(zero, &done);
- __ setcc(negative, rcx); // 1 if negative, 0 if positive.
- __ decb(rcx); // 0 if negative, 255 if positive.
+ __ setcc(negative, rdi); // 1 if negative, 0 if positive.
+ __ decb(rdi); // 0 if negative, 255 if positive.
__ bind(&done);
}
__ movq(rbx, FieldOperand(rbx, PixelArray::kExternalPointerOffset));
- __ movb(Operand(rbx, rdi, times_1, 0), rcx);
+ __ movb(Operand(rbx, rcx, times_1, 0), rdi);
__ ret(0);
// Extra capacity case: Check if there is extra capacity to
// rax: value
// rdx: receiver (a JSArray)
// rbx: receiver's elements array (a FixedArray)
- // rcx: index (as a smi)
+ // rcx: index
// flags: smicompare (rdx.length(), rbx)
__ j(not_equal, &slow); // do not leave holes in the array
- __ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
+ __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
+ __ j(below_equal, &slow);
// Increment index to get new length.
- __ SmiAddConstant(rdi, rcx, Smi::FromInt(1));
- __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
+ __ leal(rdi, Operand(rcx, 1));
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
__ jmp(&fast);
// Array case: Get the length and the elements array from the JS
__ bind(&array);
// rax: value
// rdx: receiver (a JSArray)
- // rcx: index (as a smi)
+ // rcx: index
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
- __ SmiCompare(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
+ __ SmiCompareInteger32(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
__ j(below_equal, &extra);
// Fast case: Do the store.
__ bind(&fast);
// rax: value
// rbx: receiver's elements array (a FixedArray)
- // rcx: index (as a smi)
+ // rcx: index
Label non_smi_value;
- __ JumpIfNotSmi(rax, &non_smi_value);
- SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
- __ movq(FieldOperand(rbx, index.reg, index.scale, FixedArray::kHeaderSize),
+ __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
rax);
+ __ JumpIfNotSmi(rax, &non_smi_value);
__ ret(0);
__ bind(&non_smi_value);
// Slow case that needs to retain rcx for use by RecordWrite.
// Update write barrier for the elements array address.
- SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rcx, kPointerSizeLog2);
- __ movq(FieldOperand(rbx, index2.reg, index2.scale, FixedArray::kHeaderSize),
- rax);
__ movq(rdx, rax);
__ RecordWriteNonSmi(rbx, 0, rdx, rcx);
__ ret(0);
#include "macro-assembler-x64.h"
#include "serialize.h"
#include "debug.h"
+#include "heap.h"
namespace v8 {
namespace internal {
// Compute number of region covering addr. See Page::GetRegionNumberForAddress
// method for more details.
- and_(addr, Immediate(Page::kPageAlignmentMask));
shrl(addr, Immediate(Page::kRegionSizeLog2));
+ andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
// Set dirty mark for region.
bts(Operand(object, Page::kDirtyFlagOffset), addr);
// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
-// If offset is zero, then the smi_index register contains the array index into
-// the elements array represented as a smi. Otherwise it can be used as a
-// scratch register.
+// If offset is zero, then the index register contains the array index into
+ // the elements array represented as a zero-extended int32. Otherwise it can
+ // be
+// used as a scratch register.
// All registers are clobbered by the operation.
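// Hedged usage sketch (register names illustrative, per the comment above):
//   __ RecordWrite(object, JSObject::kElementsOffset, value, scratch);
//   __ RecordWrite(elements, 0, value, untagged_index);
// The first form marks the field at a fixed offset dirty; the second marks
// the element slot addressed by a zero-extended int32 index.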
void MacroAssembler::RecordWrite(Register object,
int offset,
Register value,
- Register smi_index) {
+ Register index) {
// The compiled code assumes that record write doesn't change the
// context register, so we check that none of the clobbered
// registers are rsi.
- ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));
+ ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
JumpIfSmi(value, &done);
- RecordWriteNonSmi(object, offset, value, smi_index);
+ RecordWriteNonSmi(object, offset, value, index);
bind(&done);
// Clobber all input registers when running with the debug-code flag
if (FLAG_debug_code) {
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
}
void MacroAssembler::RecordWriteNonSmi(Register object,
int offset,
Register scratch,
- Register smi_index) {
+ Register index) {
Label done;
if (FLAG_debug_code) {
JumpIfNotSmi(object, &okay);
Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
bind(&okay);
+
+ if (offset == 0) {
+ // index must be int32.
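+      // movl zero-extends tmp to 64 bits; if the 64-bit compare below still
+      // matches, index held no bits above bit 31, i.e. an untagged int32.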
+ Register tmp = index.is(rax) ? rbx : rax;
+ push(tmp);
+ movl(tmp, index);
+ cmpq(tmp, index);
+ Check(equal, "Index register for RecordWrite must be untagged int32.");
+ pop(tmp);
+ }
}
// Test that the object address is not in the new space. We cannot
ASSERT(IsAligned(offset, kPointerSize) ||
IsAligned(offset + kHeapObjectTag, kPointerSize));
- Register dst = smi_index;
+ Register dst = index;
if (offset != 0) {
lea(dst, Operand(object, offset));
} else {
// array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric.
- SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
lea(dst, FieldOperand(object,
- index.reg,
- index.scale,
+ index,
+ times_pointer_size,
FixedArray::kHeaderSize));
}
RecordWriteHelper(object, dst, scratch);
if (FLAG_debug_code) {
movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
}
}
void MacroAssembler::Set(const Operand& dst, int64_t x) {
- if (x == 0) {
- xor_(kScratchRegister, kScratchRegister);
- movq(dst, kScratchRegister);
- } else if (is_int32(x)) {
+ if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
- } else if (is_uint32(x)) {
- movl(dst, Immediate(static_cast<uint32_t>(x)));
} else {
movq(kScratchRegister, x, RelocInfo::NONE);
movq(dst, kScratchRegister);
}
+void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
+ if (FLAG_debug_code) {
+ testb(dst, Immediate(0x01));
+ Label ok;
+ j(zero, &ok);
+ if (allow_stub_calls()) {
+ Abort("Integer32ToSmiField writing to non-smi location");
+ } else {
+ int3();
+ }
+ bind(&ok);
+ }
+ ASSERT(kSmiShift % kBitsPerByte == 0);
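+  // On x64 a smi keeps its 32-bit payload in the high dword (value << 32)
+  // and the low dword, including the tag, is all zero. Since dst already
+  // holds a smi, writing only the high dword retags src without touching
+  // the low half.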
+ movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+}
+
+
void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
Register src,
int constant) {
}
+void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
+ movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+}
+
+
void MacroAssembler::SmiTest(Register src) {
testq(src, src);
}
}
+void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
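+  // Read only the high dword of the field, i.e. the smi payload, so the
+  // zero-extended int32 in src can be compared without tagging either side.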
+ cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+}
+
+
void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
Register src,
int power) {
movq(dst, src1);
addq(dst, src2);
}
- Assert(no_overflow, "Smi addition onverflow");
+ Assert(no_overflow, "Smi addition overflow");
} else if (dst.is(src1)) {
- addq(dst, src2);
- Label smi_result;
- j(no_overflow, &smi_result);
- // Restore src1.
- subq(src1, src2);
- jmp(on_not_smi_result);
- bind(&smi_result);
+ movq(kScratchRegister, src1);
+ addq(kScratchRegister, src2);
+ j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
} else {
movq(dst, src1);
addq(dst, src2);
movq(dst, src1);
subq(dst, src2);
}
- Assert(no_overflow, "Smi substraction onverflow");
+ Assert(no_overflow, "Smi subtraction overflow");
} else if (dst.is(src1)) {
+ cmpq(dst, src2);
+ j(overflow, on_not_smi_result);
subq(dst, src2);
- Label smi_result;
- j(no_overflow, &smi_result);
- // Restore src1.
- addq(src1, src2);
- jmp(on_not_smi_result);
- bind(&smi_result);
} else {
movq(dst, src1);
subq(dst, src2);
movq(dst, src1);
subq(dst, src2);
}
- Assert(no_overflow, "Smi substraction onverflow");
+ Assert(no_overflow, "Smi subtraction overflow");
} else if (dst.is(src1)) {
- subq(dst, src2);
- Label smi_result;
- j(no_overflow, &smi_result);
- // Restore src1.
- addq(src1, src2);
- jmp(on_not_smi_result);
- bind(&smi_result);
+ movq(kScratchRegister, src1);
+ subq(kScratchRegister, src2);
+ j(overflow, on_not_smi_result);
+ movq(src1, kScratchRegister);
} else {
movq(dst, src1);
subq(dst, src2);
ASSERT(!dst.is(kScratchRegister));
Move(kScratchRegister, constant);
- addq(dst, kScratchRegister);
- Label result_ok;
- j(no_overflow, &result_ok);
- subq(dst, kScratchRegister);
- jmp(on_not_smi_result);
- bind(&result_ok);
+ addq(kScratchRegister, dst);
+ j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
} else {
Move(dst, constant);
addq(dst, src);
} else {
// Subtract by adding the negative, to do it in two operations.
if (constant->value() == Smi::kMinValue) {
- Move(kScratchRegister, constant);
- movq(dst, src);
- subq(dst, kScratchRegister);
+ Move(dst, constant);
+      // Adding and subtracting the min-value give the same result; they
+      // differ only in the overflow bit, which we don't check here.
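+      // (Worked example: the tagged kMinValue pattern 0x8000000000000000
+      // negates to itself in two's complement, so x + min and x - min yield
+      // identical bits; only the overflow flag differs.)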
+ addq(dst, src);
} else {
+ // Subtract by adding the negation.
Move(dst, Smi::FromInt(-constant->value()));
addq(dst, src);
}
}
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
-
- Move(kScratchRegister, constant);
- subq(dst, kScratchRegister);
- Label sub_success;
- j(no_overflow, &sub_success);
- addq(src, kScratchRegister);
- jmp(on_not_smi_result);
- bind(&sub_success);
- } else {
if (constant->value() == Smi::kMinValue) {
+ // Subtracting min-value from any non-negative value will overflow.
+      // We test for non-negativity before doing the subtraction.
+ testq(src, src);
+ j(not_sign, on_not_smi_result);
Move(kScratchRegister, constant);
- movq(dst, src);
subq(dst, kScratchRegister);
+ } else {
+ // Subtract by adding the negation.
+ Move(kScratchRegister, Smi::FromInt(-constant->value()));
+ addq(kScratchRegister, dst);
j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
+ }
+ } else {
+ if (constant->value() == Smi::kMinValue) {
+ // Subtracting min-value from any non-negative value will overflow.
+      // We test for non-negativity before doing the subtraction.
+ testq(src, src);
+ j(not_sign, on_not_smi_result);
+ Move(dst, constant);
+      // Adding and subtracting the min-value give the same result; they
+      // differ only in the overflow bit, which we don't check here.
+ addq(dst, src);
} else {
+ // Subtract by adding the negation.
Move(dst, Smi::FromInt(-(constant->value())));
addq(dst, src);
j(overflow, on_not_smi_result);
}
+void MacroAssembler::AbortIfNotRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message) {
+ ASSERT(!src.is(kScratchRegister));
+ LoadRoot(kScratchRegister, root_value_index);
+ cmpq(src, kScratchRegister);
+ Check(equal, message);
+}
+
+
+
Condition MacroAssembler::IsObjectStringType(Register heap_object,
Register map,
Register instance_type) {
// NOTICE: Destroys the dst register even if unsuccessful!
void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
+ // Stores an integer32 value into a memory field that already holds a smi.
+ void Integer32ToSmiField(const Operand& dst, Register src);
+
// Adds constant to src and tags the result as a smi.
// Result must be a valid smi.
void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
// Convert smi to 64-bit integer (sign extended if necessary).
void SmiToInteger64(Register dst, Register src);
+ void SmiToInteger64(Register dst, const Operand& src);
// Multiply a positive smi's integer value by a power of two.
// Provides result as 64-bit integer value.
void SmiCompare(Register dst, const Operand& src);
void SmiCompare(const Operand& dst, Register src);
void SmiCompare(const Operand& dst, Smi* src);
+ // Compare the int32 in src register to the value of the smi stored at dst.
+ void SmiCompareInteger32(const Operand& dst, Register src);
// Sets sign and zero flags depending on value of smi in register.
void SmiTest(Register src);
// Abort execution if argument is not a smi. Used in debug code.
void AbortIfNotSmi(Register object);
+ // Abort execution if argument is not the root value with the given index.
+ void AbortIfNotRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message);
+
// ---------------------------------------------------------------------------
// Exception handling
Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into rax and calculate new length.
- __ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
+ __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
- __ SmiAddConstant(rax, rax, Smi::FromInt(argc));
+ __ addl(rax, Immediate(argc));
// Get the element's length into rcx.
- __ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ SmiToInteger32(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
- __ SmiCompare(rax, rcx);
+ __ cmpl(rax, rcx);
__ j(greater, &attempt_to_grow_elements);
// Save new length.
- __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
// Push the element.
__ movq(rcx, Operand(rsp, argc * kPointerSize));
- SmiIndex index =
- masm()->SmiToIndex(kScratchRegister, rax, times_pointer_size);
__ lea(rdx, FieldOperand(rbx,
- index.reg, index.scale,
+ rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ movq(Operand(rdx, 0), rcx);
// Check if value is a smi.
+ __ Integer32ToSmi(rax, rax); // Return new length as smi.
+
__ JumpIfNotSmi(rcx, &with_write_barrier);
__ bind(&exit);
RecordWriteStub stub(rbx, rdx, rcx);
__ CallStub(&stub);
+
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
__ movq(rcx, Operand(rcx, 0));
// Check if it's the end of elements.
- index = masm()->SmiToIndex(kScratchRegister, rax, times_pointer_size);
__ lea(rdx, FieldOperand(rbx,
- index.reg, index.scale,
+ rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ cmpq(rdx, rcx);
__ j(not_equal, &call_builtin);
// Increment element's and array's sizes.
__ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
Smi::FromInt(kAllocationDelta));
+ // Make new length a smi before returning it.
+ __ Integer32ToSmi(rax, rax);
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
// Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
__ j(not_equal, &miss);
// Get the array's length into rcx and calculate new length.
- __ movq(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
- __ SmiSubConstant(rcx, rcx, Smi::FromInt(1));
- __ SmiTest(rcx);
+ __ SmiToInteger32(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+ __ subl(rcx, Immediate(1));
__ j(negative, &return_undefined);
// Get the last element.
__ Move(r9, Factory::the_hole_value());
- SmiIndex index =
- masm()->SmiToIndex(r8, rcx, times_pointer_size);
__ movq(rax, FieldOperand(rbx,
- index.reg, index.scale,
+ rcx, times_pointer_size,
FixedArray::kHeaderSize));
// Check if element is already the hole.
__ cmpq(rax, r9);
+ // If so, call slow-case to also check prototypes for value.
__ j(equal, &call_builtin);
// Set the array's length.
- __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
- // Fill with the hole and return original value..
+ // Fill with the hole and return original value.
__ movq(FieldOperand(rbx,
- index.reg, index.scale,
+ rcx, times_pointer_size,
FixedArray::kHeaderSize),
r9);
__ ret((argc + 1) * kPointerSize);
// Sync elements below the range if they have not been materialized
// on the stack.
int start = Min(begin, stack_pointer_ + 1);
+ int end_or_stack_pointer = Min(stack_pointer_, end);
+  // Emit normal push instructions for elements above the stack pointer
+  // and use mov instructions if we are below the stack pointer.
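+  // Elements at indices up to stack_pointer_ already have stack slots and
+  // can be written with plain mov stores; elements above it have no slots
+  // yet, so they are pushed, which also grows the in-memory stack.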
+ int i = start;
- // If positive we have to adjust the stack pointer.
- int delta = end - stack_pointer_;
- if (delta > 0) {
- stack_pointer_ = end;
- __ subq(rsp, Immediate(delta * kPointerSize));
- }
-
- for (int i = start; i <= end; i++) {
+ while (i <= end_or_stack_pointer) {
if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
+ i++;
+ }
+ while (i <= end) {
+ SyncElementByPushing(i);
+ i++;
}
}
stackTrace->GetFrame(0));
checkStackFrame(origin, "baz", 8, 3, false, true,
stackTrace->GetFrame(1));
- checkStackFrame(NULL, "", 1, 1, true, false,
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ bool is_eval = true;
+#else // ENABLE_DEBUGGER_SUPPORT
+ bool is_eval = false;
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+ checkStackFrame(NULL, "", 1, 1, is_eval, false,
stackTrace->GetFrame(2));
// The last frame is an anonymous function that has the initial call to foo.
checkStackFrame(origin, "", 10, 1, false, false,
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
#include <stdlib.h>
#include "v8.h"
static int break_point = 0;
Handle<v8::internal::SharedFunctionInfo> shared(fun->shared());
Debug::SetBreakPoint(
- shared, position,
- Handle<Object>(v8::internal::Smi::FromInt(++break_point)));
+ shared,
+ Handle<Object>(v8::internal::Smi::FromInt(++break_point)),
+ &position);
return break_point;
}
}
+// Test top level script break points set on lines.
+TEST(ScriptBreakPointLineTopLevel) {
+ v8::HandleScope scope;
+ DebugLocalContext env;
+ env.ExposeDebug();
+
+ v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount,
+ v8::Undefined());
+
+ v8::Local<v8::String> script = v8::String::New(
+ "function f() {\n"
+ " a = 1; // line 1\n"
+ "}\n"
+ "a = 2; // line 3\n");
+ v8::Local<v8::Function> f;
+ {
+ v8::HandleScope scope;
+ v8::Script::Compile(script, v8::String::New("test.html"))->Run();
+ }
+ f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+
+ Heap::CollectAllGarbage(false);
+
+ SetScriptBreakPointByNameFromJS("test.html", 3, -1);
+
+  // Call f and check that no break points were hit.
+ break_point_hit_count = 0;
+ f->Call(env->Global(), 0, NULL);
+ CHECK_EQ(0, break_point_hit_count);
+
+  // Recompile and run the script, and check that the break point was hit.
+ break_point_hit_count = 0;
+ v8::Script::Compile(script, v8::String::New("test.html"))->Run();
+ CHECK_EQ(1, break_point_hit_count);
+
+ // Call f and check that there are still no break points.
+ break_point_hit_count = 0;
+ f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ CHECK_EQ(0, break_point_hit_count);
+
+ v8::Debug::SetDebugEventListener(NULL);
+ CheckDebuggerUnloaded();
+}
+
+
// Test that it is possible to remove the last break point for a function
// inside the break handling of that break point.
TEST(RemoveBreakPointInBreak) {
CheckDebuggerUnloaded();
}
+#endif // ENABLE_DEBUGGER_SUPPORT
COMPARE(mvn(r6, Operand(-1), LeaveCC, ne),
"13a06000 movne r6, #0");
+ // mov -> movw.
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ COMPARE(mov(r5, Operand(0x01234), LeaveCC, ne),
+ "13015234 movwne r5, #4660");
+ // We only disassemble one instruction so the eor instruction is not here.
+ COMPARE(eor(r5, r4, Operand(0x1234), LeaveCC, ne),
+ "1301c234 movwne ip, #4660");
+ // Movw can't do setcc so we don't get that here. Mov immediate with setcc
+ // is pretty strange anyway.
+ COMPARE(mov(r5, Operand(0x01234), SetCC, ne),
+ "159fc000 ldrne ip, [pc, #+0]");
+ // We only disassemble one instruction so the eor instruction is not here.
+ // The eor does the setcc so we get a movw here.
+ COMPARE(eor(r5, r4, Operand(0x1234), SetCC, ne),
+ "1301c234 movwne ip, #4660");
+
+ COMPARE(movt(r5, 0x4321, ne),
+ "13445321 movtne r5, #17185");
+ COMPARE(movw(r5, 0xabcd, eq),
+ "030a5bcd movweq r5, #43981");
+ }
+
+ // Eor doesn't have an eor-negative variant, but we can do an mvn followed by
+ // an eor to get the same effect.
+ COMPARE(eor(r5, r4, Operand(0xffffff34), SetCC, ne),
+ "13e0c0cb mvnne ip, #203");
+
// and <-> bic.
COMPARE(and_(r3, r5, Operand(0xfc03ffff)),
"e3c537ff bic r3, r5, #66846720");
__ jmp(&L1);
__ jmp(Operand(ebx, ecx, times_4, 10000));
+#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference after_break_target =
ExternalReference(Debug_Address::AfterBreakTarget());
__ jmp(Operand::StaticVariable(after_break_target));
+#endif // ENABLE_DEBUGGER_SUPPORT
__ jmp(ic, RelocInfo::CODE_TARGET);
__ nop();
__ divsd(xmm1, xmm0);
__ movdbl(xmm1, Operand(ebx, ecx, times_4, 10000));
__ movdbl(Operand(ebx, ecx, times_4, 10000), xmm1);
- __ comisd(xmm0, xmm1);
+ __ ucomisd(xmm0, xmm1);
// 128 bit move instructions.
__ movdqa(xmm0, Operand(ebx, ecx, times_4, 10000));
int func_pos = Runtime::StringMatch(script_src, func_pos_str, 0);
CHECK_NE(0, func_pos);
+#ifdef ENABLE_DEBUGGER_SUPPORT
// Obtain SharedFunctionInfo for the function.
Object* shared_func_info_ptr =
Runtime::FindSharedFunctionInfoInScript(i_script, func_pos);
SmartPointer<char> inferred_name =
shared_func_info->inferred_name()->ToCString();
CHECK_EQ(ref_inferred_name, *inferred_name);
+#endif // ENABLE_DEBUGGER_SUPPORT
}
} // namespace
+
+static const v8::HeapGraphNode* GetGlobalObject(
+ const v8::HeapSnapshot* snapshot) {
+ CHECK_EQ(1, snapshot->GetHead()->GetChildrenCount());
+ return snapshot->GetHead()->GetChild(0)->GetToNode();
+}
+
+
+static const v8::HeapGraphNode* GetProperty(const v8::HeapGraphNode* node,
+ v8::HeapGraphEdge::Type type,
+ const char* name) {
+ for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = node->GetChild(i);
+ v8::String::AsciiValue prop_name(prop->GetName());
+ if (prop->GetType() == type && strcmp(name, *prop_name) == 0)
+ return prop->GetToNode();
+ }
+ return NULL;
+}
+
+
+static bool HasString(const v8::HeapGraphNode* node, const char* contents) {
+ for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = node->GetChild(i);
+    const v8::HeapGraphNode* dest = prop->GetToNode();
+    if (dest->GetType() == v8::HeapGraphNode::STRING) {
+      v8::String::AsciiValue dest_name(dest->GetName());
+      if (strcmp(contents, *dest_name) == 0) return true;
+ }
+ }
+ return false;
+}
+
+
TEST(HeapSnapshot) {
v8::HandleScope scope;
"var c2 = new C2(a2);");
const v8::HeapSnapshot* snapshot_env2 =
v8::HeapProfiler::TakeSnapshot(v8::String::New("env2"));
- const v8::HeapGraphNode* global_env2;
- if (i::Snapshot::IsEnabled()) {
- // In case if snapshots are enabled, there will present a
- // vanilla deserealized global object, without properties
- // added by the test code.
- CHECK_EQ(2, snapshot_env2->GetHead()->GetChildrenCount());
- // Choose the global object of a bigger size.
- const v8::HeapGraphNode* node0 =
- snapshot_env2->GetHead()->GetChild(0)->GetToNode();
- const v8::HeapGraphNode* node1 =
- snapshot_env2->GetHead()->GetChild(1)->GetToNode();
- global_env2 = node0->GetTotalSize() > node1->GetTotalSize() ?
- node0 : node1;
- } else {
- CHECK_EQ(1, snapshot_env2->GetHead()->GetChildrenCount());
- global_env2 = snapshot_env2->GetHead()->GetChild(0)->GetToNode();
- }
+ const v8::HeapGraphNode* global_env2 = GetGlobalObject(snapshot_env2);
// Verify, that JS global object of env2 doesn't have '..1'
// properties, but has '..2' properties.
- bool has_a1 = false, has_b1_1 = false, has_b1_2 = false, has_c1 = false;
- bool has_a2 = false, has_b2_1 = false, has_b2_2 = false, has_c2 = false;
- // This will be needed further.
- const v8::HeapGraphNode* a2_node = NULL;
- for (int i = 0, count = global_env2->GetChildrenCount(); i < count; ++i) {
- const v8::HeapGraphEdge* prop = global_env2->GetChild(i);
- v8::String::AsciiValue prop_name(prop->GetName());
- if (strcmp("a1", *prop_name) == 0) has_a1 = true;
- if (strcmp("b1_1", *prop_name) == 0) has_b1_1 = true;
- if (strcmp("b1_2", *prop_name) == 0) has_b1_2 = true;
- if (strcmp("c1", *prop_name) == 0) has_c1 = true;
- if (strcmp("a2", *prop_name) == 0) {
- has_a2 = true;
- a2_node = prop->GetToNode();
- }
- if (strcmp("b2_1", *prop_name) == 0) has_b2_1 = true;
- if (strcmp("b2_2", *prop_name) == 0) has_b2_2 = true;
- if (strcmp("c2", *prop_name) == 0) has_c2 = true;
- }
- CHECK(!has_a1);
- CHECK(!has_b1_1);
- CHECK(!has_b1_2);
- CHECK(!has_c1);
- CHECK(has_a2);
- CHECK(has_b2_1);
- CHECK(has_b2_2);
- CHECK(has_c2);
+ CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "a1"));
+ CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b1_1"));
+ CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b1_2"));
+ CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "c1"));
+ const v8::HeapGraphNode* a2_node =
+ GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "a2");
+ CHECK_NE(NULL, a2_node);
+ CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b2_1"));
+ CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b2_2"));
+ CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "c2"));
// Verify that anything related to '[ABC]1' is not reachable.
NamedEntriesDetector det;
CHECK(has_b2_2_x_ref);
}
+
+TEST(HeapSnapshotCodeObjects) {
+ v8::HandleScope scope;
+ v8::Handle<v8::Context> env = v8::Context::New();
+ env->Enter();
+
+ CompileAndRunScript(
+ "function lazy(x) { return x - 1; }\n"
+ "function compiled(x) { return x + 1; }\n"
+ "compiled(1)");
+ const v8::HeapSnapshot* snapshot =
+ v8::HeapProfiler::TakeSnapshot(v8::String::New("code"));
+
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ const v8::HeapGraphNode* compiled =
+ GetProperty(global, v8::HeapGraphEdge::PROPERTY, "compiled");
+ CHECK_NE(NULL, compiled);
+ CHECK_EQ(v8::HeapGraphNode::CLOSURE, compiled->GetType());
+ const v8::HeapGraphNode* lazy =
+ GetProperty(global, v8::HeapGraphEdge::PROPERTY, "lazy");
+ CHECK_NE(NULL, lazy);
+ CHECK_EQ(v8::HeapGraphNode::CLOSURE, lazy->GetType());
+
+ // Find references to code.
+ const v8::HeapGraphNode* compiled_code =
+ GetProperty(compiled, v8::HeapGraphEdge::INTERNAL, "code");
+ CHECK_NE(NULL, compiled_code);
+ const v8::HeapGraphNode* lazy_code =
+ GetProperty(lazy, v8::HeapGraphEdge::INTERNAL, "code");
+ CHECK_NE(NULL, lazy_code);
+
+  // Verify that non-compiled code doesn't contain references to the "x"
+  // literal, while compiled code does.
+ bool compiled_references_x = false, lazy_references_x = false;
+ for (int i = 0, count = compiled_code->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = compiled_code->GetChild(i);
+ const v8::HeapGraphNode* node = prop->GetToNode();
+ if (node->GetType() == v8::HeapGraphNode::CODE) {
+ if (HasString(node, "x")) {
+ compiled_references_x = true;
+ break;
+ }
+ }
+ }
+ for (int i = 0, count = lazy_code->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = lazy_code->GetChild(i);
+ const v8::HeapGraphNode* node = prop->GetToNode();
+ if (node->GetType() == v8::HeapGraphNode::CODE) {
+ if (HasString(node, "x")) {
+ lazy_references_x = true;
+ break;
+ }
+ }
+ }
+ CHECK(compiled_references_x);
+ CHECK(!lazy_references_x);
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
#include <stdlib.h>
#include "v8.h"
CompareStrings("abbabababababaaabbabababababbabbbbbbbababa",
"bbbbabababbbabababbbabababababbabbababa");
}
+
+#endif // ENABLE_DEBUGGER_SUPPORT
}
+#ifdef ENABLE_DEBUGGER_SUPPORT
static int register_code(int reg) {
return Debug::k_register_address << kDebugIdShift | reg;
}
+#endif // ENABLE_DEBUGGER_SUPPORT
TEST(ExternalReferenceEncoder) {
Encode(encoder, Runtime::kAbort));
CHECK_EQ(make_code(IC_UTILITY, IC::kLoadCallbackProperty),
Encode(encoder, IC_Utility(IC::kLoadCallbackProperty)));
+#ifdef ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(make_code(DEBUG_ADDRESS, register_code(3)),
Encode(encoder, Debug_Address(Debug::k_register_address, 3)));
+#endif // ENABLE_DEBUGGER_SUPPORT
ExternalReference keyed_load_function_prototype =
ExternalReference(&Counters::keyed_load_function_prototype);
CHECK_EQ(make_code(STATS_COUNTER, Counters::k_keyed_load_function_prototype),
ExternalReference::address_of_real_stack_limit();
CHECK_EQ(make_code(UNCLASSIFIED, 5),
encoder.Encode(real_stack_limit_address.address()));
+#ifdef ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(make_code(UNCLASSIFIED, 15),
encoder.Encode(ExternalReference::debug_break().address()));
+#endif // ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(make_code(UNCLASSIFIED, 10),
encoder.Encode(ExternalReference::new_space_start().address()));
CHECK_EQ(make_code(UNCLASSIFIED, 3),
decoder.Decode(make_code(RUNTIME_FUNCTION, Runtime::kAbort)));
CHECK_EQ(AddressOf(IC_Utility(IC::kLoadCallbackProperty)),
decoder.Decode(make_code(IC_UTILITY, IC::kLoadCallbackProperty)));
+#ifdef ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(AddressOf(Debug_Address(Debug::k_register_address, 3)),
decoder.Decode(make_code(DEBUG_ADDRESS, register_code(3))));
+#endif // ENABLE_DEBUGGER_SUPPORT
ExternalReference keyed_load_function =
ExternalReference(&Counters::keyed_load_function_prototype);
CHECK_EQ(keyed_load_function.address(),
decoder.Decode(make_code(UNCLASSIFIED, 4)));
CHECK_EQ(ExternalReference::address_of_real_stack_limit().address(),
decoder.Decode(make_code(UNCLASSIFIED, 5)));
+#ifdef ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(ExternalReference::debug_break().address(),
decoder.Decode(make_code(UNCLASSIFIED, 15)));
+#endif // ENABLE_DEBUGGER_SUPPORT
CHECK_EQ(ExternalReference::new_space_start().address(),
decoder.Decode(make_code(UNCLASSIFIED, 10)));
}
return arguments.length + arguments[arguments.length - 1];
}
+var stack_corner_case_failure = false;
+
for (var j = 1; j < 0x40000000; j <<= 1) {
try {
var a = new Array(j);
a[j - 1] = 42;
assertEquals(42 + j, al.apply(345, a));
} catch (e) {
+ if (e.toString().indexOf("Maximum call stack size exceeded") != -1) {
+ // For some combinations of build settings, it may be the case that the
+ // stack here is just tall enough to contain the array whose size is
+ // specified by j but is not tall enough to contain the activation
+ // record for the apply call. Allow one such corner case through,
+ // checking that the length check will do the right thing for an array
+ // the next size up.
+ assertEquals(false, stack_corner_case_failure);
+ stack_corner_case_failure = true;
+ continue;
+ }
assertTrue(e.toString().indexOf("Function.prototype.apply") != -1,
"exception does not contain Function.prototype.apply: " +
e.toString());
a = new Array(j);
a[j - 1] = 42;
al.apply(345, a);
- assertUnreachable("Apply of arrray with length " + a.length +
+ assertUnreachable("Apply of array with length " + a.length +
" should have thrown");
} catch (e) {
assertTrue(e.toString().indexOf("Function.prototype.apply") != -1,
mirror = debug.MakeMirror(o.a);
testArguments(dcp, '{"type":"handle","target":' + mirror.handle() + '}', true, false);
- testArguments(dcp, '{"type":"script","target":"sourceUrlScript","line":1}', true, true);
+ testArguments(dcp, '{"type":"script","target":"sourceUrlScript","line":0}', true, true);
// Indicate that all was processed.
listenerComplete = true;
};
function g() {
+ // Comment.
f();
};
sourceUrlFunc();
assertTrue(breakListenerCalled, "Break listener not called on breakpoint set by sourceURL");
+
+// Set a break point on a line with the comment, and check that the actual
+// position is the next line after the comment.
+var number = Debug.setScriptBreakPointById(g_script_id, g_line + 1);
+assertEquals(g_line + 2, Debug.findBreakPoint(number).actual_location.line);
# too long to run in debug mode on ARM.
fuzz-natives: PASS, SKIP if ($mode == release || $arch == arm)
-# Issue 494: new snapshot code breaks mjsunit/apply on mac debug snapshot.
-apply: PASS, FAIL if ($system == macos && $mode == debug)
-
big-object-literal: PASS, SKIP if ($arch == arm)
# Issue 488: this test sometimes times out.
--- /dev/null
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose_gc
+
+// This test makes sure that we do flush code with heap allocated locals.
+// This can be a problem if eval is used within the scope.
+// See: http://code.google.com/p/v8/issues/detail?id=747
+
+(function() {
+ var x = 42;
+ this.callEval = function() {eval('x');};
+})();
+
+try {
+ callEval();
+} catch (e) {
+ assertUnreachable();
+}
+
+gc();
+gc();
+gc();
+gc();
+gc();
+gc();
+
+try {
+ callEval();
+} catch (e) {
+ assertUnreachable();
+}
--- /dev/null
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-externalize-string
+
+var size = 1024;
+
+function test() {
+ var str = "";
+
+ // Build an ascii cons string.
+ for (var i = 0; i < size; i++) {
+ str += String.fromCharCode(i & 0x7f);
+ }
+ assertTrue(isAsciiString(str));
+
+ var twoByteExternalWithAsciiData =
+ "AA" + (function() { return "A"; })();
+ externalizeString(twoByteExternalWithAsciiData, true /* force two-byte */);
+ assertFalse(isAsciiString(twoByteExternalWithAsciiData));
+
+ var realTwoByteExternalString =
+ "\u1234\u1234" + (function() { return "\u1234"; })();
+ externalizeString(realTwoByteExternalString);
+ assertFalse(isAsciiString(realTwoByteExternalString));
+
+ assertTrue(isAsciiString(["a", twoByteExternalWithAsciiData].join("")));
+
+ // Appending a two-byte string that contains only ascii chars should
+ // still produce an ascii cons.
+ var str1 = str + twoByteExternalWithAsciiData;
+ assertTrue(isAsciiString(str1));
+
+ // Force flattening of the string.
+ var old_length = str1.length - twoByteExternalWithAsciiData.length;
+ for (var i = 0; i < old_length; i++) {
+ assertEquals(String.fromCharCode(i & 0x7f), str1[i]);
+ }
+ for (var i = old_length; i < str1.length; i++) {
+ assertEquals("A", str1[i]);
+ }
+
+ // Flattened string should still be ascii.
+ assertTrue(isAsciiString(str1));
+
+ // Lower-casing an ascii string should produce ascii.
+ assertTrue(isAsciiString(str1.toLowerCase()));
+
+ assertFalse(isAsciiString(["a", realTwoByteExternalString].join("")));
+
+ // Appending a real two-byte string should produce a two-byte cons.
+ var str2 = str + realTwoByteExternalString;
+ assertFalse(isAsciiString(str2));
+
+ // Force flattening of the string.
+ old_length = str2.length - realTwoByteExternalString.length;
+ for (var i = 0; i < old_length; i++) {
+ assertEquals(String.fromCharCode(i & 0x7f), str2[i]);
+ }
+  for (var i = old_length; i < str2.length; i++) {
+ assertEquals("\u1234", str2[i]);
+ }
+
+ // Flattened string should still be two-byte.
+ assertFalse(isAsciiString(str2));
+}
+
+// Run the test many times to ensure ICs don't break things.
+for (var i = 0; i < 10; i++) {
+ test();
+}