if (initial_capacity > 0) {
size += FixedArray::SizeFor(initial_capacity);
}
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
// -----------------------------------
Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- r0, // Result.
- r3, // Scratch.
- r4, // Scratch.
- &gc_required,
- TAG_OBJECT);
+ __ Allocate(JSValue::kSize,
+ r0, // Result.
+ r3, // Scratch.
+ r4, // Scratch.
+ &gc_required,
+ TAG_OBJECT);
// Initialising the String Object.
Register map = r3;
__ pop(r3);
// Attempt to allocate new JSFunction in new space.
- __ AllocateInNewSpace(JSFunction::kSize,
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
+ __ Allocate(JSFunction::kSize, r0, r1, r2, &gc, TAG_OBJECT);
__ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
// Attempt to allocate the context in new space.
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- r0,
- r1,
- r2,
- &gc,
- TAG_OBJECT);
+ __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT);
// Load the function from the stack.
__ ldr(r3, MemOperand(sp, 0));
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- r0, r1, r2, &gc, TAG_OBJECT);
+ __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT);
// Load the function from the stack.
__ ldr(r3, MemOperand(sp, 0));
if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
}
- __ AllocateInNewSpace(size, r0, r1, r2, fail, flags);
+ __ Allocate(size, r0, r1, r2, fail, flags);
if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
__ mov(r2, Operand(Handle<Map>(masm->isolate()->heap()->
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
// the constructor's prototype changes, but instance size and property
// counts remain unchanged (if slack tracking finished).
ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- TAG_OBJECT);
+ __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(),
+ TAG_OBJECT);
__ bind(deferred->exit());
if (FLAG_debug_code) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ AllocateInNewSpace(size,
- result,
- scratch,
- scratch2,
- deferred->entry(),
- flags);
+ if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ }
+ __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ AllocateInNewSpace(size,
// Allocate all objects that are part of the literal in one big
// allocation. This avoids multiple limit checks.
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
}
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
// The values must be adjacent in memory to allow the use of LDM.
// Also, assert that the registers are numbered such that the values
// are loaded in the correct order.
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
intptr_t top =
- reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+ reinterpret_cast<intptr_t>(allocation_top.address());
intptr_t limit =
- reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+ reinterpret_cast<intptr_t>(allocation_limit.address());
ASSERT((limit - top) == kPointerSize);
ASSERT(result.code() < ip.code());
// Set up allocation top address and object size registers.
Register topaddr = scratch1;
Register obj_size_reg = scratch2;
- mov(topaddr, Operand(new_space_allocation_top));
+ mov(topaddr, Operand(allocation_top));
Operand obj_size_operand = Operand(object_size);
if (!obj_size_operand.is_single_instruction(this)) {
// We are about to steal IP, so we need to load this value first
if ((flags & DOUBLE_ALIGNMENT) != 0) {
// Align the next allocation. Storing the filler map without checking top is
// always safe because the limit of the heap is always aligned.
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
Label aligned;
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
Register scratch1,
Register scratch2,
Label* gc_required) {
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
InitializeNewString(result,
length,
TaggingMode tagging_mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- tagging_mode == TAG_RESULT ? TAG_OBJECT :
- NO_ALLOCATION_FLAGS);
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+ tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
// Store heap number map in the allocated object.
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// ---------------------------------------------------------------------------
// Allocation support
- // Allocate an object in new space. The object_size is specified
- // either in bytes or in words if the allocation flag SIZE_IN_WORDS
- // is passed. If the new space is exhausted control continues at the
- // gc_required label. The allocated object is returned in result. If
- // the flag tag_allocated_object is true the result is tagged as as
- // a heap object. All registers are clobbered also when control
- // continues at the gc_required label.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- AllocationFlags flags);
+ // Allocate an object in new space or old pointer space. The object_size is
+ // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. If the space is exhausted control continues at the gc_required
+ // label. The allocated object is returned in result. If the flag
+ // tag_allocated_object is true the result is tagged as a heap object. All
+ // registers are also clobbered when control continues at the gc_required
+ // label.
+ void Allocate(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
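// Illustrative caller sketch for the Allocate declaration above (not part of
// this header; the pattern mirrors the Lithium changes elsewhere in this
// patch). Pretenuring is requested by OR-ing PRETENURE_OLD_POINTER_SPACE into
// the flags before the call; the predicate name is hypothetical:
//
//   AllocationFlags flags = TAG_OBJECT;
//   if (should_pretenure) {
//     flags = static_cast<AllocationFlags>(
//         flags | PRETENURE_OLD_POINTER_SPACE);
//   }
//   __ Allocate(size, result, scratch1, scratch2, gc_required, flags);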
void AllocateInNewSpace(Register object_size,
Register result,
Register scratch1,
__ b(ne, &check_capacity);
int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
- TAG_OBJECT);
+ __ Allocate(size, elements_reg, scratch, scratch2, &slow, TAG_OBJECT);
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
__ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ b(ne, &check_capacity);
int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
- TAG_OBJECT);
+ __ Allocate(size, elements_reg, scratch1, scratch2, &slow, TAG_OBJECT);
// Initialize the new FixedDoubleArray.
__ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
}
+ExternalReference ExternalReference::old_pointer_space_allocation_top_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->heap()->OldPointerSpaceAllocationTopAddress());
+}
+
+
+ExternalReference ExternalReference::old_pointer_space_allocation_limit_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->heap()->OldPointerSpaceAllocationLimitAddress());
+}
+
+
ExternalReference ExternalReference::handle_scope_level_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_level_address(isolate));
// Used for fast allocation in generated code.
static ExternalReference new_space_allocation_top_address(Isolate* isolate);
static ExternalReference new_space_allocation_limit_address(Isolate* isolate);
+ static ExternalReference old_pointer_space_allocation_top_address(
+ Isolate* isolate);
+ static ExternalReference old_pointer_space_allocation_limit_address(
+ Isolate* isolate);
static ExternalReference double_fp_operation(Token::Value operation,
Isolate* isolate);
HValue* size_in_bytes =
AddInstruction(new(zone) HConstant(size, Representation::Integer32()));
+ HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
+ if (FLAG_pretenure_literals) {
+ flags = static_cast<HAllocate::Flags>(
+ flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
+ }
HInstruction* object =
AddInstruction(new(zone) HAllocate(context(),
size_in_bytes,
HType::JSObject(),
- HAllocate::CAN_ALLOCATE_IN_NEW_SPACE));
+ flags));
for (int i = 0; i < size; i += kPointerSize) {
HInstruction* value =
DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
+DEFINE_bool(pretenure_literals, false, "allocate literals in old space")
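// Hedged usage note: pretenure_literals is off by default and, in this patch,
// only adds the CAN_ALLOCATE_IN_OLD_POINTER_SPACE capability bit to HAllocate
// (see the hydrogen changes below). A typical local experiment (the shell
// binary path is just an example):
//
//   d8 --pretenure_literals script.js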
// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
return new_space_.allocation_limit_address();
}
+ Address* OldPointerSpaceAllocationTopAddress() {
+ return old_pointer_space_->allocation_top_address();
+ }
+ Address* OldPointerSpaceAllocationLimitAddress() {
+ return old_pointer_space_->allocation_limit_address();
+ }
+
// Uncommit unused semi space.
bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
void HAllocate::PrintDataTo(StringStream* stream) {
size()->PrintNameTo(stream);
+ if (!GuaranteedInNewSpace()) stream->Add(" (pretenure)");
}
: type_(type),
flags_(flags) {
ASSERT((flags & CAN_ALLOCATE_IN_OLD_DATA_SPACE) == 0); // unimplemented
- ASSERT((flags & CAN_ALLOCATE_IN_OLD_POINTER_SPACE) == 0); // unimplemented
SetOperandAt(0, context);
SetOperandAt(1, size);
set_representation(Representation::Tagged());
total_size->ClearFlag(HValue::kCanOverflow);
HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
+ // TODO(hpayer): add support for old data space
+ if (FLAG_pretenure_literals && !IsFastDoubleElementsKind(kind)) {
+ flags = static_cast<HAllocate::Flags>(
+ flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
+ }
if (IsFastDoubleElementsKind(kind)) {
flags = static_cast<HAllocate::Flags>(
flags | HAllocate::ALLOCATE_DOUBLE_ALIGNED);
if (initial_capacity > 0) {
size += FixedArray::SizeFor(initial_capacity);
}
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
// Allocate a JSValue and put the tagged pointer into eax.
Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- eax, // Result.
- ecx, // New allocation top (we ignore it).
- no_reg,
- &gc_required,
- TAG_OBJECT);
+ __ Allocate(JSValue::kSize,
+ eax, // Result.
+ ecx, // New allocation top (we ignore it).
+ no_reg,
+ &gc_required,
+ TAG_OBJECT);
// Set the map.
__ LoadGlobalFunctionInitialMap(edi, ecx);
Counters* counters = masm->isolate()->counters();
Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
+ __ Allocate(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
__ IncrementCounter(counters->fast_new_closure_total(), 1);
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- eax, ebx, ecx, &gc, TAG_OBJECT);
+ __ Allocate((length * kPointerSize) + FixedArray::kHeaderSize,
+ eax, ebx, ecx, &gc, TAG_OBJECT);
// Get the function from the stack.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- eax, ebx, ecx, &gc, TAG_OBJECT);
+ __ Allocate(FixedArray::SizeFor(length), eax, ebx, ecx, &gc, TAG_OBJECT);
// Get the function or sentinel from the stack.
__ mov(ecx, Operand(esp, 1 * kPointerSize));
if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
}
- __ AllocateInNewSpace(size, eax, ebx, edx, fail, flags);
+ __ Allocate(size, eax, ebx, edx, fail, flags);
if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
__ mov(FieldOperand(eax, allocation_info_start),
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
// the constructor's prototype changes, but instance size and property
// counts remain unchanged (if slack tracking finished).
ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- no_reg,
- scratch,
- deferred->entry(),
- TAG_OBJECT);
+ __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(),
+ TAG_OBJECT);
__ bind(deferred->exit());
if (FLAG_debug_code) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
+ if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ }
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
// Allocate all objects that are part of the literal in one big
// allocation. This avoids multiple limit checks.
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Just return if allocation top is already known.
if ((flags & RESULT_CONTAINS_TOP) != 0) {
ASSERT(scratch.is(no_reg));
#ifdef DEBUG
// Assert that result actually contains top on entry.
- cmp(result, Operand::StaticVariable(new_space_allocation_top));
+ cmp(result, Operand::StaticVariable(allocation_top));
Check(equal, "Unexpected allocation top");
#endif
return;
// Move address of new object to result. Use scratch register if available.
if (scratch.is(no_reg)) {
- mov(result, Operand::StaticVariable(new_space_allocation_top));
+ mov(result, Operand::StaticVariable(allocation_top));
} else {
- mov(scratch, Immediate(new_space_allocation_top));
+ mov(scratch, Immediate(allocation_top));
mov(result, Operand(scratch, 0));
}
}
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
- Register scratch) {
+ Register scratch,
+ AllocationFlags flags) {
if (emit_debug_code()) {
test(result_end, Immediate(kObjectAlignmentMask));
Check(zero, "Unaligned allocation in new space");
}
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Update new top. Use scratch if available.
if (scratch.is(no_reg)) {
- mov(Operand::StaticVariable(new_space_allocation_top), result_end);
+ mov(Operand::StaticVariable(allocation_top), result_end);
} else {
mov(Operand(scratch, 0), result_end);
}
}
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Align the next allocation. Storing the filler map without checking top is
// always safe because the limit of the heap is always aligned.
if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
Register top_reg = result_end.is_valid() ? result_end : result;
- // Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ // Calculate new top and bail out if space is exhausted.
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
if (!top_reg.is(result)) {
mov(top_reg, result);
}
add(top_reg, Immediate(object_size));
j(carry, gc_required);
- cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
+ cmp(top_reg, Operand::StaticVariable(allocation_limit));
j(above, gc_required);
// Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch);
+ UpdateAllocationTopHelper(top_reg, scratch, flags);
// Tag result if requested.
bool tag_result = (flags & TAG_OBJECT) != 0;
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & SIZE_IN_WORDS) == 0);
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
}
// Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
+ UpdateAllocationTopHelper(result_end, scratch, flags);
}
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
}
// Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
+ UpdateAllocationTopHelper(result_end, scratch, flags);
}
Register scratch2,
Label* gc_required) {
// Allocate heap number in new space.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map.
mov(FieldOperand(result, HeapObject::kMapOffset),
ASSERT(length > 0);
// Allocate ASCII string in new space.
- AllocateInNewSpace(SeqOneByteString::SizeFor(length),
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
+ gc_required, TAG_OBJECT);
// Set the map, length and hash field.
mov(FieldOperand(result, HeapObject::kMapOffset),
Register scratch2,
Label* gc_required) {
// Allocate a cons string in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
Register scratch2,
Label* gc_required) {
// Allocate a cons string in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
Register scratch2,
Label* gc_required) {
// Allocate a sliced string in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
Register scratch2,
Label* gc_required) {
// Allocate a sliced string in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
mov(FieldOperand(result, HeapObject::kMapOffset),
// ---------------------------------------------------------------------------
// Allocation support
- // Allocate an object in new space. If the new space is exhausted control
- // continues at the gc_required label. The allocated object is returned in
- // result and end of the new object is returned in result_end. The register
- // scratch can be passed as no_reg in which case an additional object
- // reference will be added to the reloc info. The returned pointers in result
- // and result_end have not yet been tagged as heap objects. If
- // result_contains_top_on_entry is true the content of result is known to be
- // the allocation top on entry (could be result_end from a previous call to
- // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
+ // Allocate an object in new space or old pointer space. If the given space
+ // is exhausted control continues at the gc_required label. The allocated
+ // object is returned in result and end of the new object is returned in
+ // result_end. The register scratch can be passed as no_reg in which case
+ // an additional object reference will be added to the reloc info. The
+ // returned pointers in result and result_end have not yet been tagged as
+ // heap objects. If result_contains_top_on_entry is true the content of
+ // result is known to be the allocation top on entry (could be result_end
+ // from a previous call). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
void AllocateInNewSpace(int header_size,
ScaleFactor element_size,
void LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags);
- void UpdateAllocationTopHelper(Register result_end, Register scratch);
+
+ void UpdateAllocationTopHelper(Register result_end,
+ Register scratch,
+ AllocationFlags flags);
// Helper for PopHandleScope. Allowed to perform a GC and returns
// NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
__ cmp(ecx, Immediate(instance_size));
__ Check(equal, "Instance size of initial map changed.");
#endif
- __ AllocateInNewSpace(instance_size, edx, ecx, no_reg,
- &generic_stub_call, NO_ALLOCATION_FLAGS);
+ __ Allocate(instance_size, edx, ecx, no_reg, &generic_stub_call,
+ NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields and add the heap tag.
// ebx: initial map
__ j(not_equal, &check_capacity);
int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
+ __ Allocate(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
// Restore the key, which is known to be the array length.
// eax: value
__ j(not_equal, &check_capacity);
int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
+ __ Allocate(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
// Restore the key, which is known to be the array length.
__ mov(ecx, Immediate(0));
// words instead of bytes.
SIZE_IN_WORDS = 1 << 2,
// Align the allocation to a multiple of kDoubleSize
- DOUBLE_ALIGNMENT = 1 << 3
+ DOUBLE_ALIGNMENT = 1 << 3,
+ // Directly allocate in old pointer space
+ PRETENURE_OLD_POINTER_SPACE = 1 << 4
};
#endif // DEBUG
+
+class AllocationUtils {
+ public:
+ static ExternalReference GetAllocationTopReference(
+ Isolate* isolate, AllocationFlags flags) {
+ return ((flags & PRETENURE_OLD_POINTER_SPACE) != 0) ?
+ ExternalReference::old_pointer_space_allocation_top_address(isolate) :
+ ExternalReference::new_space_allocation_top_address(isolate);
+ }
+
+
+ static ExternalReference GetAllocationLimitReference(
+ Isolate* isolate, AllocationFlags flags) {
+ return ((flags & PRETENURE_OLD_POINTER_SPACE) != 0) ?
+ ExternalReference::old_pointer_space_allocation_limit_address(isolate) :
+ ExternalReference::new_space_allocation_limit_address(isolate);
+ }
+};
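// Minimal usage sketch (illustrative; it mirrors the rewritten
// LoadAllocationTopHelper/UpdateAllocationTopHelper in this patch): the
// helpers derive the correct addresses purely from the AllocationFlags.
//
//   ExternalReference top =
//       AllocationUtils::GetAllocationTopReference(isolate, flags);
//   ExternalReference limit =
//       AllocationUtils::GetAllocationLimitReference(isolate, flags);
//   // With PRETENURE_OLD_POINTER_SPACE set these are the old pointer space
//   // top/limit addresses; otherwise they are the new space addresses.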
+
+
} } // namespace v8::internal
#endif // V8_MACRO_ASSEMBLER_H_
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldPointerSpace) {
+ // Allocate a block of memory in old pointer space (filled with a filler).
+ // Used as a fallback for allocation in generated code when the old pointer
+ // space is full.
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
+ int size = size_smi->value();
+ RUNTIME_ASSERT(IsAligned(size, kPointerSize));
+ RUNTIME_ASSERT(size > 0);
+ Heap* heap = isolate->heap();
+ Object* allocation;
+ { MaybeObject* maybe_allocation =
+ heap->old_pointer_space()->AllocateRaw(size);
+ if (maybe_allocation->ToObject(&allocation)) {
+ heap->CreateFillerObjectAt(HeapObject::cast(allocation)->address(), size);
+ }
+ return maybe_allocation;
+ }
+}
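// Hedged sketch of the intended caller: when the inline bump-pointer
// allocation emitted for a pretenured HAllocate fails, the deferred code can
// fall back to this runtime entry. The ia32-flavoured instructions below are
// purely illustrative of the calling convention (one Smi argument, the size):
//
//   __ push(Immediate(Smi::FromInt(size)));
//   CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
//   // The returned block (pre-filled with a filler object) is initialized by
//   // the code after deferred->exit().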
+
+
// Push an object onto an array of objects if it is not already in the
// array. Returns true if the element was pushed on the stack and
// false otherwise.
F(CompileForOnStackReplacement, 1, 1) \
F(SetNewFunctionAttributes, 1, 1) \
F(AllocateInNewSpace, 1, 1) \
+ F(AllocateInOldPointerSpace, 1, 1) \
F(SetNativeFlag, 1, 1) \
F(StoreArrayLiteralElement, 5, 1) \
F(DebugCallbackSupportsStepping, 1, 1) \
UNCLASSIFIED,
55,
"Runtime::AllocateInNewSpace");
+ Add(ExternalReference::old_pointer_space_allocation_top_address(
+ isolate).address(),
+ UNCLASSIFIED,
+ 56,
+ "Heap::OldPointerSpaceAllocationTopAddress");
+ Add(ExternalReference::old_pointer_space_allocation_limit_address(
+ isolate).address(),
+ UNCLASSIFIED,
+ 57,
+ "Heap::OldPointerSpaceAllocationLimitAddress");
+ Add(ExternalReference(Runtime::kAllocateInOldPointerSpace, isolate).address(),
+ UNCLASSIFIED,
+ 58,
+ "Runtime::AllocateInOldPointerSpace");
// Add a small set of deopt entry addresses to encoder without generating the
// deopt table code, which isn't possible at deserialization time.
entry,
Deoptimizer::LAZY,
Deoptimizer::CALCULATE_ENTRY_ADDRESS);
- Add(address, LAZY_DEOPTIMIZATION, 56 + entry, "lazy_deopt");
+ Add(address, LAZY_DEOPTIMIZATION, 59 + entry, "lazy_deopt");
}
}
Address top() { return allocation_info_.top; }
Address limit() { return allocation_info_.limit; }
+ // The allocation top and limit addresses.
+ Address* allocation_top_address() { return &allocation_info_.top; }
+ Address* allocation_limit_address() { return &allocation_info_.limit; }
+
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
if (initial_capacity > 0) {
size += FixedArray::SizeFor(initial_capacity);
}
- __ AllocateInNewSpace(size,
- result,
- scratch2,
- scratch3,
- gc_required,
- TAG_OBJECT);
+ __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT);
// Allocated the JSArray. Now initialize the fields except for the elements
// array.
// Allocate a JSValue and put the tagged pointer into rax.
Label gc_required;
- __ AllocateInNewSpace(JSValue::kSize,
- rax, // Result.
- rcx, // New allocation top (we ignore it).
- no_reg,
- &gc_required,
- TAG_OBJECT);
+ __ Allocate(JSValue::kSize,
+ rax, // Result.
+ rcx, // New allocation top (we ignore it).
+ no_reg,
+ &gc_required,
+ TAG_OBJECT);
// Set the map.
__ LoadGlobalFunctionInitialMap(rdi, rcx);
Counters* counters = masm->isolate()->counters();
Label gc;
- __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
+ __ Allocate(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
__ IncrementCounter(counters->fast_new_closure_total(), 1);
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
- rax, rbx, rcx, &gc, TAG_OBJECT);
+ __ Allocate((length * kPointerSize) + FixedArray::kHeaderSize,
+ rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack.
__ movq(rcx, Operand(rsp, 1 * kPointerSize));
// Try to allocate the context in new space.
Label gc;
int length = slots_ + Context::MIN_CONTEXT_SLOTS;
- __ AllocateInNewSpace(FixedArray::SizeFor(length),
- rax, rbx, rcx, &gc, TAG_OBJECT);
+ __ Allocate(FixedArray::SizeFor(length),
+ rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack.
__ movq(rcx, Operand(rsp, 1 * kPointerSize));
if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
}
- __ AllocateInNewSpace(size, rax, rbx, rdx, fail, flags);
+ __ Allocate(size, rax, rbx, rdx, fail, flags);
if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
__ LoadRoot(kScratchRegister, Heap::kAllocationSiteInfoMapRootIndex);
// Allocate heap number in new space.
// Not using AllocateHeapNumber macro in order to reuse
// already loaded heap_number_map.
- __ AllocateInNewSpace(HeapNumber::kSize,
- rax,
- rdx,
- no_reg,
- &allocation_failed,
- TAG_OBJECT);
+ __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed,
+ TAG_OBJECT);
// Set the map.
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
// the constructor's prototype changes, but instance size and property
// counts remain unchanged (if slack tracking finished).
ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
- __ AllocateInNewSpace(instance_size,
- result,
- no_reg,
- scratch,
- deferred->entry(),
- TAG_OBJECT);
+ __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(),
+ TAG_OBJECT);
__ bind(deferred->exit());
if (FLAG_debug_code) {
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
+ if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ }
+ __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
__ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
// Allocate all objects that are part of the literal in one big
// allocation. This avoids multiple limit checks.
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
+ __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Just return if allocation top is already known.
if ((flags & RESULT_CONTAINS_TOP) != 0) {
ASSERT(!scratch.is_valid());
#ifdef DEBUG
// Assert that result actually contains top on entry.
- Operand top_operand = ExternalOperand(new_space_allocation_top);
+ Operand top_operand = ExternalOperand(allocation_top);
cmpq(result, top_operand);
Check(equal, "Unexpected allocation top");
#endif
// Move address of new object to result. Use scratch register if available,
// and keep address in scratch until call to UpdateAllocationTopHelper.
if (scratch.is_valid()) {
- LoadAddress(scratch, new_space_allocation_top);
+ LoadAddress(scratch, allocation_top);
movq(result, Operand(scratch, 0));
} else {
- Load(result, new_space_allocation_top);
+ Load(result, allocation_top);
}
}
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
- Register scratch) {
+ Register scratch,
+ AllocationFlags flags) {
if (emit_debug_code()) {
testq(result_end, Immediate(kObjectAlignmentMask));
Check(zero, "Unaligned allocation in new space");
}
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference allocation_top =
+ AllocationUtils::GetAllocationTopReference(isolate(), flags);
// Update new top.
if (scratch.is_valid()) {
// Scratch already contains address of allocation top.
movq(Operand(scratch, 0), result_end);
} else {
- Store(new_space_allocation_top, result_end);
+ Store(allocation_top, result_end);
}
}
-void MacroAssembler::AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::Allocate(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
}
// Calculate new top and bail out if new space is exhausted.
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate());
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
Register top_reg = result_end.is_valid() ? result_end : result;
}
addq(top_reg, Immediate(object_size));
j(carry, gc_required);
- Operand limit_operand = ExternalOperand(new_space_allocation_limit);
+ Operand limit_operand = ExternalOperand(allocation_limit);
cmpq(top_reg, limit_operand);
j(above, gc_required);
// Update allocation top.
- UpdateAllocationTopHelper(top_reg, scratch);
+ UpdateAllocationTopHelper(top_reg, scratch, flags);
bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & SIZE_IN_WORDS) == 0);
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
j(above, gc_required);
// Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
+ UpdateAllocationTopHelper(result_end, scratch, flags);
// Tag the result if requested.
if ((flags & TAG_OBJECT) != 0) {
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
j(above, gc_required);
// Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
+ UpdateAllocationTopHelper(result_end, scratch, flags);
// Align the next allocation. Storing the filler map without checking top is
// always safe because the limit of the heap is always aligned.
Register scratch,
Label* gc_required) {
// Allocate heap number in new space.
- AllocateInNewSpace(HeapNumber::kSize,
- result,
- scratch,
- no_reg,
- gc_required,
- TAG_OBJECT);
+ Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
// Set the map.
LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
Register scratch2,
Label* gc_required) {
// Allocate a cons string in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
Register scratch2,
Label* gc_required) {
// Allocate a cons string in new space.
- AllocateInNewSpace(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
Register scratch2,
Label* gc_required) {
// Allocate a sliced string in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
Register scratch2,
Label* gc_required) {
// Allocate a sliced string in new space.
- AllocateInNewSpace(SlicedString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- TAG_OBJECT);
+ Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
// ---------------------------------------------------------------------------
// Allocation support
- // Allocate an object in new space. If the new space is exhausted control
- // continues at the gc_required label. The allocated object is returned in
- // result and end of the new object is returned in result_end. The register
- // scratch can be passed as no_reg in which case an additional object
- // reference will be added to the reloc info. The returned pointers in result
- // and result_end have not yet been tagged as heap objects. If
- // result_contains_top_on_entry is true the content of result is known to be
- // the allocation top on entry (could be result_end from a previous call to
- // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
+ // Allocate an object in new space or old pointer space. If the given space
+ // is exhausted control continues at the gc_required label. The allocated
+ // object is returned in result and end of the new object is returned in
+ // result_end. The register scratch can be passed as no_reg in which case
+ // an additional object reference will be added to the reloc info. The
+ // returned pointers in result and result_end have not yet been tagged as
+ // heap objects. If result_contains_top_on_entry is true the content of
+ // result is known to be the allocation top on entry (could be result_end
+ // from a previous call). If result_contains_top_on_entry is true scratch
// should be no_reg as it is never used.
- void AllocateInNewSpace(int object_size,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags);
+ void Allocate(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags);
void AllocateInNewSpace(int header_size,
ScaleFactor element_size,
void LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags);
+
// Update allocation top with value in result_end register.
// If scratch is valid, it contains the address of the allocation top.
- void UpdateAllocationTopHelper(Register result_end, Register scratch);
+ void UpdateAllocationTopHelper(Register result_end,
+ Register scratch,
+ AllocationFlags flags);
// Helper for PopHandleScope. Allowed to perform a GC and returns
// NULL if gc_allowed. Does not perform a GC if !gc_allowed, and
__ cmpq(rcx, Immediate(instance_size));
__ Check(equal, "Instance size of initial map changed.");
#endif
- __ AllocateInNewSpace(instance_size, rdx, rcx, no_reg,
- &generic_stub_call, NO_ALLOCATION_FLAGS);
+ __ Allocate(instance_size, rdx, rcx, no_reg, &generic_stub_call,
+ NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields and add the heap tag.
// rbx: initial map
__ j(not_equal, &check_capacity);
int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
+ __ Allocate(size, rdi, rbx, r8, &slow, TAG_OBJECT);
// rax: value
// rcx: key
__ j(not_equal, &check_capacity);
int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
- __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
+ __ Allocate(size, rdi, rbx, r8, &slow, TAG_OBJECT);
// rax: value
// rcx: key
// Test pretenuring of array literals allocated with HAllocate.
TEST(OptimizedPretenuringArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_pretenure_literals = true;
InitializeVM();
if (!i::V8::UseCrankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
AlwaysAllocateScope always_allocate;
v8::Local<v8::Value> res = CompileRun(
"function f() {"
- " var numbers = new Array(1, 2, 3);"
- " numbers[0] = 3.14;"
+ " var numbers = [1, 2, 3];"
+ " numbers[0] = {};"
" return numbers;"
"};"
"f(); f(); f();"
"%OptimizeFunctionOnNextCall(f);"
"f();");
- CHECK_EQ(static_cast<int>(3.14),
- v8::Object::Cast(*res)->Get(v8_str("0"))->Int32Value());
Handle<JSObject> o =
v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
- // TODO(hpayer): remove InNewSpace check and test if object was allocated
- // in old pointer space.
- CHECK(!HEAP->InOldPointerSpace(*o));
- CHECK(HEAP->InNewSpace(*o));
+ CHECK(HEAP->InOldPointerSpace(o->elements()));
}
Handle<JSObject> o =
v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
- CHECK(HEAP->InNewSpace(*o));
+ CHECK(HEAP->InNewSpace(o->elements()));
}