kDoubleAlignmentMask) == 0); // NOLINT
-INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
- int size));
-
-static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
- int size) {
+HeapObject* Heap::EnsureDoubleAligned(HeapObject* object, int size) {
if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
- heap->CreateFillerObjectAt(object->address(), kPointerSize);
+ CreateFillerObjectAt(object->address(), kPointerSize);
return HeapObject::FromAddress(object->address() + kPointerSize);
} else {
- heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
- kPointerSize);
+ CreateFillerObjectAt(object->address() + size - kPointerSize, kPointerSize);
return object;
}
}
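
For intuition, the contract above reduces to one branch on the low address bits. A minimal standalone sketch, assuming a 32-bit layout (kPointerSize == 4, doubles need 8-byte alignment; the constants mirror V8's but the snippet is self-contained, not code from this CL):

#include <cassert>
#include <cstdint>

const uintptr_t kPointerSize = 4;
const uintptr_t kDoubleAlignmentMask = 8 - 1;

// Given a (size + kPointerSize)-byte over-allocation starting at |raw|,
// return the double-aligned payload address. The skipped word is where the
// one-word filler goes: before the object when |raw| is misaligned, after
// it otherwise.
uintptr_t AlignedPayload(uintptr_t raw) {
  return (raw & kDoubleAlignmentMask) != 0 ? raw + kPointerSize : raw;
}

int main() {
  assert(AlignedPayload(0x1000) == 0x1000);  // Aligned: filler trails.
  assert(AlignedPayload(0x1004) == 0x1008);  // Misaligned: filler leads.
}
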
HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
- return EnsureDoubleAligned(this, object, size);
+ return EnsureDoubleAligned(object, size);
}
HeapObject* object, int object_size) {
Heap* heap = map->GetHeap();
- int allocation_size = object_size;
- if (alignment != kObjectAlignment) {
- DCHECK(alignment == kDoubleAlignment);
- allocation_size += kPointerSize;
- }
-
DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
- AllocationResult allocation =
- heap->new_space()->AllocateRaw(allocation_size);
+ AllocationResult allocation;
+ if (alignment == kDoubleAlignment) {
+ allocation = heap->new_space()->AllocateRawDoubleAligned(object_size);
+ } else {
+ allocation = heap->new_space()->AllocateRaw(object_size);
+ }
HeapObject* target = NULL; // Initialization to please compiler.
if (allocation.To(&target)) {
// object.
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
- if (alignment != kObjectAlignment) {
- target = EnsureDoubleAligned(heap, target, allocation_size);
- }
MigrateObject(heap, object, target, object_size);
// Update slot to new target.
HeapObject* object, int object_size) {
Heap* heap = map->GetHeap();
- int allocation_size = object_size;
- if (alignment != kObjectAlignment) {
- DCHECK(alignment == kDoubleAlignment);
- allocation_size += kPointerSize;
- }
-
AllocationResult allocation;
- allocation = heap->old_space()->AllocateRaw(allocation_size);
+ if (alignment == kDoubleAlignment) {
+ allocation = heap->old_space()->AllocateRawDoubleAligned(object_size);
+ } else {
+ allocation = heap->old_space()->AllocateRaw(object_size);
+ }
HeapObject* target = NULL; // Initialization to please compiler.
if (allocation.To(&target)) {
- if (alignment != kObjectAlignment) {
- target = EnsureDoubleAligned(heap, target, allocation_size);
- }
MigrateObject(heap, object, target, object_size);
// Update slot to new target.
if (!allocation.To(&object)) return allocation;
if (array_type == kExternalFloat64Array) {
- object = EnsureDoubleAligned(this, object, size);
+ object = EnsureDoubleAligned(object, size);
}
object->set_map(MapForFixedTypedArray(array_type));
if (!allocation.To(&object)) return allocation;
}
- return EnsureDoubleAligned(this, object, size);
+ return EnsureDoubleAligned(object, size);
}
AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
if (!allocation.To(&object)) return allocation;
}
- object = EnsureDoubleAligned(this, object, size);
+ object = EnsureDoubleAligned(object, size);
object->set_map_no_write_barrier(constant_pool_array_map());
ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
if (!allocation.To(&object)) return allocation;
}
- object = EnsureDoubleAligned(this, object, size);
+ object = EnsureDoubleAligned(object, size);
object->set_map_no_write_barrier(constant_pool_array_map());
ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
MUST_USE_RESULT AllocationResult
CopyJSObject(JSObject* source, AllocationSite* site = NULL);
+ // This method assumes overallocation of one word. It stores a filler
+ // before the object if the given object is not double aligned; otherwise
+ // it places the filler after the object.
+ MUST_USE_RESULT HeapObject* EnsureDoubleAligned(HeapObject* object, int size);
+
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
}
+HeapObject* PagedSpace::AllocateLinearlyDoubleAlign(int size_in_bytes) {
+ Address current_top = allocation_info_.top();
+ int alignment_size = 0;
+
+ if ((OffsetFrom(current_top) & kDoubleAlignmentMask) != 0) {
+ alignment_size = kPointerSize;
+ size_in_bytes += alignment_size;
+ }
+ Address new_top = current_top + size_in_bytes;
+ if (new_top > allocation_info_.limit()) return NULL;
+
+ allocation_info_.set_top(new_top);
+ if (alignment_size > 0) {
+ return heap()->EnsureDoubleAligned(HeapObject::FromAddress(current_top),
+ size_in_bytes);
+ }
+ return HeapObject::FromAddress(current_top);
+}
+
+
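
The fast path above is ordinary bump-pointer allocation that conditionally consumes one extra word. A worked trace of the misaligned case as a standalone check (same 32-bit assumptions as the earlier sketch; addresses hypothetical):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kPointerSize = 4, kDoubleAlignmentMask = 7;
  uintptr_t top = 0x2004, size = 8;       // One double, misaligned top.
  uintptr_t fill = (top & kDoubleAlignmentMask) ? kPointerSize : 0;
  uintptr_t new_top = top + size + fill;  // Bump past object plus filler.
  uintptr_t object = top + fill;          // Filler occupies 0x2004.
  assert(new_top == 0x2010);
  assert(object == 0x2008 && (object & kDoubleAlignmentMask) == 0);
}
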
// Raw allocation.
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
HeapObject* object = AllocateLinearly(size_in_bytes);
}
+// Raw allocation with double alignment.
+AllocationResult PagedSpace::AllocateRawDoubleAligned(int size_in_bytes) {
+ DCHECK(identity() == OLD_SPACE);
+ HeapObject* object = AllocateLinearlyDoubleAlign(size_in_bytes);
+ int aligned_size_in_bytes = size_in_bytes + kPointerSize;
+
+ if (object == NULL) {
+ object = free_list_.Allocate(aligned_size_in_bytes);
+ if (object == NULL) {
+ object = SlowAllocateRaw(aligned_size_in_bytes);
+ }
+ if (object != NULL) {
+ object = heap()->EnsureDoubleAligned(object, aligned_size_in_bytes);
+ }
+ }
+
+ if (object != NULL) {
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
+ return object;
+ }
+
+ return AllocationResult::Retry(identity());
+}
+
+
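
Note the asymmetry: the linear path over-allocates only when the current top is misaligned, while the free-list fallback always requests one extra word, since a free-list block's alignment is unknown until it is handed out; EnsureDoubleAligned then places the filler on whichever side is needed. Callers consume the result like any other raw allocation. A hedged sketch of the call-site pattern (the caller and length are illustrative, not code from this CL; AllocationResult, OffsetFrom, and FixedDoubleArray::SizeFor are existing V8 API):

// Hypothetical caller inside Heap: allocate a double-aligned backing
// store in old space and propagate failure so the caller can trigger GC.
int length = 16;  // Illustrative.
AllocationResult allocation =
    old_space()->AllocateRawDoubleAligned(FixedDoubleArray::SizeFor(length));
HeapObject* object = NULL;
if (!allocation.To(&object)) return allocation;  // Retry after GC.
DCHECK((OffsetFrom(object->address()) & kDoubleAlignmentMask) == 0);
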
// -----------------------------------------------------------------------------
// NewSpace
+AllocationResult NewSpace::AllocateRawDoubleAligned(int size_in_bytes) {
+ Address old_top = allocation_info_.top();
+ int alignment_size = 0;
+ int aligned_size_in_bytes = 0;
+
+ // If double alignment is required and the top pointer is not aligned, we
+ // allocate additional memory to take care of the alignment.
+ if ((OffsetFrom(old_top) & kDoubleAlignmentMask) != 0) {
+ alignment_size += kPointerSize;
+ }
+ aligned_size_in_bytes = size_in_bytes + alignment_size;
+
+ if (allocation_info_.limit() - old_top < aligned_size_in_bytes) {
+ return SlowAllocateRaw(size_in_bytes, true);
+ }
+
+ HeapObject* obj = HeapObject::FromAddress(old_top);
+ allocation_info_.set_top(allocation_info_.top() + aligned_size_in_bytes);
+ DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+ if (alignment_size > 0) {
+ obj = heap()->EnsureDoubleAligned(obj, aligned_size_in_bytes);
+ }
+
+ // The slow path above returns early and retries through
+ // AllocateRawDoubleAligned, which performs this annotation itself, so
+ // annotating the fast path here suffices.
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
+
+ return obj;
+}
+
+
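
One subtlety worth calling out: the limit check must use aligned_size_in_bytes, not size_in_bytes. With exactly size_in_bytes left but a misaligned top, taking the fast path would overrun the limit. A standalone check of that boundary condition (same 32-bit assumptions):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kPointerSize = 4, kDoubleAlignmentMask = 7;
  uintptr_t top = 0x3004, limit = 0x300c, size = 8;  // Exactly 8 bytes left.
  uintptr_t aligned_size =
      size + ((top & kDoubleAlignmentMask) ? kPointerSize : 0);
  assert(limit - top >= size);         // Naive check would take the fast path,
  assert(limit - top < aligned_size);  // correct check defers to the slow one.
}
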
AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
Address old_top = allocation_info_.top();
if (allocation_info_.limit() - old_top < size_in_bytes) {
- return SlowAllocateRaw(size_in_bytes);
+ return SlowAllocateRaw(size_in_bytes, false);
}
HeapObject* obj = HeapObject::FromAddress(old_top);
}
-AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
+AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
+ bool double_aligned) {
Address old_top = allocation_info_.top();
Address high = to_space_.page_high();
if (allocation_info_.limit() < high) {
// Either the limit has been lowered because linear allocation was disabled
// or because incremental marking wants to get a chance to do a step. Set
// the new limit accordingly.
- Address new_top = old_top + size_in_bytes;
+ int aligned_size = size_in_bytes + (double_aligned ? kPointerSize : 0);
+ Address new_top = old_top + aligned_size;
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
heap()->incremental_marking()->Step(bytes_allocated,
IncrementalMarking::GC_VIA_STACK_GUARD);
- UpdateInlineAllocationLimit(size_in_bytes);
+ UpdateInlineAllocationLimit(aligned_size);
top_on_previous_step_ = new_top;
+ if (double_aligned) return AllocateRawDoubleAligned(size_in_bytes);
return AllocateRaw(size_in_bytes);
} else if (AddFreshPage()) {
// Switched to new page. Try allocating again.
heap()->incremental_marking()->Step(bytes_allocated,
IncrementalMarking::GC_VIA_STACK_GUARD);
top_on_previous_step_ = to_space_.page_low();
+ if (double_aligned) return AllocateRawDoubleAligned(size_in_bytes);
return AllocateRaw(size_in_bytes);
} else {
return AllocationResult::Retry();
// failure object if not.
MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);
+ // Allocate the requested number of bytes in the space double aligned;
+ // return a failure object if the allocation cannot be satisfied.
+ MUST_USE_RESULT inline AllocationResult AllocateRawDoubleAligned(
+ int size_in_bytes);
+
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
// If add_to_freelist is false then just accounting stats are updated and
// address denoted by top in allocation_info_.
inline HeapObject* AllocateLinearly(int size_in_bytes);
+ // Generic fast case allocation function that tries double aligned linear
+ // allocation at the address denoted by top in allocation_info_.
+ inline HeapObject* AllocateLinearlyDoubleAlign(int size_in_bytes);
+
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and re-try free-list
// allocation.
return allocation_info_.limit_address();
}
+ MUST_USE_RESULT INLINE(
+ AllocationResult AllocateRawDoubleAligned(int size_in_bytes));
+
MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
// Reset the allocation pointer to the beginning of the active semispace.
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
- MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT AllocationResult
+ SlowAllocateRaw(int size_in_bytes, bool double_aligned);
friend class SemiSpaceIterator;