HeapObject* object;
AllocationResult allocation;
if (NEW_SPACE == space) {
-#ifndef V8_HOST_ARCH_64_BIT
- if (alignment == kWordAligned) {
- allocation = new_space_.AllocateRaw(size_in_bytes);
- } else {
- allocation = new_space_.AllocateRawAligned(size_in_bytes, alignment);
- }
-#else
- allocation = new_space_.AllocateRaw(size_in_bytes);
-#endif
+ allocation = new_space_.AllocateRaw(size_in_bytes, alignment);
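+  // With always_allocate(), a failed new-space allocation falls through and
+  // is retried below in retry_space; otherwise the result is returned.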
if (always_allocate() && allocation.IsRetry() && retry_space != NEW_SPACE) {
space = retry_space;
  } else {
    if (allocation.To(&object)) {
      OnAllocationEvent(object, size_in_bytes);
    }
    return allocation;
  }
}
if (OLD_SPACE == space) {
-#ifndef V8_HOST_ARCH_64_BIT
- if (alignment == kWordAligned) {
- allocation = old_space_->AllocateRaw(size_in_bytes);
- } else {
- allocation = old_space_->AllocateRawAligned(size_in_bytes, alignment);
- }
-#else
- allocation = old_space_->AllocateRaw(size_in_bytes);
-#endif
+ allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
} else if (CODE_SPACE == space) {
if (size_in_bytes <= code_space()->AreaSize()) {
- allocation = code_space_->AllocateRaw(size_in_bytes);
+ allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
} else {
// Large code objects are allocated in large object space.
      allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
    }
  } else if (LO_SPACE == space) {
    allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
} else {
DCHECK(MAP_SPACE == space);
- allocation = map_space_->AllocateRaw(size_in_bytes);
+ allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
}
if (allocation.To(&object)) {
    OnAllocationEvent(object, size_in_bytes);
  }
  return allocation;
}
DCHECK_LE(size, MemoryAllocator::PageAreaSize(
static_cast<AllocationSpace>(space)));
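+      // The reserved block is only turned into free space, so word alignment
+      // suffices and the unaligned path is used.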
if (space == NEW_SPACE) {
- allocation = new_space()->AllocateRaw(size);
+ allocation = new_space()->AllocateRawUnaligned(size);
} else {
- allocation = paged_space(space)->AllocateRaw(size);
+ allocation = paged_space(space)->AllocateRawUnaligned(size);
}
HeapObject* free_space;
if (allocation.To(&free_space)) {
Heap* heap = map->GetHeap();
DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
- AllocationResult allocation;
-#ifdef V8_HOST_ARCH_32_BIT
- if (alignment == kDoubleAlignment) {
- allocation =
- heap->new_space()->AllocateRawAligned(object_size, kDoubleAligned);
- } else {
- allocation = heap->new_space()->AllocateRaw(object_size);
- }
-#else
- allocation = heap->new_space()->AllocateRaw(object_size);
-#endif
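+  // Map the scavenger's legacy kDoubleAlignment template flag onto the
+  // allocator's AllocationAlignment enum.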
+ AllocationAlignment align =
+ alignment == kDoubleAlignment ? kDoubleAligned : kWordAligned;
+ AllocationResult allocation =
+ heap->new_space()->AllocateRaw(object_size, align);
HeapObject* target = NULL; // Initialization to please compiler.
if (allocation.To(&target)) {
HeapObject* object, int object_size) {
Heap* heap = map->GetHeap();
- AllocationResult allocation;
-#ifdef V8_HOST_ARCH_32_BIT
- if (alignment == kDoubleAlignment) {
- allocation =
- heap->old_space()->AllocateRawAligned(object_size, kDoubleAligned);
- } else {
- allocation = heap->old_space()->AllocateRaw(object_size);
- }
-#else
- allocation = heap->old_space()->AllocateRaw(object_size);
-#endif
+ AllocationAlignment align =
+ alignment == kDoubleAlignment ? kDoubleAligned : kWordAligned;
+ AllocationResult allocation =
+ heap->old_space()->AllocateRaw(object_size, align);
HeapObject* target = NULL; // Initialization to please compiler.
if (allocation.To(&target)) {
continue;
}
- AllocationResult allocation;
-#ifdef V8_HOST_ARCH_32_BIT
- if (object->NeedsToEnsureDoubleAlignment()) {
- allocation = new_space->AllocateRawAligned(size, kDoubleAligned);
- } else {
- allocation = new_space->AllocateRaw(size);
- }
-#else
- allocation = new_space->AllocateRaw(size);
-#endif
+ AllocationAlignment alignment = object->NeedsToEnsureDoubleAlignment()
+ ? kDoubleAligned
+ : kWordAligned;
+ AllocationResult allocation = new_space->AllocateRaw(size, alignment);
if (allocation.IsRetry()) {
if (!new_space->AddFreshPage()) {
      // Shouldn't happen. We are sweeping linearly, and to-space
      // always has room.
UNREACHABLE();
}
-#ifdef V8_HOST_ARCH_32_BIT
- if (object->NeedsToEnsureDoubleAlignment()) {
- allocation = new_space->AllocateRawAligned(size, kDoubleAligned);
- } else {
- allocation = new_space->AllocateRaw(size);
- }
-#else
- allocation = new_space->AllocateRaw(size);
-#endif
+ allocation = new_space->AllocateRaw(size, alignment);
DCHECK(!allocation.IsRetry());
}
Object* target = allocation.ToObjectChecked();
OldSpace* old_space = heap()->old_space();
HeapObject* target;
- AllocationResult allocation;
-#ifdef V8_HOST_ARCH_32_BIT
- if (object->NeedsToEnsureDoubleAlignment()) {
- allocation = old_space->AllocateRawAligned(object_size, kDoubleAligned);
- } else {
- allocation = old_space->AllocateRaw(object_size);
- }
-#else
- allocation = old_space->AllocateRaw(object_size);
-#endif
+ AllocationAlignment alignment =
+ object->NeedsToEnsureDoubleAlignment() ? kDoubleAligned : kWordAligned;
+ AllocationResult allocation = old_space->AllocateRaw(object_size, alignment);
if (allocation.To(&target)) {
MigrateObject(target, object, object_size, old_space->identity());
heap()->IncrementPromotedObjectsSize(object_size);
int size = object->Size();
+ AllocationAlignment alignment = object->NeedsToEnsureDoubleAlignment()
+ ? kDoubleAligned
+ : kWordAligned;
HeapObject* target_object;
- AllocationResult allocation = space->AllocateRaw(size);
+ AllocationResult allocation = space->AllocateRaw(size, alignment);
if (!allocation.To(&target_object)) {
// If allocation failed, use emergency memory and re-try allocation.
CHECK(space->HasEmergencyMemory());
space->UseEmergencyMemory();
- allocation = space->AllocateRaw(size);
+ allocation = space->AllocateRaw(size, alignment);
}
if (!allocation.To(&target_object)) {
// OS refused to give us memory.
// Raw allocation.
-AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
+AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
HeapObject* object = AllocateLinearly(size_in_bytes);
if (object == NULL) {
}
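+
+// On 64-bit builds kPointerSize == kDoubleSize, so a word-aligned allocation
+// is already double-aligned; only 32-bit builds need to dispatch to the
+// aligned slow path.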
+AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
+ AllocationAlignment alignment) {
+#ifdef V8_HOST_ARCH_32_BIT
+ return alignment == kDoubleAligned
+ ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
+ : AllocateRawUnaligned(size_in_bytes);
+#else
+ return AllocateRawUnaligned(size_in_bytes);
+#endif
+}
+
+
// -----------------------------------------------------------------------------
// NewSpace
}
-AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
+AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
Address old_top = allocation_info_.top();
if (allocation_info_.limit() - old_top < size_in_bytes) {
}
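+
+// As in PagedSpace::AllocateRaw above, the alignment dispatch is compiled in
+// only on 32-bit builds, where word alignment does not imply double
+// alignment.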
+AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
+ AllocationAlignment alignment) {
+#ifdef V8_HOST_ARCH_32_BIT
+ return alignment == kDoubleAligned
+ ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
+ : AllocateRawUnaligned(size_in_bytes);
+#else
+ return AllocateRawUnaligned(size_in_bytes);
+#endif
+}
+
+
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
return static_cast<LargePage*>(chunk);
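+  // kDoubleUnaligned places the object at an address that is deliberately not
+  // double-aligned, so that an 8-byte field at a 4-byte offset inside the
+  // object (e.g. a HeapNumber's value field on 32-bit builds) ends up
+  // double-aligned.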
return AllocateRawAligned(size_in_bytes, kDoubleAligned);
else if (alignment == kDoubleUnaligned)
return AllocateRawAligned(size_in_bytes, kDoubleUnaligned);
- return AllocateRaw(size_in_bytes);
+ return AllocateRawUnaligned(size_in_bytes);
} else if (AddFreshPage()) {
// Switched to new page. Try allocating again.
int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
return AllocateRawAligned(size_in_bytes, kDoubleAligned);
else if (alignment == kDoubleUnaligned)
return AllocateRawAligned(size_in_bytes, kDoubleUnaligned);
- return AllocateRaw(size_in_bytes);
+ return AllocateRawUnaligned(size_in_bytes);
} else {
return AllocationResult::Retry();
}
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
- MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
+ int size_in_bytes);
// Allocate the requested number of bytes in the space double aligned if
// possible, return a failure object if not.
MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment);
+  // Allocate the requested number of bytes in the space, taking the requested
+  // allocation alignment into account.
+ MUST_USE_RESULT inline AllocationResult AllocateRaw(
+ int size_in_bytes, AllocationAlignment alignment);
+
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
// If add_to_freelist is false then just accounting stats are updated and
MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment));
- MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
+ MUST_USE_RESULT INLINE(
+ AllocationResult AllocateRawUnaligned(int size_in_bytes));
+
+ MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
+ int size_in_bytes, AllocationAlignment alignment));
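+
+  // A minimal caller-side sketch (hypothetical, for illustration):
+  //
+  //   AllocationResult r = new_space->AllocateRaw(size, kDoubleAligned);
+  //   HeapObject* obj = NULL;
+  //   if (!r.To(&obj)) return r;  // propagate the retry/failure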
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
// Helper function that simulates a full new-space in the heap.
static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
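+  // The unaligned path matches the old AllocateRaw behavior exactly, so the
+  // fixed-size request below behaves as before, with no alignment filler
+  // inserted.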
- v8::internal::AllocationResult allocation =
- space->AllocateRaw(v8::internal::Page::kMaxRegularHeapObjectSize);
+ v8::internal::AllocationResult allocation = space->AllocateRawUnaligned(
+ v8::internal::Page::kMaxRegularHeapObjectSize);
if (allocation.IsRetry()) return false;
v8::internal::HeapObject* free_space = NULL;
CHECK(allocation.To(&free_space));
int new_linear_size = space_remaining - extra_bytes;
if (new_linear_size == 0) return;
v8::internal::AllocationResult allocation =
- space->AllocateRaw(new_linear_size);
+ space->AllocateRawUnaligned(new_linear_size);
v8::internal::HeapObject* free_space = NULL;
CHECK(allocation.To(&free_space));
space->heap()->CreateFillerObjectAt(free_space->address(), new_linear_size);
  // We need a filler the size of an AllocationMemento object, plus an extra
  // pointer-sized fill value.
HeapObject* obj = NULL;
- AllocationResult allocation = CcTest::heap()->new_space()->AllocateRaw(
- AllocationMemento::kSize + kPointerSize);
+ AllocationResult allocation =
+ CcTest::heap()->new_space()->AllocateRawUnaligned(
+ AllocationMemento::kSize + kPointerSize);
CHECK(allocation.To(&obj));
Address addr_obj = obj->address();
CcTest::heap()->CreateFillerObjectAt(
CHECK(new_space.HasBeenSetUp());
while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
- Object* obj = new_space.AllocateRaw(
- Page::kMaxRegularHeapObjectSize).ToObjectChecked();
+ Object* obj =
+ new_space.AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
+ .ToObjectChecked();
CHECK(new_space.Contains(HeapObject::cast(obj)));
}
CHECK(s->SetUp());
while (s->Available() > 0) {
- s->AllocateRaw(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
+ s->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
}
s->TearDown();
  // Try to allocate out of the new space. A new page should be added and the
  // allocation should succeed.
- v8::internal::AllocationResult allocation = new_space->AllocateRaw(80);
+ v8::internal::AllocationResult allocation =
+ new_space->AllocateRawUnaligned(80);
CHECK(!allocation.IsRetry());
CHECK(new_space->CommittedMemory() == 2 * Page::kPageSize);