const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
+enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
// A flag that indicates whether objects should be pretenured when
// allocated (allocated directly into the old generation) or not
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
AllocationSpace retry_space,
- Alignment alignment) {
+ AllocationAlignment alignment) {
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(gc_state_ == NOT_IN_GC);
AllocationResult allocation;
if (NEW_SPACE == space) {
#ifndef V8_HOST_ARCH_64_BIT
- if (alignment == kDoubleAligned) {
- allocation = new_space_.AllocateRawDoubleAligned(size_in_bytes);
- } else {
+ if (alignment == kWordAligned) {
allocation = new_space_.AllocateRaw(size_in_bytes);
+ } else {
+ allocation = new_space_.AllocateRawAligned(size_in_bytes, alignment);
}
#else
allocation = new_space_.AllocateRaw(size_in_bytes);
if (OLD_SPACE == space) {
#ifndef V8_HOST_ARCH_64_BIT
- if (alignment == kDoubleAligned) {
- allocation = old_space_->AllocateRawDoubleAligned(size_in_bytes);
- } else {
+ if (alignment == kWordAligned) {
allocation = old_space_->AllocateRaw(size_in_bytes);
+ } else {
+ allocation = old_space_->AllocateRawAligned(size_in_bytes, alignment);
}
#else
allocation = old_space_->AllocateRaw(size_in_bytes);
kDoubleAlignmentMask) == 0); // NOLINT
STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
0); // NOLINT
+#ifdef V8_HOST_ARCH_32_BIT
+STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
+ 0); // NOLINT
+#endif
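The new assert documents why kDoubleUnaligned exists: on a 32-bit host a HeapNumber is a one-word map followed by its 8-byte value, so the value field can only land on an 8-byte boundary if the object itself starts off one. A small standalone illustration of that arithmetic (the constants are assumptions about the 32-bit layout, not taken from V8's headers):

// Standalone sketch of the offset arithmetic behind the assert above.
// kValueOffset == kPointerSize == 4 is an assumption about the 32-bit layout.
#include <cstdint>

const int kPointerSize = 4;
const int kValueOffset = kPointerSize;  // map word precedes the double value
const int kDoubleAlignmentMask = 7;     // kDoubleAlignment (8) - 1

// The value field is never on an 8-byte boundary relative to the object start...
static_assert((kValueOffset & kDoubleAlignmentMask) != 0,
              "value field is off the double boundary within the object");
// ...so an object placed at 4 mod 8 (double *un*aligned) aligns the value.
static_assert(((0x1004 + kValueOffset) & kDoubleAlignmentMask) == 0,
              "a double-unaligned object start aligns the value field");

int main() { return 0; }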
-HeapObject* Heap::EnsureDoubleAligned(HeapObject* object, int size) {
- if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
+HeapObject* Heap::EnsureAligned(HeapObject* object, int size,
+ AllocationAlignment alignment) {
+ if (alignment == kDoubleAligned &&
+ (OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
+ CreateFillerObjectAt(object->address(), kPointerSize);
+ return HeapObject::FromAddress(object->address() + kPointerSize);
+ } else if (alignment == kDoubleUnaligned &&
+ (OffsetFrom(object->address()) & kDoubleAlignmentMask) == 0) {
CreateFillerObjectAt(object->address(), kPointerSize);
return HeapObject::FromAddress(object->address() + kPointerSize);
} else {
}
+HeapObject* Heap::PrecedeWithFiller(HeapObject* object) {
+ CreateFillerObjectAt(object->address(), kPointerSize);
+ return HeapObject::FromAddress(object->address() + kPointerSize);
+}
+
+
HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
- return EnsureDoubleAligned(object, size);
+ return EnsureAligned(object, size, kDoubleAligned);
}
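EnsureAligned assumes the allocation was over-sized by one word: if the object is misplaced for the requested alignment it is shifted forward past a leading filler (the PrecedeWithFiller case), otherwise the spare word becomes a trailing filler. A simplified standalone sketch of that placement decision, using plain addresses instead of HeapObject (hypothetical helper, not part of the patch):

// Simplified model of EnsureAligned's placement decision (hypothetical).
#include <cassert>
#include <cstdint>

enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
const int kPointerSize = 4;          // 32-bit host assumption
const int kDoubleAlignmentMask = 7;

struct Placement {
  uintptr_t object;  // final object address
  uintptr_t filler;  // address of the one-word filler
};

Placement EnsureAlignedSketch(uintptr_t base, int size,
                              AllocationAlignment alignment) {
  bool misplaced =
      (alignment == kDoubleAligned && (base & kDoubleAlignmentMask) != 0) ||
      (alignment == kDoubleUnaligned && (base & kDoubleAlignmentMask) == 0);
  if (misplaced) {
    return {base + kPointerSize, base};       // filler precedes the object
  }
  return {base, base + size - kPointerSize};  // spare word trails the object
}

int main() {
  // Double-aligned request at an odd word: object shifts up one word.
  assert(EnsureAlignedSketch(0x1004, 16, kDoubleAligned).object == 0x1008);
  // Double-unaligned request at an aligned base: object shifts up one word too.
  assert(EnsureAlignedSketch(0x1000, 16, kDoubleUnaligned).object == 0x1004);
  // Already positioned correctly: the filler ends up after the object.
  assert(EnsureAlignedSketch(0x1000, 16, kDoubleAligned).filler == 0x100c);
  return 0;
}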
DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
AllocationResult allocation;
-#ifndef V8_HOST_ARCH_64_BIT
+#ifdef V8_HOST_ARCH_32_BIT
if (alignment == kDoubleAlignment) {
- allocation = heap->new_space()->AllocateRawDoubleAligned(object_size);
+ allocation =
+ heap->new_space()->AllocateRawAligned(object_size, kDoubleAligned);
} else {
allocation = heap->new_space()->AllocateRaw(object_size);
}
Heap* heap = map->GetHeap();
AllocationResult allocation;
-#ifndef V8_HOST_ARCH_64_BIT
+#ifdef V8_HOST_ARCH_32_BIT
if (alignment == kDoubleAlignment) {
- allocation = heap->old_space()->AllocateRawDoubleAligned(object_size);
+ allocation =
+ heap->old_space()->AllocateRawAligned(object_size, kDoubleAligned);
} else {
allocation = heap->old_space()->AllocateRaw(object_size);
}
HeapObject* result;
{
- AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
+ AllocationResult allocation =
+ AllocateRaw(size, space, OLD_SPACE, kDoubleUnaligned);
if (!allocation.To(&result)) return allocation;
}
// This method assumes overallocation of one word. It will store a filler
-// before the object if the given object is not double aligned, otherwise
-// it will place the filler after the object.
+// before the object if the given object does not satisfy the requested
+// alignment, otherwise it will place the filler after the object.
- MUST_USE_RESULT HeapObject* EnsureDoubleAligned(HeapObject* object, int size);
+ MUST_USE_RESULT HeapObject* EnsureAligned(HeapObject* object, int size,
+ AllocationAlignment alignment);
+
+ MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object);
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);
- enum Alignment { kWordAligned, kDoubleAligned };
-
// Allocate an uninitialized object. The memory is non-executable if the
// hardware and OS allow. This is the single choke-point for allocations
// performed by the runtime and should not be bypassed (to extend this to
// inlined allocations, use the Heap::DisableInlineAllocation() support).
MUST_USE_RESULT inline AllocationResult AllocateRaw(
int size_in_bytes, AllocationSpace space, AllocationSpace retry_space,
- Alignment aligment = kWordAligned);
+ AllocationAlignment alignment = kWordAligned);
// Allocates a heap object based on the map.
MUST_USE_RESULT AllocationResult
}
AllocationResult allocation;
-#ifndef V8_HOST_ARCH_64_BIT
+#ifdef V8_HOST_ARCH_32_BIT
if (object->NeedsToEnsureDoubleAlignment()) {
- allocation = new_space->AllocateRawDoubleAligned(size);
+ allocation = new_space->AllocateRawAligned(size, kDoubleAligned);
} else {
allocation = new_space->AllocateRaw(size);
}
// always room.
UNREACHABLE();
}
-#ifndef V8_HOST_ARCH_64_BIT
+#ifdef V8_HOST_ARCH_32_BIT
if (object->NeedsToEnsureDoubleAlignment()) {
- allocation = new_space->AllocateRawDoubleAligned(size);
+ allocation = new_space->AllocateRawAligned(size, kDoubleAligned);
} else {
allocation = new_space->AllocateRaw(size);
}
HeapObject* target;
AllocationResult allocation;
-#ifndef V8_HOST_ARCH_64_BIT
+#ifdef V8_HOST_ARCH_32_BIT
if (object->NeedsToEnsureDoubleAlignment()) {
- allocation = old_space->AllocateRawDoubleAligned(object_size);
+ allocation = old_space->AllocateRawAligned(object_size, kDoubleAligned);
} else {
allocation = old_space->AllocateRaw(object_size);
}
}
-HeapObject* PagedSpace::AllocateLinearlyDoubleAlign(int size_in_bytes) {
+HeapObject* PagedSpace::AllocateLinearlyAligned(int size_in_bytes,
+ AllocationAlignment alignment) {
Address current_top = allocation_info_.top();
int alignment_size = 0;
- if ((OffsetFrom(current_top) & kDoubleAlignmentMask) != 0) {
+ if (alignment == kDoubleAligned &&
+ (OffsetFrom(current_top) & kDoubleAlignmentMask) != 0) {
+ alignment_size = kPointerSize;
+ size_in_bytes += alignment_size;
+ } else if (alignment == kDoubleUnaligned &&
+ (OffsetFrom(current_top) & kDoubleAlignmentMask) == 0) {
alignment_size = kPointerSize;
size_in_bytes += alignment_size;
}
if (new_top > allocation_info_.limit()) return NULL;
allocation_info_.set_top(new_top);
- if (alignment_size > 0)
- return heap()->EnsureDoubleAligned(HeapObject::FromAddress(current_top),
- size_in_bytes);
+ if (alignment_size > 0) {
+ return heap()->EnsureAligned(HeapObject::FromAddress(current_top),
+ size_in_bytes, alignment);
+ }
return HeapObject::FromAddress(current_top);
}
// Raw allocation.
-AllocationResult PagedSpace::AllocateRawDoubleAligned(int size_in_bytes) {
+AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
+ AllocationAlignment alignment) {
DCHECK(identity() == OLD_SPACE);
- HeapObject* object = AllocateLinearlyDoubleAlign(size_in_bytes);
+ HeapObject* object = AllocateLinearlyAligned(size_in_bytes, alignment);
int aligned_size_in_bytes = size_in_bytes + kPointerSize;
if (object == NULL) {
object = SlowAllocateRaw(aligned_size_in_bytes);
}
if (object != NULL) {
- object = heap()->EnsureDoubleAligned(object, aligned_size_in_bytes);
+ object = heap()->EnsureAligned(object, aligned_size_in_bytes, alignment);
}
}
// NewSpace
-AllocationResult NewSpace::AllocateRawDoubleAligned(int size_in_bytes) {
+AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
+ AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
int alignment_size = 0;
int aligned_size_in_bytes = 0;
-// If double alignment is required and top pointer is not aligned, we allocate
-// additional memory to take care of the alignment.
+// If the requested alignment is not satisfied by the top pointer, we allocate
+// an extra word of memory to take care of the alignment.
- if ((OffsetFrom(old_top) & kDoubleAlignmentMask) != 0) {
+ if (alignment == kDoubleAligned &&
+ (OffsetFrom(old_top) & kDoubleAlignmentMask) != 0) {
+ alignment_size += kPointerSize;
+ } else if (alignment == kDoubleUnaligned &&
+ (OffsetFrom(old_top) & kDoubleAlignmentMask) == 0) {
alignment_size += kPointerSize;
}
aligned_size_in_bytes = size_in_bytes + alignment_size;
if (allocation_info_.limit() - old_top < aligned_size_in_bytes) {
- return SlowAllocateRaw(size_in_bytes, true);
+ return SlowAllocateRaw(size_in_bytes, alignment);
}
HeapObject* obj = HeapObject::FromAddress(old_top);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
if (alignment_size > 0) {
- obj = heap()->EnsureDoubleAligned(obj, aligned_size_in_bytes);
+ obj = heap()->PrecedeWithFiller(obj);
}
// The slow path above ultimately goes through AllocateRaw, so this suffices.
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
+ DCHECK(alignment != kDoubleAligned ||
+ (OffsetFrom(obj) & kDoubleAlignmentMask) == 0);
+ DCHECK(alignment != kDoubleUnaligned ||
+ (OffsetFrom(obj) & kDoubleAlignmentMask) != 0);
+
return obj;
}
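Taken together with the kDoubleUnaligned request passed to Heap::AllocateRaw earlier in the patch, this keeps a 12-byte HeapNumber's value field on an 8-byte boundary in new space on 32-bit hosts. A toy bump-allocator run-through of two consecutive allocations (hypothetical sketch with assumed 32-bit sizes, not V8 code):

// Toy model of AllocateRawAligned on a 32-bit semispace (hypothetical).
#include <cassert>
#include <cstdint>

enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
const int kPointerSize = 4;
const int kDoubleAlignmentMask = 7;
const int kHeapNumberSize = 12;  // map word + 8-byte value (32-bit assumption)
const int kValueOffset = 4;

uintptr_t top = 0x1000;  // simulated allocation top, initially double aligned

uintptr_t AllocateRawAlignedSketch(int size, AllocationAlignment alignment) {
  int filler = 0;
  if (alignment == kDoubleAligned && (top & kDoubleAlignmentMask) != 0)
    filler = kPointerSize;
  if (alignment == kDoubleUnaligned && (top & kDoubleAlignmentMask) == 0)
    filler = kPointerSize;
  uintptr_t object = top + filler;  // any filler word precedes the object
  top += size + filler;
  return object;
}

int main() {
  uintptr_t a = AllocateRawAlignedSketch(kHeapNumberSize, kDoubleUnaligned);
  uintptr_t b = AllocateRawAlignedSketch(kHeapNumberSize, kDoubleUnaligned);
  // Both value fields land on 8-byte boundaries.
  assert(((a + kValueOffset) & kDoubleAlignmentMask) == 0);
  assert(((b + kValueOffset) & kDoubleAlignmentMask) == 0);
  return 0;
}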
Address old_top = allocation_info_.top();
if (allocation_info_.limit() - old_top < size_in_bytes) {
- return SlowAllocateRaw(size_in_bytes, false);
+ return SlowAllocateRaw(size_in_bytes, kWordAligned);
}
HeapObject* obj = HeapObject::FromAddress(old_top);
AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
- bool double_aligned) {
+ AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
Address high = to_space_.page_high();
if (allocation_info_.limit() < high) {
// or because incremental marking wants to get a chance to do a step. Set
// the new limit accordingly.
int aligned_size = size_in_bytes;
- aligned_size += (double_aligned ? kPointerSize : 0);
+ aligned_size += (alignment != kWordAligned) ? kPointerSize : 0;
Address new_top = old_top + aligned_size;
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
heap()->incremental_marking()->Step(bytes_allocated,
IncrementalMarking::GC_VIA_STACK_GUARD);
UpdateInlineAllocationLimit(aligned_size);
top_on_previous_step_ = new_top;
- if (double_aligned) return AllocateRawDoubleAligned(size_in_bytes);
+ if (alignment != kWordAligned)
+ return AllocateRawAligned(size_in_bytes, alignment);
return AllocateRaw(size_in_bytes);
} else if (AddFreshPage()) {
// Switched to new page. Try allocating again.
heap()->incremental_marking()->Step(bytes_allocated,
IncrementalMarking::GC_VIA_STACK_GUARD);
top_on_previous_step_ = to_space_.page_low();
- if (double_aligned) return AllocateRawDoubleAligned(size_in_bytes);
+ if (alignment != kWordAligned)
+ return AllocateRawAligned(size_in_bytes, alignment);
return AllocateRaw(size_in_bytes);
} else {
return AllocationResult::Retry();
-// Allocate the requested number of bytes in the space double aligned if
-// possible, return a failure object if not.
+// Allocate the requested number of bytes in the space with the given
+// alignment if possible, return a failure object if not.
- MUST_USE_RESULT inline AllocationResult AllocateRawDoubleAligned(
- int size_in_bytes);
+ MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment);
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
-// Generic fast case allocation function that tries double aligned linear
-// allocation at the address denoted by top in allocation_info_.
+// Generic fast case allocation function that tries linear allocation at the
+// address denoted by top in allocation_info_, respecting the given alignment.
- inline HeapObject* AllocateLinearlyDoubleAlign(int size_in_bytes);
+ inline HeapObject* AllocateLinearlyAligned(int size_in_bytes,
+ AllocationAlignment alignment);
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and re-try free-list
return allocation_info_.limit_address();
}
- MUST_USE_RESULT INLINE(
- AllocationResult AllocateRawDoubleAligned(int size_in_bytes));
+ MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
+ int size_in_bytes, AllocationAlignment alignment));
MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
HistogramInfo* promoted_histogram_;
MUST_USE_RESULT AllocationResult
- SlowAllocateRaw(int size_in_bytes, bool double_aligned);
+ SlowAllocateRaw(int size_in_bytes, AllocationAlignment alignment);
friend class SemiSpaceIterator;
bool HeapObject::NeedsToEnsureDoubleAlignment() {
-#ifndef V8_HOST_ARCH_64_BIT
+#ifdef V8_HOST_ARCH_32_BIT
return (IsFixedFloat64Array() || IsFixedDoubleArray() ||
IsConstantPoolArray()) &&
FixedArrayBase::cast(this)->length() != 0;
#else
return false;
-#endif // V8_HOST_ARCH_64_BIT
+#endif // V8_HOST_ARCH_32_BIT
+}
+
+
+bool HeapObject::NeedsToEnsureDoubleUnalignment() {
+#ifdef V8_HOST_ARCH_32_BIT
+ return IsHeapNumber();
+#else
+ return false;
+#endif // V8_HOST_ARCH_32_BIT
}
#endif
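The evacuation paths shown earlier still branch on NeedsToEnsureDoubleAlignment alone; a follow-up that also consults the new predicate would presumably map the pair onto an AllocationAlignment. A hypothetical sketch of that mapping (not part of this patch as shown):

// Hypothetical mapping from the two predicates to an alignment request.
enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

AllocationAlignment AlignmentForEvacuationSketch(bool needs_double_alignment,
                                                 bool needs_double_unalignment) {
  if (needs_double_alignment) return kDoubleAligned;      // e.g. FixedDoubleArray
  if (needs_double_unalignment) return kDoubleUnaligned;  // e.g. HeapNumber, 32-bit
  return kWordAligned;
}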
inline bool NeedsToEnsureDoubleAlignment();
+ inline bool NeedsToEnsureDoubleUnalignment();
// Layout description.
// First field in a heap object is map.