Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
HeapObject** slot,
HeapObject* object,
int object_size) {
- SLOW_ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
SLOW_ASSERT(object->Size() == object_size);
int allocation_size = object_size;
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
int size = HeapNumber::kSize;
- STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
+ STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
+
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
Object* result;
MaybeObject* Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
- STATIC_ASSERT(Cell::kSize <= Page::kMaxNonCodeHeapObjectSize);
+ STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
Object* result;
{ MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
MaybeObject* Heap::AllocatePropertyCell() {
int size = PropertyCell::kSize;
- STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxNonCodeHeapObjectSize);
+ STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
Object* result;
MaybeObject* maybe_result =
MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate foreigns in paged spaces.
- STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
+ STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
Foreign* result;
MaybeObject* maybe_result = Allocate(foreign_map(), space);
MaybeObject* Heap::AllocateSymbol() {
// Statically ensure that it is safe to allocate symbols in paged spaces.
- STATIC_ASSERT(Symbol::kSize <= Page::kMaxNonCodeHeapObjectSize);
+ STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
Object* result;
MaybeObject* maybe =
Page::kPageSize));
// We rely on being able to allocate new arrays in paged spaces.
- ASSERT(MaxRegularSpaceAllocationSize() >=
+ ASSERT(Page::kMaxRegularHeapObjectSize >=
(JSArray::kSize +
FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
AllocationMemento::kSize));
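
The bound checked by this assert can be reproduced with a small standalone sketch. All constants below are hypothetical stand-ins for the real V8 values (JSArray::kSize, the FixedArray header, AllocationMemento::kSize, the page payload); only the shape of the inequality matters:

#include <cassert>

// Hypothetical stand-ins for the V8 constants (not the real values).
const int kPointerSize = 4;                        // assumed 32-bit build
const int kJSArraySize = 4 * kPointerSize;         // stand-in JSArray::kSize
const int kFixedArrayHeaderSize = 2 * kPointerSize;
const int kAllocationMementoSize = 2 * kPointerSize;
const int kMaxRegularHeapObjectSize = (1 << 20) - 128;  // stand-in page payload
const int kInitialMaxFastElementArray = 100000;    // as in objects.h below

// FixedArray::SizeFor(n): header plus n tagged element slots.
int FixedArraySizeFor(int length) {
  return kFixedArrayHeaderSize + length * kPointerSize;
}

int main() {
  // Mirrors the ASSERT above: a JSArray with the initial maximum number of
  // fast elements, plus an AllocationMemento, must still be a regular
  // (non-large-object-space) allocation.
  assert(kMaxRegularHeapObjectSize >=
         kJSArraySize + FixedArraySizeFor(kInitialMaxFastElementArray) +
             kAllocationMementoSize);
  return 0;
}
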
int InitialSemiSpaceSize() { return initial_semispace_size_; }
intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
intptr_t MaxExecutableSize() { return max_executable_size_; }
- int MaxRegularSpaceAllocationSize() { return InitialSemiSpaceSize() * 4/5; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
PretenureFlag pretenure) {
ASSERT(preferred_old_space == OLD_POINTER_SPACE ||
preferred_old_space == OLD_DATA_SPACE);
- if (object_size > Page::kMaxNonCodeHeapObjectSize) return LO_SPACE;
+ if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE;
return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
}
}
}
- if (new_dominator_size > isolate()->heap()->MaxRegularSpaceAllocationSize()) {
+ // Since we clear the first word after the folded memory, we cannot use
+ // the full Page::kMaxRegularHeapObjectSize.
+ if (new_dominator_size > Page::kMaxRegularHeapObjectSize - kPointerSize) {
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n",
id(), Mnemonic(), dominator_allocate->id(),
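
A minimal sketch of the new bound, with hypothetical stand-ins for kPointerSize and Page::kMaxRegularHeapObjectSize; folding must leave one word free for the filler written after the folded region:

// Hypothetical stand-ins, not the real V8 constants.
const int kPointerSize = 8;
const int kMaxRegularHeapObjectSize = (1 << 20) - 128;

// True if growing the dominator allocation to new_dominator_size still
// leaves room for the filler word written after the folded memory.
bool CanFoldInto(int new_dominator_size) {
  return new_dominator_size <= kMaxRegularHeapObjectSize - kPointerSize;
}

An allocation that would grow the dominator to exactly kMaxRegularHeapObjectSize is therefore rejected, which is what the trace message above reports.
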
void HGraphBuilder::BuildNewSpaceArrayCheck(HValue* length, ElementsKind kind) {
- Heap* heap = isolate()->heap();
int element_size = IsFastDoubleElementsKind(kind) ? kDoubleSize
: kPointerSize;
- int max_size = heap->MaxRegularSpaceAllocationSize() / element_size;
+ int max_size = Page::kMaxRegularHeapObjectSize / element_size;
max_size -= JSArray::kSize / element_size;
HConstant* max_size_constant = Add<HConstant>(max_size);
Add<HBoundsCheck>(length, max_size_constant);
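
The max_size computation is plain integer arithmetic; a sketch with hypothetical stand-in constants (the real values come from the build's pointer and double sizes and from JSArray::kSize):

// Hypothetical stand-ins, not the real V8 constants.
const int kPointerSize = 8;
const int kDoubleSize = 8;
const int kJSArraySize = 32;                       // stand-in JSArray::kSize
const int kMaxRegularHeapObjectSize = (1 << 20) - 128;

// Largest new-space array length that keeps the JSArray header plus its
// elements within one regular-page allocation, mirroring the bound above.
int MaxNewSpaceArrayLength(bool fast_double_elements) {
  int element_size = fast_double_elements ? kDoubleSize : kPointerSize;
  int max_size = kMaxRegularHeapObjectSize / element_size;
  max_size -= kJSArraySize / element_size;
  return max_size;
}
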
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
heap_profiler->ObjectMoveEvent(src, dst, size);
}
ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
- ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
if (dest == OLD_POINTER_SPACE) {
Address src_slot = src;
Address dst_slot = dst;
bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
int object_size) {
// TODO(hpayer): Replace that check with an assert.
- CHECK(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ CHECK(object_size <= Page::kMaxRegularHeapObjectSize);
OldSpace* target_space = heap()->TargetSpace(object);
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
if (!HasTransitionArray()) return true;
return FixedArray::SizeFor(transitions()->length() +
TransitionArray::kTransitionSize)
- <= Page::kMaxNonCodeHeapObjectSize;
+ <= Page::kMaxRegularHeapObjectSize;
}
(base == kVisitJSObject));
ASSERT(IsAligned(object_size, kPointerSize));
ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
const VisitorId specialization = static_cast<VisitorId>(
base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
// don't want to be wasteful with long lived objects.
static const int kMaxUncheckedOldFastElementsLength = 500;
- // Note that Heap::MaxRegularSpaceAllocationSize() puts a limit on
+ // Note that Page::kMaxRegularHeapObjectSize puts a limit on
// permissible values (see the ASSERT in heap.cc).
static const int kInitialMaxFastElementArray = 100000;
Heap* heap = isolate->heap();
RUNTIME_ASSERT(IsAligned(size, kPointerSize));
RUNTIME_ASSERT(size > 0);
- RUNTIME_ASSERT(size <= heap->MaxRegularSpaceAllocationSize());
+ RUNTIME_ASSERT(size <= Page::kMaxRegularHeapObjectSize);
HeapObject* allocation;
{ MaybeObject* maybe_allocation = heap->AllocateRaw(size, space, space);
if (!maybe_allocation->To(&allocation)) return maybe_allocation;
Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
- ASSERT(page->area_size() <= kMaxNonCodeHeapObjectSize);
+ ASSERT(page->area_size() <= kMaxRegularHeapObjectSize);
ASSERT(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size());
owner->Free(page->area_start(), page->area_size());
ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
#define ASSERT_OBJECT_SIZE(size) \
- ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
+ ASSERT((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
#define ASSERT_PAGE_OFFSET(offset) \
ASSERT((Page::kObjectStartOffset <= offset) \
// are allocated in large object space and are never moved in memory. This
// also applies to new space allocation, since objects are never migrated
// from new space to large object space. Takes double alignment into account.
- static const int kMaxNonCodeHeapObjectSize = kPageSize - kObjectStartOffset;
+ static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
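
For orientation, the constant is just the page payload: the page size minus the space reserved for the page header. A sketch with hypothetical numbers (the real ones depend on kPageSizeBits and the MemoryChunk header):

// Hypothetical page layout, for illustration only.
const int kPageSizeBits = 20;                  // assumed 1 MB pages
const int kPageSize = 1 << kPageSizeBits;
const int kObjectStartOffset = 128;            // stand-in header size
// Largest object that fits in the payload of one regular page; anything
// bigger must go to large object space.
const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
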
// Returns maximum available bytes that the old space can have.
intptr_t MaxAvailable() {
- return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
+ return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
}
// Returns an indication of whether a pointer is in a space that has
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
- static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
+ static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
(1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
(1 << MemoryChunk::SCAN_ON_SCAVENGE);
- static const int kAreaSize = Page::kMaxNonCodeHeapObjectSize;
+ static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
inline NewSpacePage* next_page() const {
return static_cast<NewSpacePage*>(next_chunk());
virtual void VerifyObject(HeapObject* obj);
private:
- static const int kMapsPerPage = Page::kMaxNonCodeHeapObjectSize / Map::kSize;
+ static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
// Do map space compaction if there is a page gap.
int CompactionThreshold() {
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
+ ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
if (current_allocated < code_range_size / 10) {
// Allocate a block.
// Geometrically distributed sizes, greater than
- // Page::kMaxNonCodeHeapObjectSize (which is greater than code page area).
+ // Page::kMaxRegularHeapObjectSize (which is greater than code page area).
// TODO(gc): instead of using 3 use some constant based on code_range_size
// kMaxHeapObjectSize.
size_t requested =
- (Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
+ (Page::kMaxRegularHeapObjectSize << (Pseudorandom() % 3)) +
Pseudorandom() % 5000 + 1;
size_t allocated = 0;
Address base = code_range.AllocateRawMemory(requested,
factory->NewStringFromAscii(CStrVector("abcdefghij"), TENURED);
// Allocate a large string (for large object space).
- int large_size = Page::kMaxNonCodeHeapObjectSize + 1;
+ int large_size = Page::kMaxRegularHeapObjectSize + 1;
char* str = new char[large_size];
for (int i = 0; i < large_size - 1; ++i) str[i] = 'a';
str[large_size - 1] = '\0';
// just enough room to allocate JSObject and thus fill the newspace.
int allocation_amount = Min(FixedArray::kMaxSize,
- Page::kMaxNonCodeHeapObjectSize + kPointerSize);
+ Page::kMaxRegularHeapObjectSize + kPointerSize);
int allocation_len = LenFromSize(allocation_amount);
NewSpace* new_space = heap->new_space();
Address* top_addr = new_space->allocation_top_address();
// Allocate a fixed array in the new space.
int array_length =
- (Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
+ (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) /
(4 * kPointerSize);
Object* obj = heap->AllocateFixedArray(array_length)->ToObjectChecked();
Handle<FixedArray> array(FixedArray::cast(obj));
// Allocate a big fixed array in the new space.
int array_length =
- (Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
+ (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) /
(2 * kPointerSize);
Object* obj = heap->AllocateFixedArray(array_length)->ToObjectChecked();
Handle<FixedArray> array(FixedArray::cast(obj));
CcTest::heap()->ReservedSemiSpaceSize()));
CHECK(new_space.HasBeenSetUp());
- while (new_space.Available() >= Page::kMaxNonCodeHeapObjectSize) {
+ while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
Object* obj =
- new_space.AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->
+ new_space.AllocateRaw(Page::kMaxRegularHeapObjectSize)->
ToObjectUnchecked();
CHECK(new_space.Contains(HeapObject::cast(obj)));
}
CHECK(s->SetUp());
while (s->Available() > 0) {
- s->AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->ToObjectUnchecked();
+ s->AllocateRaw(Page::kMaxRegularHeapObjectSize)->ToObjectUnchecked();
}
s->TearDown();
assertEquals(result[1], 4);
assertEquals(result2[1], 6);
-// Test to exceed the Heap::MaxRegularSpaceAllocationSize limit but not
-// the Page::kMaxNonCodeHeapObjectSize limit with allocation folding.
+// Test to almost exceed the Page::kMaxRegularHeapObjectSize limit.
function boom() {
var a1 = new Array(84632);
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Test to exceed the Heap::MaxRegularSpaceAllocationSize with an array
+// Test to exceed the Page::kMaxRegularHeapObjectSize with an array
// constructor call taking many arguments.
function boom() {