Ensure that executable pages are properly guarded.
author     vegorov@chromium.org <vegorov@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
           Thu, 23 Feb 2012 12:11:24 +0000 (12:11 +0000)
committer  vegorov@chromium.org <vegorov@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
           Thu, 23 Feb 2012 12:11:24 +0000 (12:11 +0000)
Split executable memory chunks into two pieces: a header with all metadata (protection: RW) and a body (protection: RWX). Separate the header from the body with a guard page, and add another guard page after the page body.
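The resulting layout of an executable chunk looks like this (a sketch; the exact offsets come from OS::CommitPageSize() and Page::kObjectStartOffset via MemoryAllocator::CodePageGuardStartOffset() and friends in the spaces.cc changes below):

    +--------------+------------+-----------------------------+------------+
    | header (RW)  | guard page | body (RWX)                  | guard page |
    | all metadata | no access  | area_start ....... area_end | no access  |
    +--------------+------------+-----------------------------+------------+
    base           4 KB         8 KB         chunk_size - 4 KB   chunk_size

Offsets shown assume 4 KB OS commit pages. Under that assumption a code page gives up roughly three OS pages (header rounding plus the two guard pages) of allocatable area compared to a data page, which is why the single page-wide size constant is replaced by per-space area sizes throughout the patch.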

R=erik.corry@gmail.com
BUG=http://crbug.com/115151

Review URL: https://chromiumcodereview.appspot.com/9452002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10809 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

26 files changed:
src/deoptimizer.cc
src/heap-inl.h
src/heap.cc
src/heap.h
src/mark-compact.cc
src/objects-visiting.h
src/platform-cygwin.cc
src/platform-freebsd.cc
src/platform-linux.cc
src/platform-macos.cc
src/platform-nullos.cc
src/platform-openbsd.cc
src/platform-solaris.cc
src/platform-win32.cc
src/platform.h
src/serialize.cc
src/serialize.h
src/spaces-inl.h
src/spaces.cc
src/spaces.h
src/store-buffer.cc
test/cctest/test-alloc.cc
test/cctest/test-heap.cc
test/cctest/test-mark-compact.cc
test/cctest/test-serialize.cc
test/cctest/test-spaces.cc

diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 682eb53..55ecc71 100644
@@ -451,7 +451,7 @@ Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
     base = data->lazy_deoptimization_entry_code_;
   }
   return
-      static_cast<Address>(base->body()) + (id * table_entry_size_);
+      static_cast<Address>(base->area_start()) + (id * table_entry_size_);
 }
 
 
@@ -464,14 +464,14 @@ int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
     base = data->lazy_deoptimization_entry_code_;
   }
   if (base == NULL ||
-      addr < base->body() ||
-      addr >= base->body() +
+      addr < base->area_start() ||
+      addr >= base->area_start() +
           (kNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
   ASSERT_EQ(0,
-      static_cast<int>(addr - base->body()) % table_entry_size_);
-  return static_cast<int>(addr - base->body()) / table_entry_size_;
+      static_cast<int>(addr - base->area_start()) % table_entry_size_);
+  return static_cast<int>(addr - base->area_start()) / table_entry_size_;
 }
 
 
@@ -1152,11 +1152,12 @@ MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
       Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
                                                             EXECUTABLE,
                                                             NULL);
+  ASSERT(chunk->area_size() >= desc.instr_size);
   if (chunk == NULL) {
     V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
   }
-  memcpy(chunk->body(), desc.buffer, desc.instr_size);
-  CPU::FlushICache(chunk->body(), desc.instr_size);
+  memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(chunk->area_start(), desc.instr_size);
   return chunk;
 }
 
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 39cdf13..81ed448 100644
@@ -49,7 +49,7 @@ void PromotionQueue::insert(HeapObject* target, int size) {
     NewSpacePage* rear_page =
         NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
     ASSERT(!rear_page->prev_page()->is_anchor());
-    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->body_limit());
+    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
     ActivateGuardIfOnTheSamePage();
   }
 
@@ -81,11 +81,6 @@ void PromotionQueue::ActivateGuardIfOnTheSamePage() {
 }
 
 
-int Heap::MaxObjectSizeInPagedSpace() {
-  return Page::kMaxHeapObjectSize;
-}
-
-
 MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
                                           PretenureFlag pretenure) {
   // Check for ASCII first since this is the common case.
@@ -119,7 +114,7 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
 
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
+  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -153,7 +148,7 @@ MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
 
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
+  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
diff --git a/src/heap.cc b/src/heap.cc
index 4c54e84..f7205f2 100644
@@ -1092,7 +1092,7 @@ void PromotionQueue::RelocateQueueHead() {
   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
   intptr_t* head_start = rear_;
   intptr_t* head_end =
-      Min(front_, reinterpret_cast<intptr_t*>(p->body_limit()));
+      Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
 
   int entries_count =
       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
@@ -1435,7 +1435,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
           NewSpaceScavenger::IterateBody(object->map(), object);
       } else {
         new_space_front =
-            NewSpacePage::FromLimit(new_space_front)->next_page()->body();
+            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
       }
     }
 
@@ -1597,7 +1597,7 @@ class ScavengingVisitor : public StaticVisitorBase {
                                     HeapObject* object,
                                     int object_size) {
     SLOW_ASSERT((size_restriction != SMALL) ||
-                (object_size <= Page::kMaxHeapObjectSize));
+                (object_size <= Page::kMaxNonCodeHeapObjectSize));
     SLOW_ASSERT(object->Size() == object_size);
 
     Heap* heap = map->GetHeap();
@@ -1605,7 +1605,7 @@ class ScavengingVisitor : public StaticVisitorBase {
       MaybeObject* maybe_result;
 
       if ((size_restriction != SMALL) &&
-          (object_size > Page::kMaxHeapObjectSize)) {
+          (object_size > Page::kMaxNonCodeHeapObjectSize)) {
         maybe_result = heap->lo_space()->AllocateRaw(object_size,
                                                      NOT_EXECUTABLE);
       } else {
@@ -2264,7 +2264,7 @@ bool Heap::CreateInitialMaps() {
 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate heap numbers in paged
   // spaces.
-  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
+  STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
 
   Object* result;
@@ -2285,7 +2285,7 @@ MaybeObject* Heap::AllocateHeapNumber(double value) {
 
   // This version of AllocateHeapNumber is optimized for
   // allocation in new space.
-  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
+  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
   Object* result;
   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
@@ -2856,7 +2856,7 @@ MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
 
 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate foreigns in paged spaces.
-  STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
+  STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   Foreign* result;
   MaybeObject* maybe_result = Allocate(foreign_map(), space);
@@ -3274,7 +3274,7 @@ MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
   }
   int size = ByteArray::SizeFor(length);
   Object* result;
-  { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
+  { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
                    ? old_data_space_->AllocateRaw(size)
                    : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -3293,7 +3293,7 @@ MaybeObject* Heap::AllocateByteArray(int length) {
   }
   int size = ByteArray::SizeFor(length);
   AllocationSpace space =
-      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
+      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
   Object* result;
   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -3359,7 +3359,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
   MaybeObject* maybe_result;
   // Large code objects and code objects which should stay at a fixed address
   // are allocated in large object space.
-  if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
+  if (obj_size > code_space()->AreaSize() || immovable) {
     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
@@ -3408,7 +3408,7 @@ MaybeObject* Heap::CopyCode(Code* code) {
   // Allocate an object the same size as the code object.
   int obj_size = code->Size();
   MaybeObject* maybe_result;
-  if (obj_size > MaxObjectSizeInPagedSpace()) {
+  if (obj_size > code_space()->AreaSize()) {
     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
@@ -3451,7 +3451,7 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
       static_cast<size_t>(code->instruction_end() - old_addr);
 
   MaybeObject* maybe_result;
-  if (new_obj_size > MaxObjectSizeInPagedSpace()) {
+  if (new_obj_size > code_space()->AreaSize()) {
     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(new_obj_size);
@@ -3772,7 +3772,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
   // Allocate the JSObject.
   AllocationSpace space =
       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
-  if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
+  if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
   Object* obj;
   { MaybeObject* maybe_obj = Allocate(map, space);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -4280,7 +4280,7 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
 
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
+  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4317,11 +4317,12 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
     if (size > kMaxObjectSizeInNewSpace) {
       // Allocate in large object space, retry space will be ignored.
       space = LO_SPACE;
-    } else if (size > MaxObjectSizeInPagedSpace()) {
+    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
       // Allocate in new space, retry in large object space.
       retry_space = LO_SPACE;
     }
-  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
+  } else if (space == OLD_DATA_SPACE &&
+             size > Page::kMaxNonCodeHeapObjectSize) {
     space = LO_SPACE;
   }
   Object* result;
@@ -4352,11 +4353,12 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
     if (size > kMaxObjectSizeInNewSpace) {
       // Allocate in large object space, retry space will be ignored.
       space = LO_SPACE;
-    } else if (size > MaxObjectSizeInPagedSpace()) {
+    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
       // Allocate in new space, retry in large object space.
       retry_space = LO_SPACE;
     }
-  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
+  } else if (space == OLD_DATA_SPACE &&
+             size > Page::kMaxNonCodeHeapObjectSize) {
     space = LO_SPACE;
   }
   Object* result;
@@ -4495,13 +4497,13 @@ MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
     // Too big for new space.
     space = LO_SPACE;
   } else if (space == OLD_POINTER_SPACE &&
-             size > MaxObjectSizeInPagedSpace()) {
+             size > Page::kMaxNonCodeHeapObjectSize) {
     // Too big for old pointer space.
     space = LO_SPACE;
   }
 
   AllocationSpace retry_space =
-      (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
+      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
 
   return AllocateRaw(size, space, retry_space);
 }
@@ -4628,13 +4630,13 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
     // Too big for new space.
     space = LO_SPACE;
   } else if (space == OLD_DATA_SPACE &&
-             size > MaxObjectSizeInPagedSpace()) {
+             size > Page::kMaxNonCodeHeapObjectSize) {
     // Too big for old data space.
     space = LO_SPACE;
   }
 
   AllocationSpace retry_space =
-      (size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;
+      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
 
   return AllocateRaw(size, space, retry_space);
 }
@@ -4763,7 +4765,7 @@ STRUCT_LIST(MAKE_CASE)
   }
   int size = map->instance_size();
   AllocationSpace space =
-      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
+      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
   Object* result;
   { MaybeObject* maybe_result = Allocate(map, space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -5210,7 +5212,7 @@ void Heap::ZapFromSpace() {
                           new_space_.FromSpaceEnd());
   while (it.has_next()) {
     NewSpacePage* page = it.next();
-    for (Address cursor = page->body(), limit = page->body_limit();
+    for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit;
          cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
@@ -5349,9 +5351,9 @@ void Heap::OldPointerSpaceCheckStoreBuffer() {
 
   while (pages.has_next()) {
     Page* page = pages.next();
-    Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+    Object** current = reinterpret_cast<Object**>(page->area_start());
 
-    Address end = page->ObjectAreaEnd();
+    Address end = page->area_end();
 
     Object*** store_buffer_position = store_buffer()->Start();
     Object*** store_buffer_top = store_buffer()->Top();
@@ -5377,9 +5379,9 @@ void Heap::MapSpaceCheckStoreBuffer() {
 
   while (pages.has_next()) {
     Page* page = pages.next();
-    Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+    Object** current = reinterpret_cast<Object**>(page->area_start());
 
-    Address end = page->ObjectAreaEnd();
+    Address end = page->area_end();
 
     Object*** store_buffer_position = store_buffer()->Start();
     Object*** store_buffer_top = store_buffer()->Top();
diff --git a/src/heap.h b/src/heap.h
index bb5c375..e2fee0c 100644
@@ -345,7 +345,7 @@ class PromotionQueue {
           NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
       ASSERT(!front_page->prev_page()->is_anchor());
       front_ =
-          reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit());
+          reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
     }
     *target = reinterpret_cast<HeapObject*>(*(--front_));
     *size = static_cast<int>(*(--front_));
@@ -484,9 +484,6 @@ class Heap {
   // all available bytes. Check MaxHeapObjectSize() instead.
   intptr_t Available();
 
-  // Returns the maximum object size in paged space.
-  inline int MaxObjectSizeInPagedSpace();
-
   // Returns of size of all objects residing in the heap.
   intptr_t SizeOfObjects();
 
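With guard pages in place, the allocatable area of a code page is smaller than that of a data page, so the single Heap::MaxObjectSizeInPagedSpace() limit deleted above no longer has a well-defined value. The heap.cc call sites are rewritten to dispatch on the target space instead; a condensed sketch of the new pattern (constant and accessor names are from this patch, the helper itself is hypothetical):

    // Hypothetical helper summarizing the dispatch this patch spreads
    // across Heap::CreateCode, Heap::CopyCode, Heap::AllocateByteArray, etc.
    static bool NeedsLargeObjectSpace(Heap* heap, int size, bool executable) {
      return executable
          ? size > heap->code_space()->AreaSize()    // shrunk by guard pages
          : size > Page::kMaxNonCodeHeapObjectSize;  // data pages: unchanged
    }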
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 9d83d90..7c59c04 100644
@@ -107,14 +107,14 @@ static void VerifyMarking(NewSpace* space) {
   Address end = space->top();
   NewSpacePageIterator it(space->bottom(), end);
   // The bottom position is at the start of its page. Allows us to use
-  // page->body() as start of range on all pages.
+  // page->area_start() as start of range on all pages.
   ASSERT_EQ(space->bottom(),
-            NewSpacePage::FromAddress(space->bottom())->body());
+            NewSpacePage::FromAddress(space->bottom())->area_start());
   while (it.has_next()) {
     NewSpacePage* page = it.next();
-    Address limit = it.has_next() ? page->body_limit() : end;
+    Address limit = it.has_next() ? page->area_end() : end;
     ASSERT(limit == end || !page->Contains(end));
-    VerifyMarking(page->body(), limit);
+    VerifyMarking(page->area_start(), limit);
   }
 }
 
@@ -124,7 +124,7 @@ static void VerifyMarking(PagedSpace* space) {
 
   while (it.has_next()) {
     Page* p = it.next();
-    VerifyMarking(p->ObjectAreaStart(), p->ObjectAreaEnd());
+    VerifyMarking(p->area_start(), p->area_end());
   }
 }
 
@@ -187,8 +187,8 @@ static void VerifyEvacuation(NewSpace* space) {
 
   while (it.has_next()) {
     NewSpacePage* page = it.next();
-    Address current = page->body();
-    Address limit = it.has_next() ? page->body_limit() : space->top();
+    Address current = page->area_start();
+    Address limit = it.has_next() ? page->area_end() : space->top();
     ASSERT(limit == space->top() || !page->Contains(space->top()));
     while (current < limit) {
       HeapObject* object = HeapObject::FromAddress(current);
@@ -205,7 +205,7 @@ static void VerifyEvacuation(PagedSpace* space) {
   while (it.has_next()) {
     Page* p = it.next();
     if (p->IsEvacuationCandidate()) continue;
-    VerifyEvacuation(p->ObjectAreaStart(), p->ObjectAreaEnd());
+    VerifyEvacuation(p->area_start(), p->area_end());
   }
 }
 
@@ -232,7 +232,7 @@ void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
 
 static void TraceFragmentation(PagedSpace* space) {
   int number_of_pages = space->CountTotalPages();
-  intptr_t reserved = (number_of_pages * Page::kObjectAreaSize);
+  intptr_t reserved = (number_of_pages * space->AreaSize());
   intptr_t free = reserved - space->SizeOfObjects();
   PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
          AllocationSpaceName(space->identity()),
@@ -453,13 +453,14 @@ static int FreeListFragmentation(PagedSpace* space, Page* p) {
 
   intptr_t ratio;
   intptr_t ratio_threshold;
+  intptr_t area_size = space->AreaSize();
   if (space->identity() == CODE_SPACE) {
     ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
-        Page::kObjectAreaSize;
+        area_size;
     ratio_threshold = 10;
   } else {
     ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
-        Page::kObjectAreaSize;
+        area_size;
     ratio_threshold = 15;
   }
 
@@ -469,20 +470,20 @@ static int FreeListFragmentation(PagedSpace* space, Page* p) {
            AllocationSpaceName(space->identity()),
            static_cast<int>(sizes.small_size_),
            static_cast<double>(sizes.small_size_ * 100) /
-           Page::kObjectAreaSize,
+           area_size,
            static_cast<int>(sizes.medium_size_),
            static_cast<double>(sizes.medium_size_ * 100) /
-           Page::kObjectAreaSize,
+           area_size,
            static_cast<int>(sizes.large_size_),
            static_cast<double>(sizes.large_size_ * 100) /
-           Page::kObjectAreaSize,
+           area_size,
            static_cast<int>(sizes.huge_size_),
            static_cast<double>(sizes.huge_size_ * 100) /
-           Page::kObjectAreaSize,
+           area_size,
            (ratio > ratio_threshold) ? "[fragmented]" : "");
   }
 
-  if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
+  if (FLAG_always_compact && sizes.Total() != area_size) {
     return 1;
   }
 
@@ -528,11 +529,11 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
 
   CompactionMode mode = COMPACT_FREE_LISTS;
 
-  intptr_t reserved = number_of_pages * Page::kObjectAreaSize;
+  intptr_t reserved = number_of_pages * space->AreaSize();
   intptr_t over_reserved = reserved - space->SizeOfObjects();
   static const intptr_t kFreenessThreshold = 50;
 
-  if (over_reserved >= 2 * Page::kObjectAreaSize &&
+  if (over_reserved >= 2 * space->AreaSize() &&
       reduce_memory_footprint_) {
     mode = REDUCE_MEMORY_FOOTPRINT;
 
@@ -575,18 +576,17 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
       intptr_t free_bytes = 0;
 
       if (!p->WasSwept()) {
-        free_bytes = (Page::kObjectAreaSize - p->LiveBytes());
+        free_bytes = (p->area_size() - p->LiveBytes());
       } else {
         FreeList::SizeStats sizes;
         space->CountFreeListItems(p, &sizes);
         free_bytes = sizes.Total();
       }
 
-      int free_pct = static_cast<int>(free_bytes * 100 / Page::kObjectAreaSize);
+      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
 
       if (free_pct >= kFreenessThreshold) {
-        estimated_release += Page::kObjectAreaSize +
-            (Page::kObjectAreaSize - free_bytes);
+        estimated_release += 2 * p->area_size() - free_bytes;
         fragmentation = free_pct;
       } else {
         fragmentation = 0;
@@ -597,7 +597,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
                reinterpret_cast<void*>(p),
                AllocationSpaceName(space->identity()),
                static_cast<int>(free_bytes),
-               static_cast<double>(free_bytes * 100) / Page::kObjectAreaSize,
+               static_cast<double>(free_bytes * 100) / p->area_size(),
                (fragmentation > 0) ? "[fragmented]" : "");
       }
     } else {
@@ -1977,12 +1977,15 @@ static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
   int last_cell_index =
       Bitmap::IndexToCell(
           Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+              p->AddressToMarkbitIndex(p->area_end())));
+
+  Address cell_base = p->area_start();
+  int cell_index = Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(cell_base)));
 
-  int cell_index = Page::kFirstUsedCell;
-  Address cell_base = p->ObjectAreaStart();
 
-  for (cell_index = Page::kFirstUsedCell;
+  for (;
        cell_index < last_cell_index;
        cell_index++, cell_base += 32 * kPointerSize) {
     ASSERT((unsigned)cell_index ==
@@ -2786,7 +2789,7 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
                                             int object_size) {
   Object* result;
 
-  if (object_size > heap()->MaxObjectSizeInPagedSpace()) {
+  if (object_size > Page::kMaxNonCodeHeapObjectSize) {
     MaybeObject* maybe_result =
         heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
     if (maybe_result->ToObject(&result)) {
@@ -2904,13 +2907,16 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
   int last_cell_index =
       Bitmap::IndexToCell(
           Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+              p->AddressToMarkbitIndex(p->area_end())));
+
+  Address cell_base = p->area_start();
+  int cell_index = Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(cell_base)));
 
-  int cell_index = Page::kFirstUsedCell;
-  Address cell_base = p->ObjectAreaStart();
   int offsets[16];
 
-  for (cell_index = Page::kFirstUsedCell;
+  for (;
        cell_index < last_cell_index;
        cell_index++, cell_base += 32 * kPointerSize) {
     ASSERT((unsigned)cell_index ==
@@ -3065,12 +3071,16 @@ static void SweepPrecisely(PagedSpace* space,
   int last_cell_index =
       Bitmap::IndexToCell(
           Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+              p->AddressToMarkbitIndex(p->area_end())));
+
+  Address free_start = p->area_start();
+  int cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(free_start)));
 
-  int cell_index = Page::kFirstUsedCell;
-  Address free_start = p->ObjectAreaStart();
   ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
-  Address object_address = p->ObjectAreaStart();
+  Address object_address = free_start;
   int offsets[16];
 
   SkipList* skip_list = p->skip_list();
@@ -3079,7 +3089,7 @@ static void SweepPrecisely(PagedSpace* space,
     skip_list->Clear();
   }
 
-  for (cell_index = Page::kFirstUsedCell;
+  for (;
        cell_index < last_cell_index;
        cell_index++, object_address += 32 * kPointerSize) {
     ASSERT((unsigned)cell_index ==
@@ -3116,8 +3126,8 @@ static void SweepPrecisely(PagedSpace* space,
     // Clear marking bits for current cell.
     cells[cell_index] = 0;
   }
-  if (free_start != p->ObjectAreaEnd()) {
-    space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
+  if (free_start != p->area_end()) {
+    space->Free(free_start, static_cast<int>(p->area_end() - free_start));
   }
   p->ResetLiveBytes();
 }
@@ -3412,7 +3422,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     Page* p = evacuation_candidates_[i];
     if (!p->IsEvacuationCandidate()) continue;
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
+    space->Free(p->area_start(), p->area_size());
     p->set_scan_on_scavenge(false);
     slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
     p->ClearEvacuationCandidate();
@@ -3715,23 +3725,27 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   int last_cell_index =
       Bitmap::IndexToCell(
           Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+              p->AddressToMarkbitIndex(p->area_end())));
+
+  int cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->area_start())));
 
-  int cell_index = Page::kFirstUsedCell;
   intptr_t freed_bytes = 0;
 
   // This is the start of the 32 word block that we are currently looking at.
-  Address block_address = p->ObjectAreaStart();
+  Address block_address = p->area_start();
 
   // Skip over all the dead objects at the start of the page and mark them free.
-  for (cell_index = Page::kFirstUsedCell;
+  for (;
        cell_index < last_cell_index;
        cell_index++, block_address += 32 * kPointerSize) {
     if (cells[cell_index] != 0) break;
   }
-  size_t size = block_address - p->ObjectAreaStart();
+  size_t size = block_address - p->area_start();
   if (cell_index == last_cell_index) {
-    freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
+    freed_bytes += static_cast<int>(space->Free(p->area_start(),
                                                 static_cast<int>(size)));
     ASSERT_EQ(0, p->LiveBytes());
     return freed_bytes;
@@ -3740,8 +3754,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   // first live object.
   Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
   // Free the first free space.
-  size = free_end - p->ObjectAreaStart();
-  freed_bytes += space->Free(p->ObjectAreaStart(),
+  size = free_end - p->area_start();
+  freed_bytes += space->Free(p->area_start(),
                              static_cast<int>(size));
   // The start of the current free area is represented in undigested form by
   // the address of the last 32-word section that contained a live object and
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index e6ddfed..26e79ae 100644
@@ -135,7 +135,7 @@ class StaticVisitorBase : public AllStatic {
            (base == kVisitJSObject));
     ASSERT(IsAligned(object_size, kPointerSize));
     ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
-    ASSERT(object_size < Page::kMaxHeapObjectSize);
+    ASSERT(object_size < Page::kMaxNonCodeHeapObjectSize);
 
     const VisitorId specialization = static_cast<VisitorId>(
         base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 79134da..2dc1ed8 100644
@@ -355,6 +355,17 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  if (NULL == VirtualAlloc(address,
+                           OS::CommitPageSize(),
+                           MEM_COMMIT,
+                           PAGE_READONLY | PAGE_GUARD)) {
+    return false;
+  }
+  return true;
+}
+
+
 class Thread::PlatformData : public Malloced {
  public:
   PlatformData() : thread_(kNoThread) {}
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index a5981b1..f6a426f 100644
@@ -411,6 +411,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
 void* VirtualMemory::ReserveRegion(size_t size) {
   void* result = mmap(OS::GetRandomMmapAddr(),
                       size,
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 1429748..0da1c08 100644
@@ -666,6 +666,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
 void* VirtualMemory::ReserveRegion(size_t size) {
   void* result = mmap(OS::GetRandomMmapAddr(),
                       size,
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index e367d21..89abf39 100644
@@ -429,6 +429,12 @@ bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
 bool VirtualMemory::CommitRegion(void* address,
                                  size_t size,
                                  bool is_executable) {
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index 094f950..918327a 100644
@@ -295,6 +295,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  UNIMPLEMENTED();
+  return false;
+}
+
+
 class Thread::PlatformData : public Malloced {
  public:
   PlatformData() {
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 7e27a01..0d69971 100644
@@ -458,6 +458,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
 void* VirtualMemory::ReserveRegion(size_t size) {
   void* result = mmap(GetRandomMmapAddr(),
                       size,
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 349da01..004a6ed 100644
@@ -401,6 +401,12 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
 void* VirtualMemory::ReserveRegion(size_t size) {
   void* result = mmap(OS::GetRandomMmapAddr(),
                       size,
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 6f77b3b..e9e9924 100644
@@ -1511,6 +1511,17 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  if (NULL == VirtualAlloc(address,
+                           OS::CommitPageSize(),
+                           MEM_COMMIT,
+                           PAGE_READONLY | PAGE_GUARD)) {
+    return false;
+  }
+  return true;
+}
+
+
 bool VirtualMemory::UncommitRegion(void* base, size_t size) {
   return VirtualFree(base, size, MEM_DECOMMIT) != 0;
 }
diff --git a/src/platform.h b/src/platform.h
index a0186d5..38e633a 100644
@@ -356,6 +356,9 @@ class VirtualMemory {
   // Uncommit real memory.  Returns whether the operation succeeded.
   bool Uncommit(void* address, size_t size);
 
+  // Creates a single guard page at the given address.
+  bool Guard(void* address);
+
   void Release() {
     ASSERT(IsReserved());
     // Notice: Order is important here. The VirtualMemory object might live
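All of the POSIX ports above implement VirtualMemory::Guard by delegating to OS::Guard for a single commit page, while the Windows-API ports (cygwin, win32) commit the page as PAGE_READONLY | PAGE_GUARD. A minimal stand-in for the POSIX primitive, assuming mprotect-style semantics (the real OS::Guard lives in each platform-*.cc and is not part of this patch):

    #include <sys/mman.h>

    // Revoke all access to one OS page so that any read, write, or execute
    // of it faults immediately. Assumption: this mirrors what OS::Guard
    // does on the POSIX ports.
    static bool GuardPage(void* address, size_t commit_page_size) {
      return mprotect(address, commit_page_size, PROT_NONE) == 0;
    }

Note that PAGE_GUARD raises a one-shot guard exception on first access, whereas PROT_NONE faults on every access; for catching stray writes into the chunk header or execution past the committed body, either behavior is enough.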
diff --git a/src/serialize.cc b/src/serialize.cc
index d9fc2b7..81a94dd 100644
@@ -1088,9 +1088,10 @@ Serializer::Serializer(SnapshotByteSink* sink)
       external_reference_encoder_(new ExternalReferenceEncoder),
       large_object_total_(0),
       root_index_wave_front_(0) {
+  isolate_ = Isolate::Current();
   // The serializer is meant to be used only to generate initial heap images
   // from a context in which there is only one isolate.
-  ASSERT(Isolate::Current()->IsDefaultIsolate());
+  ASSERT(isolate_->IsDefaultIsolate());
   for (int i = 0; i <= LAST_SPACE; i++) {
     fullness_[i] = 0;
   }
@@ -1642,8 +1643,8 @@ int Serializer::Allocate(int space, int size, bool* new_page) {
     // serialized address.
     CHECK(IsPowerOf2(Page::kPageSize));
     int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
-    CHECK(size <= Page::kObjectAreaSize);
-    if (used_in_this_page + size > Page::kObjectAreaSize) {
+    CHECK(size <= SpaceAreaSize(space));
+    if (used_in_this_page + size > SpaceAreaSize(space)) {
       *new_page = true;
       fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
     }
@@ -1654,4 +1655,13 @@ int Serializer::Allocate(int space, int size, bool* new_page) {
 }
 
 
+int Serializer::SpaceAreaSize(int space) {
+  if (space == CODE_SPACE) {
+    return isolate_->memory_allocator()->CodePageAreaSize();
+  } else {
+    return Page::kPageSize - Page::kObjectStartOffset;
+  }
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/serialize.h b/src/serialize.h
index 72eed5a..02bf58a 100644
@@ -556,6 +556,9 @@ class Serializer : public SerializerDeserializer {
     return external_reference_encoder_->Encode(addr);
   }
 
+  int SpaceAreaSize(int space);
+
+  Isolate* isolate_;
   // Keep track of the fullness of each space in order to generate
   // relative addresses for back references.  Large objects are
   // just numbered sequentially since relative addresses make no
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index d0cddeb..3709009 100644
@@ -166,10 +166,8 @@ Page* Page::Initialize(Heap* heap,
   Page* page = reinterpret_cast<Page*>(chunk);
   ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
   ASSERT(chunk->owner() == owner);
-  owner->IncreaseCapacity(Page::kObjectAreaSize);
-  owner->Free(page->ObjectAreaStart(),
-              static_cast<int>(page->ObjectAreaEnd() -
-                               page->ObjectAreaStart()));
+  owner->IncreaseCapacity(page->area_size());
+  owner->Free(page->area_start(), page->area_size());
 
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
 
diff --git a/src/spaces.cc b/src/spaces.cc
index 05c5876..de738fb 100644
@@ -75,8 +75,8 @@ HeapObjectIterator::HeapObjectIterator(Page* page,
          owner == HEAP->cell_space() ||
          owner == HEAP->code_space());
   Initialize(reinterpret_cast<PagedSpace*>(owner),
-             page->ObjectAreaStart(),
-             page->ObjectAreaEnd(),
+             page->area_start(),
+             page->area_end(),
              kOnePageOnly,
              size_func);
   ASSERT(page->WasSweptPrecisely());
@@ -108,12 +108,12 @@ bool HeapObjectIterator::AdvanceToNextPage() {
     cur_page = space_->anchor();
   } else {
     cur_page = Page::FromAddress(cur_addr_ - 1);
-    ASSERT(cur_addr_ == cur_page->ObjectAreaEnd());
+    ASSERT(cur_addr_ == cur_page->area_end());
   }
   cur_page = cur_page->next_page();
   if (cur_page == space_->anchor()) return false;
-  cur_addr_ = cur_page->ObjectAreaStart();
-  cur_end_ = cur_page->ObjectAreaEnd();
+  cur_addr_ = cur_page->area_start();
+  cur_end_ = cur_page->area_end();
   ASSERT(cur_page->WasSweptPrecisely());
   return true;
 }
@@ -227,7 +227,9 @@ Address CodeRange::AllocateRawMemory(const size_t requested,
   }
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!code_range_->Commit(current.start, *allocated, true)) {
+  if (!MemoryAllocator::CommitCodePage(code_range_,
+                                       current.start,
+                                       *allocated)) {
     *allocated = 0;
     return NULL;
   }
@@ -358,11 +360,17 @@ Address MemoryAllocator::AllocateAlignedMemory(size_t size,
   VirtualMemory reservation;
   Address base = ReserveAlignedMemory(size, alignment, &reservation);
   if (base == NULL) return NULL;
-  if (!reservation.Commit(base,
-                          size,
-                          executable == EXECUTABLE)) {
-    return NULL;
+
+  if (executable == EXECUTABLE) {
+    CommitCodePage(&reservation, base, size);
+  } else {
+    if (!reservation.Commit(base,
+                            size,
+                            executable == EXECUTABLE)) {
+      return NULL;
+    }
   }
+
   controller->TakeControl(&reservation);
   return base;
 }
@@ -378,9 +386,14 @@ void Page::InitializeAsAnchor(PagedSpace* owner) {
 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
                                        Address start,
                                        SemiSpace* semi_space) {
+  Address area_start = start + NewSpacePage::kObjectStartOffset;
+  Address area_end = start + Page::kPageSize;
+
   MemoryChunk* chunk = MemoryChunk::Initialize(heap,
                                                start,
                                                Page::kPageSize,
+                                               area_start,
+                                               area_end,
                                                NOT_EXECUTABLE,
                                                semi_space);
   chunk->set_next_chunk(NULL);
@@ -410,6 +423,8 @@ void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
 MemoryChunk* MemoryChunk::Initialize(Heap* heap,
                                      Address base,
                                      size_t size,
+                                     Address area_start,
+                                     Address area_end,
                                      Executability executable,
                                      Space* owner) {
   MemoryChunk* chunk = FromAddress(base);
@@ -418,6 +433,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
 
   chunk->heap_ = heap;
   chunk->size_ = size;
+  chunk->area_start_ = area_start;
+  chunk->area_end_ = area_end;
   chunk->flags_ = 0;
   chunk->set_owner(owner);
   chunk->InitializeReservedMemory();
@@ -431,9 +448,13 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
   ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
   ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
 
-  if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE);
+  if (executable == EXECUTABLE) {
+    chunk->SetFlag(IS_EXECUTABLE);
+  }
 
-  if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA);
+  if (owner == heap->old_data_space()) {
+    chunk->SetFlag(CONTAINS_ONLY_DATA);
+  }
 
   return chunk;
 }
@@ -462,11 +483,16 @@ void MemoryChunk::Unlink() {
 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
                                             Executability executable,
                                             Space* owner) {
-  size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+  size_t chunk_size;
   Heap* heap = isolate_->heap();
   Address base = NULL;
   VirtualMemory reservation;
+  Address area_start = NULL;
+  Address area_end = NULL;
   if (executable == EXECUTABLE) {
+    chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
+                         OS::CommitPageSize()) + CodePageGuardSize();
+
     // Check executable memory limit.
     if (size_executable_ + chunk_size > capacity_executable_) {
       LOG(isolate_,
@@ -494,18 +520,30 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
       // Update executable memory size.
       size_executable_ += reservation.size();
     }
+
+#ifdef DEBUG
+    ZapBlock(base, CodePageGuardStartOffset());
+    ZapBlock(base + CodePageAreaStartOffset(), body_size);
+#endif
+    area_start = base + CodePageAreaStartOffset();
+    area_end = area_start + body_size;
   } else {
+    chunk_size = MemoryChunk::kObjectStartOffset + body_size;
     base = AllocateAlignedMemory(chunk_size,
                                  MemoryChunk::kAlignment,
                                  executable,
                                  &reservation);
 
     if (base == NULL) return NULL;
-  }
 
 #ifdef DEBUG
-  ZapBlock(base, chunk_size);
+    ZapBlock(base, chunk_size);
 #endif
+
+    area_start = base + Page::kObjectStartOffset;
+    area_end = base + chunk_size;
+  }
+
   isolate_->counters()->memory_allocated()->
       Increment(static_cast<int>(chunk_size));
 
@@ -518,6 +556,8 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
   MemoryChunk* result = MemoryChunk::Initialize(heap,
                                                 base,
                                                 chunk_size,
+                                                area_start,
+                                                area_end,
                                                 executable,
                                                 owner);
   result->set_reserved_memory(&reservation);
@@ -527,7 +567,9 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
 
 Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
                                     Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
+  MemoryChunk* chunk = AllocateChunk(owner->AreaSize(),
+                                     executable,
+                                     owner);
 
   if (chunk == NULL) return NULL;
 
@@ -648,6 +690,65 @@ void MemoryAllocator::ReportStatistics() {
 }
 #endif
 
+
+int MemoryAllocator::CodePageGuardStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
+}
+
+
+int MemoryAllocator::CodePageGuardSize() {
+  return OS::CommitPageSize();
+}
+
+
+int MemoryAllocator::CodePageAreaStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return CodePageGuardStartOffset() + CodePageGuardSize();
+}
+
+
+int MemoryAllocator::CodePageAreaEndOffset() {
+  // We are guarding code pages: the last OS page will be protected as
+  // non-writable.
+  return Page::kPageSize - OS::CommitPageSize();
+}
+
+
+bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
+                                     Address start,
+                                     size_t size) {
+  // Commit page header (not executable).
+  if (!vm->Commit(start,
+                  CodePageGuardStartOffset(),
+                  false)) {
+    return false;
+  }
+
+  // Create guard page after the header.
+  if (!vm->Guard(start + CodePageGuardStartOffset())) {
+    return false;
+  }
+
+  // Commit page body (executable).
+  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
+  if (!vm->Commit(start + CodePageAreaStartOffset(),
+                  area_size,
+                  true)) {
+    return false;
+  }
+
+  // Create guard page after the allocatable area.
+  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
+    return false;
+  }
+
+  return true;
+}
+
+
 // -----------------------------------------------------------------------------
 // MemoryChunk implementation
 
@@ -671,8 +772,14 @@ PagedSpace::PagedSpace(Heap* heap,
       was_swept_conservatively_(false),
       first_unswept_page_(Page::FromAddress(NULL)),
       unswept_free_bytes_(0) {
+  if (id == CODE_SPACE) {
+    area_size_ = heap->isolate()->memory_allocator()->
+        CodePageAreaSize();
+  } else {
+    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
+  }
   max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
-                  * Page::kObjectAreaSize;
+      * AreaSize();
   accounting_stats_.Clear();
 
   allocation_info_.top = NULL;
@@ -722,8 +829,8 @@ MaybeObject* PagedSpace::FindObject(Address addr) {
 }
 
 bool PagedSpace::CanExpand() {
-  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
-  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
+  ASSERT(max_capacity_ % AreaSize() == 0);
+  ASSERT(Capacity() % AreaSize() == 0);
 
   if (Capacity() == max_capacity_) return false;
 
@@ -763,6 +870,7 @@ int PagedSpace::CountTotalPages() {
 
 void PagedSpace::ReleasePage(Page* page) {
   ASSERT(page->LiveBytes() == 0);
+  ASSERT(AreaSize() == page->area_size());
 
   // Adjust list of unswept pages if the page is the head of the list.
   if (first_unswept_page_ == page) {
@@ -775,7 +883,7 @@ void PagedSpace::ReleasePage(Page* page) {
   if (page->WasSwept()) {
     intptr_t size = free_list_.EvictFreeListItems(page);
     accounting_stats_.AllocateBytes(size);
-    ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size));
+    ASSERT_EQ(AreaSize(), static_cast<int>(size));
   } else {
     DecreaseUnsweptFreeBytes(page);
   }
@@ -792,8 +900,8 @@ void PagedSpace::ReleasePage(Page* page) {
   }
 
   ASSERT(Capacity() > 0);
-  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
-  accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
+  ASSERT(Capacity() % AreaSize() == 0);
+  accounting_stats_.ShrinkSpace(AreaSize());
 }
 
 
@@ -804,9 +912,9 @@ void PagedSpace::ReleaseAllUnusedPages() {
     if (!page->WasSwept()) {
       if (page->LiveBytes() == 0) ReleasePage(page);
     } else {
-      HeapObject* obj = HeapObject::FromAddress(page->body());
+      HeapObject* obj = HeapObject::FromAddress(page->area_start());
       if (obj->IsFreeSpace() &&
-          FreeSpace::cast(obj)->size() == Page::kObjectAreaSize) {
+          FreeSpace::cast(obj)->size() == AreaSize()) {
         // Sometimes we allocate memory from free list but don't
         // immediately initialize it (e.g. see PagedSpace::ReserveSpace
         // called from Heap::ReserveSpace that can cause GC before
@@ -817,7 +925,7 @@ void PagedSpace::ReleaseAllUnusedPages() {
         // by free list items.
         FreeList::SizeStats sizes;
         free_list_.CountFreeListItems(page, &sizes);
-        if (sizes.Total() == Page::kObjectAreaSize) {
+        if (sizes.Total() == AreaSize()) {
           ReleasePage(page);
         }
       }
@@ -848,8 +956,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
     }
     ASSERT(page->WasSweptPrecisely());
     HeapObjectIterator it(page, NULL);
-    Address end_of_previous_object = page->ObjectAreaStart();
-    Address top = page->ObjectAreaEnd();
+    Address end_of_previous_object = page->area_start();
+    Address top = page->area_end();
     int black_size = 0;
     for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
       ASSERT(end_of_previous_object <= object->address());
@@ -1061,7 +1169,7 @@ bool NewSpace::AddFreshPage() {
   }
 
   // Clear remainder of current page.
-  Address limit = NewSpacePage::FromLimit(top)->body_limit();
+  Address limit = NewSpacePage::FromLimit(top)->area_end();
   if (heap()->gc_state() == Heap::SCAVENGE) {
     heap()->promotion_queue()->SetNewLimit(limit);
     heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
@@ -1111,7 +1219,7 @@ void NewSpace::Verify() {
 
   // There should be objects packed in from the low address up to the
   // allocation pointer.
-  Address current = to_space_.first_page()->body();
+  Address current = to_space_.first_page()->area_start();
   CHECK_EQ(current, to_space_.space_start());
 
   while (current != top()) {
@@ -1146,7 +1254,7 @@ void NewSpace::Verify() {
       NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
       // Next page should be valid.
       CHECK(!page->is_anchor());
-      current = page->body();
+      current = page->area_start();
     }
   }
 
@@ -1932,7 +2040,7 @@ static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
 
 void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
   sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
-  if (sizes->huge_size_ < Page::kObjectAreaSize) {
+  if (sizes->huge_size_ < p->area_size()) {
     sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
     sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
     sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
@@ -1962,7 +2070,7 @@ static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
 intptr_t FreeList::EvictFreeListItems(Page* p) {
   intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
 
-  if (sum < Page::kObjectAreaSize) {
+  if (sum < p->area_size()) {
     sum += EvictFreeListItemsInList(&small_list_, p) +
         EvictFreeListItemsInList(&medium_list_, p) +
         EvictFreeListItemsInList(&large_list_, p);
@@ -2084,7 +2192,7 @@ void PagedSpace::PrepareForMarkCompact() {
 
 
 bool PagedSpace::ReserveSpace(int size_in_bytes) {
-  ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize);
+  ASSERT(size_in_bytes <= AreaSize());
   ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
   Address current_top = allocation_info_.top;
   Address new_top = current_top + size_in_bytes;
@@ -2464,7 +2572,7 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
   LargePage* page = heap()->isolate()->memory_allocator()->
       AllocateLargePage(object_size, executable, this);
   if (page == NULL) return Failure::RetryAfterGC(identity());
-  ASSERT(page->body_size() >= object_size);
+  ASSERT(page->area_size() >= object_size);
 
   size_ += static_cast<int>(page->size());
   objects_size_ += object_size;
@@ -2580,7 +2688,7 @@ void LargeObjectSpace::Verify() {
     // object area start.
     HeapObject* object = chunk->GetObject();
     Page* page = Page::FromAddress(object->address());
-    ASSERT(object->address() == page->ObjectAreaStart());
+    ASSERT(object->address() == page->area_start());
 
     // The first word should be a map, and we expect all map pointers to be
     // in map space.
diff --git a/src/spaces.h b/src/spaces.h
index 0ff62b5..599e9dd 100644
@@ -103,7 +103,7 @@ class Isolate;
   ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
 
 #define ASSERT_OBJECT_SIZE(size)                                               \
-  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
+  ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
 
 #define ASSERT_PAGE_OFFSET(offset)                                             \
   ASSERT((Page::kObjectStartOffset <= offset)                                  \
@@ -361,21 +361,15 @@ class MemoryChunk {
     store_buffer_counter_ = counter;
   }
 
-  Address body() { return address() + kObjectStartOffset; }
-
-  Address body_limit() { return address() + size(); }
-
-  int body_size() { return static_cast<int>(size() - kObjectStartOffset); }
-
   bool Contains(Address addr) {
-    return addr >= body() && addr < address() + size();
+    return addr >= area_start() && addr < area_end();
   }
 
   // Checks whether addr can be a limit of addresses in this page.
   // It's a limit if it's in the page, or if it's just after the
   // last byte of the page.
   bool ContainsLimit(Address addr) {
-    return addr >= body() && addr <= address() + size();
+    return addr >= area_start() && addr <= area_end();
   }
 
   enum MemoryChunkFlags {
@@ -487,8 +481,9 @@ class MemoryChunk {
   static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
 
   static const intptr_t kLiveBytesOffset =
-      kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
-      kPointerSize + kPointerSize + kPointerSize + kIntSize;
+     kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
+     kPointerSize + kPointerSize +
+     kPointerSize + kPointerSize + kPointerSize + kIntSize;
 
   static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
 
@@ -594,12 +589,22 @@ class MemoryChunk {
     ClearFlag(EVACUATION_CANDIDATE);
   }
 
+  Address area_start() { return area_start_; }
+  Address area_end() { return area_end_; }
+  int area_size() {
+    return static_cast<int>(area_end() - area_start());
+  }
 
  protected:
   MemoryChunk* next_chunk_;
   MemoryChunk* prev_chunk_;
   size_t size_;
   intptr_t flags_;
+
+  // Start and end of allocatable memory on this chunk.
+  Address area_start_;
+  Address area_end_;
+
   // If the chunk needs to remember its memory reservation, it is stored here.
   VirtualMemory reservation_;
   // The identity of the owning space.  This is tagged as a failure pointer, but
@@ -618,6 +623,8 @@ class MemoryChunk {
   static MemoryChunk* Initialize(Heap* heap,
                                  Address base,
                                  size_t size,
+                                 Address area_start,
+                                 Address area_end,
                                  Executability executable,
                                  Space* owner);
 
@@ -657,12 +664,6 @@ class Page : public MemoryChunk {
   inline void set_next_page(Page* page);
   inline void set_prev_page(Page* page);
 
-  // Returns the start address of the object area in this page.
-  Address ObjectAreaStart() { return address() + kObjectStartOffset; }
-
-  // Returns the end address (exclusive) of the object area in this page.
-  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
-
   // Checks whether an address is page aligned.
   static bool IsAlignedToPageSize(Address a) {
     return 0 == (OffsetFrom(a) & kPageAlignmentMask);
@@ -685,21 +686,14 @@ class Page : public MemoryChunk {
   // Page size in bytes.  This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;
 
-  // Page size mask.
-  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
-
   // Object area size in bytes.
-  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
+  static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
 
   // Maximum object size that fits in a page.
-  static const int kMaxHeapObjectSize = kObjectAreaSize;
-
-  static const int kFirstUsedCell =
-    (kObjectStartOffset/kPointerSize) >> Bitmap::kBitsPerCellLog2;
+  static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
 
-  static const int kLastUsedCell =
-    ((kPageSize - kPointerSize)/kPointerSize) >>
-      Bitmap::kBitsPerCellLog2;
+  // Page size mask.
+  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
 
   inline void ClearGCFields();
 
@@ -734,7 +728,7 @@ STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
 class LargePage : public MemoryChunk {
  public:
   HeapObject* GetObject() {
-    return HeapObject::FromAddress(body());
+    return HeapObject::FromAddress(area_start());
   }
 
   inline LargePage* next_page() const {
@@ -975,7 +969,7 @@ class MemoryAllocator {
 
   // Returns maximum available bytes that the old space can have.
   intptr_t MaxAvailable() {
-    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
+    return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
   }
 
 #ifdef DEBUG
@@ -1028,6 +1022,20 @@ class MemoryAllocator {
   bool MemoryAllocationCallbackRegistered(
       MemoryAllocationCallback callback);
 
+  static int CodePageGuardStartOffset();
+
+  static int CodePageGuardSize();
+
+  static int CodePageAreaStartOffset();
+
+  static int CodePageAreaEndOffset();
+
+  static int CodePageAreaSize() {
+    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
+  }
+
+  static bool CommitCodePage(VirtualMemory* vm, Address start, size_t size);
+
  private:
   Isolate* isolate_;
 
@@ -1380,7 +1388,7 @@ class FreeList BASE_EMBEDDED {
  private:
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
-  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
+  static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
 
   FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
 
@@ -1572,12 +1580,12 @@ class PagedSpace : public Space {
 
   void IncreaseUnsweptFreeBytes(Page* p) {
     ASSERT(ShouldBeSweptLazily(p));
-    unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes());
+    unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
   }
 
   void DecreaseUnsweptFreeBytes(Page* p) {
     ASSERT(ShouldBeSweptLazily(p));
-    unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes());
+    unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
   }
 
   bool AdvanceSweeper(intptr_t bytes_to_sweep);
@@ -1600,7 +1608,14 @@ class PagedSpace : public Space {
   // Returns the total number of pages in this space.
   int CountTotalPages();
 
+  // Returns the size of the allocatable area on a page in this space.
+  inline int AreaSize() {
+    return area_size_;
+  }
+
  protected:
+  int area_size_;
+
   // Maximum capacity of this space.
   intptr_t max_capacity_;
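Since area_size_ now varies per space, the PagedSpace constructor presumably derives it from the space's executability. A hedged sketch, not part of this hunk:

    // Sketch: executable spaces get the reduced code-page area; all other
    // spaces keep the full non-code object area.
    area_size_ = (executable == EXECUTABLE)
        ? MemoryAllocator::CodePageAreaSize()
        : Page::kNonCodeObjectAreaSize;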
 
@@ -1702,6 +1717,8 @@ class NewSpacePage : public MemoryChunk {
     (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
     (1 << MemoryChunk::SCAN_ON_SCAVENGE);
 
+  static const int kAreaSize = Page::kNonCodeObjectAreaSize;
+
   inline NewSpacePage* next_page() const {
     return static_cast<NewSpacePage*>(next_chunk());
   }
@@ -1814,22 +1831,22 @@ class SemiSpace : public Space {
   // Returns the start address of the first page of the space.
   Address space_start() {
     ASSERT(anchor_.next_page() != &anchor_);
-    return anchor_.next_page()->body();
+    return anchor_.next_page()->area_start();
   }
 
   // Returns the start address of the current page of the space.
   Address page_low() {
-    return current_page_->body();
+    return current_page_->area_start();
   }
 
   // Returns one past the end address of the space.
   Address space_end() {
-    return anchor_.prev_page()->body_limit();
+    return anchor_.prev_page()->area_end();
   }
 
   // Returns one past the end address of the current page of the space.
   Address page_high() {
-    return current_page_->body_limit();
+    return current_page_->area_end();
   }
 
   bool AdvancePage() {
@@ -1965,7 +1982,7 @@ class SemiSpaceIterator : public ObjectIterator {
       NewSpacePage* page = NewSpacePage::FromLimit(current_);
       page = page->next_page();
       ASSERT(!page->is_anchor());
-      current_ = page->body();
+      current_ = page->area_start();
       if (current_ == limit_) return NULL;
     }
 
@@ -2073,7 +2090,7 @@ class NewSpace : public Space {
 
   // Return the allocated bytes in the active semispace.
   virtual intptr_t Size() {
-    return pages_used_ * Page::kObjectAreaSize +
+    return pages_used_ * NewSpacePage::kAreaSize +
         static_cast<int>(top() - to_space_.page_low());
   }
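Size() charges each filled page at the fixed per-page area and adds the partially filled current page; an illustration with hypothetical numbers:

    // Hypothetical: with NewSpacePage::kAreaSize == 16 KB, pages_used_ == 2,
    // and top() sitting 1 KB past page_low(), Size() reports 33 KB.
    // EffectiveCapacity() below applies the same per-page constant.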
 
@@ -2085,7 +2102,7 @@ class NewSpace : public Space {
   // Return the current capacity of a semispace.
   intptr_t EffectiveCapacity() {
     SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
-    return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize;
+    return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
   }
 
   // Return the current capacity of a semispace.
@@ -2302,7 +2319,7 @@ class OldSpace : public PagedSpace {
 
   // The limit of allocation for a page in this space.
   virtual Address PageAllocationLimit(Page* page) {
-    return page->ObjectAreaEnd();
+    return page->area_end();
   }
 
  public:
@@ -2331,12 +2348,12 @@ class FixedSpace : public PagedSpace {
       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
         object_size_in_bytes_(object_size_in_bytes),
         name_(name) {
-    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
+    page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
   }
 
   // The limit of allocation for a page in this space.
   virtual Address PageAllocationLimit(Page* page) {
-    return page->ObjectAreaEnd() - page_extra_;
+    return page->area_end() - page_extra_;
   }
 
   int object_size_in_bytes() { return object_size_in_bytes_; }
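page_extra_ is the unusable tail left once a page is packed with fixed-size objects; a worked example with made-up numbers:

    // Hypothetical: if Page::kNonCodeObjectAreaSize were 65536 and
    // object_size_in_bytes == 88, then page_extra_ == 65536 % 88 == 64,
    // so PageAllocationLimit() stops 64 bytes short of page->area_end().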
@@ -2387,7 +2404,7 @@ class MapSpace : public FixedSpace {
 #endif
 
  private:
-  static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
+  static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
 
   // Do map space compaction if there is a page gap.
   int CompactionThreshold() {
index 9022b3b..3852155 100644 (file)
@@ -453,14 +453,14 @@ void StoreBuffer::FindPointersToNewSpaceInRegion(
 
 // Compute start address of the first map following the given addr.
 static inline Address MapStartAlign(Address addr) {
-  Address page = Page::FromAddress(addr)->ObjectAreaStart();
+  Address page = Page::FromAddress(addr)->area_start();
   return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
 }
 
 
 // Compute end address of the first map preceding the given addr.
 static inline Address MapEndAlign(Address addr) {
-  Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
+  Address page = Page::FromAllocationTop(addr)->area_start();
   return page + ((addr - page) / Map::kSize * Map::kSize);
 }
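Both helpers now align relative to area_start() rather than the raw page address, matching the layout where objects begin at the area start. A small worked example with hypothetical offsets:

    // Hypothetical: let P == area_start() and Map::kSize == 88.
    // For addr == P + 96 (inside the second map on the page):
    //   MapStartAlign(addr) == P + 176  (first map boundary at or after addr)
    //   MapEndAlign(addr)   == P + 88   (last map boundary at or before addr)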
 
@@ -523,8 +523,8 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
     Page* page,
     RegionCallback region_callback,
     ObjectSlotCallback slot_callback) {
-  Address visitable_start = page->ObjectAreaStart();
-  Address end_of_page = page->ObjectAreaEnd();
+  Address visitable_start = page->area_start();
+  Address end_of_page = page->area_end();
 
   Address visitable_end = visitable_start;
 
index c654dfa..769fe7b 100644 (file)
@@ -88,7 +88,7 @@ static MaybeObject* AllocateAfterFailures() {
   static const int kLargeObjectSpaceFillerLength = 300000;
   static const int kLargeObjectSpaceFillerSize = FixedArray::SizeFor(
       kLargeObjectSpaceFillerLength);
-  ASSERT(kLargeObjectSpaceFillerSize > heap->MaxObjectSizeInPagedSpace());
+  ASSERT(kLargeObjectSpaceFillerSize > heap->old_pointer_space()->AreaSize());
   while (heap->OldGenerationSpaceAvailable() > kLargeObjectSpaceFillerSize) {
     CHECK(!heap->AllocateFixedArray(kLargeObjectSpaceFillerLength, TENURED)->
           IsFailure());
@@ -214,11 +214,13 @@ TEST(CodeRange) {
   while (total_allocated < 5 * code_range_size) {
     if (current_allocated < code_range_size / 10) {
       // Allocate a block.
-      // Geometrically distributed sizes, greater than Page::kMaxHeapObjectSize.
+      // Geometrically distributed sizes, greater than
+      // Page::kMaxNonCodeHeapObjectSize (which is greater than the code
+      // page area).
       // TODO(gc): instead of using 3, use some constant based on
       // code_range_size and kMaxHeapObjectSize.
-      size_t requested = (Page::kMaxHeapObjectSize << (Pseudorandom() % 3)) +
-           Pseudorandom() % 5000 + 1;
+      size_t requested =
+          (Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
+          Pseudorandom() % 5000 + 1;
       size_t allocated = 0;
       Address base = code_range->AllocateRawMemory(requested, &allocated);
       CHECK(base != NULL);
index 8be409e..10bacf5 100644 (file)
@@ -820,7 +820,7 @@ TEST(Iteration) {
       FACTORY->NewStringFromAscii(CStrVector("abcdefghij"), TENURED);
 
   // Allocate a large string (for large object space).
-  int large_size = HEAP->MaxObjectSizeInPagedSpace() + 1;
+  int large_size = Page::kMaxNonCodeHeapObjectSize + 1;
   char* str = new char[large_size];
   for (int i = 0; i < large_size - 1; ++i) str[i] = 'a';
   str[large_size - 1] = '\0';
index 2535f10..1d8691b 100644 (file)
@@ -94,7 +94,7 @@ TEST(Promotion) {
 
   // Allocate a fixed array in the new space.
   int array_size =
-      (HEAP->MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
+      (Page::kMaxNonCodeHeapObjectSize - FixedArray::kHeaderSize) /
       (kPointerSize * 4);
   Object* obj = HEAP->AllocateFixedArray(array_size)->ToObjectChecked();
 
@@ -125,7 +125,7 @@ TEST(NoPromotion) {
 
   // Allocate a big Fixed array in the new space.
   int max_size =
-      Min(HEAP->MaxObjectSizeInPagedSpace(), HEAP->MaxObjectSizeInNewSpace());
+      Min(Page::kMaxNonCodeHeapObjectSize, HEAP->MaxObjectSizeInNewSpace());
 
   int length = (max_size - FixedArray::kHeaderSize) / (2*kPointerSize);
   Object* obj = i::Isolate::Current()->heap()->AllocateFixedArray(length)->
index b5c1a09..e426e7b 100644 (file)
@@ -558,7 +558,8 @@ DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
 TEST(LinearAllocation) {
   v8::V8::Initialize();
   int new_space_max = 512 * KB;
-  int paged_space_max = Page::kMaxHeapObjectSize;
+  int paged_space_max = Page::kMaxNonCodeHeapObjectSize;
+  int code_space_max = HEAP->code_space()->AreaSize();
 
   for (int size = 1000; size < 5 * MB; size += size >> 1) {
     size &= ~8;  // Round.
@@ -568,7 +569,7 @@ TEST(LinearAllocation) {
         new_space_size,
         paged_space_size,  // Old pointer space.
         paged_space_size,  // Old data space.
-        HEAP->code_space()->RoundSizeDownToObjectAlignment(paged_space_size),
+        HEAP->code_space()->RoundSizeDownToObjectAlignment(code_space_max),
         HEAP->map_space()->RoundSizeDownToObjectAlignment(paged_space_size),
         HEAP->cell_space()->RoundSizeDownToObjectAlignment(paged_space_size),
         size);             // Large object space.
@@ -604,7 +605,7 @@ TEST(LinearAllocation) {
       int old_page_fullness = i % Page::kPageSize;
       int page_fullness = (i + kSmallFixedArraySize) % Page::kPageSize;
       if (page_fullness < old_page_fullness ||
-          page_fullness > Page::kObjectAreaSize) {
+          page_fullness > HEAP->old_pointer_space()->AreaSize()) {
         i = RoundUp(i, Page::kPageSize);
         pointer_last = NULL;
       }
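The modulo arithmetic above simulates whether an allocation still fits on the current page; a brief annotated restatement of the check:

    // If the counter wrapped (page_fullness < old_page_fullness) or ran past
    // the usable body of the page (page_fullness > AreaSize()), the real
    // allocator would have opened a fresh page, so the test rounds i up to
    // the next page boundary and drops its back-pointer. The same pattern
    // repeats below for the old data and map spaces, each with its own
    // AreaSize().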
@@ -624,7 +625,7 @@ TEST(LinearAllocation) {
       int old_page_fullness = i % Page::kPageSize;
       int page_fullness = (i + kSmallStringSize) % Page::kPageSize;
       if (page_fullness < old_page_fullness ||
-          page_fullness > Page::kObjectAreaSize) {
+          page_fullness > HEAP->old_data_space()->AreaSize()) {
         i = RoundUp(i, Page::kPageSize);
         data_last = NULL;
       }
@@ -642,7 +643,7 @@ TEST(LinearAllocation) {
       int old_page_fullness = i % Page::kPageSize;
       int page_fullness = (i + kMapSize) % Page::kPageSize;
       if (page_fullness < old_page_fullness ||
-          page_fullness > Page::kObjectAreaSize) {
+          page_fullness > HEAP->map_space()->AreaSize()) {
         i = RoundUp(i, Page::kPageSize);
         map_last = NULL;
       }
@@ -653,7 +654,7 @@ TEST(LinearAllocation) {
       map_last = obj;
     }
 
-    if (size > Page::kObjectAreaSize) {
+    if (size > Page::kMaxNonCodeHeapObjectSize) {
       // Support for reserving space in large object space is not there yet,
       // but using an always-allocate scope is fine for now.
       AlwaysAllocateScope always;
index 6e495bc..92de2a6 100644 (file)
@@ -191,9 +191,10 @@ TEST(NewSpace) {
                         HEAP->ReservedSemiSpaceSize()));
   CHECK(new_space.HasBeenSetUp());
 
-  while (new_space.Available() >= Page::kMaxHeapObjectSize) {
+  while (new_space.Available() >= Page::kMaxNonCodeHeapObjectSize) {
     Object* obj =
-        new_space.AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
+        new_space.AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->
+        ToObjectUnchecked();
     CHECK(new_space.Contains(HeapObject::cast(obj)));
   }
 
@@ -223,7 +224,7 @@ TEST(OldSpace) {
   CHECK(s->SetUp());
 
   while (s->Available() > 0) {
-    s->AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
+    s->AllocateRaw(Page::kMaxNonCodeHeapObjectSize)->ToObjectUnchecked();
   }
 
   s->TearDown();