Reduce boot-up memory use of V8.
author    erik.corry@gmail.com <erik.corry@gmail.com@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
          Tue, 17 Jan 2012 11:38:25 +0000 (11:38 +0000)
committer erik.corry@gmail.com <erik.corry@gmail.com@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
          Tue, 17 Jan 2012 11:38:25 +0000 (11:38 +0000)
Reduce signal sender thread stack size to 32k.
Commit partial old-space pages to reduce minimum memory use.
This is a rebase of http://codereview.chromium.org/9017009/
Review URL: http://codereview.chromium.org/9179012

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10413 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

24 files changed:
src/cpu-profiler.cc
src/d8.cc
src/deoptimizer.cc
src/heap-inl.h
src/heap.cc
src/incremental-marking.cc
src/mark-compact.cc
src/platform-freebsd.cc
src/platform-linux.cc
src/platform-macos.cc
src/platform-openbsd.cc
src/platform-solaris.cc
src/platform-win32.cc
src/platform.h
src/serialize.cc
src/snapshot.h
src/spaces-inl.h
src/spaces.cc
src/spaces.h
src/store-buffer.cc
src/utils.h
test/cctest/test-heap.cc
test/cctest/test-mark-compact.cc
test/cctest/test-spaces.cc

index 2bd62ad..fbdb035 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -42,10 +42,11 @@ namespace internal {
 static const int kEventsBufferSize = 256*KB;
 static const int kTickSamplesBufferChunkSize = 64*KB;
 static const int kTickSamplesBufferChunksCount = 16;
+static const int kProfilerStackSize = 32 * KB;
 
 
 ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
-    : Thread("v8:ProfEvntProc"),
+    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
       generator_(generator),
       running_(true),
       ticks_buffer_(sizeof(TickSampleEventRecord),
index 97828a4..e077002 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -126,6 +126,9 @@ ShellOptions Shell::options;
 const char* Shell::kPrompt = "d8> ";
 
 
+const int MB = 1024 * 1024;
+
+
 #ifndef V8_SHARED
 bool CounterMap::Match(void* key1, void* key2) {
   const char* name1 = reinterpret_cast<const char*>(key1);
@@ -1191,14 +1194,11 @@ Handle<String> SourceGroup::ReadFile(const char* name) {
 
 #ifndef V8_SHARED
 i::Thread::Options SourceGroup::GetThreadOptions() {
-  i::Thread::Options options;
-  options.name = "IsolateThread";
   // On some systems (OSX 10.6) the stack size default is 0.5Mb or less
   // which is not enough to parse the big literal expressions used in tests.
   // The stack size should be at least StackGuard::kLimitSize + some
-  // OS-specific padding for thread startup code.
-  options.stack_size = 2 << 20;  // 2 Mb seems to be enough
-  return options;
+  // OS-specific padding for thread startup code.  2 MB seems to be enough.
+  return i::Thread::Options("IsolateThread", 2 * MB);
 }
 
 
index aab69c3..e1e3f0b 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -1086,6 +1086,7 @@ MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
 
   MemoryChunk* chunk =
       Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
+                                                            desc.instr_size,
                                                             EXECUTABLE,
                                                             NULL);
   if (chunk == NULL) {
index 4d98fba..23fe306 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -505,7 +505,6 @@ Isolate* Heap::isolate() {
 #define GC_GREEDY_CHECK() { }
 #endif
 
-
 // Calls the FUNCTION_CALL function and retries it up to three times
 // to guarantee that any allocations performed during the call will
 // succeed if there's enough memory.
index 82a9034..a1f3260 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -582,8 +582,11 @@ void Heap::ReserveSpace(
   PagedSpace* map_space = Heap::map_space();
   PagedSpace* cell_space = Heap::cell_space();
   LargeObjectSpace* lo_space = Heap::lo_space();
+  bool one_old_space_gc_has_been_performed = false;
   bool gc_performed = true;
+  bool old_space_gc_performed;
   while (gc_performed) {
+    old_space_gc_performed = false;
     gc_performed = false;
     if (!new_space->ReserveSpace(new_space_size)) {
       Heap::CollectGarbage(NEW_SPACE);
@@ -592,22 +595,27 @@ void Heap::ReserveSpace(
     if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
       Heap::CollectGarbage(OLD_POINTER_SPACE);
       gc_performed = true;
+      old_space_gc_performed = true;
     }
     if (!(old_data_space->ReserveSpace(data_space_size))) {
       Heap::CollectGarbage(OLD_DATA_SPACE);
       gc_performed = true;
+      old_space_gc_performed = true;
     }
     if (!(code_space->ReserveSpace(code_space_size))) {
       Heap::CollectGarbage(CODE_SPACE);
       gc_performed = true;
+      old_space_gc_performed = true;
     }
     if (!(map_space->ReserveSpace(map_space_size))) {
       Heap::CollectGarbage(MAP_SPACE);
       gc_performed = true;
+      old_space_gc_performed = true;
     }
     if (!(cell_space->ReserveSpace(cell_space_size))) {
       Heap::CollectGarbage(CELL_SPACE);
       gc_performed = true;
+      old_space_gc_performed = true;
     }
     // We add a slack-factor of 2 in order to have space for a series of
     // large-object allocations that are only just larger than the page size.
@@ -617,10 +625,17 @@ void Heap::ReserveSpace(
     // allocation in the other spaces.
     large_object_size += cell_space_size + map_space_size + code_space_size +
         data_space_size + pointer_space_size;
-    if (!(lo_space->ReserveSpace(large_object_size))) {
+
+    // If we already did one GC in order to make space in old space, there is
+    // no sense in doing another one.  We will attempt to force through the
+    // large object space allocation, which comes directly from the OS,
+    // regardless of any soft limit.
+    if (!one_old_space_gc_has_been_performed &&
+        !(lo_space->ReserveSpace(large_object_size))) {
       Heap::CollectGarbage(LO_SPACE);
       gc_performed = true;
     }
+    if (old_space_gc_performed) one_old_space_gc_has_been_performed = true;
   }
 }
 
index f6d5a59..f9c0272 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -287,7 +287,7 @@ void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
 
     // It's difficult to filter out slots recorded for large objects.
     if (chunk->owner()->identity() == LO_SPACE &&
-        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
+        chunk->size() > Page::kPageSize &&
         is_compacting) {
       chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
     }
index b9e1639..f2f649b 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -2887,7 +2887,8 @@ static void SweepPrecisely(PagedSpace* space,
     for ( ; live_objects != 0; live_objects--) {
       Address free_end = object_address + offsets[live_index++] * kPointerSize;
       if (free_end != free_start) {
-        space->Free(free_start, static_cast<int>(free_end - free_start));
+        space->AddToFreeLists(free_start,
+                              static_cast<int>(free_end - free_start));
       }
       HeapObject* live_object = HeapObject::FromAddress(free_end);
       ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
@@ -2913,7 +2914,8 @@ static void SweepPrecisely(PagedSpace* space,
     cells[cell_index] = 0;
   }
   if (free_start != p->ObjectAreaEnd()) {
-    space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
+    space->AddToFreeLists(free_start,
+                          static_cast<int>(p->ObjectAreaEnd() - free_start));
   }
   p->ResetLiveBytes();
 }
@@ -3206,7 +3208,8 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     Page* p = evacuation_candidates_[i];
     if (!p->IsEvacuationCandidate()) continue;
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
+    space->AddToFreeLists(p->ObjectAreaStart(),
+                          p->ObjectAreaEnd() - p->ObjectAreaStart());
     p->set_scan_on_scavenge(false);
     slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
     p->ClearEvacuationCandidate();
@@ -3523,8 +3526,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   }
   size_t size = block_address - p->ObjectAreaStart();
   if (cell_index == last_cell_index) {
-    freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
-                                                static_cast<int>(size)));
+    freed_bytes += static_cast<int>(space->AddToFreeLists(
+        p->ObjectAreaStart(), static_cast<int>(size)));
     ASSERT_EQ(0, p->LiveBytes());
     return freed_bytes;
   }
@@ -3533,8 +3536,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
   // Free the first free space.
   size = free_end - p->ObjectAreaStart();
-  freed_bytes += space->Free(p->ObjectAreaStart(),
-                             static_cast<int>(size));
+  freed_bytes += space->AddToFreeLists(p->ObjectAreaStart(),
+                                       static_cast<int>(size));
   // The start of the current free area is represented in undigested form by
   // the address of the last 32-word section that contained a live object and
   // the marking bitmap for that cell, which describes where the live object
@@ -3563,8 +3566,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
           // so now we need to find the start of the first live object at the
           // end of the free space.
           free_end = StartOfLiveObject(block_address, cell);
-          freed_bytes += space->Free(free_start,
-                                     static_cast<int>(free_end - free_start));
+          freed_bytes += space->AddToFreeLists(
+              free_start, static_cast<int>(free_end - free_start));
         }
       }
       // Update our undigested record of where the current free area started.
@@ -3578,8 +3581,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   // Handle the free space at the end of the page.
   if (block_address - free_start > 32 * kPointerSize) {
     free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes += space->Free(free_start,
-                               static_cast<int>(block_address - free_start));
+    freed_bytes += space->AddToFreeLists(
+        free_start, static_cast<int>(block_address - free_start));
   }
 
   p->ResetLiveBytes();
index 65bd720..38275b3 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -464,15 +464,8 @@ class Thread::PlatformData : public Malloced {
 
 Thread::Thread(const Options& options)
     : data_(new PlatformData),
-      stack_size_(options.stack_size) {
-  set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
-    : data_(new PlatformData),
-      stack_size_(0) {
-  set_name(name);
+      stack_size_(options.stack_size()) {
+  set_name(options.name());
 }
 
 
@@ -717,8 +710,10 @@ class SignalSender : public Thread {
     FULL_INTERVAL
   };
 
+  static const int kSignalSenderStackSize = 32 * KB;
+
   explicit SignalSender(int interval)
-      : Thread("SignalSender"),
+      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
index 5a1c0d1..3788b81 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -720,15 +720,8 @@ class Thread::PlatformData : public Malloced {
 
 Thread::Thread(const Options& options)
     : data_(new PlatformData()),
-      stack_size_(options.stack_size) {
-  set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
-    : data_(new PlatformData()),
-      stack_size_(0) {
-  set_name(name);
+      stack_size_(options.stack_size()) {
+  set_name(options.name());
 }
 
 
@@ -1035,8 +1028,10 @@ class SignalSender : public Thread {
     FULL_INTERVAL
   };
 
+  static const int kSignalSenderStackSize = 32 * KB;
+
   explicit SignalSender(int interval)
-      : Thread("SignalSender"),
+      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
         vm_tgid_(getpid()),
         interval_(interval) {}
 
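
All of the platform backends in this change follow the same pattern: the always-running SignalSender/SamplerThread helpers now request a 32 KB stack through Thread::Options instead of taking the OS default.  As a rough standalone illustration of the mechanism a POSIX backend is assumed to use for a nonzero stack size (pthread_attr_setstacksize itself is not part of this diff), creating a thread with an explicit small stack looks like this:

#include <pthread.h>
#include <cstdio>

static void* ThreadMain(void*) {
  std::puts("helper thread running");
  return NULL;
}

int main() {
  const size_t kStackSize = 32 * 1024;
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  // Some platforms reject stacks smaller than PTHREAD_STACK_MIN; real code
  // should clamp to that minimum.  If the call fails, the default stack is
  // simply used instead.
  pthread_attr_setstacksize(&attr, kStackSize);
  pthread_t thread;
  if (pthread_create(&thread, &attr, ThreadMain, NULL) == 0) {
    pthread_join(thread, NULL);
  }
  pthread_attr_destroy(&attr);
  return 0;
}

Since the default thread stack is typically hundreds of kilobytes to several megabytes, each such helper thread now reserves far less memory.
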
index 369c3e4..e7313fa 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -473,17 +473,11 @@ class Thread::PlatformData : public Malloced {
   pthread_t thread_;  // Thread handle for pthread.
 };
 
-Thread::Thread(const Options& options)
-    : data_(new PlatformData),
-      stack_size_(options.stack_size) {
-  set_name(options.name);
-}
-
 
-Thread::Thread(const char* name)
+Thread::Thread(const Options& options)
     : data_(new PlatformData),
-      stack_size_(0) {
-  set_name(name);
+      stack_size_(options.stack_size()) {
+  set_name(options.name());
 }
 
 
@@ -736,10 +730,13 @@ class Sampler::PlatformData : public Malloced {
   thread_act_t profiled_thread_;
 };
 
+
 class SamplerThread : public Thread {
  public:
+  static const int kSamplerThreadStackSize = 32 * KB;
+
   explicit SamplerThread(int interval)
-      : Thread("SamplerThread"),
+      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
index a01c08d..ac01322 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -512,15 +512,8 @@ class Thread::PlatformData : public Malloced {
 
 Thread::Thread(const Options& options)
     : data_(new PlatformData()),
-      stack_size_(options.stack_size) {
-  set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
-    : data_(new PlatformData()),
-      stack_size_(0) {
-  set_name(name);
+      stack_size_(options.stack_size()) {
+  set_name(options.name());
 }
 
 
@@ -789,8 +782,10 @@ class SignalSender : public Thread {
     FULL_INTERVAL
   };
 
+  static const int kSignalSenderStackSize = 32 * KB;
+
   explicit SignalSender(int interval)
-      : Thread("SignalSender"),
+      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
         vm_tgid_(getpid()),
         interval_(interval) {}
 
index 08bec93..c878546 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -369,17 +369,11 @@ class Thread::PlatformData : public Malloced {
   pthread_t thread_;  // Thread handle for pthread.
 };
 
-Thread::Thread(const Options& options)
-    : data_(new PlatformData()),
-      stack_size_(options.stack_size) {
-  set_name(options.name);
-}
 
-
-Thread::Thread(const char* name)
+Thread::Thread(const Options& options)
     : data_(new PlatformData()),
-      stack_size_(0) {
-  set_name(name);
+      stack_size_(options.stack_size()) {
+  set_name(options.name());
 }
 
 
@@ -626,8 +620,10 @@ class SignalSender : public Thread {
     FULL_INTERVAL
   };
 
+  static const int kSignalSenderStackSize = 32 * KB;
+
   explicit SignalSender(int interval)
-      : Thread("SignalSender"),
+      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
         interval_(interval) {}
 
   static void InstallSignalHandler() {
index 5c000e6..b4ffc4c 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1526,16 +1526,9 @@ class Thread::PlatformData : public Malloced {
 // handle until it is started.
 
 Thread::Thread(const Options& options)
-    : stack_size_(options.stack_size) {
+    : stack_size_(options.stack_size()) {
   data_ = new PlatformData(kNoThread);
-  set_name(options.name);
-}
-
-
-Thread::Thread(const char* name)
-    : stack_size_(0) {
-  data_ = new PlatformData(kNoThread);
-  set_name(name);
+  set_name(options.name());
 }
 
 
@@ -1901,8 +1894,10 @@ class Sampler::PlatformData : public Malloced {
 
 class SamplerThread : public Thread {
  public:
+  static const int kSamplerThreadStackSize = 32 * KB;
+
   explicit SamplerThread(int interval)
-      : Thread("SamplerThread"),
+      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
index fc12df2..a0186d5 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -412,16 +412,22 @@ class Thread {
     LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
   };
 
-  struct Options {
-    Options() : name("v8:<unknown>"), stack_size(0) {}
+  class Options {
+   public:
+    Options() : name_("v8:<unknown>"), stack_size_(0) {}
+    Options(const char* name, int stack_size = 0)
+        : name_(name), stack_size_(stack_size) {}
+
+    const char* name() const { return name_; }
+    int stack_size() const { return stack_size_; }
 
-    const char* name;
-    int stack_size;
+   private:
+    const char* name_;
+    int stack_size_;
   };
 
   // Create new thread.
   explicit Thread(const Options& options);
-  explicit Thread(const char* name);
   virtual ~Thread();
 
   // Start new thread by calling the Run() method in the new thread.
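
The platform.h hunk above is the heart of the thread-stack change: Options becomes a small class with a (name, stack_size) constructor and accessors, and the old Thread(const char* name) constructor is removed, so every thread states its stack requirement in one place.  A minimal standalone sketch of the intended usage follows (stand-in types only; WorkerThread and the sketch namespace are not part of the patch):

#include <cstdio>

namespace sketch {

class Thread {
 public:
  class Options {
   public:
    Options() : name_("v8:<unknown>"), stack_size_(0) {}
    Options(const char* name, int stack_size = 0)
        : name_(name), stack_size_(stack_size) {}
    const char* name() const { return name_; }
    int stack_size() const { return stack_size_; }
   private:
    const char* name_;
    int stack_size_;
  };

  explicit Thread(const Options& options)
      : name_(options.name()), stack_size_(options.stack_size()) {}
  virtual ~Thread() {}
  virtual void Run() = 0;
  const char* name() const { return name_; }
  int stack_size() const { return stack_size_; }

 private:
  const char* name_;
  int stack_size_;
};

// A sampler-style helper thread asking for a 32 KB stack, as the
// SignalSender/SamplerThread classes in this patch do.
class WorkerThread : public Thread {
 public:
  static const int kStackSize = 32 * 1024;
  WorkerThread() : Thread(Options("v8:Worker", kStackSize)) {}
  virtual void Run() {}
};

}  // namespace sketch

int main() {
  sketch::WorkerThread thread;
  std::printf("%s asks for a %d byte stack\n", thread.name(), thread.stack_size());
  return 0;
}
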
index d0a1a63..bfac00d 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -612,6 +612,7 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) {
     pages_[LO_SPACE].Add(address);
   }
   last_object_address_ = address;
+  ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
   return address;
 }
 
@@ -622,7 +623,12 @@ HeapObject* Deserializer::GetAddressFromEnd(int space) {
   int offset = source_->GetInt();
   ASSERT(!SpaceIsLarge(space));
   offset <<= kObjectAlignmentBits;
-  return HeapObject::FromAddress(high_water_[space] - offset);
+  Address address = high_water_[space] - offset;
+  // This assert will fail if kMinimumSpaceSizes is too small for a space,
+  // because we rely on the fact that all allocation is linear when the VM
+  // is very young.
+  ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
+  return HeapObject::FromAddress(address);
 }
 
 
index 4f01a2d..4f46404 100644
--- a/src/snapshot.h
+++ b/src/snapshot.h
@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "isolate.h"
+#include "spaces.h"
 
 #ifndef V8_SNAPSHOT_H_
 #define V8_SNAPSHOT_H_
@@ -86,6 +87,21 @@ class Snapshot {
   DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
 };
 
+
+// These are the sizes of the spaces that are needed in order to unpack the
+// VM boot snapshot.
+const int kMinimumSpaceSizes[LAST_SPACE + 1] = {
+  0,           // New space.
+  512 * 1024,  // Old pointer space.
+  128 * 1024,  // Old data space.
+  256 * 1024,  // Code space.
+  64 * 1024,   // Map space.
+  64 * 1024,   // Cell space.
+  0            // Large object space.
+};
+
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_SNAPSHOT_H_
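
For scale, summing the table above (a back-of-the-envelope sketch, not code from the patch) gives the old-generation memory that has to stay committed just to deserialize the boot snapshot: about 1 MB in total, rather than a full Page::kPageSize per space.

#include <cstdio>

int main() {
  const int kMinimumSpaceSizes[] = {
    0,           // New space.
    512 * 1024,  // Old pointer space.
    128 * 1024,  // Old data space.
    256 * 1024,  // Code space.
    64 * 1024,   // Map space.
    64 * 1024,   // Cell space.
    0            // Large object space.
  };
  int total = 0;
  for (size_t i = 0; i < sizeof(kMinimumSpaceSizes) / sizeof(kMinimumSpaceSizes[0]); i++) {
    total += kMinimumSpaceSizes[i];
  }
  std::printf("minimum committed for snapshot spaces: %d KB\n", total / 1024);  // 1024 KB
  return 0;
}
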
index bd544ce..32cc444 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -164,12 +164,12 @@ Page* Page::Initialize(Heap* heap,
                        Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
-  ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
+  ASSERT(chunk->size() <= kPageSize);
   ASSERT(chunk->owner() == owner);
-  owner->IncreaseCapacity(Page::kObjectAreaSize);
-  owner->Free(page->ObjectAreaStart(),
-              static_cast<int>(page->ObjectAreaEnd() -
-                               page->ObjectAreaStart()));
+  intptr_t object_bytes = page->ObjectAreaEnd() - page->ObjectAreaStart();
+  owner->IncreaseCapacity(object_bytes);
+  owner->AddToFreeLists(page->ObjectAreaStart(),
+                        static_cast<int>(object_bytes));
 
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
 
@@ -257,6 +257,7 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
   if (new_top > allocation_info_.limit) return NULL;
 
   allocation_info_.top = new_top;
+  ASSERT(new_top >= Page::FromAllocationTop(new_top)->ObjectAreaStart());
   return HeapObject::FromAddress(current_top);
 }
 
index c8e94dd..372a061 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -31,6 +31,7 @@
 #include "macro-assembler.h"
 #include "mark-compact.h"
 #include "platform.h"
+#include "snapshot.h"
 
 namespace v8 {
 namespace internal {
@@ -263,7 +264,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
       capacity_(0),
       capacity_executable_(0),
-      size_(0),
+      memory_allocator_reserved_(0),
       size_executable_(0) {
 }
 
@@ -273,7 +274,7 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   ASSERT_GE(capacity_, capacity_executable_);
 
-  size_ = 0;
+  memory_allocator_reserved_ = 0;
   size_executable_ = 0;
 
   return true;
@@ -282,7 +283,7 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
 
 void MemoryAllocator::TearDown() {
   // Check that spaces were torn down before MemoryAllocator.
-  ASSERT(size_ == 0);
+  CHECK_EQ(memory_allocator_reserved_, 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // ASSERT(size_executable_ == 0);
   capacity_ = 0;
@@ -295,8 +296,8 @@ void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
   // TODO(gc) make code_range part of memory allocator?
   ASSERT(reservation->IsReserved());
   size_t size = reservation->size();
-  ASSERT(size_ >= size);
-  size_ -= size;
+  ASSERT(memory_allocator_reserved_ >= size);
+  memory_allocator_reserved_ -= size;
 
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
 
@@ -316,8 +317,8 @@ void MemoryAllocator::FreeMemory(Address base,
                                  size_t size,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
-  ASSERT(size_ >= size);
-  size_ -= size;
+  ASSERT(memory_allocator_reserved_ >= size);
+  memory_allocator_reserved_ -= size;
 
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
 
@@ -343,7 +344,7 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size,
   VirtualMemory reservation(size, alignment);
 
   if (!reservation.IsReserved()) return NULL;
-  size_ += reservation.size();
+  memory_allocator_reserved_ += reservation.size();
   Address base = RoundUp(static_cast<Address>(reservation.address()),
                          alignment);
   controller->TakeControl(&reservation);
@@ -352,11 +353,14 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size,
 
 
 Address MemoryAllocator::AllocateAlignedMemory(size_t size,
+                                               size_t reserved_size,
                                                size_t alignment,
                                                Executability executable,
                                                VirtualMemory* controller) {
+  ASSERT(RoundUp(reserved_size, OS::CommitPageSize()) >=
+         RoundUp(size, OS::CommitPageSize()));
   VirtualMemory reservation;
-  Address base = ReserveAlignedMemory(size, alignment, &reservation);
+  Address base = ReserveAlignedMemory(reserved_size, alignment, &reservation);
   if (base == NULL) return NULL;
   if (!reservation.Commit(base,
                           size,
@@ -375,6 +379,53 @@ void Page::InitializeAsAnchor(PagedSpace* owner) {
 }
 
 
+void Page::CommitMore(intptr_t space_needed) {
+  intptr_t reserved_page_size = reservation_.IsReserved() ?
+      reservation_.size() :
+      Page::kPageSize;
+  ASSERT(size() < reserved_page_size);
+  intptr_t expand = Min(Max(size(), space_needed), reserved_page_size - size());
+  // At least double the page size (this also rounds to OS page size).
+  expand = Min(reserved_page_size - size(),
+               RoundUpToPowerOf2(size() + expand) - size());
+  ASSERT(expand <= kPageSize - size());
+  ASSERT(expand <= reserved_page_size - size());
+  Executability executable =
+      IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+  Address old_end = ObjectAreaEnd();
+  if (!VirtualMemory::CommitRegion(old_end, expand, executable)) return;
+
+  set_size(size() + expand);
+
+  PagedSpace* paged_space = reinterpret_cast<PagedSpace*>(owner());
+  paged_space->heap()->isolate()->memory_allocator()->AllocationBookkeeping(
+      paged_space,
+      old_end,
+      0,  // No new memory was reserved.
+      expand,  // New memory committed.
+      executable);
+  paged_space->IncreaseCapacity(expand);
+
+  // In spaces with alignment requirements (e.g. map space) we have to align
+  // the expanded area with the correct object alignment.
+  uintptr_t object_area_size = old_end - ObjectAreaStart();
+  uintptr_t aligned_object_area_size =
+      object_area_size - object_area_size % paged_space->ObjectAlignment();
+  if (aligned_object_area_size != object_area_size) {
+    aligned_object_area_size += paged_space->ObjectAlignment();
+  }
+  Address new_area =
+      reinterpret_cast<Address>(ObjectAreaStart() + aligned_object_area_size);
+  // In spaces with alignment requirements, this will waste the space for one
+  // object per doubling of the page size until the next GC.
+  paged_space->AddToFreeLists(old_end, new_area - old_end);
+
+  expand -= (new_area - old_end);
+
+  paged_space->AddToFreeLists(new_area, expand);
+}
+
+
 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
                                        Address start,
                                        SemiSpace* semi_space) {
@@ -460,9 +511,15 @@ void MemoryChunk::Unlink() {
 
 
 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+                                            intptr_t committed_body_size,
                                             Executability executable,
                                             Space* owner) {
-  size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+  ASSERT(body_size >= committed_body_size);
+  size_t chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + body_size,
+                              OS::CommitPageSize());
+  intptr_t committed_chunk_size =
+      committed_body_size + MemoryChunk::kObjectStartOffset;
+  committed_chunk_size = RoundUp(committed_chunk_size, OS::CommitPageSize());
   Heap* heap = isolate_->heap();
   Address base = NULL;
   VirtualMemory reservation;
@@ -482,20 +539,21 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
       ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                        MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
-      size_ += chunk_size;
-      // Update executable memory size.
-      size_executable_ += chunk_size;
+      // The AllocateAlignedMemory method will update the memory allocator
+      // memory used, but we are not using that if we have a code range, so
+      // we update it here.
+      memory_allocator_reserved_ += chunk_size;
     } else {
-      base = AllocateAlignedMemory(chunk_size,
+      base = AllocateAlignedMemory(committed_chunk_size,
+                                   chunk_size,
                                    MemoryChunk::kAlignment,
                                    executable,
                                    &reservation);
       if (base == NULL) return NULL;
-      // Update executable memory size.
-      size_executable_ += reservation.size();
     }
   } else {
-    base = AllocateAlignedMemory(chunk_size,
+    base = AllocateAlignedMemory(committed_chunk_size,
+                                 chunk_size,
                                  MemoryChunk::kAlignment,
                                  executable,
                                  &reservation);
@@ -503,21 +561,12 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
     if (base == NULL) return NULL;
   }
 
-#ifdef DEBUG
-  ZapBlock(base, chunk_size);
-#endif
-  isolate_->counters()->memory_allocated()->
-      Increment(static_cast<int>(chunk_size));
-
-  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
-  if (owner != NULL) {
-    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
-    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
-  }
+  AllocationBookkeeping(
+      owner, base, chunk_size, committed_chunk_size, executable);
 
   MemoryChunk* result = MemoryChunk::Initialize(heap,
                                                 base,
-                                                chunk_size,
+                                                committed_chunk_size,
                                                 executable,
                                                 owner);
   result->set_reserved_memory(&reservation);
@@ -525,9 +574,40 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
 }
 
 
-Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
+void MemoryAllocator::AllocationBookkeeping(Space* owner,
+                                            Address base,
+                                            intptr_t reserved_chunk_size,
+                                            intptr_t committed_chunk_size,
+                                            Executability executable) {
+  if (executable == EXECUTABLE) {
+    // Update executable memory size.
+    size_executable_ += reserved_chunk_size;
+  }
+
+#ifdef DEBUG
+  ZapBlock(base, committed_chunk_size);
+#endif
+  isolate_->counters()->memory_allocated()->
+      Increment(static_cast<int>(committed_chunk_size));
+
+  LOG(isolate_, NewEvent("MemoryChunk", base, committed_chunk_size));
+  if (owner != NULL) {
+    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
+    PerformAllocationCallback(
+        space, kAllocationActionAllocate, committed_chunk_size);
+  }
+}
+
+
+Page* MemoryAllocator::AllocatePage(intptr_t committed_object_area_size,
+                                    PagedSpace* owner,
                                     Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
+  ASSERT(committed_object_area_size <= Page::kObjectAreaSize);
+
+  MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize,
+                                     committed_object_area_size,
+                                     executable,
+                                     owner);
 
   if (chunk == NULL) return NULL;
 
@@ -538,7 +618,8 @@ Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                               Executability executable,
                                               Space* owner) {
-  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
+  MemoryChunk* chunk =
+      AllocateChunk(object_size, object_size, executable, owner);
   if (chunk == NULL) return NULL;
   return LargePage::Initialize(isolate_->heap(), chunk);
 }
@@ -559,8 +640,12 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
   if (reservation->IsReserved()) {
     FreeMemory(reservation, chunk->executable());
   } else {
+    // When we do not have a reservation, it is because this allocation is
+    // part of the huge chunk of memory reserved for code on x64.  In that
+    // case the size was rounded up to the page size on allocation, so we do
+    // the same now when freeing.
     FreeMemory(chunk->address(),
-               chunk->size(),
+               RoundUp(chunk->size(), Page::kPageSize),
                chunk->executable());
   }
 }
@@ -640,11 +725,12 @@ void MemoryAllocator::RemoveMemoryAllocationCallback(
 
 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
-  float pct = static_cast<float>(capacity_ - size_) / capacity_;
+  float pct =
+      static_cast<float>(capacity_ - memory_allocator_reserved_) / capacity_;
   PrintF("  capacity: %" V8_PTR_PREFIX "d"
              ", used: %" V8_PTR_PREFIX "d"
              ", available: %%%d\n\n",
-         capacity_, size_, static_cast<int>(pct*100));
+         capacity_, memory_allocator_reserved_, static_cast<int>(pct*100));
 }
 #endif
 
@@ -712,7 +798,6 @@ MaybeObject* PagedSpace::FindObject(Address addr) {
 
 bool PagedSpace::CanExpand() {
   ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
-  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
 
   if (Capacity() == max_capacity_) return false;
 
@@ -724,11 +809,42 @@ bool PagedSpace::CanExpand() {
   return true;
 }
 
-bool PagedSpace::Expand() {
+bool PagedSpace::Expand(intptr_t size_in_bytes) {
   if (!CanExpand()) return false;
 
+  Page* last_page = anchor_.prev_page();
+  if (last_page != &anchor_) {
+    // We have run out of linear allocation space.  This may be  because the
+    // most recently allocated page (stored last in the list) is a small one,
+    // that starts on a page aligned boundary, but has not a full kPageSize of
+    // committed memory.  Let's commit more memory for the page.
+    intptr_t reserved_page_size = last_page->reserved_memory()->IsReserved() ?
+        last_page->reserved_memory()->size() :
+        Page::kPageSize;
+    if (last_page->size() < reserved_page_size &&
+        (reserved_page_size - last_page->size()) >= size_in_bytes &&
+        !last_page->IsEvacuationCandidate() &&
+        last_page->WasSwept()) {
+      last_page->CommitMore(size_in_bytes);
+      return true;
+    }
+  }
+
+  // We initially only commit a part of the page, but the deserialization
+  // of the initial snapshot makes the assumption that it can deserialize
+  // into linear memory of a certain size per space, so some of the spaces
+  // need to have a little more committed memory.
+  int initial = Max(OS::CommitPageSize(), kMinimumSpaceSizes[identity()]);
+
+  ASSERT(Page::kPageSize - initial < Page::kObjectAreaSize);
+
+  intptr_t expansion_size =
+      Max(initial,
+          RoundUpToPowerOf2(MemoryChunk::kObjectStartOffset + size_in_bytes)) -
+      MemoryChunk::kObjectStartOffset;
+
   Page* p = heap()->isolate()->memory_allocator()->
-      AllocatePage(this, executable());
+      AllocatePage(expansion_size, this, executable());
   if (p == NULL) return false;
 
   ASSERT(Capacity() <= max_capacity_);
@@ -771,6 +887,8 @@ void PagedSpace::ReleasePage(Page* page) {
     allocation_info_.top = allocation_info_.limit = NULL;
   }
 
+  intptr_t size = page->ObjectAreaEnd() - page->ObjectAreaStart();
+
   page->Unlink();
   if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
     heap()->isolate()->memory_allocator()->Free(page);
@@ -779,8 +897,7 @@ void PagedSpace::ReleasePage(Page* page) {
   }
 
   ASSERT(Capacity() > 0);
-  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
-  accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
+  accounting_stats_.ShrinkSpace(size);
 }
 
 
@@ -1658,7 +1775,7 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
   // is big enough to be a FreeSpace with at least one extra word (the next
   // pointer), we set its map to be the free space map and its size to an
   // appropriate array length for the desired size from HeapObject::Size().
-  // If the block is too small (eg, one or two words), to hold both a size
+  // If the block is too small (e.g. one or two words) to hold both a size
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
   if (size_in_bytes > FreeSpace::kHeaderSize) {
@@ -1762,69 +1879,102 @@ int FreeList::Free(Address start, int size_in_bytes) {
 }
 
 
-FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
+FreeListNode* FreeList::PickNodeFromList(FreeListNode** list,
+                                         int* node_size,
+                                         int minimum_size) {
   FreeListNode* node = *list;
 
   if (node == NULL) return NULL;
 
+  ASSERT(node->map() == node->GetHeap()->raw_unchecked_free_space_map());
+
   while (node != NULL &&
          Page::FromAddress(node->address())->IsEvacuationCandidate()) {
     available_ -= node->Size();
     node = node->next();
   }
 
-  if (node != NULL) {
-    *node_size = node->Size();
-    *list = node->next();
-  } else {
+  if (node == NULL) {
     *list = NULL;
+    return NULL;
   }
 
+  // Gets the size without checking the map.  When we are booting we have
+  // a FreeListNode before we have created its map.
+  intptr_t size = reinterpret_cast<FreeSpace*>(node)->Size();
+
+  // We don't search the list for one that fits, preferring to look in the
+  // list of larger nodes, but we do check the first in the list, because
+  // if we had to expand the space or page we may have placed an entry that
+  // was just long enough at the head of one of the lists.
+  if (size < minimum_size) return NULL;
+
+  *node_size = size;
+  available_ -= size;
+  *list = node->next();
+
   return node;
 }
 
 
-FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+FreeListNode* FreeList::FindAbuttingNode(
+  int size_in_bytes, int* node_size, Address limit, FreeListNode** list_head) {
+  FreeListNode* first_node = *list_head;
+  if (first_node != NULL &&
+      first_node->address() == limit &&
+      reinterpret_cast<FreeSpace*>(first_node)->Size() >= size_in_bytes &&
+      !Page::FromAddress(first_node->address())->IsEvacuationCandidate()) {
+    FreeListNode* answer = first_node;
+    int size = reinterpret_cast<FreeSpace*>(first_node)->Size();
+    available_ -= size;
+    *node_size = size;
+    *list_head = first_node->next();
+    ASSERT(IsVeryLong() || available_ == SumFreeLists());
+    return answer;
+  }
+  return NULL;
+}
+
+
+FreeListNode* FreeList::FindNodeFor(int size_in_bytes,
+                                    int* node_size,
+                                    Address limit) {
   FreeListNode* node = NULL;
 
-  if (size_in_bytes <= kSmallAllocationMax) {
-    node = PickNodeFromList(&small_list_, node_size);
+  if (limit != NULL) {
+    // We may have a memory area at the head of the free list, which abuts the
+    // old linear allocation area.  This happens if the linear allocation area
+    // has been shortened to allow an incremental marking step to be performed.
+    // In that case we prefer to return the free memory area that is contiguous
+    // with the old linear allocation area.
+    node = FindAbuttingNode(size_in_bytes, node_size, limit, &large_list_);
     if (node != NULL) return node;
-  }
-
-  if (size_in_bytes <= kMediumAllocationMax) {
-    node = PickNodeFromList(&medium_list_, node_size);
+    node = FindAbuttingNode(size_in_bytes, node_size, limit, &huge_list_);
     if (node != NULL) return node;
   }
 
-  if (size_in_bytes <= kLargeAllocationMax) {
-    node = PickNodeFromList(&large_list_, node_size);
-    if (node != NULL) return node;
-  }
+  node = PickNodeFromList(&small_list_, node_size, size_in_bytes);
+  ASSERT(IsVeryLong() || available_ == SumFreeLists());
+  if (node != NULL) return node;
+
+  node = PickNodeFromList(&medium_list_, node_size, size_in_bytes);
+  ASSERT(IsVeryLong() || available_ == SumFreeLists());
+  if (node != NULL) return node;
+
+  node = PickNodeFromList(&large_list_, node_size, size_in_bytes);
+  ASSERT(IsVeryLong() || available_ == SumFreeLists());
+  if (node != NULL) return node;
 
+  // The tricky third clause in this for statement is due to the fact that
+  // PickNodeFromList can cut pages out of the list if they are unavailable for
+  // new allocation (e.g. if they are on a page that has been scheduled for
+  // evacuation).
   for (FreeListNode** cur = &huge_list_;
        *cur != NULL;
-       cur = (*cur)->next_address()) {
-    FreeListNode* cur_node = *cur;
-    while (cur_node != NULL &&
-           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
-      available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
-      cur_node = cur_node->next();
-    }
-
-    *cur = cur_node;
-    if (cur_node == NULL) break;
-
-    ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
-    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
-    int size = cur_as_free_space->Size();
-    if (size >= size_in_bytes) {
-      // Large enough node found.  Unlink it from the list.
-      node = *cur;
-      *node_size = size;
-      *cur = node->next();
-      break;
-    }
+       cur = (*cur) == NULL ? cur : (*cur)->next_address()) {
+    node = PickNodeFromList(cur, node_size, size_in_bytes);
+    ASSERT(IsVeryLong() || available_ == SumFreeLists());
+    if (node != NULL) return node;
   }
 
   return node;
@@ -1843,10 +1993,23 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
 
   int new_node_size = 0;
-  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+  FreeListNode* new_node =
+      FindNodeFor(size_in_bytes, &new_node_size, owner_->limit());
   if (new_node == NULL) return NULL;
 
-  available_ -= new_node_size;
+  if (new_node->address() == owner_->limit()) {
+    // The new freelist node we were given is an extension of the one we had
+    // last.  This is a common thing to happen when we extend a small page by
+    // committing more memory.  In this case we just add the new node to the
+    // linear allocation area and recurse.
+    owner_->Allocate(new_node_size);
+    owner_->SetTop(owner_->top(), new_node->address() + new_node_size);
+    MaybeObject* allocation = owner_->AllocateRaw(size_in_bytes);
+    Object* answer;
+    if (!allocation->ToObject(&answer)) return NULL;
+    return HeapObject::cast(answer);
+  }
+
   ASSERT(IsVeryLong() || available_ == SumFreeLists());
 
   int bytes_left = new_node_size - size_in_bytes;
@@ -1856,7 +2019,9 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap.  This also puts it back in the free list
   // if it is big enough.
-  owner_->Free(owner_->top(), old_linear_size);
+  if (old_linear_size != 0) {
+    owner_->AddToFreeLists(owner_->top(), old_linear_size);
+  }
 
 #ifdef DEBUG
   for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
@@ -1885,8 +2050,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
     // We don't want to give too large linear areas to the allocator while
     // incremental marking is going on, because we won't check again whether
     // we want to do another increment until the linear area is used up.
-    owner_->Free(new_node->address() + size_in_bytes + linear_size,
-                 new_node_size - size_in_bytes - linear_size);
+    owner_->AddToFreeLists(new_node->address() + size_in_bytes + linear_size,
+                           new_node_size - size_in_bytes - linear_size);
     owner_->SetTop(new_node->address() + size_in_bytes,
                    new_node->address() + size_in_bytes + linear_size);
   } else if (bytes_left > 0) {
@@ -1895,6 +2060,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
     owner_->SetTop(new_node->address() + size_in_bytes,
                    new_node->address() + new_node_size);
   } else {
+    ASSERT(bytes_left == 0);
     // TODO(gc) Try not freeing linear allocation region when bytes_left
     // are zero.
     owner_->SetTop(NULL, NULL);
@@ -2027,7 +2193,9 @@ bool NewSpace::ReserveSpace(int bytes) {
   HeapObject* allocation = HeapObject::cast(object);
   Address top = allocation_info_.top;
   if ((top - bytes) == allocation->address()) {
-    allocation_info_.top = allocation->address();
+    Address new_top = allocation->address();
+    ASSERT(new_top >= Page::FromAddress(new_top - 1)->ObjectAreaStart());
+    allocation_info_.top = new_top;
     return true;
   }
   // There may be a borderline case here where the allocation succeeded, but
@@ -2042,7 +2210,7 @@ void PagedSpace::PrepareForMarkCompact() {
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap.
   int old_linear_size = static_cast<int>(limit() - top());
-  Free(top(), old_linear_size);
+  AddToFreeLists(top(), old_linear_size);
   SetTop(NULL, NULL);
 
   // Stop lazy sweeping and clear marking bits for unswept pages.
@@ -2085,10 +2253,13 @@ bool PagedSpace::ReserveSpace(int size_in_bytes) {
   // Mark the old linear allocation area with a free space so it can be
   // skipped when scanning the heap.  This also puts it back in the free list
   // if it is big enough.
-  Free(top(), old_linear_size);
+  AddToFreeLists(top(), old_linear_size);
 
   SetTop(new_area->address(), new_area->address() + size_in_bytes);
-  Allocate(size_in_bytes);
+  // The AddToFreeLists call above will reduce the size of the space in the
+  // allocation stats.  We don't need to add this linear area to the size
+  // with an Allocate(size_in_bytes) call here, because the
+  // free_list_.Allocate() call above already accounted for this memory.
   return true;
 }
 
@@ -2169,7 +2340,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   }
 
   // Try to expand the space and allocate in the new next page.
-  if (Expand()) {
+  if (Expand(size_in_bytes)) {
     return free_list_.Allocate(size_in_bytes);
   }
 
@@ -2530,6 +2701,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
       heap()->mark_compact_collector()->ReportDeleteIfNeeded(
           object, heap()->isolate());
       size_ -= static_cast<int>(page->size());
+      ASSERT(size_ >= 0);
       objects_size_ -= object->Size();
       page_count_--;
 
index b3963d6..f7803a3 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -502,11 +502,9 @@ class MemoryChunk {
   static const int kObjectStartOffset = kBodyOffset - 1 +
       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
 
-  size_t size() const { return size_; }
+  intptr_t size() const { return size_; }
 
-  void set_size(size_t size) {
-    size_ = size;
-  }
+  void set_size(size_t size) { size_ = size; }
 
   Executability executable() {
     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
@@ -658,7 +656,7 @@ class Page : public MemoryChunk {
   Address ObjectAreaStart() { return address() + kObjectStartOffset; }
 
   // Returns the end address (exclusive) of the object area in this page.
-  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
+  Address ObjectAreaEnd() { return address() + size(); }
 
   // Checks whether an address is page aligned.
   static bool IsAlignedToPageSize(Address a) {
@@ -677,6 +675,10 @@ class Page : public MemoryChunk {
     return address() + offset;
   }
 
+  // Expand the committed area for pages that are small.  This
+  // happens primarily when the VM is newly booted.
+  void CommitMore(intptr_t space_needed);
+
   // ---------------------------------------------------------------------
 
   // Page size in bytes.  This must be a multiple of the OS page size.
@@ -846,12 +848,10 @@ class CodeRange {
     FreeBlock(Address start_arg, size_t size_arg)
         : start(start_arg), size(size_arg) {
       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
-      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
     }
     FreeBlock(void* start_arg, size_t size_arg)
         : start(static_cast<Address>(start_arg)), size(size_arg) {
       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
-      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
     }
 
     Address start;
@@ -947,7 +947,9 @@ class MemoryAllocator {
 
   void TearDown();
 
-  Page* AllocatePage(PagedSpace* owner, Executability executable);
+  Page* AllocatePage(intptr_t object_area_size,
+                     PagedSpace* owner,
+                     Executability executable);
 
   LargePage* AllocateLargePage(intptr_t object_size,
                                       Executability executable,
@@ -956,10 +958,14 @@ class MemoryAllocator {
   void Free(MemoryChunk* chunk);
 
   // Returns the maximum available bytes of heaps.
-  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
+  intptr_t Available() {
+    return capacity_ < memory_allocator_reserved_ ?
+           0 :
+           capacity_ - memory_allocator_reserved_;
+  }
 
   // Returns allocated spaces in bytes.
-  intptr_t Size() { return size_; }
+  intptr_t Size() { return memory_allocator_reserved_; }
 
   // Returns the maximum available executable bytes of heaps.
   intptr_t AvailableExecutable() {
@@ -981,6 +987,7 @@ class MemoryAllocator {
 #endif
 
   MemoryChunk* AllocateChunk(intptr_t body_size,
+                             intptr_t committed_body_size,
                              Executability executable,
                              Space* space);
 
@@ -988,6 +995,7 @@ class MemoryAllocator {
                                size_t alignment,
                                VirtualMemory* controller);
   Address AllocateAlignedMemory(size_t requested,
+                                size_t committed,
                                 size_t alignment,
                                 Executability executable,
                                 VirtualMemory* controller);
@@ -1007,6 +1015,12 @@ class MemoryAllocator {
   // and false otherwise.
   bool UncommitBlock(Address start, size_t size);
 
+  void AllocationBookkeeping(Space* owner,
+                             Address base,
+                             intptr_t reserved_size,
+                             intptr_t committed_size,
+                             Executability executable);
+
   // Zaps a contiguous block of memory [start..(start+size)[ thus
   // filling it up with a recognizable non-NULL bit pattern.
   void ZapBlock(Address start, size_t size);
@@ -1034,7 +1048,7 @@ class MemoryAllocator {
   size_t capacity_executable_;
 
   // Allocated space size in bytes.
-  size_t size_;
+  size_t memory_allocator_reserved_;
   // Allocated executable space size in bytes.
   size_t size_executable_;
 
@@ -1379,9 +1393,15 @@ class FreeList BASE_EMBEDDED {
   static const int kMinBlockSize = 3 * kPointerSize;
   static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
 
-  FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
+  FreeListNode* PickNodeFromList(FreeListNode** list,
+                                 int* node_size,
+                                 int minimum_size);
 
-  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
+  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size, Address limit);
+  FreeListNode* FindAbuttingNode(int size_in_bytes,
+                                 int* node_size,
+                                 Address limit,
+                                 FreeListNode** list_head);
 
   PagedSpace* owner_;
   Heap* heap_;
@@ -1481,6 +1501,8 @@ class PagedSpace : public Space {
   // free bytes that were not found at all due to lazy sweeping.
   virtual intptr_t Waste() { return accounting_stats_.Waste(); }
 
+  virtual int ObjectAlignment() { return kObjectAlignment; }
+
   // Returns the allocation pointer in this space.
   Address top() { return allocation_info_.top; }
   Address limit() { return allocation_info_.limit; }
@@ -1495,7 +1517,7 @@ class PagedSpace : public Space {
   // the free list or accounted as waste.
   // If add_to_freelist is false then just accounting stats are updated and
   // no attempt to add area to free list is made.
-  int Free(Address start, int size_in_bytes) {
+  int AddToFreeLists(Address start, int size_in_bytes) {
     int wasted = free_list_.Free(start, size_in_bytes);
     accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
     return size_in_bytes - wasted;
@@ -1503,6 +1525,7 @@ class PagedSpace : public Space {
 
   // Set space allocation info.
   void SetTop(Address top, Address limit) {
+    ASSERT(top == NULL || top >= Page::FromAddress(top - 1)->ObjectAreaStart());
     ASSERT(top == limit ||
            Page::FromAddress(top) == Page::FromAddress(limit - 1));
     allocation_info_.top = top;
@@ -1573,6 +1596,7 @@ class PagedSpace : public Space {
     return !first_unswept_page_->is_valid();
   }
 
+  inline bool HasAPage() { return anchor_.next_page() != &anchor_; }
   Page* FirstPage() { return anchor_.next_page(); }
   Page* LastPage() { return anchor_.prev_page(); }
 
@@ -1645,12 +1669,6 @@ class PagedSpace : public Space {
   // Normal allocation information.
   AllocationInfo allocation_info_;
 
-  // Bytes of each page that cannot be allocated.  Possibly non-zero
-  // for pages in spaces with only fixed-size objects.  Always zero
-  // for pages in spaces with variable sized objects (those pages are
-  // padded with free-list nodes).
-  int page_extra_;
-
   bool was_swept_conservatively_;
 
   // The first page to be swept when the lazy sweeper advances. Is set
@@ -1662,10 +1680,11 @@ class PagedSpace : public Space {
   // done conservatively.
   intptr_t unswept_free_bytes_;
 
-  // Expands the space by allocating a fixed number of pages. Returns false if
-  // it cannot allocate requested number of pages from OS, or if the hard heap
-  // size limit has been hit.
-  bool Expand();
+  // Expands the space by allocating a page. Returns false if it cannot
+  // allocate a page from OS, or if the hard heap size limit has been hit.  The
+  // new page will have at least enough committed space to satisfy the object
+  // size indicated by the allocation_size argument.
+  bool Expand(intptr_t allocation_size);
 
   // Generic fast case allocation function that tries linear allocation at the
   // address denoted by top in allocation_info_.
@@ -2325,14 +2344,7 @@ class OldSpace : public PagedSpace {
            intptr_t max_capacity,
            AllocationSpace id,
            Executability executable)
-      : PagedSpace(heap, max_capacity, id, executable) {
-    page_extra_ = 0;
-  }
-
-  // The limit of allocation for a page in this space.
-  virtual Address PageAllocationLimit(Page* page) {
-    return page->ObjectAreaEnd();
-  }
+      : PagedSpace(heap, max_capacity, id, executable) { }
 
  public:
   TRACK_MEMORY("OldSpace")
@@ -2359,17 +2371,12 @@ class FixedSpace : public PagedSpace {
              const char* name)
       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
         object_size_in_bytes_(object_size_in_bytes),
-        name_(name) {
-    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
-  }
-
-  // The limit of allocation for a page in this space.
-  virtual Address PageAllocationLimit(Page* page) {
-    return page->ObjectAreaEnd() - page_extra_;
-  }
+        name_(name) { }
 
   int object_size_in_bytes() { return object_size_in_bytes_; }
 
+  virtual int ObjectAlignment() { return object_size_in_bytes_; }
+
   // Prepares for a mark-compact GC.
   virtual void PrepareForMarkCompact();
 
index 9022b3b..f85ec27 100644
--- a/src/store-buffer.cc
+++ b/src/store-buffer.cc
@@ -496,7 +496,6 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
   Address map_aligned_end   = MapEndAlign(end);
 
   ASSERT(map_aligned_start == start);
-  ASSERT(map_aligned_end == end);
 
   FindPointersToNewSpaceInMaps(map_aligned_start,
                                map_aligned_end,
@@ -524,52 +523,57 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
     RegionCallback region_callback,
     ObjectSlotCallback slot_callback) {
   Address visitable_start = page->ObjectAreaStart();
-  Address end_of_page = page->ObjectAreaEnd();
 
   Address visitable_end = visitable_start;
 
   Object* free_space_map = heap_->free_space_map();
   Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
 
-  while (visitable_end < end_of_page) {
-    Object* o = *reinterpret_cast<Object**>(visitable_end);
-    // Skip fillers but not things that look like fillers in the special
-    // garbage section which can contain anything.
-    if (o == free_space_map ||
-        o == two_pointer_filler_map ||
-        (visitable_end == space->top() && visitable_end != space->limit())) {
-      if (visitable_start != visitable_end) {
-        // After calling this the special garbage section may have moved.
-        (this->*region_callback)(visitable_start,
-                                 visitable_end,
-                                 slot_callback);
-        if (visitable_end >= space->top() && visitable_end < space->limit()) {
-          visitable_end = space->limit();
-          visitable_start = visitable_end;
-          continue;
+  while (true) {  // While the page grows (doesn't normally happen).
+    Address end_of_page = page->ObjectAreaEnd();
+    while (visitable_end < end_of_page) {
+      Object* o = *reinterpret_cast<Object**>(visitable_end);
+      // Skip fillers but not things that look like fillers in the special
+      // garbage section which can contain anything.
+      if (o == free_space_map ||
+          o == two_pointer_filler_map ||
+          (visitable_end == space->top() && visitable_end != space->limit())) {
+        if (visitable_start != visitable_end) {
+          // After calling this the special garbage section may have moved.
+          (this->*region_callback)(visitable_start,
+                                   visitable_end,
+                                   slot_callback);
+          if (visitable_end >= space->top() && visitable_end < space->limit()) {
+            visitable_end = space->limit();
+            visitable_start = visitable_end;
+            continue;
+          }
+        }
+        if (visitable_end == space->top() && visitable_end != space->limit()) {
+          visitable_start = visitable_end = space->limit();
+        } else {
+          // At this point we are either at the start of a filler or we are at
+          // the point where the space->top() used to be before the
+          // visit_pointer_region call above.  Either way we can skip the
+          // object at the current spot:  We don't promise to visit objects
+          // allocated during heap traversal, and if space->top() moved then it
+          // must be because an object was allocated at this point.
+          visitable_start =
+              visitable_end + HeapObject::FromAddress(visitable_end)->Size();
+          visitable_end = visitable_start;
         }
-      }
-      if (visitable_end == space->top() && visitable_end != space->limit()) {
-        visitable_start = visitable_end = space->limit();
       } else {
-        // At this point we are either at the start of a filler or we are at
-        // the point where the space->top() used to be before the
-        // visit_pointer_region call above.  Either way we can skip the
-        // object at the current spot:  We don't promise to visit objects
-        // allocated during heap traversal, and if space->top() moved then it
-        // must be because an object was allocated at this point.
-        visitable_start =
-            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
-        visitable_end = visitable_start;
+        ASSERT(o != free_space_map);
+        ASSERT(o != two_pointer_filler_map);
+        ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
+        visitable_end += kPointerSize;
       }
-    } else {
-      ASSERT(o != free_space_map);
-      ASSERT(o != two_pointer_filler_map);
-      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
-      visitable_end += kPointerSize;
     }
+    ASSERT(visitable_end >= end_of_page);
+    // If the page did not grow we are done.
+    if (end_of_page == page->ObjectAreaEnd()) break;
   }
-  ASSERT(visitable_end == end_of_page);
+  ASSERT(visitable_end == page->ObjectAreaEnd());
   if (visitable_start != visitable_end) {
     (this->*region_callback)(visitable_start,
                              visitable_end,
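
The store-buffer hunk above wraps the page scan in an outer loop because, with partially committed pages, page->ObjectAreaEnd() can move while the slot callbacks run (as the comment notes, this does not normally happen).  Here is a standalone sketch of the same "rescan until the end stops moving" pattern, with a plain vector standing in for the page:

#include <cstdio>
#include <vector>

static void Visit(int value) {
  std::printf("visit %d\n", value);
}

int main() {
  std::vector<int> slots;
  for (int i = 0; i < 4; i++) slots.push_back(i);

  size_t pos = 0;
  while (true) {  // While the container grows (doesn't normally happen).
    size_t end = slots.size();
    for (; pos < end; pos++) {
      Visit(slots[pos]);
      if (slots[pos] == 2) slots.push_back(100);  // simulate growth mid-scan
    }
    if (end == slots.size()) break;  // the end did not move, so we are done
  }
  return 0;
}
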
index 17bf06f..ac60f98 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -153,11 +153,9 @@ int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
 }
 
 
-// Returns the smallest power of two which is >= x. If you pass in a
-// number that is already a power of two, it is returned as is.
-// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
-// figure 3-3, page 48, where the function is called clp2.
-inline uint32_t RoundUpToPowerOf2(uint32_t x) {
+template<typename int_type>
+inline int RoundUpToPowerOf2(int_type x_argument) {
+  uintptr_t x = static_cast<uintptr_t>(x_argument);
   ASSERT(x <= 0x80000000u);
   x = x - 1;
   x = x | (x >> 1);
@@ -165,7 +163,7 @@ inline uint32_t RoundUpToPowerOf2(uint32_t x) {
   x = x | (x >> 4);
   x = x | (x >> 8);
   x = x | (x >> 16);
-  return x + 1;
+  return static_cast<int_type>(x + 1);
 }
 
 
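The utils.h change above turns RoundUpToPowerOf2 into a template so the page-growth code can apply it to pointer-sized values; the behaviour is still the clp2 bit trick from Hacker's Delight that the removed comment referred to.  A standalone sketch with a couple of sanity checks (the return type here is int_type, slightly tidier than the patched signature, so treat it as an illustration rather than a copy):

#include <cassert>
#include <stdint.h>

template <typename int_type>
static int_type RoundUpToPowerOf2(int_type x_argument) {
  uintptr_t x = static_cast<uintptr_t>(x_argument);
  assert(x <= 0x80000000u);  // same precondition as the V8 helper
  x = x - 1;
  x = x | (x >> 1);
  x = x | (x >> 2);
  x = x | (x >> 4);
  x = x | (x >> 8);
  x = x | (x >> 16);
  return static_cast<int_type>(x + 1);
}

int main() {
  assert(RoundUpToPowerOf2(40 * 1024) == 64 * 1024);  // rounds up
  assert(RoundUpToPowerOf2(32 * 1024) == 32 * 1024);  // exact powers unchanged
  assert(RoundUpToPowerOf2(1) == 1);
  return 0;
}
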
index 1e4e332..fd1f9c3 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -1236,17 +1236,14 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
        obj = iterator.next()) {
     size_of_objects_2 += obj->Size();
   }
-  // Delta must be within 5% of the larger result.
-  // TODO(gc): Tighten this up by distinguishing between byte
-  // arrays that are real and those that merely mark free space
-  // on the heap.
+  // Delta must be within 1% of the larger result.
   if (size_of_objects_1 > size_of_objects_2) {
     intptr_t delta = size_of_objects_1 - size_of_objects_2;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
            "Iterator: %" V8_PTR_PREFIX "d, "
            "delta: %" V8_PTR_PREFIX "d\n",
            size_of_objects_1, size_of_objects_2, delta);
-    CHECK_GT(size_of_objects_1 / 20, delta);
+    CHECK_GT(size_of_objects_1 / 100, delta);
   } else {
     intptr_t delta = size_of_objects_2 - size_of_objects_1;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
index 3c66c4c..4799595 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -526,12 +526,25 @@ static intptr_t MemoryInUse() {
 
 TEST(BootUpMemoryUse) {
   intptr_t initial_memory = MemoryInUse();
+  FLAG_crankshaft = false;  // Avoid flakiness.
   // Only Linux has the proc filesystem and only if it is mapped.  If it's not
   // there we just skip the test.
   if (initial_memory >= 0) {
     InitializeVM();
     intptr_t booted_memory = MemoryInUse();
-    CHECK_LE(booted_memory - initial_memory, 16 * 1024 * 1024);
+    if (sizeof(initial_memory) == 8) {
+      if (v8::internal::Snapshot::IsEnabled()) {
+        CHECK_LE(booted_memory - initial_memory, 4700 * 1024);  // 4640.
+      } else {
+        CHECK_LE(booted_memory - initial_memory, 4300 * 1024);  // 4276.
+      }
+    } else {
+      if (v8::internal::Snapshot::IsEnabled()) {
+        CHECK_LE(booted_memory - initial_memory, 3300 * 1024);  // 3276.
+      } else {
+        CHECK_LE(booted_memory - initial_memory, 3500 * 1024);  // 3416
+      }
+    }
   }
 }
 
index 6e495bc..6eb1ddd 100644
--- a/test/cctest/test-spaces.cc
+++ b/test/cctest/test-spaces.cc
@@ -140,8 +140,8 @@ TEST(MemoryAllocator) {
                        heap->MaxReserved(),
                        OLD_POINTER_SPACE,
                        NOT_EXECUTABLE);
-  Page* first_page =
-      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
+  Page* first_page = memory_allocator->AllocatePage(
+      Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);
 
   first_page->InsertAfter(faked_space.anchor()->prev_page());
   CHECK(first_page->is_valid());
@@ -154,7 +154,8 @@ TEST(MemoryAllocator) {
 
   // Again, we should get n or n - 1 pages.
   Page* other =
-      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
+      memory_allocator->AllocatePage(
+          Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);
   CHECK(other->is_valid());
   total_pages++;
   other->InsertAfter(first_page);