1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #include "src/base/bits.h"
8 #include "src/base/platform/platform.h"
9 #include "src/full-codegen.h"
10 #include "src/heap/mark-compact.h"
11 #include "src/macro-assembler.h"
12 #include "src/msan.h"
13 #include "src/snapshot.h"
14
15 namespace v8 {
16 namespace internal {
17
18
19 // ----------------------------------------------------------------------------
20 // HeapObjectIterator
21
22 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
23   // You can't actually iterate over the anchor page.  It is not a real page,
24   // just an anchor for the doubly-linked page list.  Initialize as if we have
25   // reached the end of the anchor page, so that the first iteration will move
26   // on to the first page.
27   Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
28 }
29
30
31 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
32                                        HeapObjectCallback size_func) {
33   // You can't actually iterate over the anchor page.  It is not a real page,
34   // just an anchor for the doubly-linked page list.  Initialize the current
35   // address and end as NULL, so that the first iteration will move on
36   // to the first page.
37   Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
38 }
39
40
41 HeapObjectIterator::HeapObjectIterator(Page* page,
42                                        HeapObjectCallback size_func) {
43   Space* owner = page->owner();
44   DCHECK(owner == page->heap()->old_pointer_space() ||
45          owner == page->heap()->old_data_space() ||
46          owner == page->heap()->map_space() ||
47          owner == page->heap()->cell_space() ||
48          owner == page->heap()->property_cell_space() ||
49          owner == page->heap()->code_space());
50   Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
51              page->area_end(), kOnePageOnly, size_func);
52   DCHECK(page->WasSwept() || page->SweepingCompleted());
53 }
54
55
56 void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
57                                     HeapObjectIterator::PageMode mode,
58                                     HeapObjectCallback size_f) {
59   space_ = space;
60   cur_addr_ = cur;
61   cur_end_ = end;
62   page_mode_ = mode;
63   size_func_ = size_f;
64 }
65
66
67 // We have hit the end of the current page and should advance to the next
68 // page of objects, unless the iterator is restricted to a single page.
69 bool HeapObjectIterator::AdvanceToNextPage() {
70   DCHECK(cur_addr_ == cur_end_);
71   if (page_mode_ == kOnePageOnly) return false;
72   Page* cur_page;
73   if (cur_addr_ == NULL) {
74     cur_page = space_->anchor();
75   } else {
76     cur_page = Page::FromAddress(cur_addr_ - 1);
77     DCHECK(cur_addr_ == cur_page->area_end());
78   }
79   cur_page = cur_page->next_page();
80   if (cur_page == space_->anchor()) return false;
81   cur_addr_ = cur_page->area_start();
82   cur_end_ = cur_page->area_end();
83   DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
84   return true;
85 }
86
87
88 // -----------------------------------------------------------------------------
89 // CodeRange
90
91
92 CodeRange::CodeRange(Isolate* isolate)
93     : isolate_(isolate),
94       code_range_(NULL),
95       free_list_(0),
96       allocation_list_(0),
97       current_allocation_block_index_(0),
98       emergency_block_() {}
99
100
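// Reserves a virtual memory range for code of at least |requested| bytes
// (raised to kMinimumCodeRangeSize if necessary), commits the reserved header
// pages where the platform needs them (e.g. Win64), registers the remaining
// aligned region as a single free allocation block and sets aside an
// emergency block.  Returns false if the reservation or the initial commit
// fails.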
101 bool CodeRange::SetUp(size_t requested) {
102   DCHECK(code_range_ == NULL);
103
104   if (requested == 0) {
105     // When a target requires the code range feature, we put all code objects
106     // in a kMaximalCodeRangeSize range of virtual address space, so that
107     // they can call each other with near calls.
108     if (kRequiresCodeRange) {
109       requested = kMaximalCodeRangeSize;
110     } else {
111       return true;
112     }
113   }
114
115   if (requested <= kMinimumCodeRangeSize) {
116     requested = kMinimumCodeRangeSize;
117   }
118
119   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
120   code_range_ = new base::VirtualMemory(requested);
121   CHECK(code_range_ != NULL);
122   if (!code_range_->IsReserved()) {
123     delete code_range_;
124     code_range_ = NULL;
125     return false;
126   }
127
128   // We are sure that we have mapped a block of the requested size.
129   DCHECK(code_range_->size() == requested);
130   Address base = reinterpret_cast<Address>(code_range_->address());
131
132   // On some platforms, specifically Win64, we need to reserve some pages at
133   // the beginning of an executable space.
134   if (kReservedCodeRangePages) {
135     if (!code_range_->Commit(
136             base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
137       delete code_range_;
138       code_range_ = NULL;
139       return false;
140     }
141     base += kReservedCodeRangePages * base::OS::CommitPageSize();
142   }
143   Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
144   size_t size = code_range_->size() - (aligned_base - base) -
145                 kReservedCodeRangePages * base::OS::CommitPageSize();
146   allocation_list_.Add(FreeBlock(aligned_base, size));
147   current_allocation_block_index_ = 0;
148
149   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
150   ReserveEmergencyBlock();
151   return true;
152 }
153
154
155 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
156                                        const FreeBlock* right) {
157   // The entire point of CodeRange is that the difference between two
158   // addresses in the range can be represented as a signed 32-bit int,
159   // so the cast is semantically correct.
160   return static_cast<int>(left->start - right->start);
161 }
162
163
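// Advances current_allocation_block_index_ to the next block in
// allocation_list_ that is at least |requested| bytes large.  If no such
// block is left, the free list and the allocation list are merged, sorted by
// address and coalesced, and the search is retried.  Returns false when the
// code range is exhausted or too fragmented to satisfy the request.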
164 bool CodeRange::GetNextAllocationBlock(size_t requested) {
165   for (current_allocation_block_index_++;
166        current_allocation_block_index_ < allocation_list_.length();
167        current_allocation_block_index_++) {
168     if (requested <= allocation_list_[current_allocation_block_index_].size) {
169       return true;  // Found a large enough allocation block.
170     }
171   }
172
173   // Sort and merge the free blocks on the free list and the allocation list.
174   free_list_.AddAll(allocation_list_);
175   allocation_list_.Clear();
176   free_list_.Sort(&CompareFreeBlockAddress);
177   for (int i = 0; i < free_list_.length();) {
178     FreeBlock merged = free_list_[i];
179     i++;
180     // Add adjacent free blocks to the current merged block.
181     while (i < free_list_.length() &&
182            free_list_[i].start == merged.start + merged.size) {
183       merged.size += free_list_[i].size;
184       i++;
185     }
186     if (merged.size > 0) {
187       allocation_list_.Add(merged);
188     }
189   }
190   free_list_.Clear();
191
192   for (current_allocation_block_index_ = 0;
193        current_allocation_block_index_ < allocation_list_.length();
194        current_allocation_block_index_++) {
195     if (requested <= allocation_list_[current_allocation_block_index_].size) {
196       return true;  // Found a large enough allocation block.
197     }
198   }
199   current_allocation_block_index_ = 0;
200   // Code range is full or too fragmented.
201   return false;
202 }
203
204
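// Reserves a block of at least |requested_size| bytes from the code range and
// commits its first |commit_size| bytes.  On success the start address is
// returned and |*allocated| is set to the size of the reserved block; on
// failure NULL is returned and |*allocated| is set to 0.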
205 Address CodeRange::AllocateRawMemory(const size_t requested_size,
206                                      const size_t commit_size,
207                                      size_t* allocated) {
208   DCHECK(commit_size <= requested_size);
209   FreeBlock current;
210   if (!ReserveBlock(requested_size, &current)) {
211     *allocated = 0;
212     return NULL;
213   }
214   *allocated = current.size;
215   DCHECK(*allocated <= current.size);
216   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
217   if (!isolate_->memory_allocator()->CommitExecutableMemory(
218           code_range_, current.start, commit_size, *allocated)) {
219     *allocated = 0;
220     ReleaseBlock(&current);
221     return NULL;
222   }
223   return current.start;
224 }
225
226
227 bool CodeRange::CommitRawMemory(Address start, size_t length) {
228   return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
229 }
230
231
232 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
233   return code_range_->Uncommit(start, length);
234 }
235
236
237 void CodeRange::FreeRawMemory(Address address, size_t length) {
238   DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
239   free_list_.Add(FreeBlock(address, length));
240   code_range_->Uncommit(address, length);
241 }
242
243
244 void CodeRange::TearDown() {
245   delete code_range_;  // Frees all memory in the virtual memory range.
246   code_range_ = NULL;
247   free_list_.Free();
248   allocation_list_.Free();
249 }
250
251
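// Carves a block of at least |requested_size| bytes (rounded up to the chunk
// alignment) off the front of the current allocation block.  If splitting the
// block would leave a remainder of no more than one page, the whole block is
// handed out so that no unusably small blocks remain.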
252 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
253   DCHECK(allocation_list_.length() == 0 ||
254          current_allocation_block_index_ < allocation_list_.length());
255   if (allocation_list_.length() == 0 ||
256       requested_size > allocation_list_[current_allocation_block_index_].size) {
257     // Find an allocation block large enough.
258     if (!GetNextAllocationBlock(requested_size)) return false;
259   }
260   // Reserve the requested memory at the start of the current allocation block.
261   size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
262   *block = allocation_list_[current_allocation_block_index_];
263   // Don't leave a small free block; it is useless for a large object or chunk.
264   if (aligned_requested < (block->size - Page::kPageSize)) {
265     block->size = aligned_requested;
266   }
267   DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
268   allocation_list_[current_allocation_block_index_].start += block->size;
269   allocation_list_[current_allocation_block_index_].size -= block->size;
270   return true;
271 }
272
273
274 void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
275
276
277 void CodeRange::ReserveEmergencyBlock() {
278   const size_t requested_size = MemoryAllocator::CodePageAreaSize();
279   if (emergency_block_.size == 0) {
280     ReserveBlock(requested_size, &emergency_block_);
281   } else {
282     DCHECK(emergency_block_.size >= requested_size);
283   }
284 }
285
286
287 void CodeRange::ReleaseEmergencyBlock() {
288   if (emergency_block_.size != 0) {
289     ReleaseBlock(&emergency_block_);
290     emergency_block_.size = 0;
291   }
292 }
293
294
295 // -----------------------------------------------------------------------------
296 // MemoryAllocator
297 //
298
299 MemoryAllocator::MemoryAllocator(Isolate* isolate)
300     : isolate_(isolate),
301       capacity_(0),
302       capacity_executable_(0),
303       size_(0),
304       size_executable_(0),
305       lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
306       highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
307
308
309 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
310   capacity_ = RoundUp(capacity, Page::kPageSize);
311   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
312   DCHECK_GE(capacity_, capacity_executable_);
313
314   size_ = 0;
315   size_executable_ = 0;
316
317   return true;
318 }
319
320
321 void MemoryAllocator::TearDown() {
322   // Check that spaces were torn down before MemoryAllocator.
323   DCHECK(size_ == 0);
324   // TODO(gc) this will be true again when we fix FreeMemory.
325   // DCHECK(size_executable_ == 0);
326   capacity_ = 0;
327   capacity_executable_ = 0;
328 }
329
330
331 bool MemoryAllocator::CommitMemory(Address base, size_t size,
332                                    Executability executable) {
333   if (!base::VirtualMemory::CommitRegion(base, size,
334                                          executable == EXECUTABLE)) {
335     return false;
336   }
337   UpdateAllocatedSpaceLimits(base, base + size);
338   return true;
339 }
340
341
342 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
343                                  Executability executable) {
344   // TODO(gc) make code_range part of memory allocator?
345   DCHECK(reservation->IsReserved());
346   size_t size = reservation->size();
347   DCHECK(size_ >= size);
348   size_ -= size;
349
350   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
351
352   if (executable == EXECUTABLE) {
353     DCHECK(size_executable_ >= size);
354     size_executable_ -= size;
355   }
356   // Code which is part of the code-range does not have its own VirtualMemory.
357   DCHECK(isolate_->code_range() == NULL ||
358          !isolate_->code_range()->contains(
359              static_cast<Address>(reservation->address())));
360   DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
361          !isolate_->code_range()->valid());
362   reservation->Release();
363 }
364
365
366 void MemoryAllocator::FreeMemory(Address base, size_t size,
367                                  Executability executable) {
368   // TODO(gc) make code_range part of memory allocator?
369   DCHECK(size_ >= size);
370   size_ -= size;
371
372   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
373
374   if (executable == EXECUTABLE) {
375     DCHECK(size_executable_ >= size);
376     size_executable_ -= size;
377   }
378   if (isolate_->code_range() != NULL &&
379       isolate_->code_range()->contains(static_cast<Address>(base))) {
380     DCHECK(executable == EXECUTABLE);
381     isolate_->code_range()->FreeRawMemory(base, size);
382   } else {
383     DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
384            !isolate_->code_range()->valid());
385     bool result = base::VirtualMemory::ReleaseRegion(base, size);
386     USE(result);
387     DCHECK(result);
388   }
389 }
390
391
392 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
393                                               base::VirtualMemory* controller) {
394   base::VirtualMemory reservation(size, alignment);
395
396   if (!reservation.IsReserved()) return NULL;
397   size_ += reservation.size();
398   Address base =
399       RoundUp(static_cast<Address>(reservation.address()), alignment);
400   controller->TakeControl(&reservation);
401   return base;
402 }
403
404
405 Address MemoryAllocator::AllocateAlignedMemory(
406     size_t reserve_size, size_t commit_size, size_t alignment,
407     Executability executable, base::VirtualMemory* controller) {
408   DCHECK(commit_size <= reserve_size);
409   base::VirtualMemory reservation;
410   Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
411   if (base == NULL) return NULL;
412
413   if (executable == EXECUTABLE) {
414     if (!CommitExecutableMemory(&reservation, base, commit_size,
415                                 reserve_size)) {
416       base = NULL;
417     }
418   } else {
419     if (reservation.Commit(base, commit_size, false)) {
420       UpdateAllocatedSpaceLimits(base, base + commit_size);
421     } else {
422       base = NULL;
423     }
424   }
425
426   if (base == NULL) {
427     // Failed to commit the body. Release the mapping and any partially
428     // committed regions inside it.
429     reservation.Release();
430     return NULL;
431   }
432
433   controller->TakeControl(&reservation);
434   return base;
435 }
436
437
438 void Page::InitializeAsAnchor(PagedSpace* owner) {
439   set_owner(owner);
440   set_prev_page(this);
441   set_next_page(this);
442 }
443
444
445 NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
446                                        SemiSpace* semi_space) {
447   Address area_start = start + NewSpacePage::kObjectStartOffset;
448   Address area_end = start + Page::kPageSize;
449
450   MemoryChunk* chunk =
451       MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
452                               area_end, NOT_EXECUTABLE, semi_space);
453   chunk->set_next_chunk(NULL);
454   chunk->set_prev_chunk(NULL);
455   chunk->initialize_scan_on_scavenge(true);
456   bool in_to_space = (semi_space->id() != kFromSpace);
457   chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
458                              : MemoryChunk::IN_FROM_SPACE);
459   DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
460                                        : MemoryChunk::IN_TO_SPACE));
461   NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
462   heap->incremental_marking()->SetNewSpacePageFlags(page);
463   return page;
464 }
465
466
467 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
468   set_owner(semi_space);
469   set_next_chunk(this);
470   set_prev_chunk(this);
471   // Clearing all flags marks this fake anchor page as not being in new-space.
472   // All real new-space pages have the corresponding new-space flag set.
473   SetFlags(0, ~0);
474 }
475
476
477 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
478                                      Address area_start, Address area_end,
479                                      Executability executable, Space* owner) {
480   MemoryChunk* chunk = FromAddress(base);
481
482   DCHECK(base == chunk->address());
483
484   chunk->heap_ = heap;
485   chunk->size_ = size;
486   chunk->area_start_ = area_start;
487   chunk->area_end_ = area_end;
488   chunk->flags_ = 0;
489   chunk->set_owner(owner);
490   chunk->InitializeReservedMemory();
491   chunk->slots_buffer_ = NULL;
492   chunk->skip_list_ = NULL;
493   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
494   chunk->progress_bar_ = 0;
495   chunk->high_water_mark_ = static_cast<int>(area_start - base);
496   chunk->set_parallel_sweeping(SWEEPING_DONE);
497   chunk->available_in_small_free_list_ = 0;
498   chunk->available_in_medium_free_list_ = 0;
499   chunk->available_in_large_free_list_ = 0;
500   chunk->available_in_huge_free_list_ = 0;
501   chunk->non_available_small_blocks_ = 0;
502   chunk->ResetLiveBytes();
503   Bitmap::Clear(chunk);
504   chunk->initialize_scan_on_scavenge(false);
505   chunk->SetFlag(WAS_SWEPT);
506
507   DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
508   DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
509
510   if (executable == EXECUTABLE) {
511     chunk->SetFlag(IS_EXECUTABLE);
512   }
513
514   if (owner == heap->old_data_space()) {
515     chunk->SetFlag(CONTAINS_ONLY_DATA);
516   }
517
518   return chunk;
519 }
520
521
522 // Commit MemoryChunk area to the requested size.
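// Newly required pages are committed (and zapped when garbage zapping is
// enabled) and superfluous pages are uncommitted; area_end_ is updated to
// cover exactly |requested| bytes.  Executable chunks that live inside the
// code range commit and uncommit through the CodeRange rather than through
// the chunk's own reservation.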
523 bool MemoryChunk::CommitArea(size_t requested) {
524   size_t guard_size =
525       IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
526   size_t header_size = area_start() - address() - guard_size;
527   size_t commit_size =
528       RoundUp(header_size + requested, base::OS::CommitPageSize());
529   size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
530                                   base::OS::CommitPageSize());
531
532   if (commit_size > committed_size) {
533     // The commit size should be less than or equal to the reserved size.
534     DCHECK(commit_size <= size() - 2 * guard_size);
535     // Append the committed area.
536     Address start = address() + committed_size + guard_size;
537     size_t length = commit_size - committed_size;
538     if (reservation_.IsReserved()) {
539       Executability executable =
540           IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
541       if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
542                                                                executable)) {
543         return false;
544       }
545     } else {
546       CodeRange* code_range = heap_->isolate()->code_range();
547       DCHECK(code_range != NULL && code_range->valid() &&
548              IsFlagSet(IS_EXECUTABLE));
549       if (!code_range->CommitRawMemory(start, length)) return false;
550     }
551
552     if (Heap::ShouldZapGarbage()) {
553       heap_->isolate()->memory_allocator()->ZapBlock(start, length);
554     }
555   } else if (commit_size < committed_size) {
556     DCHECK(commit_size > 0);
557     // Shrink the committed area.
558     size_t length = committed_size - commit_size;
559     Address start = address() + committed_size + guard_size - length;
560     if (reservation_.IsReserved()) {
561       if (!reservation_.Uncommit(start, length)) return false;
562     } else {
563       CodeRange* code_range = heap_->isolate()->code_range();
564       DCHECK(code_range != NULL && code_range->valid() &&
565              IsFlagSet(IS_EXECUTABLE));
566       if (!code_range->UncommitRawMemory(start, length)) return false;
567     }
568   }
569
570   area_end_ = area_start_ + requested;
571   return true;
572 }
573
574
575 void MemoryChunk::InsertAfter(MemoryChunk* other) {
576   MemoryChunk* other_next = other->next_chunk();
577
578   set_next_chunk(other_next);
579   set_prev_chunk(other);
580   other_next->set_prev_chunk(this);
581   other->set_next_chunk(this);
582 }
583
584
585 void MemoryChunk::Unlink() {
586   MemoryChunk* next_element = next_chunk();
587   MemoryChunk* prev_element = prev_chunk();
588   next_element->set_prev_chunk(prev_element);
589   prev_element->set_next_chunk(next_element);
590   set_prev_chunk(NULL);
591   set_next_chunk(NULL);
592 }
593
594
595 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
596                                             intptr_t commit_area_size,
597                                             Executability executable,
598                                             Space* owner) {
599   DCHECK(commit_area_size <= reserve_area_size);
600
601   size_t chunk_size;
602   Heap* heap = isolate_->heap();
603   Address base = NULL;
604   base::VirtualMemory reservation;
605   Address area_start = NULL;
606   Address area_end = NULL;
607
608   //
609   // MemoryChunk layout:
610   //
611   //             Executable
612   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
613   // |           Header           |
614   // +----------------------------+<- base + CodePageGuardStartOffset
615   // |           Guard            |
616   // +----------------------------+<- area_start_
617   // |           Area             |
618   // +----------------------------+<- area_end_ (area_start + commit_area_size)
619   // |   Committed but not used   |
620   // +----------------------------+<- aligned at OS page boundary
621   // | Reserved but not committed |
622   // +----------------------------+<- aligned at OS page boundary
623   // |           Guard            |
624   // +----------------------------+<- base + chunk_size
625   //
626   //           Non-executable
627   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
628   // |          Header            |
629   // +----------------------------+<- area_start_ (base + kObjectStartOffset)
630   // |           Area             |
631   // +----------------------------+<- area_end_ (area_start + commit_area_size)
632   // |  Committed but not used    |
633   // +----------------------------+<- aligned at OS page boundary
634   // | Reserved but not committed |
635   // +----------------------------+<- base + chunk_size
636   //
637
638   if (executable == EXECUTABLE) {
639     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
640                          base::OS::CommitPageSize()) +
641                  CodePageGuardSize();
642
643     // Check executable memory limit.
644     if (size_executable_ + chunk_size > capacity_executable_) {
645       LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
646                                 "V8 Executable Allocation capacity exceeded"));
647       return NULL;
648     }
649
650     // Size of header (not executable) plus area (executable).
651     size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
652                                  base::OS::CommitPageSize());
653     // Allocate executable memory either from code range or from the
654     // OS.
655     if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
656       base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
657                                                        &chunk_size);
658       DCHECK(
659           IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
660       if (base == NULL) return NULL;
661       size_ += chunk_size;
662       // Update executable memory size.
663       size_executable_ += chunk_size;
664     } else {
665       base = AllocateAlignedMemory(chunk_size, commit_size,
666                                    MemoryChunk::kAlignment, executable,
667                                    &reservation);
668       if (base == NULL) return NULL;
669       // Update executable memory size.
670       size_executable_ += reservation.size();
671     }
672
673     if (Heap::ShouldZapGarbage()) {
674       ZapBlock(base, CodePageGuardStartOffset());
675       ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
676     }
677
678     area_start = base + CodePageAreaStartOffset();
679     area_end = area_start + commit_area_size;
680   } else {
681     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
682                          base::OS::CommitPageSize());
683     size_t commit_size =
684         RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
685                 base::OS::CommitPageSize());
686     base =
687         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
688                               executable, &reservation);
689
690     if (base == NULL) return NULL;
691
692     if (Heap::ShouldZapGarbage()) {
693       ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
694     }
695
696     area_start = base + Page::kObjectStartOffset;
697     area_end = area_start + commit_area_size;
698   }
699
700   // Use chunk_size for statistics and callbacks because we assume that they
701   // treat reserved but not-yet-committed memory regions of chunks as allocated.
702   isolate_->counters()->memory_allocated()->Increment(
703       static_cast<int>(chunk_size));
704
705   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
706   if (owner != NULL) {
707     ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
708     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
709   }
710
711   MemoryChunk* result = MemoryChunk::Initialize(
712       heap, base, chunk_size, area_start, area_end, executable, owner);
713   result->set_reserved_memory(&reservation);
714   return result;
715 }
716
717
718 void Page::ResetFreeListStatistics() {
719   non_available_small_blocks_ = 0;
720   available_in_small_free_list_ = 0;
721   available_in_medium_free_list_ = 0;
722   available_in_large_free_list_ = 0;
723   available_in_huge_free_list_ = 0;
724 }
725
726
727 Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
728                                     Executability executable) {
729   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
730
731   if (chunk == NULL) return NULL;
732
733   return Page::Initialize(isolate_->heap(), chunk, executable, owner);
734 }
735
736
737 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
738                                               Space* owner,
739                                               Executability executable) {
740   MemoryChunk* chunk =
741       AllocateChunk(object_size, object_size, executable, owner);
742   if (chunk == NULL) return NULL;
743   return LargePage::Initialize(isolate_->heap(), chunk);
744 }
745
746
747 void MemoryAllocator::Free(MemoryChunk* chunk) {
748   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
749   if (chunk->owner() != NULL) {
750     ObjectSpace space =
751         static_cast<ObjectSpace>(1 << chunk->owner()->identity());
752     PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
753   }
754
755   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
756                                          chunk->IsEvacuationCandidate());
757
758   delete chunk->slots_buffer();
759   delete chunk->skip_list();
760
761   base::VirtualMemory* reservation = chunk->reserved_memory();
762   if (reservation->IsReserved()) {
763     FreeMemory(reservation, chunk->executable());
764   } else {
765     FreeMemory(chunk->address(), chunk->size(), chunk->executable());
766   }
767 }
768
769
770 bool MemoryAllocator::CommitBlock(Address start, size_t size,
771                                   Executability executable) {
772   if (!CommitMemory(start, size, executable)) return false;
773
774   if (Heap::ShouldZapGarbage()) {
775     ZapBlock(start, size);
776   }
777
778   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
779   return true;
780 }
781
782
783 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
784   if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
785   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
786   return true;
787 }
788
789
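// Fills the block with kZapValue, one pointer-sized word at a time, so that
// stale memory is easy to recognize when debugging.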
790 void MemoryAllocator::ZapBlock(Address start, size_t size) {
791   for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
792     Memory::Address_at(start + s) = kZapValue;
793   }
794 }
795
796
797 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
798                                                 AllocationAction action,
799                                                 size_t size) {
800   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
801     MemoryAllocationCallbackRegistration registration =
802         memory_allocation_callbacks_[i];
803     if ((registration.space & space) == space &&
804         (registration.action & action) == action)
805       registration.callback(space, action, static_cast<int>(size));
806   }
807 }
808
809
810 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
811     MemoryAllocationCallback callback) {
812   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
813     if (memory_allocation_callbacks_[i].callback == callback) return true;
814   }
815   return false;
816 }
817
818
819 void MemoryAllocator::AddMemoryAllocationCallback(
820     MemoryAllocationCallback callback, ObjectSpace space,
821     AllocationAction action) {
822   DCHECK(callback != NULL);
823   MemoryAllocationCallbackRegistration registration(callback, space, action);
824   DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
825   return memory_allocation_callbacks_.Add(registration);
826 }
827
828
829 void MemoryAllocator::RemoveMemoryAllocationCallback(
830     MemoryAllocationCallback callback) {
831   DCHECK(callback != NULL);
832   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
833     if (memory_allocation_callbacks_[i].callback == callback) {
834       memory_allocation_callbacks_.Remove(i);
835       return;
836     }
837   }
838   UNREACHABLE();
839 }
840
841
842 #ifdef DEBUG
843 void MemoryAllocator::ReportStatistics() {
844   float pct = static_cast<float>(capacity_ - size_) / capacity_;
845   PrintF("  capacity: %" V8_PTR_PREFIX
846          "d"
847          ", used: %" V8_PTR_PREFIX
848          "d"
849          ", available: %%%d\n\n",
850          capacity_, size_, static_cast<int>(pct * 100));
851 }
852 #endif
853
854
855 int MemoryAllocator::CodePageGuardStartOffset() {
856   // We are guarding code pages: the first OS page after the header
857   // will be protected as non-writable.
858   return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
859 }
860
861
862 int MemoryAllocator::CodePageGuardSize() {
863   return static_cast<int>(base::OS::CommitPageSize());
864 }
865
866
867 int MemoryAllocator::CodePageAreaStartOffset() {
868   // We are guarding code pages: the first OS page after the header
869   // will be protected as non-writable.
870   return CodePageGuardStartOffset() + CodePageGuardSize();
871 }
872
873
874 int MemoryAllocator::CodePageAreaEndOffset() {
875   // We are guarding code pages: the last OS page will be protected as
876   // non-writable.
877   return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
878 }
879
880
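// Commits an executable page in four steps: the non-executable page header,
// a guard page directly after the header, the executable body, and a guard
// page at the end of the reserved region.  Both AllocateAlignedMemory() and
// CodeRange::AllocateRawMemory() use this when committing code pages.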
881 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
882                                              Address start, size_t commit_size,
883                                              size_t reserved_size) {
884   // Commit page header (not executable).
885   if (!vm->Commit(start, CodePageGuardStartOffset(), false)) {
886     return false;
887   }
888
889   // Create guard page after the header.
890   if (!vm->Guard(start + CodePageGuardStartOffset())) {
891     return false;
892   }
893
894   // Commit page body (executable).
895   if (!vm->Commit(start + CodePageAreaStartOffset(),
896                   commit_size - CodePageGuardStartOffset(), true)) {
897     return false;
898   }
899
900   // Create guard page before the end.
901   if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
902     return false;
903   }
904
905   UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
906                                         commit_size -
907                                         CodePageGuardStartOffset());
908   return true;
909 }
910
911
912 // -----------------------------------------------------------------------------
913 // MemoryChunk implementation
914
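// Adjusts the live-byte count of the page containing |address| on behalf of
// the mutator.  For pages outside new space that have not been swept yet, the
// unswept free bytes of the owning space are adjusted as well so that the
// space's accounting stays consistent.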
915 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
916   MemoryChunk* chunk = MemoryChunk::FromAddress(address);
917   if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
918     static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
919   }
920   chunk->IncrementLiveBytes(by);
921 }
922
923
924 // -----------------------------------------------------------------------------
925 // PagedSpace implementation
926
927 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
928               ObjectSpace::kObjectSpaceNewSpace);
929 STATIC_ASSERT(static_cast<ObjectSpace>(1
930                                        << AllocationSpace::OLD_POINTER_SPACE) ==
931               ObjectSpace::kObjectSpaceOldPointerSpace);
932 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_DATA_SPACE) ==
933               ObjectSpace::kObjectSpaceOldDataSpace);
934 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
935               ObjectSpace::kObjectSpaceCodeSpace);
936 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CELL_SPACE) ==
937               ObjectSpace::kObjectSpaceCellSpace);
938 STATIC_ASSERT(
939     static_cast<ObjectSpace>(1 << AllocationSpace::PROPERTY_CELL_SPACE) ==
940     ObjectSpace::kObjectSpacePropertyCellSpace);
941 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
942               ObjectSpace::kObjectSpaceMapSpace);
943
944
945 PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace space,
946                        Executability executable)
947     : Space(heap, space, executable),
948       free_list_(this),
949       unswept_free_bytes_(0),
950       end_of_unswept_pages_(NULL),
951       emergency_memory_(NULL) {
952   area_size_ = MemoryAllocator::PageAreaSize(space);
953   max_capacity_ =
954       (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
955   accounting_stats_.Clear();
956
957   allocation_info_.set_top(NULL);
958   allocation_info_.set_limit(NULL);
959
960   anchor_.InitializeAsAnchor(this);
961 }
962
963
964 bool PagedSpace::SetUp() { return true; }
965
966
967 bool PagedSpace::HasBeenSetUp() { return true; }
968
969
970 void PagedSpace::TearDown() {
971   PageIterator iterator(this);
972   while (iterator.has_next()) {
973     heap()->isolate()->memory_allocator()->Free(iterator.next());
974   }
975   anchor_.set_next_page(&anchor_);
976   anchor_.set_prev_page(&anchor_);
977   accounting_stats_.Clear();
978 }
979
980
981 size_t PagedSpace::CommittedPhysicalMemory() {
982   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
983   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
984   size_t size = 0;
985   PageIterator it(this);
986   while (it.has_next()) {
987     size += it.next()->CommittedPhysicalMemory();
988   }
989   return size;
990 }
991
992
993 bool PagedSpace::ContainsSafe(Address addr) {
994   Page* p = Page::FromAddress(addr);
995   PageIterator iterator(this);
996   while (iterator.has_next()) {
997     if (iterator.next() == p) return true;
998   }
999   return false;
1000 }
1001
1002
1003 Object* PagedSpace::FindObject(Address addr) {
1004   // Note: this function can only be called on iterable spaces.
1005   DCHECK(!heap()->mark_compact_collector()->in_use());
1006
1007   if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.
1008
1009   Page* p = Page::FromAddress(addr);
1010   HeapObjectIterator it(p, NULL);
1011   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
1012     Address cur = obj->address();
1013     Address next = cur + obj->Size();
1014     if ((cur <= addr) && (addr < next)) return obj;
1015   }
1016
1017   UNREACHABLE();
1018   return Smi::FromInt(0);
1019 }
1020
1021
1022 bool PagedSpace::CanExpand() {
1023   DCHECK(max_capacity_ % AreaSize() == 0);
1024
1025   if (Capacity() == max_capacity_) return false;
1026
1027   DCHECK(Capacity() < max_capacity_);
1028
1029   // Are we going to exceed capacity for this space?
1030   if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
1031
1032   return true;
1033 }
1034
1035
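// Allocates one more page for this space and links it in at the end of the
// page list.  The very first page may be smaller than a regular page (its
// size comes from Snapshot::SizeOfFirstPage), and pages created before
// deserialization has completed are marked as never-evacuate because they may
// contain immortal immovable objects.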
1036 bool PagedSpace::Expand() {
1037   if (!CanExpand()) return false;
1038
1039   intptr_t size = AreaSize();
1040
1041   if (anchor_.next_page() == &anchor_) {
1042     size = Snapshot::SizeOfFirstPage(identity());
1043   }
1044
1045   Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
1046                                                                 executable());
1047   if (p == NULL) return false;
1048
1049   // Pages created during bootstrapping may contain immortal immovable objects.
1050   if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
1051
1052   DCHECK(Capacity() <= max_capacity_);
1053
1054   p->InsertAfter(anchor_.prev_page());
1055
1056   return true;
1057 }
1058
1059
1060 int PagedSpace::CountTotalPages() {
1061   PageIterator it(this);
1062   int count = 0;
1063   while (it.has_next()) {
1064     it.next();
1065     count++;
1066   }
1067   return count;
1068 }
1069
1070
1071 void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
1072   sizes->huge_size_ = page->available_in_huge_free_list();
1073   sizes->small_size_ = page->available_in_small_free_list();
1074   sizes->medium_size_ = page->available_in_medium_free_list();
1075   sizes->large_size_ = page->available_in_large_free_list();
1076 }
1077
1078
1079 void PagedSpace::ResetFreeListStatistics() {
1080   PageIterator page_iterator(this);
1081   while (page_iterator.has_next()) {
1082     Page* page = page_iterator.next();
1083     page->ResetFreeListStatistics();
1084   }
1085 }
1086
1087
1088 void PagedSpace::IncreaseCapacity(int size) {
1089   accounting_stats_.ExpandSpace(size);
1090 }
1091
1092
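// Releases an empty page: its free-list entries are evicted, the allocation
// top is cleared if it pointed into the page, and the page is unlinked from
// the page list.  Data-only pages are freed immediately; other pages are
// queued for deferred freeing.  The space shrinks by one page area.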
1093 void PagedSpace::ReleasePage(Page* page) {
1094   DCHECK(page->LiveBytes() == 0);
1095   DCHECK(AreaSize() == page->area_size());
1096
1097   if (page->WasSwept()) {
1098     intptr_t size = free_list_.EvictFreeListItems(page);
1099     accounting_stats_.AllocateBytes(size);
1100     DCHECK_EQ(AreaSize(), static_cast<int>(size));
1101   } else {
1102     DecreaseUnsweptFreeBytes(page);
1103   }
1104
1105   if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
1106     heap()->decrement_scan_on_scavenge_pages();
1107     page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
1108   }
1109
1110   DCHECK(!free_list_.ContainsPageFreeListItems(page));
1111
1112   if (Page::FromAllocationTop(allocation_info_.top()) == page) {
1113     allocation_info_.set_top(NULL);
1114     allocation_info_.set_limit(NULL);
1115   }
1116
1117   // If the page is still in a list, unlink it from that list.
1118   if (page->next_chunk() != NULL) {
1119     DCHECK(page->prev_chunk() != NULL);
1120     page->Unlink();
1121   }
1122
1123   if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
1124     heap()->isolate()->memory_allocator()->Free(page);
1125   } else {
1126     heap()->QueueMemoryChunkForFree(page);
1127   }
1128
1129   DCHECK(Capacity() > 0);
1130   accounting_stats_.ShrinkSpace(AreaSize());
1131 }
1132
1133
1134 void PagedSpace::CreateEmergencyMemory() {
1135   if (identity() == CODE_SPACE) {
1136     // Make the emergency block available to the allocator.
1137     CodeRange* code_range = heap()->isolate()->code_range();
1138     if (code_range != NULL && code_range->valid()) {
1139       code_range->ReleaseEmergencyBlock();
1140     }
1141     DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
1142   }
1143   emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
1144       AreaSize(), AreaSize(), executable(), this);
1145 }
1146
1147
1148 void PagedSpace::FreeEmergencyMemory() {
1149   Page* page = static_cast<Page*>(emergency_memory_);
1150   DCHECK(page->LiveBytes() == 0);
1151   DCHECK(AreaSize() == page->area_size());
1152   DCHECK(!free_list_.ContainsPageFreeListItems(page));
1153   heap()->isolate()->memory_allocator()->Free(page);
1154   emergency_memory_ = NULL;
1155 }
1156
1157
1158 void PagedSpace::UseEmergencyMemory() {
1159   Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
1160   page->InsertAfter(anchor_.prev_page());
1161   emergency_memory_ = NULL;
1162 }
1163
1164
1165 #ifdef DEBUG
1166 void PagedSpace::Print() {}
1167 #endif
1168
1169 #ifdef VERIFY_HEAP
1170 void PagedSpace::Verify(ObjectVisitor* visitor) {
1171   bool allocation_pointer_found_in_space =
1172       (allocation_info_.top() == allocation_info_.limit());
1173   PageIterator page_iterator(this);
1174   while (page_iterator.has_next()) {
1175     Page* page = page_iterator.next();
1176     CHECK(page->owner() == this);
1177     if (page == Page::FromAllocationTop(allocation_info_.top())) {
1178       allocation_pointer_found_in_space = true;
1179     }
1180     CHECK(page->WasSwept());
1181     HeapObjectIterator it(page, NULL);
1182     Address end_of_previous_object = page->area_start();
1183     Address top = page->area_end();
1184     int black_size = 0;
1185     for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1186       CHECK(end_of_previous_object <= object->address());
1187
1188       // The first word should be a map, and we expect all map pointers to
1189       // be in map space.
1190       Map* map = object->map();
1191       CHECK(map->IsMap());
1192       CHECK(heap()->map_space()->Contains(map));
1193
1194       // Perform space-specific object verification.
1195       VerifyObject(object);
1196
1197       // The object itself should look OK.
1198       object->ObjectVerify();
1199
1200       // All the interior pointers should be contained in the heap.
1201       int size = object->Size();
1202       object->IterateBody(map->instance_type(), size, visitor);
1203       if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1204         black_size += size;
1205       }
1206
1207       CHECK(object->address() + size <= top);
1208       end_of_previous_object = object->address() + size;
1209     }
1210     CHECK_LE(black_size, page->LiveBytes());
1211   }
1212   CHECK(allocation_pointer_found_in_space);
1213 }
1214 #endif  // VERIFY_HEAP
1215
1216 // -----------------------------------------------------------------------------
1217 // NewSpace implementation
1218
1219
1220 bool NewSpace::SetUp(int reserved_semispace_capacity,
1221                      int maximum_semispace_capacity) {
1222   // Set up the new space from a freshly reserved memory block of twice the
1223   // reserved semispace capacity. The block is divided into two semi-spaces.
1224   // To support fast containment testing in the new space, the size of
1225   // this chunk must be a power of two and it must be aligned to its size.
1226   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1227
1228   int target_semispace_capacity = heap()->TargetSemiSpaceSize();
1229
1230   size_t size = 2 * reserved_semispace_capacity;
1231   Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
1232       size, size, &reservation_);
1233   if (base == NULL) return false;
1234
1235   chunk_base_ = base;
1236   chunk_size_ = static_cast<uintptr_t>(size);
1237   LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1238
1239   DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
1240   DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
1241
1242   // Allocate and set up the histogram arrays if necessary.
1243   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1244   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1245
1246 #define SET_NAME(name)                        \
1247   allocated_histogram_[name].set_name(#name); \
1248   promoted_histogram_[name].set_name(#name);
1249   INSTANCE_TYPE_LIST(SET_NAME)
1250 #undef SET_NAME
1251
1252   DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1253   DCHECK(static_cast<intptr_t>(chunk_size_) >=
1254          2 * heap()->ReservedSemiSpaceSize());
1255   DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1256
1257   to_space_.SetUp(chunk_base_, initial_semispace_capacity,
1258                   target_semispace_capacity, maximum_semispace_capacity);
1259   from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1260                     initial_semispace_capacity, target_semispace_capacity,
1261                     maximum_semispace_capacity);
1262   if (!to_space_.Commit()) {
1263     return false;
1264   }
1265   DCHECK(!from_space_.is_committed());  // No need to use memory yet.
1266
1267   start_ = chunk_base_;
1268   address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1269   object_mask_ = address_mask_ | kHeapObjectTagMask;
1270   object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1271
1272   ResetAllocationInfo();
1273
1274   return true;
1275 }
1276
1277
1278 void NewSpace::TearDown() {
1279   if (allocated_histogram_) {
1280     DeleteArray(allocated_histogram_);
1281     allocated_histogram_ = NULL;
1282   }
1283   if (promoted_histogram_) {
1284     DeleteArray(promoted_histogram_);
1285     promoted_histogram_ = NULL;
1286   }
1287
1288   start_ = NULL;
1289   allocation_info_.set_top(NULL);
1290   allocation_info_.set_limit(NULL);
1291
1292   to_space_.TearDown();
1293   from_space_.TearDown();
1294
1295   LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1296
1297   DCHECK(reservation_.IsReserved());
1298   heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1299                                                     NOT_EXECUTABLE);
1300   chunk_base_ = NULL;
1301   chunk_size_ = 0;
1302 }
1303
1304
1305 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
1306
1307
1308 void NewSpace::Grow() {
1309   // Double the semispace size but only up to maximum capacity.
1310   DCHECK(TotalCapacity() < MaximumCapacity());
1311   int new_capacity =
1312       Min(MaximumCapacity(),
1313           FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
1314   if (to_space_.GrowTo(new_capacity)) {
1315     // Only grow from space if we managed to grow to-space.
1316     if (!from_space_.GrowTo(new_capacity)) {
1317       // If we managed to grow to-space but couldn't grow from-space,
1318       // attempt to shrink to-space.
1319       if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
1320         // We are in an inconsistent state because we could not
1321         // commit/uncommit memory from new space.
1322         CHECK(false);
1323       }
1324     }
1325   }
1326   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1327 }
1328
1329
1330 bool NewSpace::GrowOnePage() {
1331   if (TotalCapacity() == MaximumCapacity()) return false;
1332   int new_capacity = static_cast<int>(TotalCapacity()) + Page::kPageSize;
1333   if (to_space_.GrowTo(new_capacity)) {
1334     // Only grow from space if we managed to grow to-space and the from space
1335     // is actually committed.
1336     if (from_space_.is_committed()) {
1337       if (!from_space_.GrowTo(new_capacity)) {
1338         // If we managed to grow to-space but couldn't grow from-space,
1339         // attempt to shrink to-space.
1340         if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
1341           // We are in an inconsistent state because we could not
1342           // commit/uncommit memory from new space.
1343           CHECK(false);
1344         }
1345         return false;
1346       }
1347     } else {
1348       if (!from_space_.SetTotalCapacity(new_capacity)) {
1349         // Can't really happen, but better safe than sorry.
1350         CHECK(false);
1351       }
1352     }
1353     DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1354     return true;
1355   }
1356   return false;
1357 }
1358
1359
1360 void NewSpace::Shrink() {
1361   int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
1362   int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1363   if (rounded_new_capacity < TotalCapacity() &&
1364       to_space_.ShrinkTo(rounded_new_capacity)) {
1365     // Only shrink from-space if we managed to shrink to-space.
1366     from_space_.Reset();
1367     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1368       // If we managed to shrink to-space but couldn't shrink from
1369       // space, attempt to grow to-space again.
1370       if (!to_space_.GrowTo(from_space_.TotalCapacity())) {
1371         // We are in an inconsistent state because we could not
1372         // commit/uncommit memory from new space.
1373         CHECK(false);
1374       }
1375     }
1376   }
1377   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1378 }
1379
1380
1381 void NewSpace::UpdateAllocationInfo() {
1382   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1383   allocation_info_.set_top(to_space_.page_low());
1384   allocation_info_.set_limit(to_space_.page_high());
1385   UpdateInlineAllocationLimit(0);
1386   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1387 }
1388
1389
1390 void NewSpace::ResetAllocationInfo() {
1391   to_space_.Reset();
1392   UpdateAllocationInfo();
1393   pages_used_ = 0;
1394   // Clear all mark-bits in the to-space.
1395   NewSpacePageIterator it(&to_space_);
1396   while (it.has_next()) {
1397     Bitmap::Clear(it.next());
1398   }
1399 }
1400
1401
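// Recomputes the inline allocation limit.  When inline allocation is disabled
// the limit covers only the requested allocation, when no limit step is
// configured it is the end of the current page, and otherwise it is placed
// inline_allocation_limit_step_ bytes beyond the new top (capped at the page
// end) so that allocation regularly drops into the slow path during
// incremental marking.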
1402 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1403   if (heap()->inline_allocation_disabled()) {
1404     // Use the lowest possible limit while linear allocation is disabled.
1405     Address high = to_space_.page_high();
1406     Address new_top = allocation_info_.top() + size_in_bytes;
1407     allocation_info_.set_limit(Min(new_top, high));
1408   } else if (inline_allocation_limit_step() == 0) {
1409     // Normal limit is the end of the current page.
1410     allocation_info_.set_limit(to_space_.page_high());
1411   } else {
1412     // Lower limit during incremental marking.
1413     Address high = to_space_.page_high();
1414     Address new_top = allocation_info_.top() + size_in_bytes;
1415     Address new_limit = new_top + inline_allocation_limit_step_;
1416     allocation_info_.set_limit(Min(new_limit, high));
1417   }
1418   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1419 }
1420
1421
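// Moves the to-space allocation area to a fresh page.  Fails if the current
// page is still completely empty (the request was larger than a page) or if
// no further page can be committed.  The unused tail of the old page is
// turned into a filler object so that the space remains iterable.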
1422 bool NewSpace::AddFreshPage() {
1423   Address top = allocation_info_.top();
1424   if (NewSpacePage::IsAtStart(top)) {
1425     // The current page is already empty. Don't try to make another.
1426
1427     // We should only get here if someone asks to allocate more
1428     // than what can be stored in a single page.
1429     // TODO(gc): Change the limit on new-space allocation to prevent this
1430     // from happening (all such allocations should go directly to LOSpace).
1431     return false;
1432   }
1433   if (!to_space_.AdvancePage()) {
1434     // Check if we reached the target capacity yet. If not, try to commit a page
1435     // and continue.
1436     if ((to_space_.TotalCapacity() < to_space_.TargetCapacity()) &&
1437         GrowOnePage()) {
1438       if (!to_space_.AdvancePage()) {
1439         // It doesn't make sense that we managed to commit a page, but can't use
1440         // it.
1441         CHECK(false);
1442       }
1443     } else {
1444       // Failed to get a new page in to-space.
1445       return false;
1446     }
1447   }
1448
1449   // Clear remainder of current page.
1450   Address limit = NewSpacePage::FromLimit(top)->area_end();
1451   if (heap()->gc_state() == Heap::SCAVENGE) {
1452     heap()->promotion_queue()->SetNewLimit(limit);
1453   }
1454
1455   int remaining_in_page = static_cast<int>(limit - top);
1456   heap()->CreateFillerObjectAt(top, remaining_in_page);
1457   pages_used_++;
1458   UpdateAllocationInfo();
1459
1460   return true;
1461 }
1462
1463
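// Slow path of AllocateRaw(): either the artificially lowered limit was hit,
// in which case an incremental marking step is performed and the limit is
// recomputed, or the current page is exhausted and a fresh page is added.  In
// both cases the allocation is retried; if no fresh page can be added,
// AllocationResult::Retry() is returned.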
1464 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
1465   Address old_top = allocation_info_.top();
1466   Address high = to_space_.page_high();
1467   if (allocation_info_.limit() < high) {
1468     // The limit has been lowered either because linear allocation was disabled
1469     // or because incremental marking wants a chance to do a step. Set the new
1470     // limit accordingly.
1471     Address new_top = old_top + size_in_bytes;
1472     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1473     heap()->incremental_marking()->Step(bytes_allocated,
1474                                         IncrementalMarking::GC_VIA_STACK_GUARD);
1475     UpdateInlineAllocationLimit(size_in_bytes);
1476     top_on_previous_step_ = new_top;
1477     return AllocateRaw(size_in_bytes);
1478   } else if (AddFreshPage()) {
1479     // Switched to new page. Try allocating again.
1480     int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1481     heap()->incremental_marking()->Step(bytes_allocated,
1482                                         IncrementalMarking::GC_VIA_STACK_GUARD);
1483     top_on_previous_step_ = to_space_.page_low();
1484     return AllocateRaw(size_in_bytes);
1485   } else {
1486     return AllocationResult::Retry();
1487   }
1488 }
1489
1490
1491 #ifdef VERIFY_HEAP
1492 // We do not use the SemiSpaceIterator because verification doesn't assume
1493 // that it works (it depends on the invariants we are checking).
1494 void NewSpace::Verify() {
1495   // The allocation pointer should be in the space or at the very end.
1496   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1497
1498   // There should be objects packed in from the low address up to the
1499   // allocation pointer.
1500   Address current = to_space_.first_page()->area_start();
1501   CHECK_EQ(current, to_space_.space_start());
1502
1503   while (current != top()) {
1504     if (!NewSpacePage::IsAtEnd(current)) {
1505       // The allocation pointer should not be in the middle of an object.
1506       CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1507             current < top());
1508
1509       HeapObject* object = HeapObject::FromAddress(current);
1510
1511       // The first word should be a map, and we expect all map pointers to
1512       // be in map space.
1513       Map* map = object->map();
1514       CHECK(map->IsMap());
1515       CHECK(heap()->map_space()->Contains(map));
1516
1517       // The object should not be code or a map.
1518       CHECK(!object->IsMap());
1519       CHECK(!object->IsCode());
1520
1521       // The object itself should look OK.
1522       object->ObjectVerify();
1523
1524       // All the interior pointers should be contained in the heap.
1525       VerifyPointersVisitor visitor;
1526       int size = object->Size();
1527       object->IterateBody(map->instance_type(), size, &visitor);
1528
1529       current += size;
1530     } else {
1531       // At end of page, switch to next page.
1532       NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1533       // Next page should be valid.
1534       CHECK(!page->is_anchor());
1535       current = page->area_start();
1536     }
1537   }
1538
1539   // Check semi-spaces.
1540   CHECK_EQ(from_space_.id(), kFromSpace);
1541   CHECK_EQ(to_space_.id(), kToSpace);
1542   from_space_.Verify();
1543   to_space_.Verify();
1544 }
1545 #endif
1546
1547 // -----------------------------------------------------------------------------
1548 // SemiSpace implementation
1549
1550 void SemiSpace::SetUp(Address start, int initial_capacity, int target_capacity,
1551                       int maximum_capacity) {
1552   // Creates a space in the young generation. The constructor does not
1553   // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
1554   // memory of size 'capacity' when set up, and does not grow or shrink
1555   // otherwise.  In the mark-compact collector, the memory region of the from
1556   // space is used as the marking stack. It requires contiguous memory
1557   // addresses.
1558   DCHECK(maximum_capacity >= Page::kPageSize);
1559   DCHECK(initial_capacity <= target_capacity);
1560   DCHECK(target_capacity <= maximum_capacity);
1561   initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1562   total_capacity_ = initial_capacity;
1563   target_capacity_ = RoundDown(target_capacity, Page::kPageSize);
1564   maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1565   maximum_committed_ = 0;
1566   committed_ = false;
1567   start_ = start;
1568   address_mask_ = ~(maximum_capacity - 1);
1569   object_mask_ = address_mask_ | kHeapObjectTagMask;
1570   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1571   age_mark_ = start_;
1572 }
1573
1574
1575 void SemiSpace::TearDown() {
1576   start_ = NULL;
1577   total_capacity_ = 0;
1578 }
1579
1580
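// Commits the semispace's reserved memory and links one NewSpacePage per
// committed page into the anchor's circular page list.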
1581 bool SemiSpace::Commit() {
1582   DCHECK(!is_committed());
1583   int pages = total_capacity_ / Page::kPageSize;
1584   if (!heap()->isolate()->memory_allocator()->CommitBlock(
1585           start_, total_capacity_, executable())) {
1586     return false;
1587   }
1588
1589   NewSpacePage* current = anchor();
1590   for (int i = 0; i < pages; i++) {
1591     NewSpacePage* new_page =
1592         NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
1593     new_page->InsertAfter(current);
1594     current = new_page;
1595   }
1596
1597   SetCapacity(total_capacity_);
1598   committed_ = true;
1599   Reset();
1600   return true;
1601 }
1602
1603
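// Uncommits the committed portion of the semispace and resets the page list
// so that only the anchor remains.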
1604 bool SemiSpace::Uncommit() {
1605   DCHECK(is_committed());
1606   Address start = start_ + maximum_total_capacity_ - total_capacity_;
1607   if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
1608                                                             total_capacity_)) {
1609     return false;
1610   }
1611   anchor()->set_next_page(anchor());
1612   anchor()->set_prev_page(anchor());
1613
1614   committed_ = false;
1615   return true;
1616 }
1617
1618
1619 size_t SemiSpace::CommittedPhysicalMemory() {
1620   if (!is_committed()) return 0;
1621   size_t size = 0;
1622   NewSpacePageIterator it(this);
1623   while (it.has_next()) {
1624     size += it.next()->CommittedPhysicalMemory();
1625   }
1626   return size;
1627 }
1628
1629
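// Grows the committed capacity to new_capacity by committing the additional
// memory and appending freshly initialized pages after the current last page.
// The flags of the last existing page are copied to the new pages.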
1630 bool SemiSpace::GrowTo(int new_capacity) {
1631   if (!is_committed()) {
1632     if (!Commit()) return false;
1633   }
1634   DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1635   DCHECK(new_capacity <= maximum_total_capacity_);
1636   DCHECK(new_capacity > total_capacity_);
1637   int pages_before = total_capacity_ / Page::kPageSize;
1638   int pages_after = new_capacity / Page::kPageSize;
1639
1640   size_t delta = new_capacity - total_capacity_;
1641
1642   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1643   if (!heap()->isolate()->memory_allocator()->CommitBlock(
1644           start_ + total_capacity_, delta, executable())) {
1645     return false;
1646   }
1647   SetCapacity(new_capacity);
1648   NewSpacePage* last_page = anchor()->prev_page();
1649   DCHECK(last_page != anchor());
1650   for (int i = pages_before; i < pages_after; i++) {
1651     Address page_address = start_ + i * Page::kPageSize;
1652     NewSpacePage* new_page =
1653         NewSpacePage::Initialize(heap(), page_address, this);
1654     new_page->InsertAfter(last_page);
1655     Bitmap::Clear(new_page);
1656     // Duplicate the flags that were set on the old page.
1657     new_page->SetFlags(last_page->GetFlags(),
1658                        NewSpacePage::kCopyOnFlipFlagsMask);
1659     last_page = new_page;
1660   }
1661   return true;
1662 }
1663
1664
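// Shrinks the committed capacity to new_capacity by uncommitting the tail of
// the committed region and unlinking the pages that lived there.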
1665 bool SemiSpace::ShrinkTo(int new_capacity) {
1666   DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1667   DCHECK(new_capacity >= initial_total_capacity_);
1668   DCHECK(new_capacity < total_capacity_);
1669   if (is_committed()) {
1670     size_t delta = total_capacity_ - new_capacity;
1671     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1672
1673     MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1674     if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
1675       return false;
1676     }
1677
1678     int pages_after = new_capacity / Page::kPageSize;
1679     NewSpacePage* new_last_page =
1680         NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1681     new_last_page->set_next_page(anchor());
1682     anchor()->set_prev_page(new_last_page);
1683     DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
1684   }
1685
1686   SetCapacity(new_capacity);
1687
1688   return true;
1689 }
1690
1691
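// Adjusts the capacity bookkeeping without committing or uncommitting memory.
// Only legal while the semispace is uncommitted; fails if new_capacity is not
// within [initial_total_capacity_, maximum_total_capacity_].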
1692 bool SemiSpace::SetTotalCapacity(int new_capacity) {
1693   CHECK(!is_committed());
1694   if (new_capacity >= initial_total_capacity_ &&
1695       new_capacity <= maximum_total_capacity_) {
1696     total_capacity_ = new_capacity;
1697     return true;
1698   }
1699   return false;
1700 }
1701
1702
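// Called from Swap(): re-establishes this space as the owner of its pages,
// flips the to-space/from-space identity, and updates the per-page flags
// accordingly.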
1703 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1704   anchor_.set_owner(this);
1705   // Fix up back-pointers to the anchor.  The anchor's address changes
1706   // when we swap.
1707   anchor_.prev_page()->set_next_page(&anchor_);
1708   anchor_.next_page()->set_prev_page(&anchor_);
1709
1710   bool becomes_to_space = (id_ == kFromSpace);
1711   id_ = becomes_to_space ? kToSpace : kFromSpace;
1712   NewSpacePage* page = anchor_.next_page();
1713   while (page != &anchor_) {
1714     page->set_owner(this);
1715     page->SetFlags(flags, mask);
1716     if (becomes_to_space) {
1717       page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1718       page->SetFlag(MemoryChunk::IN_TO_SPACE);
1719       page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1720       page->ResetLiveBytes();
1721     } else {
1722       page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1723       page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1724     }
1725     DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1726     DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1727            page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1728     page = page->next_page();
1729   }
1730 }
1731
1732
1733 void SemiSpace::Reset() {
1734   DCHECK(anchor_.next_page() != &anchor_);
1735   current_page_ = anchor_.next_page();
1736 }
1737
1738
1739 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1740   // We won't be swapping semispaces without data in them.
1741   DCHECK(from->anchor_.next_page() != &from->anchor_);
1742   DCHECK(to->anchor_.next_page() != &to->anchor_);
1743
1744   // Swap bits.
1745   SemiSpace tmp = *from;
1746   *from = *to;
1747   *to = tmp;
1748
1749   // Fixup back-pointers to the page list anchor now that its address
1750   // has changed.
1751   // Swap to/from-space bits on pages.
1752   // Copy GC flags from old active space (from-space) to new (to-space).
1753   intptr_t flags = from->current_page()->GetFlags();
1754   to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1755
1756   from->FlipPages(0, 0);
1757 }
1758
1759
1760 void SemiSpace::SetCapacity(int new_capacity) {
1761   total_capacity_ = new_capacity;
1762   if (total_capacity_ > maximum_committed_) {
1763     maximum_committed_ = total_capacity_;
1764   }
1765 }
1766
1767
1768 void SemiSpace::set_age_mark(Address mark) {
1769   DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
1770   age_mark_ = mark;
1771   // Mark all pages up to the one containing mark.
1772   NewSpacePageIterator it(space_start(), mark);
1773   while (it.has_next()) {
1774     it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1775   }
1776 }
1777
1778
1779 #ifdef DEBUG
1780 void SemiSpace::Print() {}
1781 #endif
1782
1783 #ifdef VERIFY_HEAP
1784 void SemiSpace::Verify() {
1785   bool is_from_space = (id_ == kFromSpace);
1786   NewSpacePage* page = anchor_.next_page();
1787   CHECK(anchor_.semi_space() == this);
1788   while (page != &anchor_) {
1789     CHECK(page->semi_space() == this);
1790     CHECK(page->InNewSpace());
1791     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1792                                         : MemoryChunk::IN_TO_SPACE));
1793     CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1794                                          : MemoryChunk::IN_FROM_SPACE));
1795     CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1796     if (!is_from_space) {
1797       // The pointers-from-here-are-interesting flag isn't updated dynamically
1798       // on from-space pages, so it might be out of sync with the marking state.
1799       if (page->heap()->incremental_marking()->IsMarking()) {
1800         CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1801       } else {
1802         CHECK(
1803             !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1804       }
1805       // TODO(gc): Check that the live_bytes_count_ field matches the
1806       // black marking on the page (if we make it match in new-space).
1807     }
1808     CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1809     CHECK(page->prev_page()->next_page() == page);
1810     page = page->next_page();
1811   }
1812 }
1813 #endif
1814
1815 #ifdef DEBUG
1816 void SemiSpace::AssertValidRange(Address start, Address end) {
1817   // Both addresses must belong to the same semi-space.
1818   NewSpacePage* page = NewSpacePage::FromLimit(start);
1819   NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1820   SemiSpace* space = page->semi_space();
1821   CHECK_EQ(space, end_page->semi_space());
1822   // Start address is before end address, either on same page,
1823   // or end address is on a later page in the linked list of
1824   // semi-space pages.
1825   if (page == end_page) {
1826     CHECK(start <= end);
1827   } else {
1828     while (page != end_page) {
1829       page = page->next_page();
1830       CHECK_NE(page, space->anchor());
1831     }
1832   }
1833 }
1834 #endif
1835
1836
1837 // -----------------------------------------------------------------------------
1838 // SemiSpaceIterator implementation.
1839 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1840   Initialize(space->bottom(), space->top(), NULL);
1841 }
1842
1843
1844 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1845                                      HeapObjectCallback size_func) {
1846   Initialize(space->bottom(), space->top(), size_func);
1847 }
1848
1849
1850 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1851   Initialize(start, space->top(), NULL);
1852 }
1853
1854
1855 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1856   Initialize(from, to, NULL);
1857 }
1858
1859
1860 void SemiSpaceIterator::Initialize(Address start, Address end,
1861                                    HeapObjectCallback size_func) {
1862   SemiSpace::AssertValidRange(start, end);
1863   current_ = start;
1864   limit_ = end;
1865   size_func_ = size_func;
1866 }
1867
1868
1869 #ifdef DEBUG
1870 // heap_histograms is shared, always clear it before using it.
1871 static void ClearHistograms(Isolate* isolate) {
1872 // We reset the name each time, though it hasn't changed.
1873 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1874   INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1875 #undef DEF_TYPE_NAME
1876
1877 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1878   INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1879 #undef CLEAR_HISTOGRAM
1880
1881   isolate->js_spill_information()->Clear();
1882 }
1883
1884
1885 static void ClearCodeKindStatistics(int* code_kind_statistics) {
1886   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1887     code_kind_statistics[i] = 0;
1888   }
1889 }
1890
1891
1892 static void ReportCodeKindStatistics(int* code_kind_statistics) {
1893   PrintF("\n   Code kind histograms: \n");
1894   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1895     if (code_kind_statistics[i] > 0) {
1896       PrintF("     %-20s: %10d bytes\n",
1897              Code::Kind2String(static_cast<Code::Kind>(i)),
1898              code_kind_statistics[i]);
1899     }
1900   }
1901   PrintF("\n");
1902 }
1903
1904
1905 static int CollectHistogramInfo(HeapObject* obj) {
1906   Isolate* isolate = obj->GetIsolate();
1907   InstanceType type = obj->map()->instance_type();
1908   DCHECK(0 <= type && type <= LAST_TYPE);
1909   DCHECK(isolate->heap_histograms()[type].name() != NULL);
1910   isolate->heap_histograms()[type].increment_number(1);
1911   isolate->heap_histograms()[type].increment_bytes(obj->Size());
1912
1913   if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1914     JSObject::cast(obj)
1915         ->IncrementSpillStatistics(isolate->js_spill_information());
1916   }
1917
1918   return obj->Size();
1919 }
1920
1921
1922 static void ReportHistogram(Isolate* isolate, bool print_spill) {
1923   PrintF("\n  Object Histogram:\n");
1924   for (int i = 0; i <= LAST_TYPE; i++) {
1925     if (isolate->heap_histograms()[i].number() > 0) {
1926       PrintF("    %-34s%10d (%10d bytes)\n",
1927              isolate->heap_histograms()[i].name(),
1928              isolate->heap_histograms()[i].number(),
1929              isolate->heap_histograms()[i].bytes());
1930     }
1931   }
1932   PrintF("\n");
1933
1934   // Summarize string types.
1935   int string_number = 0;
1936   int string_bytes = 0;
1937 #define INCREMENT(type, size, name, camel_name)               \
1938   string_number += isolate->heap_histograms()[type].number(); \
1939   string_bytes += isolate->heap_histograms()[type].bytes();
1940   STRING_TYPE_LIST(INCREMENT)
1941 #undef INCREMENT
1942   if (string_number > 0) {
1943     PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1944            string_bytes);
1945   }
1946
1947   if (FLAG_collect_heap_spill_statistics && print_spill) {
1948     isolate->js_spill_information()->Print();
1949   }
1950 }
1951 #endif  // DEBUG
1952
1953
1954 // Support for statistics gathering for --heap-stats and --log-gc.
1955 void NewSpace::ClearHistograms() {
1956   for (int i = 0; i <= LAST_TYPE; i++) {
1957     allocated_histogram_[i].clear();
1958     promoted_histogram_[i].clear();
1959   }
1960 }
1961
1962
1963 // Because the copying collector does not touch garbage objects, we iterate
1964 // the new space before a collection to get a histogram of allocated objects.
1965 // This only happens when the --log-gc flag is set.
1966 void NewSpace::CollectStatistics() {
1967   ClearHistograms();
1968   SemiSpaceIterator it(this);
1969   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1970     RecordAllocation(obj);
1971 }
1972
1973
1974 static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
1975                                const char* description) {
1976   LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1977   // Lump all the string types together.
1978   int string_number = 0;
1979   int string_bytes = 0;
1980 #define INCREMENT(type, size, name, camel_name) \
1981   string_number += info[type].number();         \
1982   string_bytes += info[type].bytes();
1983   STRING_TYPE_LIST(INCREMENT)
1984 #undef INCREMENT
1985   if (string_number > 0) {
1986     LOG(isolate,
1987         HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1988   }
1989
1990   // Then do the other types.
1991   for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1992     if (info[i].number() > 0) {
1993       LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
1994                                        info[i].bytes()));
1995     }
1996   }
1997   LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1998 }
1999
2000
2001 void NewSpace::ReportStatistics() {
2002 #ifdef DEBUG
2003   if (FLAG_heap_stats) {
2004     float pct = static_cast<float>(Available()) / TotalCapacity();
2005     PrintF("  capacity: %" V8_PTR_PREFIX
2006            "d"
2007            ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2008            TotalCapacity(), Available(), static_cast<int>(pct * 100));
2009     PrintF("\n  Object Histogram:\n");
2010     for (int i = 0; i <= LAST_TYPE; i++) {
2011       if (allocated_histogram_[i].number() > 0) {
2012         PrintF("    %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
2013                allocated_histogram_[i].number(),
2014                allocated_histogram_[i].bytes());
2015       }
2016     }
2017     PrintF("\n");
2018   }
2019 #endif  // DEBUG
2020
2021   if (FLAG_log_gc) {
2022     Isolate* isolate = heap()->isolate();
2023     DoReportStatistics(isolate, allocated_histogram_, "allocated");
2024     DoReportStatistics(isolate, promoted_histogram_, "promoted");
2025   }
2026 }
2027
2028
2029 void NewSpace::RecordAllocation(HeapObject* obj) {
2030   InstanceType type = obj->map()->instance_type();
2031   DCHECK(0 <= type && type <= LAST_TYPE);
2032   allocated_histogram_[type].increment_number(1);
2033   allocated_histogram_[type].increment_bytes(obj->Size());
2034 }
2035
2036
2037 void NewSpace::RecordPromotion(HeapObject* obj) {
2038   InstanceType type = obj->map()->instance_type();
2039   DCHECK(0 <= type && type <= LAST_TYPE);
2040   promoted_histogram_[type].increment_number(1);
2041   promoted_histogram_[type].increment_bytes(obj->Size());
2042 }
2043
2044
2045 size_t NewSpace::CommittedPhysicalMemory() {
2046   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2047   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2048   size_t size = to_space_.CommittedPhysicalMemory();
2049   if (from_space_.is_committed()) {
2050     size += from_space_.CommittedPhysicalMemory();
2051   }
2052   return size;
2053 }
2054
2055
2056 // -----------------------------------------------------------------------------
2057 // Free lists for old object spaces implementation
2058
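// Moves all nodes of the given category onto this one and returns the number
// of bytes that were transferred.  Both category mutexes are held while the
// lists are spliced together.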
2059 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
2060   intptr_t free_bytes = 0;
2061   if (category->top() != NULL) {
2062     // Taking both locks is safe (it cannot deadlock) because Concatenate
2063     // operations are never performed on the same pair of free lists at the
2064     // same time in the reverse order.
2065     base::LockGuard<base::Mutex> target_lock_guard(mutex());
2066     base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
2067     DCHECK(category->end_ != NULL);
2068     free_bytes = category->available();
2069     if (end_ == NULL) {
2070       end_ = category->end();
2071     } else {
2072       category->end()->set_next(top());
2073     }
2074     set_top(category->top());
2075     base::NoBarrier_Store(&top_, category->top_);
2076     available_ += category->available();
2077     category->Reset();
2078   }
2079   return free_bytes;
2080 }
2081
2082
2083 void FreeListCategory::Reset() {
2084   set_top(NULL);
2085   set_end(NULL);
2086   set_available(0);
2087 }
2088
2089
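// Unlinks every free-list node that lives on page p and returns the total
// number of bytes removed from this category.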
2090 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
2091   int sum = 0;
2092   FreeSpace* t = top();
2093   FreeSpace** n = &t;
2094   while (*n != NULL) {
2095     if (Page::FromAddress((*n)->address()) == p) {
2096       FreeSpace* free_space = *n;
2097       sum += free_space->Size();
2098       *n = (*n)->next();
2099     } else {
2100       n = (*n)->next_address();
2101     }
2102   }
2103   set_top(t);
2104   if (top() == NULL) {
2105     set_end(NULL);
2106   }
2107   available_ -= sum;
2108   return sum;
2109 }
2110
2111
2112 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
2113   FreeSpace* node = top();
2114   while (node != NULL) {
2115     if (Page::FromAddress(node->address()) == p) return true;
2116     node = node->next();
2117   }
2118   return false;
2119 }
2120
2121
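// Pops the first node that is not on an evacuation candidate page.  Nodes on
// evacuation candidate pages are dropped from the list along the way.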
2122 FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
2123   FreeSpace* node = top();
2124
2125   if (node == NULL) return NULL;
2126
2127   while (node != NULL &&
2128          Page::FromAddress(node->address())->IsEvacuationCandidate()) {
2129     available_ -= node->Size();
2130     node = node->next();
2131   }
2132
2133   if (node != NULL) {
2134     set_top(node->next());
2135     *node_size = node->Size();
2136     available_ -= *node_size;
2137   } else {
2138     set_top(NULL);
2139   }
2140
2141   if (top() == NULL) {
2142     set_end(NULL);
2143   }
2144
2145   return node;
2146 }
2147
2148
2149 FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
2150                                               int* node_size) {
2151   FreeSpace* node = PickNodeFromList(node_size);
2152   if (node != NULL && *node_size < size_in_bytes) {
2153     Free(node, *node_size);
2154     *node_size = 0;
2155     return NULL;
2156   }
2157   return node;
2158 }
2159
2160
2161 void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
2162   DCHECK_LE(FreeList::kSmallListMin, size_in_bytes);
2163   free_space->set_next(top());
2164   set_top(free_space);
2165   if (end_ == NULL) {
2166     end_ = free_space;
2167   }
2168   available_ += size_in_bytes;
2169 }
2170
2171
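// After deserialization the free-space map did not exist yet, so free-list
// nodes may have a NULL map slot.  Write the free-space map into those slots.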
2172 void FreeListCategory::RepairFreeList(Heap* heap) {
2173   FreeSpace* n = top();
2174   while (n != NULL) {
2175     Map** map_location = reinterpret_cast<Map**>(n->address());
2176     if (*map_location == NULL) {
2177       *map_location = heap->free_space_map();
2178     } else {
2179       DCHECK(*map_location == heap->free_space_map());
2180     }
2181     n = n->next();
2182   }
2183 }
2184
2185
2186 FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
2187   Reset();
2188 }
2189
2190
2191 intptr_t FreeList::Concatenate(FreeList* free_list) {
2192   intptr_t free_bytes = 0;
2193   free_bytes += small_list_.Concatenate(free_list->small_list());
2194   free_bytes += medium_list_.Concatenate(free_list->medium_list());
2195   free_bytes += large_list_.Concatenate(free_list->large_list());
2196   free_bytes += huge_list_.Concatenate(free_list->huge_list());
2197   return free_bytes;
2198 }
2199
2200
2201 void FreeList::Reset() {
2202   small_list_.Reset();
2203   medium_list_.Reset();
2204   large_list_.Reset();
2205   huge_list_.Reset();
2206 }
2207
2208
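// Adds the block [start, start + size_in_bytes) to the free list of the
// matching size class.  Returns the number of bytes that could not be
// tracked because the block was smaller than kSmallListMin; those bytes are
// accounted as waste on the page.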
2209 int FreeList::Free(Address start, int size_in_bytes) {
2210   if (size_in_bytes == 0) return 0;
2211
2212   heap_->CreateFillerObjectAt(start, size_in_bytes);
2213
2214   Page* page = Page::FromAddress(start);
2215
2216   // Early return to drop too-small blocks on the floor.
2217   if (size_in_bytes < kSmallListMin) {
2218     page->add_non_available_small_blocks(size_in_bytes);
2219     return size_in_bytes;
2220   }
2221
2222   FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
2223   // Insert other blocks at the head of a free list of the appropriate
2224   // magnitude.
2225   if (size_in_bytes <= kSmallListMax) {
2226     small_list_.Free(free_space, size_in_bytes);
2227     page->add_available_in_small_free_list(size_in_bytes);
2228   } else if (size_in_bytes <= kMediumListMax) {
2229     medium_list_.Free(free_space, size_in_bytes);
2230     page->add_available_in_medium_free_list(size_in_bytes);
2231   } else if (size_in_bytes <= kLargeListMax) {
2232     large_list_.Free(free_space, size_in_bytes);
2233     page->add_available_in_large_free_list(size_in_bytes);
2234   } else {
2235     huge_list_.Free(free_space, size_in_bytes);
2236     page->add_available_in_huge_free_list(size_in_bytes);
2237   }
2238
2239   DCHECK(IsVeryLong() || available() == SumFreeLists());
2240   return 0;
2241 }
2242
2243
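// Returns a free-list node of at least size_in_bytes, or NULL.  Small enough
// requests are served directly from the small, medium and large lists (any
// node in those lists is guaranteed to fit); otherwise the huge list is
// scanned first-fit, and finally the size-checked list pickers are tried.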
2244 FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2245   FreeSpace* node = NULL;
2246   Page* page = NULL;
2247
2248   if (size_in_bytes <= kSmallAllocationMax) {
2249     node = small_list_.PickNodeFromList(node_size);
2250     if (node != NULL) {
2251       DCHECK(size_in_bytes <= *node_size);
2252       page = Page::FromAddress(node->address());
2253       page->add_available_in_small_free_list(-(*node_size));
2254       DCHECK(IsVeryLong() || available() == SumFreeLists());
2255       return node;
2256     }
2257   }
2258
2259   if (size_in_bytes <= kMediumAllocationMax) {
2260     node = medium_list_.PickNodeFromList(node_size);
2261     if (node != NULL) {
2262       DCHECK(size_in_bytes <= *node_size);
2263       page = Page::FromAddress(node->address());
2264       page->add_available_in_medium_free_list(-(*node_size));
2265       DCHECK(IsVeryLong() || available() == SumFreeLists());
2266       return node;
2267     }
2268   }
2269
2270   if (size_in_bytes <= kLargeAllocationMax) {
2271     node = large_list_.PickNodeFromList(node_size);
2272     if (node != NULL) {
2273       DCHECK(size_in_bytes <= *node_size);
2274       page = Page::FromAddress(node->address());
2275       page->add_available_in_large_free_list(-(*node_size));
2276       DCHECK(IsVeryLong() || available() == SumFreeLists());
2277       return node;
2278     }
2279   }
2280
2281   int huge_list_available = huge_list_.available();
2282   FreeSpace* top_node = huge_list_.top();
2283   for (FreeSpace** cur = &top_node; *cur != NULL;
2284        cur = (*cur)->next_address()) {
2285     FreeSpace* cur_node = *cur;
2286     while (cur_node != NULL &&
2287            Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
2288       int size = cur_node->Size();
2289       huge_list_available -= size;
2290       page = Page::FromAddress(cur_node->address());
2291       page->add_available_in_huge_free_list(-size);
2292       cur_node = cur_node->next();
2293     }
2294
2295     *cur = cur_node;
2296     if (cur_node == NULL) {
2297       huge_list_.set_end(NULL);
2298       break;
2299     }
2300
2301     int size = cur_node->Size();
2302     if (size >= size_in_bytes) {
2303       // Large enough node found.  Unlink it from the list.
2304       node = *cur;
2305       *cur = node->next();
2306       *node_size = size;
2307       huge_list_available -= size;
2308       page = Page::FromAddress(node->address());
2309       page->add_available_in_huge_free_list(-size);
2310       break;
2311     }
2312   }
2313
2314   huge_list_.set_top(top_node);
2315   if (huge_list_.top() == NULL) {
2316     huge_list_.set_end(NULL);
2317   }
2318   huge_list_.set_available(huge_list_available);
2319
2320   if (node != NULL) {
2321     DCHECK(IsVeryLong() || available() == SumFreeLists());
2322     return node;
2323   }
2324
2325   if (size_in_bytes <= kSmallListMax) {
2326     node = small_list_.PickNodeFromList(size_in_bytes, node_size);
2327     if (node != NULL) {
2328       DCHECK(size_in_bytes <= *node_size);
2329       page = Page::FromAddress(node->address());
2330       page->add_available_in_small_free_list(-(*node_size));
2331     }
2332   } else if (size_in_bytes <= kMediumListMax) {
2333     node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
2334     if (node != NULL) {
2335       DCHECK(size_in_bytes <= *node_size);
2336       page = Page::FromAddress(node->address());
2337       page->add_available_in_medium_free_list(-(*node_size));
2338     }
2339   } else if (size_in_bytes <= kLargeListMax) {
2340     node = large_list_.PickNodeFromList(size_in_bytes, node_size);
2341     if (node != NULL) {
2342       DCHECK(size_in_bytes <= *node_size);
2343       page = Page::FromAddress(node->address());
2344       page->add_available_in_large_free_list(-(*node_size));
2345     }
2346   }
2347
2348   DCHECK(IsVeryLong() || available() == SumFreeLists());
2349   return node;
2350 }
2351
2352
2353 // Allocation on the old space free list.  If it succeeds then a new linear
2354 // allocation space has been set up with the top and limit of the space.  If
2355 // the allocation fails then NULL is returned, and the caller can perform a GC
2356 // or allocate a new page before retrying.
2357 HeapObject* FreeList::Allocate(int size_in_bytes) {
2358   DCHECK(0 < size_in_bytes);
2359   DCHECK(size_in_bytes <= kMaxBlockSize);
2360   DCHECK(IsAligned(size_in_bytes, kPointerSize));
2361   // Don't free list allocate if there is linear space available.
2362   DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
2363
2364   int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2365   // Mark the old linear allocation area with a free space map so it can be
2366   // skipped when scanning the heap.  This also puts it back in the free list
2367   // if it is big enough.
2368   owner_->Free(owner_->top(), old_linear_size);
2369
2370   owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
2371                                                       old_linear_size);
2372
2373   int new_node_size = 0;
2374   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2375   if (new_node == NULL) {
2376     owner_->SetTopAndLimit(NULL, NULL);
2377     return NULL;
2378   }
2379
2380   int bytes_left = new_node_size - size_in_bytes;
2381   DCHECK(bytes_left >= 0);
2382
2383 #ifdef DEBUG
2384   for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2385     reinterpret_cast<Object**>(new_node->address())[i] =
2386         Smi::FromInt(kCodeZapValue);
2387   }
2388 #endif
2389
2390   // The old-space-step might have finished sweeping and restarted marking.
2391   // Verify that it did not turn the page of the new node into an evacuation
2392   // candidate.
2393   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2394
2395   const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2396
2397   // Memory in the linear allocation area is counted as allocated.  We may free
2398   // a little of this again immediately - see below.
2399   owner_->Allocate(new_node_size);
2400
2401   if (owner_->heap()->inline_allocation_disabled()) {
2402     // Keep the linear allocation area empty if requested to do so, just
2403     // return the area to the free list instead.
2404     owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2405     DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
2406   } else if (bytes_left > kThreshold &&
2407              owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2408              FLAG_incremental_marking_steps) {
2409     int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2410     // We don't want to give too large linear areas to the allocator while
2411     // incremental marking is going on, because we won't check again whether
2412     // we want to do another increment until the linear area is used up.
2413     owner_->Free(new_node->address() + size_in_bytes + linear_size,
2414                  new_node_size - size_in_bytes - linear_size);
2415     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2416                            new_node->address() + size_in_bytes + linear_size);
2417   } else if (bytes_left > 0) {
2418     // Normally we give the rest of the node to the allocator as its new
2419     // linear allocation area.
2420     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2421                            new_node->address() + new_node_size);
2422   } else {
2423     // TODO(gc) Try not freeing linear allocation region when bytes_left
2424     // is zero.
2425     owner_->SetTopAndLimit(NULL, NULL);
2426   }
2427
2428   return new_node;
2429 }
2430
2431
2432 intptr_t FreeList::EvictFreeListItems(Page* p) {
2433   intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
2434   p->set_available_in_huge_free_list(0);
2435
2436   if (sum < p->area_size()) {
2437     sum += small_list_.EvictFreeListItemsInList(p) +
2438            medium_list_.EvictFreeListItemsInList(p) +
2439            large_list_.EvictFreeListItemsInList(p);
2440     p->set_available_in_small_free_list(0);
2441     p->set_available_in_medium_free_list(0);
2442     p->set_available_in_large_free_list(0);
2443   }
2444
2445   return sum;
2446 }
2447
2448
2449 bool FreeList::ContainsPageFreeListItems(Page* p) {
2450   return huge_list_.EvictFreeListItemsInList(p) ||
2451          small_list_.EvictFreeListItemsInList(p) ||
2452          medium_list_.EvictFreeListItemsInList(p) ||
2453          large_list_.EvictFreeListItemsInList(p);
2454 }
2455
2456
2457 void FreeList::RepairLists(Heap* heap) {
2458   small_list_.RepairFreeList(heap);
2459   medium_list_.RepairFreeList(heap);
2460   large_list_.RepairFreeList(heap);
2461   huge_list_.RepairFreeList(heap);
2462 }
2463
2464
2465 #ifdef DEBUG
2466 intptr_t FreeListCategory::SumFreeList() {
2467   intptr_t sum = 0;
2468   FreeSpace* cur = top();
2469   while (cur != NULL) {
2470     DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
2471     sum += cur->nobarrier_size();
2472     cur = cur->next();
2473   }
2474   return sum;
2475 }
2476
2477
2478 static const int kVeryLongFreeList = 500;
2479
2480
2481 int FreeListCategory::FreeListLength() {
2482   int length = 0;
2483   FreeSpace* cur = top();
2484   while (cur != NULL) {
2485     length++;
2486     cur = cur->next();
2487     if (length == kVeryLongFreeList) return length;
2488   }
2489   return length;
2490 }
2491
2492
2493 bool FreeList::IsVeryLong() {
2494   if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
2495   if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
2496   if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
2497   if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
2498   return false;
2499 }
2500
2501
2502 // This can take a very long time because it is linear in the number of entries
2503 // on the free list, so it should not be called if FreeListLength returns
2504 // kVeryLongFreeList.
2505 intptr_t FreeList::SumFreeLists() {
2506   intptr_t sum = small_list_.SumFreeList();
2507   sum += medium_list_.SumFreeList();
2508   sum += large_list_.SumFreeList();
2509   sum += huge_list_.SumFreeList();
2510   return sum;
2511 }
2512 #endif
2513
2514
2515 // -----------------------------------------------------------------------------
2516 // OldSpace implementation
2517
2518 void PagedSpace::PrepareForMarkCompact() {
2519   // We don't have a linear allocation area while sweeping.  It will be restored
2520   // on the first allocation after the sweep.
2521   EmptyAllocationInfo();
2522
2523   // This counter will be increased for pages which will be swept by the
2524   // sweeper threads.
2525   unswept_free_bytes_ = 0;
2526
2527   // Clear the free list before a full GC---it will be rebuilt afterward.
2528   free_list_.Reset();
2529 }
2530
2531
2532 intptr_t PagedSpace::SizeOfObjects() {
2533   DCHECK(!FLAG_concurrent_sweeping ||
2534          heap()->mark_compact_collector()->sweeping_in_progress() ||
2535          (unswept_free_bytes_ == 0));
2536   return Size() - unswept_free_bytes_ - (limit() - top());
2537 }
2538
2539
2540 // After we have booted, we have created a map which represents free space
2541 // on the heap.  If there was already a free list then the elements on it
2542 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
2543 // fix them.
2544 void PagedSpace::RepairFreeListsAfterDeserialization() {
2545   free_list_.RepairLists(heap());
2546   // Each page may have a small free space that is not tracked by a free list.
2547   // Update the maps for those free space objects.
2548   PageIterator iterator(this);
2549   while (iterator.has_next()) {
2550     Page* page = iterator.next();
2551     int size = static_cast<int>(page->non_available_small_blocks());
2552     if (size == 0) continue;
2553     Address address = page->OffsetToAddress(Page::kPageSize - size);
2554     heap()->CreateFillerObjectAt(address, size);
2555   }
2556 }
2557
2558
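// If the current linear allocation area is on a page that has been selected
// as an evacuation candidate, abandon it.  A filler object keeps the page
// iterable.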
2559 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2560   if (allocation_info_.top() >= allocation_info_.limit()) return;
2561
2562   if (Page::FromAllocationTop(allocation_info_.top())
2563           ->IsEvacuationCandidate()) {
2564     // Create filler object to keep page iterable if it was iterable.
2565     int remaining =
2566         static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2567     heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
2568
2569     allocation_info_.set_top(NULL);
2570     allocation_info_.set_limit(NULL);
2571   }
2572 }
2573
2574
2575 HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
2576     int size_in_bytes) {
2577   MarkCompactCollector* collector = heap()->mark_compact_collector();
2578   if (collector->sweeping_in_progress()) {
2579     // Wait for the sweeper threads here and complete the sweeping phase.
2580     collector->EnsureSweepingCompleted();
2581
2582     // After waiting for the sweeper threads, there may be new free-list
2583     // entries.
2584     return free_list_.Allocate(size_in_bytes);
2585   }
2586   return NULL;
2587 }
2588
2589
2590 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2591   // Allocation in this space has failed.
2592
2593   MarkCompactCollector* collector = heap()->mark_compact_collector();
2594   // Sweeping is still in progress.
2595   if (collector->sweeping_in_progress()) {
2596     // First try to refill the free-list, concurrent sweeper threads
2597     // may have freed some objects in the meantime.
2598     collector->RefillFreeList(this);
2599
2600     // Retry the free list allocation.
2601     HeapObject* object = free_list_.Allocate(size_in_bytes);
2602     if (object != NULL) return object;
2603
2604     // If sweeping is still in progress try to sweep pages on the main thread.
2605     int free_chunk = collector->SweepInParallel(this, size_in_bytes);
2606     collector->RefillFreeList(this);
2607     if (free_chunk >= size_in_bytes) {
2608       HeapObject* object = free_list_.Allocate(size_in_bytes);
2609       // We should be able to allocate an object here since we just freed that
2610       // much memory.
2611       DCHECK(object != NULL);
2612       if (object != NULL) return object;
2613     }
2614   }
2615
2616   // Free list allocation failed and there is no next page.  Fail if we have
2617   // hit the old generation size limit that should cause a garbage
2618   // collection.
2619   if (!heap()->always_allocate() &&
2620       heap()->OldGenerationAllocationLimitReached()) {
2621     // If sweeper threads are active, wait for them at that point and steal
2622     // elements from their free-lists.
2623     HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2624     if (object != NULL) return object;
2625   }
2626
2627   // Try to expand the space and allocate in the new next page.
2628   if (Expand()) {
2629     DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
2630     return free_list_.Allocate(size_in_bytes);
2631   }
2632
2633   // If sweeper threads are active, wait for them at that point and steal
2634   // elements from their free-lists. Allocation may still fail here, which
2635   // would indicate that there is not enough memory for the given allocation.
2636   return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2637 }
2638
2639
2640 #ifdef DEBUG
2641 void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2642   CommentStatistic* comments_statistics =
2643       isolate->paged_space_comments_statistics();
2644   ReportCodeKindStatistics(isolate->code_kind_statistics());
2645   PrintF(
2646       "Code comment statistics (\"   [ comment-txt   :    size/   "
2647       "count  (average)\"):\n");
2648   for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2649     const CommentStatistic& cs = comments_statistics[i];
2650     if (cs.size > 0) {
2651       PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
2652              cs.size / cs.count);
2653     }
2654   }
2655   PrintF("\n");
2656 }
2657
2658
2659 void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2660   CommentStatistic* comments_statistics =
2661       isolate->paged_space_comments_statistics();
2662   ClearCodeKindStatistics(isolate->code_kind_statistics());
2663   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2664     comments_statistics[i].Clear();
2665   }
2666   comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2667   comments_statistics[CommentStatistic::kMaxComments].size = 0;
2668   comments_statistics[CommentStatistic::kMaxComments].count = 0;
2669 }
2670
2671
2672 // Adds a comment to the 'comments_statistics' table. Performance is OK as
2673 // long as 'kMaxComments' is small.
2674 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2675   CommentStatistic* comments_statistics =
2676       isolate->paged_space_comments_statistics();
2677   // Do not count empty comments
2678   if (delta <= 0) return;
2679   CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2680   // Search for a free or matching entry in 'comments_statistics': 'cs'
2681   // points to result.
2682   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2683     if (comments_statistics[i].comment == NULL) {
2684       cs = &comments_statistics[i];
2685       cs->comment = comment;
2686       break;
2687     } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2688       cs = &comments_statistics[i];
2689       break;
2690     }
2691   }
2692   // Update entry for 'comment'
2693   cs->size += delta;
2694   cs->count += 1;
2695 }
2696
2697
2698 // Called for each nested comment start (start marked with '[ xxx', end marked
2699 // with ']').  RelocIterator 'it' must point to a comment reloc info.
2700 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2701   DCHECK(!it->done());
2702   DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
2703   const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2704   if (tmp[0] != '[') {
2705     // Not a nested comment; skip
2706     return;
2707   }
2708
2709   // Search for end of nested comment or a new nested comment
2710   const char* const comment_txt =
2711       reinterpret_cast<const char*>(it->rinfo()->data());
2712   const byte* prev_pc = it->rinfo()->pc();
2713   int flat_delta = 0;
2714   it->next();
2715   while (true) {
2716     // All nested comments must be terminated properly, so we will eventually
2717     // exit this loop.
2718     DCHECK(!it->done());
2719     if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2720       const char* const txt =
2721           reinterpret_cast<const char*>(it->rinfo()->data());
2722       flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2723       if (txt[0] == ']') break;  // End of nested comment
2724       // A new comment
2725       CollectCommentStatistics(isolate, it);
2726       // Skip code that was covered with previous comment
2727       prev_pc = it->rinfo()->pc();
2728     }
2729     it->next();
2730   }
2731   EnterComment(isolate, comment_txt, flat_delta);
2732 }
2733
2734
2735 // Collects code size statistics:
2736 // - by code kind
2737 // - by code comment
2738 void PagedSpace::CollectCodeStatistics() {
2739   Isolate* isolate = heap()->isolate();
2740   HeapObjectIterator obj_it(this);
2741   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2742     if (obj->IsCode()) {
2743       Code* code = Code::cast(obj);
2744       isolate->code_kind_statistics()[code->kind()] += code->Size();
2745       RelocIterator it(code);
2746       int delta = 0;
2747       const byte* prev_pc = code->instruction_start();
2748       while (!it.done()) {
2749         if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2750           delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2751           CollectCommentStatistics(isolate, &it);
2752           prev_pc = it.rinfo()->pc();
2753         }
2754         it.next();
2755       }
2756
2757       DCHECK(code->instruction_start() <= prev_pc &&
2758              prev_pc <= code->instruction_end());
2759       delta += static_cast<int>(code->instruction_end() - prev_pc);
2760       EnterComment(isolate, "NoComment", delta);
2761     }
2762   }
2763 }
2764
2765
2766 void PagedSpace::ReportStatistics() {
2767   int pct = static_cast<int>(Available() * 100 / Capacity());
2768   PrintF("  capacity: %" V8_PTR_PREFIX
2769          "d"
2770          ", waste: %" V8_PTR_PREFIX
2771          "d"
2772          ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2773          Capacity(), Waste(), Available(), pct);
2774
2775   if (heap()->mark_compact_collector()->sweeping_in_progress()) {
2776     heap()->mark_compact_collector()->EnsureSweepingCompleted();
2777   }
2778   ClearHistograms(heap()->isolate());
2779   HeapObjectIterator obj_it(this);
2780   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2781     CollectHistogramInfo(obj);
2782   ReportHistogram(heap()->isolate(), true);
2783 }
2784 #endif
2785
2786
2787 // -----------------------------------------------------------------------------
2788 // MapSpace implementation
2789 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2790 // there is at least one non-inlined virtual function. I would prefer to hide
2791 // the VerifyObject definition behind VERIFY_HEAP.
2792
2793 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
2794
2795
2796 // -----------------------------------------------------------------------------
2797 // CellSpace and PropertyCellSpace implementation
2798 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2799 // there is at least one non-inlined virtual function. I would prefer to hide
2800 // the VerifyObject definition behind VERIFY_HEAP.
2801
2802 void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); }
2803
2804
2805 void PropertyCellSpace::VerifyObject(HeapObject* object) {
2806   CHECK(object->IsPropertyCell());
2807 }
2808
2809
2810 // -----------------------------------------------------------------------------
2811 // LargeObjectIterator
2812
2813 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2814   current_ = space->first_page_;
2815   size_func_ = NULL;
2816 }
2817
2818
2819 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2820                                          HeapObjectCallback size_func) {
2821   current_ = space->first_page_;
2822   size_func_ = size_func;
2823 }
2824
2825
2826 HeapObject* LargeObjectIterator::Next() {
2827   if (current_ == NULL) return NULL;
2828
2829   HeapObject* object = current_->GetObject();
2830   current_ = current_->next_page();
2831   return object;
2832 }
2833
2834
2835 // -----------------------------------------------------------------------------
2836 // LargeObjectSpace
2837 static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
2838
2839
2840 LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
2841                                    AllocationSpace id)
2842     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
2843       max_capacity_(max_capacity),
2844       first_page_(NULL),
2845       size_(0),
2846       page_count_(0),
2847       objects_size_(0),
2848       chunk_map_(ComparePointers, 1024) {}
2849
2850
2851 bool LargeObjectSpace::SetUp() {
2852   first_page_ = NULL;
2853   size_ = 0;
2854   maximum_committed_ = 0;
2855   page_count_ = 0;
2856   objects_size_ = 0;
2857   chunk_map_.Clear();
2858   return true;
2859 }
2860
2861
2862 void LargeObjectSpace::TearDown() {
2863   while (first_page_ != NULL) {
2864     LargePage* page = first_page_;
2865     first_page_ = first_page_->next_page();
2866     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2867
2868     ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2869     heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2870         space, kAllocationActionFree, page->size());
2871     heap()->isolate()->memory_allocator()->Free(page);
2872   }
2873   SetUp();
2874 }
2875
2876
2877 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
2878                                                Executability executable) {
2879   // Check if we want to force a GC before growing the old space further.
2880   // If so, fail the allocation.
2881   if (!heap()->always_allocate() &&
2882       heap()->OldGenerationAllocationLimitReached()) {
2883     return AllocationResult::Retry(identity());
2884   }
2885
2886   if (!CanAllocateSize(object_size)) return AllocationResult::Retry(identity());
2887
2888   LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
2889       object_size, this, executable);
2890   if (page == NULL) return AllocationResult::Retry(identity());
2891   DCHECK(page->area_size() >= object_size);
2892
2893   size_ += static_cast<int>(page->size());
2894   objects_size_ += object_size;
2895   page_count_++;
2896   page->set_next_page(first_page_);
2897   first_page_ = page;
2898
2899   if (size_ > maximum_committed_) {
2900     maximum_committed_ = size_;
2901   }
2902
2903   // Register all MemoryChunk::kAlignment-aligned chunks covered by
2904   // this large page in the chunk map.
2905   uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2906   uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2907   for (uintptr_t key = base; key <= limit; key++) {
2908     HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2909                                               static_cast<uint32_t>(key), true);
2910     DCHECK(entry != NULL);
2911     entry->value = page;
2912   }
2913
2914   HeapObject* object = page->GetObject();
2915
2916   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
2917
2918   if (Heap::ShouldZapGarbage()) {
2919     // Make the object consistent so the heap can be verified in OldSpaceStep.
2920     // We only need to do this in debug builds or if verify_heap is on.
2921     reinterpret_cast<Object**>(object->address())[0] =
2922         heap()->fixed_array_map();
2923     reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2924   }
2925
2926   heap()->incremental_marking()->OldSpaceStep(object_size);
2927   return object;
2928 }
2929
2930
2931 size_t LargeObjectSpace::CommittedPhysicalMemory() {
2932   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2933   size_t size = 0;
2934   LargePage* current = first_page_;
2935   while (current != NULL) {
2936     size += current->CommittedPhysicalMemory();
2937     current = current->next_page();
2938   }
2939   return size;
2940 }
2941
2942
2943 // GC support
2944 Object* LargeObjectSpace::FindObject(Address a) {
2945   LargePage* page = FindPage(a);
2946   if (page != NULL) {
2947     return page->GetObject();
2948   }
2949   return Smi::FromInt(0);  // Signaling not found.
2950 }
2951
2952
2953 LargePage* LargeObjectSpace::FindPage(Address a) {
2954   uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
2955   HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2956                                         static_cast<uint32_t>(key), false);
2957   if (e != NULL) {
2958     DCHECK(e->value != NULL);
2959     LargePage* page = reinterpret_cast<LargePage*>(e->value);
2960     DCHECK(page->is_valid());
2961     if (page->Contains(a)) {
2962       return page;
2963     }
2964   }
2965   return NULL;
2966 }
2967
2968
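// Sweeps the large object space: pages whose object is unmarked are unlinked
// from the page list, removed from the chunk map and freed; marked objects
// get their mark bit cleared for the next collection cycle.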
2969 void LargeObjectSpace::FreeUnmarkedObjects() {
2970   LargePage* previous = NULL;
2971   LargePage* current = first_page_;
2972   while (current != NULL) {
2973     HeapObject* object = current->GetObject();
2974     // Can this large page contain pointers to non-trivial objects?  No other
2975     // pointer object is this big.
2976     bool is_pointer_object = object->IsFixedArray();
2977     MarkBit mark_bit = Marking::MarkBitFrom(object);
2978     if (mark_bit.Get()) {
2979       mark_bit.Clear();
2980       Page::FromAddress(object->address())->ResetProgressBar();
2981       Page::FromAddress(object->address())->ResetLiveBytes();
2982       previous = current;
2983       current = current->next_page();
2984     } else {
2985       LargePage* page = current;
2986       // Cut the chunk out from the chunk list.
2987       current = current->next_page();
2988       if (previous == NULL) {
2989         first_page_ = current;
2990       } else {
2991         previous->set_next_page(current);
2992       }
2993
2994       // Free the chunk.
2995       heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
2996                                                              heap()->isolate());
2997       size_ -= static_cast<int>(page->size());
2998       objects_size_ -= object->Size();
2999       page_count_--;
3000
3001       // Remove entries belonging to this page.
3002       // A local 'alignment' variable keeps the lines below within the
3003       // 80-character limit checked by tools/presubmit.py.
3004       const intptr_t alignment = MemoryChunk::kAlignment;
3005       uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
3006       uintptr_t limit = base + (page->size() - 1) / alignment;
3007       for (uintptr_t key = base; key <= limit; key++) {
3008         chunk_map_.Remove(reinterpret_cast<void*>(key),
3009                           static_cast<uint32_t>(key));
3010       }
3011
3012       if (is_pointer_object) {
3013         heap()->QueueMemoryChunkForFree(page);
3014       } else {
3015         heap()->isolate()->memory_allocator()->Free(page);
3016       }
3017     }
3018   }
3019   heap()->FreeQueuedChunks();
3020 }
3021
3022
3023 bool LargeObjectSpace::Contains(HeapObject* object) {
3024   Address address = object->address();
3025   MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3026
3027   bool owned = (chunk->owner() == this);
3028
3029   SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
3030
3031   return owned;
3032 }
3033
3034
3035 #ifdef VERIFY_HEAP
3036 // We do not assume that the large object iterator works, because it depends
3037 // on the invariants we are checking during verification.
3038 void LargeObjectSpace::Verify() {
3039   for (LargePage* chunk = first_page_; chunk != NULL;
3040        chunk = chunk->next_page()) {
3041     // Each chunk contains an object that starts at the large object page's
3042     // object area start.
3043     HeapObject* object = chunk->GetObject();
3044     Page* page = Page::FromAddress(object->address());
3045     CHECK(object->address() == page->area_start());
3046
3047     // The first word should be a map, and we expect all map pointers to be
3048     // in map space.
3049     Map* map = object->map();
3050     CHECK(map->IsMap());
3051     CHECK(heap()->map_space()->Contains(map));
3052
3053     // We have only code, sequential strings, external strings
3054     // (sequential strings that have been morphed into external
3055     // strings), fixed arrays, byte arrays, and constant pool arrays in the
3056     // large object space.
3057     CHECK(object->IsCode() || object->IsSeqString() ||
3058           object->IsExternalString() || object->IsFixedArray() ||
3059           object->IsFixedDoubleArray() || object->IsByteArray() ||
3060           object->IsConstantPoolArray());
3061
3062     // The object itself should look OK.
3063     object->ObjectVerify();
3064
3065     // Byte arrays and strings don't have interior pointers.
3066     if (object->IsCode()) {
3067       VerifyPointersVisitor code_visitor;
3068       object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
3069     } else if (object->IsFixedArray()) {
3070       FixedArray* array = FixedArray::cast(object);
3071       for (int j = 0; j < array->length(); j++) {
3072         Object* element = array->get(j);
3073         if (element->IsHeapObject()) {
3074           HeapObject* element_object = HeapObject::cast(element);
3075           CHECK(heap()->Contains(element_object));
3076           CHECK(element_object->map()->IsMap());
3077         }
3078       }
3079     }
3080   }
3081 }
3082 #endif
3083
3084
3085 #ifdef DEBUG
3086 void LargeObjectSpace::Print() {
3087   OFStream os(stdout);
3088   LargeObjectIterator it(this);
3089   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3090     obj->Print(os);
3091   }
3092 }
3093
3094
3095 void LargeObjectSpace::ReportStatistics() {
3096   PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
3097   int num_objects = 0;
3098   ClearHistograms(heap()->isolate());
3099   LargeObjectIterator it(this);
3100   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3101     num_objects++;
3102     CollectHistogramInfo(obj);
3103   }
3104
3105   PrintF(
3106       "  number of objects %d, "
3107       "size of objects %" V8_PTR_PREFIX "d\n",
3108       num_objects, objects_size_);
3109   if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3110 }
3111
3112
3113 void LargeObjectSpace::CollectCodeStatistics() {
3114   Isolate* isolate = heap()->isolate();
3115   LargeObjectIterator obj_it(this);
3116   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
3117     if (obj->IsCode()) {
3118       Code* code = Code::cast(obj);
3119       isolate->code_kind_statistics()[code->kind()] += code->Size();
3120     }
3121   }
3122 }
3123
3124
3125 void Page::Print() {
3126   // Make a best-effort to print the objects in the page.
3127   PrintF("Page@%p in %s\n", this->address(),
3128          AllocationSpaceName(this->owner()->identity()));
3129   printf(" --------------------------------------\n");
3130   HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
3131   unsigned mark_size = 0;
3132   for (HeapObject* object = objects.Next(); object != NULL;
3133        object = objects.Next()) {
3134     bool is_marked = Marking::MarkBitFrom(object).Get();
3135     PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
3136     if (is_marked) {
3137       mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
3138     }
3139     object->ShortPrint();
3140     PrintF("\n");
3141   }
3142   printf(" --------------------------------------\n");
3143   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3144 }
3145
3146 #endif  // DEBUG
3147 }
3148 }  // namespace v8::internal