deps: backport IsValid changes from 4e8736d in V8
deps/v8/src/heap/spaces.cc  [platform/upstream/nodejs.git]
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #include "src/base/bits.h"
8 #include "src/base/platform/platform.h"
9 #include "src/full-codegen.h"
10 #include "src/heap/mark-compact.h"
11 #include "src/macro-assembler.h"
12 #include "src/msan.h"
13 #include "src/snapshot/snapshot.h"
14
15 namespace v8 {
16 namespace internal {
17
18
19 // ----------------------------------------------------------------------------
20 // HeapObjectIterator
21
22 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
23   // You can't actually iterate over the anchor page.  It is not a real page,
24   // just an anchor for the doubly linked page list.  Initialize as if we have
25   // reached the end of the anchor page, then the first iteration will move on
26   // to the first page.
27   Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
28 }
29
30
31 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
32                                        HeapObjectCallback size_func) {
33   // You can't actually iterate over the anchor page.  It is not a real page,
34   // just an anchor for the doubly linked page list.  Initialize the current
35   // address and end as NULL, then the first iteration will move on
36   // to the first page.
37   Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
38 }
39
40
41 HeapObjectIterator::HeapObjectIterator(Page* page,
42                                        HeapObjectCallback size_func) {
43   Space* owner = page->owner();
44   DCHECK(owner == page->heap()->old_space() ||
45          owner == page->heap()->map_space() ||
46          owner == page->heap()->code_space());
47   Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
48              page->area_end(), kOnePageOnly, size_func);
49   DCHECK(page->WasSwept() || page->SweepingCompleted());
50 }
51
52
53 void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
54                                     HeapObjectIterator::PageMode mode,
55                                     HeapObjectCallback size_f) {
56   space_ = space;
57   cur_addr_ = cur;
58   cur_end_ = end;
59   page_mode_ = mode;
60   size_func_ = size_f;
61 }
62
63
64 // We have hit the end of the current page's objects and should advance to
65 // the next page, if there is one.
66 bool HeapObjectIterator::AdvanceToNextPage() {
67   DCHECK(cur_addr_ == cur_end_);
68   if (page_mode_ == kOnePageOnly) return false;
69   Page* cur_page;
70   if (cur_addr_ == NULL) {
71     cur_page = space_->anchor();
72   } else {
73     cur_page = Page::FromAddress(cur_addr_ - 1);
74     DCHECK(cur_addr_ == cur_page->area_end());
75   }
76   cur_page = cur_page->next_page();
77   if (cur_page == space_->anchor()) return false;
78   cur_addr_ = cur_page->area_start();
79   cur_end_ = cur_page->area_end();
80   DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
81   return true;
82 }
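// Usage sketch (illustrative; the same pattern appears in
// PagedSpace::FindObject further down in this file): walking the live objects
// of a single swept page.
//
//   HeapObjectIterator it(page, NULL);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // [obj->address(), obj->address() + obj->Size()) holds a live object.
//   }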
83
84
85 // -----------------------------------------------------------------------------
86 // CodeRange
87
88
89 CodeRange::CodeRange(Isolate* isolate)
90     : isolate_(isolate),
91       code_range_(NULL),
92       free_list_(0),
93       allocation_list_(0),
94       current_allocation_block_index_(0),
95       emergency_block_() {}
96
97
98 bool CodeRange::SetUp(size_t requested) {
99   DCHECK(code_range_ == NULL);
100
101   if (requested == 0) {
102     // When a target requires the code range feature, we put all code objects
103     // in a kMaximalCodeRangeSize range of virtual address space, so that
104     // they can call each other with near calls.
105     if (kRequiresCodeRange) {
106       requested = kMaximalCodeRangeSize;
107     } else {
108       return true;
109     }
110   }
111
112   if (requested <= kMinimumCodeRangeSize) {
113     requested = kMinimumCodeRangeSize;
114   }
115
116   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
117 #ifdef V8_TARGET_ARCH_MIPS64
118   // To use pseudo-relative jumps such as the j/jal instructions, which take a
119   // 28-bit encoded immediate, the addresses have to lie within a 256 MB
120   // aligned region.
121   code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize);
122 #else
123   code_range_ = new base::VirtualMemory(requested);
124 #endif
125   CHECK(code_range_ != NULL);
126   if (!code_range_->IsReserved()) {
127     delete code_range_;
128     code_range_ = NULL;
129     return false;
130   }
131
132   // We are sure that we have reserved a block of the requested size.
133   DCHECK(code_range_->size() == requested);
134   Address base = reinterpret_cast<Address>(code_range_->address());
135
136   // On some platforms, specifically Win64, we need to reserve some pages at
137   // the beginning of an executable space.
138   if (kReservedCodeRangePages) {
139     if (!code_range_->Commit(
140             base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
141       delete code_range_;
142       code_range_ = NULL;
143       return false;
144     }
145     base += kReservedCodeRangePages * base::OS::CommitPageSize();
146   }
147   Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
148   size_t size = code_range_->size() - (aligned_base - base) -
149                 kReservedCodeRangePages * base::OS::CommitPageSize();
150   allocation_list_.Add(FreeBlock(aligned_base, size));
151   current_allocation_block_index_ = 0;
152
153   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
154   ReserveEmergencyBlock();
155   return true;
156 }
157
158
159 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
160                                        const FreeBlock* right) {
161   // The entire point of CodeRange is that the difference between two
162   // addresses in the range can be represented as a signed 32-bit int,
163   // so the cast is semantically correct.
164   return static_cast<int>(left->start - right->start);
165 }
166
167
168 bool CodeRange::GetNextAllocationBlock(size_t requested) {
169   for (current_allocation_block_index_++;
170        current_allocation_block_index_ < allocation_list_.length();
171        current_allocation_block_index_++) {
172     if (requested <= allocation_list_[current_allocation_block_index_].size) {
173       return true;  // Found a large enough allocation block.
174     }
175   }
176
177   // Sort and merge the free blocks on the free list and the allocation list.
178   free_list_.AddAll(allocation_list_);
179   allocation_list_.Clear();
180   free_list_.Sort(&CompareFreeBlockAddress);
181   for (int i = 0; i < free_list_.length();) {
182     FreeBlock merged = free_list_[i];
183     i++;
184     // Add adjacent free blocks to the current merged block.
185     while (i < free_list_.length() &&
186            free_list_[i].start == merged.start + merged.size) {
187       merged.size += free_list_[i].size;
188       i++;
189     }
190     if (merged.size > 0) {
191       allocation_list_.Add(merged);
192     }
193   }
194   free_list_.Clear();
195
196   for (current_allocation_block_index_ = 0;
197        current_allocation_block_index_ < allocation_list_.length();
198        current_allocation_block_index_++) {
199     if (requested <= allocation_list_[current_allocation_block_index_].size) {
200       return true;  // Found a large enough allocation block.
201     }
202   }
203   current_allocation_block_index_ = 0;
204   // Code range is full or too fragmented.
205   return false;
206 }
207
208
209 Address CodeRange::AllocateRawMemory(const size_t requested_size,
210                                      const size_t commit_size,
211                                      size_t* allocated) {
212   DCHECK(commit_size <= requested_size);
213   FreeBlock current;
214   if (!ReserveBlock(requested_size, &current)) {
215     *allocated = 0;
216     return NULL;
217   }
218   *allocated = current.size;
219   DCHECK(*allocated <= current.size);
220   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
221   if (!isolate_->memory_allocator()->CommitExecutableMemory(
222           code_range_, current.start, commit_size, *allocated)) {
223     *allocated = 0;
224     ReleaseBlock(&current);
225     return NULL;
226   }
227   return current.start;
228 }
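// Caller-side sketch, matching the call in MemoryAllocator::AllocateChunk
// below: the caller passes the requested reservation and commit sizes, and the
// actually reserved size is returned through the out parameter.
//
//   size_t chunk_size = ...;  // requested reservation size
//   Address base = isolate_->code_range()->AllocateRawMemory(
//       chunk_size, commit_size, &chunk_size);
//   if (base == NULL) {
//     // The code range is full or too fragmented.
//   }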
229
230
231 bool CodeRange::CommitRawMemory(Address start, size_t length) {
232   return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
233 }
234
235
236 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
237   return code_range_->Uncommit(start, length);
238 }
239
240
241 void CodeRange::FreeRawMemory(Address address, size_t length) {
242   DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
243   free_list_.Add(FreeBlock(address, length));
244   code_range_->Uncommit(address, length);
245 }
246
247
248 void CodeRange::TearDown() {
249   delete code_range_;  // Frees all memory in the virtual memory range.
250   code_range_ = NULL;
251   free_list_.Free();
252   allocation_list_.Free();
253 }
254
255
256 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
257   DCHECK(allocation_list_.length() == 0 ||
258          current_allocation_block_index_ < allocation_list_.length());
259   if (allocation_list_.length() == 0 ||
260       requested_size > allocation_list_[current_allocation_block_index_].size) {
261     // Find an allocation block large enough.
262     if (!GetNextAllocationBlock(requested_size)) return false;
263   }
264   // Commit the requested memory at the start of the current allocation block.
265   size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
266   *block = allocation_list_[current_allocation_block_index_];
267   // Don't leave a small free block behind; it's useless for a large object or chunk.
268   if (aligned_requested < (block->size - Page::kPageSize)) {
269     block->size = aligned_requested;
270   }
271   DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
272   allocation_list_[current_allocation_block_index_].start += block->size;
273   allocation_list_[current_allocation_block_index_].size -= block->size;
274   return true;
275 }
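// Note on the split above: the request is rounded up to
// MemoryChunk::kAlignment, and the current allocation block is only split if
// doing so would leave more than one page behind.  Otherwise the whole block
// is handed out, so the code range never accumulates slivers too small to be
// useful for any object or chunk.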
276
277
278 void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
279
280
281 void CodeRange::ReserveEmergencyBlock() {
282   const size_t requested_size = MemoryAllocator::CodePageAreaSize();
283   if (emergency_block_.size == 0) {
284     ReserveBlock(requested_size, &emergency_block_);
285   } else {
286     DCHECK(emergency_block_.size >= requested_size);
287   }
288 }
289
290
291 void CodeRange::ReleaseEmergencyBlock() {
292   if (emergency_block_.size != 0) {
293     ReleaseBlock(&emergency_block_);
294     emergency_block_.size = 0;
295   }
296 }
297
298
299 // -----------------------------------------------------------------------------
300 // MemoryAllocator
301 //
302
303 MemoryAllocator::MemoryAllocator(Isolate* isolate)
304     : isolate_(isolate),
305       capacity_(0),
306       capacity_executable_(0),
307       size_(0),
308       size_executable_(0),
309       lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
310       highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
311
312
313 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
314   capacity_ = RoundUp(capacity, Page::kPageSize);
315   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
316   DCHECK_GE(capacity_, capacity_executable_);
317
318   size_ = 0;
319   size_executable_ = 0;
320
321   return true;
322 }
323
324
325 void MemoryAllocator::TearDown() {
326   // Check that spaces were torn down before MemoryAllocator.
327   DCHECK(size_ == 0);
328   // TODO(gc) this will be true again when we fix FreeMemory.
329   // DCHECK(size_executable_ == 0);
330   capacity_ = 0;
331   capacity_executable_ = 0;
332 }
333
334
335 bool MemoryAllocator::CommitMemory(Address base, size_t size,
336                                    Executability executable) {
337   if (!base::VirtualMemory::CommitRegion(base, size,
338                                          executable == EXECUTABLE)) {
339     return false;
340   }
341   UpdateAllocatedSpaceLimits(base, base + size);
342   return true;
343 }
344
345
346 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
347                                  Executability executable) {
348   // TODO(gc) make code_range part of memory allocator?
349   DCHECK(reservation->IsReserved());
350   size_t size = reservation->size();
351   DCHECK(size_ >= size);
352   size_ -= size;
353
354   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
355
356   if (executable == EXECUTABLE) {
357     DCHECK(size_executable_ >= size);
358     size_executable_ -= size;
359   }
360   // Code which is part of the code-range does not have its own VirtualMemory.
361   DCHECK(isolate_->code_range() == NULL ||
362          !isolate_->code_range()->contains(
363              static_cast<Address>(reservation->address())));
364   DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
365          !isolate_->code_range()->valid() || size <= Page::kPageSize);
366
367   reservation->Release();
368 }
369
370
371 void MemoryAllocator::FreeMemory(Address base, size_t size,
372                                  Executability executable) {
373   // TODO(gc) make code_range part of memory allocator?
374   DCHECK(size_ >= size);
375   size_ -= size;
376
377   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
378
379   if (executable == EXECUTABLE) {
380     DCHECK(size_executable_ >= size);
381     size_executable_ -= size;
382   }
383   if (isolate_->code_range() != NULL &&
384       isolate_->code_range()->contains(static_cast<Address>(base))) {
385     DCHECK(executable == EXECUTABLE);
386     isolate_->code_range()->FreeRawMemory(base, size);
387   } else {
388     DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
389            !isolate_->code_range()->valid());
390     bool result = base::VirtualMemory::ReleaseRegion(base, size);
391     USE(result);
392     DCHECK(result);
393   }
394 }
395
396
397 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
398                                               base::VirtualMemory* controller) {
399   base::VirtualMemory reservation(size, alignment);
400
401   if (!reservation.IsReserved()) return NULL;
402   size_ += reservation.size();
403   Address base =
404       RoundUp(static_cast<Address>(reservation.address()), alignment);
405   controller->TakeControl(&reservation);
406   return base;
407 }
408
409
410 Address MemoryAllocator::AllocateAlignedMemory(
411     size_t reserve_size, size_t commit_size, size_t alignment,
412     Executability executable, base::VirtualMemory* controller) {
413   DCHECK(commit_size <= reserve_size);
414   base::VirtualMemory reservation;
415   Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
416   if (base == NULL) return NULL;
417
418   if (executable == EXECUTABLE) {
419     if (!CommitExecutableMemory(&reservation, base, commit_size,
420                                 reserve_size)) {
421       base = NULL;
422     }
423   } else {
424     if (reservation.Commit(base, commit_size, false)) {
425       UpdateAllocatedSpaceLimits(base, base + commit_size);
426     } else {
427       base = NULL;
428     }
429   }
430
431   if (base == NULL) {
432     // Failed to commit the body. Release the mapping and any partially
433   // committed regions inside it.
434     reservation.Release();
435     return NULL;
436   }
437
438   controller->TakeControl(&reservation);
439   return base;
440 }
441
442
443 void Page::InitializeAsAnchor(PagedSpace* owner) {
444   set_owner(owner);
445   set_prev_page(this);
446   set_next_page(this);
447 }
448
449
450 NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
451                                        SemiSpace* semi_space) {
452   Address area_start = start + NewSpacePage::kObjectStartOffset;
453   Address area_end = start + Page::kPageSize;
454
455   MemoryChunk* chunk =
456       MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
457                               area_end, NOT_EXECUTABLE, semi_space);
458   chunk->set_next_chunk(NULL);
459   chunk->set_prev_chunk(NULL);
460   chunk->initialize_scan_on_scavenge(true);
461   bool in_to_space = (semi_space->id() != kFromSpace);
462   chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
463                              : MemoryChunk::IN_FROM_SPACE);
464   DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
465                                        : MemoryChunk::IN_TO_SPACE));
466   NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
467   heap->incremental_marking()->SetNewSpacePageFlags(page);
468   return page;
469 }
470
471
472 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
473   set_owner(semi_space);
474   set_next_chunk(this);
475   set_prev_chunk(this);
476   // The flags mark this invalid page as not being in new-space.
477   // All real new-space pages will be in new-space.
478   SetFlags(0, ~0);
479 }
480
481
482 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
483                                      Address area_start, Address area_end,
484                                      Executability executable, Space* owner) {
485   MemoryChunk* chunk = FromAddress(base);
486
487   DCHECK(base == chunk->address());
488
489   chunk->heap_ = heap;
490   chunk->size_ = size;
491   chunk->area_start_ = area_start;
492   chunk->area_end_ = area_end;
493   chunk->flags_ = 0;
494   chunk->set_owner(owner);
495   chunk->InitializeReservedMemory();
496   chunk->slots_buffer_ = NULL;
497   chunk->skip_list_ = NULL;
498   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
499   chunk->progress_bar_ = 0;
500   chunk->high_water_mark_ = static_cast<int>(area_start - base);
501   chunk->set_parallel_sweeping(SWEEPING_DONE);
502   chunk->available_in_small_free_list_ = 0;
503   chunk->available_in_medium_free_list_ = 0;
504   chunk->available_in_large_free_list_ = 0;
505   chunk->available_in_huge_free_list_ = 0;
506   chunk->non_available_small_blocks_ = 0;
507   chunk->ResetLiveBytes();
508   Bitmap::Clear(chunk);
509   chunk->initialize_scan_on_scavenge(false);
510   chunk->SetFlag(WAS_SWEPT);
511
512   DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
513   DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
514
515   if (executable == EXECUTABLE) {
516     chunk->SetFlag(IS_EXECUTABLE);
517   }
518
519   return chunk;
520 }
521
522
523 // Commit MemoryChunk area to the requested size.
524 bool MemoryChunk::CommitArea(size_t requested) {
525   size_t guard_size =
526       IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
527   size_t header_size = area_start() - address() - guard_size;
528   size_t commit_size =
529       RoundUp(header_size + requested, base::OS::CommitPageSize());
530   size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
531                                   base::OS::CommitPageSize());
532
533   if (commit_size > committed_size) {
534     // Commit size should be less than or equal to the reserved size.
535     DCHECK(commit_size <= size() - 2 * guard_size);
536     // Append the committed area.
537     Address start = address() + committed_size + guard_size;
538     size_t length = commit_size - committed_size;
539     if (reservation_.IsReserved()) {
540       Executability executable =
541           IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
542       if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
543                                                                executable)) {
544         return false;
545       }
546     } else {
547       CodeRange* code_range = heap_->isolate()->code_range();
548       DCHECK(code_range != NULL && code_range->valid() &&
549              IsFlagSet(IS_EXECUTABLE));
550       if (!code_range->CommitRawMemory(start, length)) return false;
551     }
552
553     if (Heap::ShouldZapGarbage()) {
554       heap_->isolate()->memory_allocator()->ZapBlock(start, length);
555     }
556   } else if (commit_size < committed_size) {
557     DCHECK(commit_size > 0);
558     // Shrink the committed area.
559     size_t length = committed_size - commit_size;
560     Address start = address() + committed_size + guard_size - length;
561     if (reservation_.IsReserved()) {
562       if (!reservation_.Uncommit(start, length)) return false;
563     } else {
564       CodeRange* code_range = heap_->isolate()->code_range();
565       DCHECK(code_range != NULL && code_range->valid() &&
566              IsFlagSet(IS_EXECUTABLE));
567       if (!code_range->UncommitRawMemory(start, length)) return false;
568     }
569   }
570
571   area_end_ = area_start_ + requested;
572   return true;
573 }
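// In other words: growing the area commits whole OS pages immediately after
// the currently committed region (past the header and, for executable chunks,
// the leading guard page), while shrinking uncommits the excess OS pages from
// the tail of the committed area.  Chunks carved out of the CodeRange have no
// private VirtualMemory reservation and therefore go through
// CodeRange::CommitRawMemory / UncommitRawMemory instead.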
574
575
576 void MemoryChunk::InsertAfter(MemoryChunk* other) {
577   MemoryChunk* other_next = other->next_chunk();
578
579   set_next_chunk(other_next);
580   set_prev_chunk(other);
581   other_next->set_prev_chunk(this);
582   other->set_next_chunk(this);
583 }
584
585
586 void MemoryChunk::Unlink() {
587   MemoryChunk* next_element = next_chunk();
588   MemoryChunk* prev_element = prev_chunk();
589   next_element->set_prev_chunk(prev_element);
590   prev_element->set_next_chunk(next_element);
591   set_prev_chunk(NULL);
592   set_next_chunk(NULL);
593 }
594
595
596 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
597                                             intptr_t commit_area_size,
598                                             Executability executable,
599                                             Space* owner) {
600   DCHECK(commit_area_size <= reserve_area_size);
601
602   size_t chunk_size;
603   Heap* heap = isolate_->heap();
604   Address base = NULL;
605   base::VirtualMemory reservation;
606   Address area_start = NULL;
607   Address area_end = NULL;
608
609   //
610   // MemoryChunk layout:
611   //
612   //             Executable
613   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
614   // |           Header           |
615   // +----------------------------+<- base + CodePageGuardStartOffset
616   // |           Guard            |
617   // +----------------------------+<- area_start_
618   // |           Area             |
619   // +----------------------------+<- area_end_ (area_start + commit_area_size)
620   // |   Committed but not used   |
621   // +----------------------------+<- aligned at OS page boundary
622   // | Reserved but not committed |
623   // +----------------------------+<- aligned at OS page boundary
624   // |           Guard            |
625   // +----------------------------+<- base + chunk_size
626   //
627   //           Non-executable
628   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
629   // |          Header            |
630   // +----------------------------+<- area_start_ (base + kObjectStartOffset)
631   // |           Area             |
632   // +----------------------------+<- area_end_ (area_start + commit_area_size)
633   // |  Committed but not used    |
634   // +----------------------------+<- aligned at OS page boundary
635   // | Reserved but not committed |
636   // +----------------------------+<- base + chunk_size
637   //
638
639   if (executable == EXECUTABLE) {
640     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
641                          base::OS::CommitPageSize()) +
642                  CodePageGuardSize();
643
644     // Check executable memory limit.
645     if (size_executable_ + chunk_size > capacity_executable_) {
646       LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
647                                 "V8 Executable Allocation capacity exceeded"));
648       return NULL;
649     }
650
651     // Size of header (not executable) plus area (executable).
652     size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
653                                  base::OS::CommitPageSize());
654     // Allocate executable memory either from code range or from the
655     // OS.
656 #ifdef V8_TARGET_ARCH_MIPS64
657     // Use the code range only for the large object space on mips64, to keep
658     // the address range within a 256 MB memory region.
659     if (isolate_->code_range() != NULL && isolate_->code_range()->valid() &&
660         reserve_area_size > CodePageAreaSize()) {
661 #else
662     if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
663 #endif
664       base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
665                                                        &chunk_size);
666       DCHECK(
667           IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
668       if (base == NULL) return NULL;
669       size_ += chunk_size;
670       // Update executable memory size.
671       size_executable_ += chunk_size;
672     } else {
673       base = AllocateAlignedMemory(chunk_size, commit_size,
674                                    MemoryChunk::kAlignment, executable,
675                                    &reservation);
676       if (base == NULL) return NULL;
677       // Update executable memory size.
678       size_executable_ += reservation.size();
679     }
680
681     if (Heap::ShouldZapGarbage()) {
682       ZapBlock(base, CodePageGuardStartOffset());
683       ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
684     }
685
686     area_start = base + CodePageAreaStartOffset();
687     area_end = area_start + commit_area_size;
688   } else {
689     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
690                          base::OS::CommitPageSize());
691     size_t commit_size =
692         RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
693                 base::OS::CommitPageSize());
694     base =
695         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
696                               executable, &reservation);
697
698     if (base == NULL) return NULL;
699
700     if (Heap::ShouldZapGarbage()) {
701       ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
702     }
703
704     area_start = base + Page::kObjectStartOffset;
705     area_end = area_start + commit_area_size;
706   }
707
708   // Use chunk_size for statistics and callbacks because we assume that they
709   // treat reserved but not-yet committed memory regions of chunks as allocated.
710   isolate_->counters()->memory_allocated()->Increment(
711       static_cast<int>(chunk_size));
712
713   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
714   if (owner != NULL) {
715     ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
716     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
717   }
718
719   MemoryChunk* result = MemoryChunk::Initialize(
720       heap, base, chunk_size, area_start, area_end, executable, owner);
721   result->set_reserved_memory(&reservation);
722   return result;
723 }
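// Size relationship for the non-executable case above, tying the code to the
// diagram: the reservation covers kObjectStartOffset + reserve_area_size
// (rounded up to an OS page), but only kObjectStartOffset + commit_area_size
// of it is committed up front.  The difference is the "Reserved but not
// committed" region, which MemoryChunk::CommitArea can commit later on demand.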
724
725
726 void Page::ResetFreeListStatistics() {
727   non_available_small_blocks_ = 0;
728   available_in_small_free_list_ = 0;
729   available_in_medium_free_list_ = 0;
730   available_in_large_free_list_ = 0;
731   available_in_huge_free_list_ = 0;
732 }
733
734
735 Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
736                                     Executability executable) {
737   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
738
739   if (chunk == NULL) return NULL;
740
741   return Page::Initialize(isolate_->heap(), chunk, executable, owner);
742 }
743
744
745 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
746                                               Space* owner,
747                                               Executability executable) {
748   MemoryChunk* chunk =
749       AllocateChunk(object_size, object_size, executable, owner);
750   if (chunk == NULL) return NULL;
751   return LargePage::Initialize(isolate_->heap(), chunk);
752 }
753
754
755 void MemoryAllocator::Free(MemoryChunk* chunk) {
756   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
757   if (chunk->owner() != NULL) {
758     ObjectSpace space =
759         static_cast<ObjectSpace>(1 << chunk->owner()->identity());
760     PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
761   }
762
763   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
764                                          chunk->IsEvacuationCandidate());
765
766   delete chunk->slots_buffer();
767   delete chunk->skip_list();
768
769   base::VirtualMemory* reservation = chunk->reserved_memory();
770   if (reservation->IsReserved()) {
771     FreeMemory(reservation, chunk->executable());
772   } else {
773     FreeMemory(chunk->address(), chunk->size(), chunk->executable());
774   }
775 }
776
777
778 bool MemoryAllocator::CommitBlock(Address start, size_t size,
779                                   Executability executable) {
780   if (!CommitMemory(start, size, executable)) return false;
781
782   if (Heap::ShouldZapGarbage()) {
783     ZapBlock(start, size);
784   }
785
786   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
787   return true;
788 }
789
790
791 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
792   if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
793   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
794   return true;
795 }
796
797
798 void MemoryAllocator::ZapBlock(Address start, size_t size) {
799   for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
800     Memory::Address_at(start + s) = kZapValue;
801   }
802 }
803
804
805 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
806                                                 AllocationAction action,
807                                                 size_t size) {
808   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
809     MemoryAllocationCallbackRegistration registration =
810         memory_allocation_callbacks_[i];
811     if ((registration.space & space) == space &&
812         (registration.action & action) == action)
813       registration.callback(space, action, static_cast<int>(size));
814   }
815 }
816
817
818 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
819     MemoryAllocationCallback callback) {
820   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
821     if (memory_allocation_callbacks_[i].callback == callback) return true;
822   }
823   return false;
824 }
825
826
827 void MemoryAllocator::AddMemoryAllocationCallback(
828     MemoryAllocationCallback callback, ObjectSpace space,
829     AllocationAction action) {
830   DCHECK(callback != NULL);
831   MemoryAllocationCallbackRegistration registration(callback, space, action);
832   DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
833   return memory_allocation_callbacks_.Add(registration);
834 }
835
836
837 void MemoryAllocator::RemoveMemoryAllocationCallback(
838     MemoryAllocationCallback callback) {
839   DCHECK(callback != NULL);
840   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
841     if (memory_allocation_callbacks_[i].callback == callback) {
842       memory_allocation_callbacks_.Remove(i);
843       return;
844     }
845   }
846   UNREACHABLE();
847 }
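// Registration sketch (MyAllocationObserver is a hypothetical callback name):
// registered callbacks are filtered by the space and action masks checked in
// PerformAllocationCallback above.
//
//   void MyAllocationObserver(ObjectSpace space, AllocationAction action,
//                             int size) { /* record or log the event */ }
//
//   isolate->memory_allocator()->AddMemoryAllocationCallback(
//       &MyAllocationObserver, kObjectSpaceCodeSpace, kAllocationActionAllocate);
//   ...
//   isolate->memory_allocator()->RemoveMemoryAllocationCallback(
//       &MyAllocationObserver);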
848
849
850 #ifdef DEBUG
851 void MemoryAllocator::ReportStatistics() {
852   float pct = static_cast<float>(capacity_ - size_) / capacity_;
853   PrintF("  capacity: %" V8_PTR_PREFIX
854          "d"
855          ", used: %" V8_PTR_PREFIX
856          "d"
857          ", available: %%%d\n\n",
858          capacity_, size_, static_cast<int>(pct * 100));
859 }
860 #endif
861
862
863 int MemoryAllocator::CodePageGuardStartOffset() {
864   // We are guarding code pages: the first OS page after the header
865   // will be protected as non-writable.
866   return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
867 }
868
869
870 int MemoryAllocator::CodePageGuardSize() {
871   return static_cast<int>(base::OS::CommitPageSize());
872 }
873
874
875 int MemoryAllocator::CodePageAreaStartOffset() {
876   // We are guarding code pages: the first OS page after the header
877   // will be protected as non-writable.
878   return CodePageGuardStartOffset() + CodePageGuardSize();
879 }
880
881
882 int MemoryAllocator::CodePageAreaEndOffset() {
883   // We are guarding code pages: the last OS page will be protected as
884   // non-writable.
885   return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
886 }
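// Worked example (illustrative; assumes a 4 KB OS commit page size): the guard
// region starts at Page::kObjectStartOffset rounded up to 4 KB, the guard
// itself is one 4 KB page, the executable area begins right after that guard,
// and the area ends one 4 KB page before the end of the page.  This matches
// the executable-chunk diagram in MemoryAllocator::AllocateChunk.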
887
888
889 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
890                                              Address start, size_t commit_size,
891                                              size_t reserved_size) {
892   // Commit page header (not executable).
893   Address header = start;
894   size_t header_size = CodePageGuardStartOffset();
895   if (vm->Commit(header, header_size, false)) {
896     // Create guard page after the header.
897     if (vm->Guard(start + CodePageGuardStartOffset())) {
898       // Commit page body (executable).
899       Address body = start + CodePageAreaStartOffset();
900       size_t body_size = commit_size - CodePageGuardStartOffset();
901       if (vm->Commit(body, body_size, true)) {
902         // Create guard page before the end.
903         if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
904           UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
905                                                 commit_size -
906                                                 CodePageGuardStartOffset());
907           return true;
908         }
909         vm->Uncommit(body, body_size);
910       }
911     }
912     vm->Uncommit(header, header_size);
913   }
914   return false;
915 }
916
917
918 // -----------------------------------------------------------------------------
919 // MemoryChunk implementation
920
921 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
922   MemoryChunk* chunk = MemoryChunk::FromAddress(address);
923   if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
924     static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
925   }
926   chunk->IncrementLiveBytes(by);
927 }
928
929
930 // -----------------------------------------------------------------------------
931 // PagedSpace implementation
932
933 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
934               ObjectSpace::kObjectSpaceNewSpace);
935 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
936               ObjectSpace::kObjectSpaceOldSpace);
937 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
938               ObjectSpace::kObjectSpaceCodeSpace);
939 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
940               ObjectSpace::kObjectSpaceMapSpace);
941
942
943 PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace space,
944                        Executability executable)
945     : Space(heap, space, executable),
946       free_list_(this),
947       unswept_free_bytes_(0),
948       end_of_unswept_pages_(NULL),
949       emergency_memory_(NULL) {
950   area_size_ = MemoryAllocator::PageAreaSize(space);
951   max_capacity_ =
952       (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
953   accounting_stats_.Clear();
954
955   allocation_info_.set_top(NULL);
956   allocation_info_.set_limit(NULL);
957
958   anchor_.InitializeAsAnchor(this);
959 }
960
961
962 bool PagedSpace::SetUp() { return true; }
963
964
965 bool PagedSpace::HasBeenSetUp() { return true; }
966
967
968 void PagedSpace::TearDown() {
969   PageIterator iterator(this);
970   while (iterator.has_next()) {
971     heap()->isolate()->memory_allocator()->Free(iterator.next());
972   }
973   anchor_.set_next_page(&anchor_);
974   anchor_.set_prev_page(&anchor_);
975   accounting_stats_.Clear();
976 }
977
978
979 size_t PagedSpace::CommittedPhysicalMemory() {
980   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
981   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
982   size_t size = 0;
983   PageIterator it(this);
984   while (it.has_next()) {
985     size += it.next()->CommittedPhysicalMemory();
986   }
987   return size;
988 }
989
990
991 bool PagedSpace::ContainsSafe(Address addr) {
992   Page* p = Page::FromAddress(addr);
993   PageIterator iterator(this);
994   while (iterator.has_next()) {
995     if (iterator.next() == p) return true;
996   }
997   return false;
998 }
999
1000
1001 Object* PagedSpace::FindObject(Address addr) {
1002   // Note: this function can only be called on iterable spaces.
1003   DCHECK(!heap()->mark_compact_collector()->in_use());
1004
1005   if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.
1006
1007   Page* p = Page::FromAddress(addr);
1008   HeapObjectIterator it(p, NULL);
1009   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
1010     Address cur = obj->address();
1011     Address next = cur + obj->Size();
1012     if ((cur <= addr) && (addr < next)) return obj;
1013   }
1014
1015   UNREACHABLE();
1016   return Smi::FromInt(0);
1017 }
1018
1019
1020 bool PagedSpace::CanExpand() {
1021   DCHECK(max_capacity_ % AreaSize() == 0);
1022   DCHECK(heap()->mark_compact_collector()->is_compacting() ||
1023          Capacity() <= heap()->MaxOldGenerationSize());
1024   DCHECK(heap()->CommittedOldGenerationMemory() <=
1025          heap()->MaxOldGenerationSize() +
1026              PagedSpace::MaxEmergencyMemoryAllocated());
1027
1028   // Are we going to exceed capacity for this space?
1029   if (!heap()->CanExpandOldGeneration(Page::kPageSize)) return false;
1030
1031   return true;
1032 }
1033
1034
1035 bool PagedSpace::Expand() {
1036   if (!CanExpand()) return false;
1037
1038   intptr_t size = AreaSize();
1039
1040   if (anchor_.next_page() == &anchor_) {
1041     size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
1042   }
1043
1044   Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
1045                                                                 executable());
1046   if (p == NULL) return false;
1047
1048   // Pages created during bootstrapping may contain immortal immovable objects.
1049   if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
1050
1051   DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
1052   DCHECK(heap()->CommittedOldGenerationMemory() <=
1053          heap()->MaxOldGenerationSize() +
1054              PagedSpace::MaxEmergencyMemoryAllocated());
1055
1056   p->InsertAfter(anchor_.prev_page());
1057
1058   return true;
1059 }
1060
1061
1062 int PagedSpace::CountTotalPages() {
1063   PageIterator it(this);
1064   int count = 0;
1065   while (it.has_next()) {
1066     it.next();
1067     count++;
1068   }
1069   return count;
1070 }
1071
1072
1073 void PagedSpace::ResetFreeListStatistics() {
1074   PageIterator page_iterator(this);
1075   while (page_iterator.has_next()) {
1076     Page* page = page_iterator.next();
1077     page->ResetFreeListStatistics();
1078   }
1079 }
1080
1081
1082 void PagedSpace::IncreaseCapacity(int size) {
1083   accounting_stats_.ExpandSpace(size);
1084 }
1085
1086
1087 void PagedSpace::ReleasePage(Page* page) {
1088   DCHECK(page->LiveBytes() == 0);
1089   DCHECK(AreaSize() == page->area_size());
1090
1091   if (page->WasSwept()) {
1092     intptr_t size = free_list_.EvictFreeListItems(page);
1093     accounting_stats_.AllocateBytes(size);
1094     DCHECK_EQ(AreaSize(), static_cast<int>(size));
1095   } else {
1096     DecreaseUnsweptFreeBytes(page);
1097   }
1098
1099   if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
1100     heap()->decrement_scan_on_scavenge_pages();
1101     page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
1102   }
1103
1104   DCHECK(!free_list_.ContainsPageFreeListItems(page));
1105
1106   if (Page::FromAllocationTop(allocation_info_.top()) == page) {
1107     allocation_info_.set_top(NULL);
1108     allocation_info_.set_limit(NULL);
1109   }
1110
1111   // If page is still in a list, unlink it from that list.
1112   if (page->next_chunk() != NULL) {
1113     DCHECK(page->prev_chunk() != NULL);
1114     page->Unlink();
1115   }
1116
1117   heap()->QueueMemoryChunkForFree(page);
1118
1119   DCHECK(Capacity() > 0);
1120   accounting_stats_.ShrinkSpace(AreaSize());
1121 }
1122
1123
1124 intptr_t PagedSpace::MaxEmergencyMemoryAllocated() {
1125   // New space and large object space.
1126   static const int spaces_without_emergency_memory = 2;
1127   static const int spaces_with_emergency_memory =
1128       LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory;
1129   return Page::kPageSize * spaces_with_emergency_memory;
1130 }
1131
1132
1133 void PagedSpace::CreateEmergencyMemory() {
1134   if (identity() == CODE_SPACE) {
1135     // Make the emergency block available to the allocator.
1136     CodeRange* code_range = heap()->isolate()->code_range();
1137     if (code_range != NULL && code_range->valid()) {
1138       code_range->ReleaseEmergencyBlock();
1139     }
1140     DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
1141   }
1142   emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
1143       AreaSize(), AreaSize(), executable(), this);
1144 }
1145
1146
1147 void PagedSpace::FreeEmergencyMemory() {
1148   Page* page = static_cast<Page*>(emergency_memory_);
1149   DCHECK(page->LiveBytes() == 0);
1150   DCHECK(AreaSize() == page->area_size());
1151   DCHECK(!free_list_.ContainsPageFreeListItems(page));
1152   heap()->isolate()->memory_allocator()->Free(page);
1153   emergency_memory_ = NULL;
1154 }
1155
1156
1157 void PagedSpace::UseEmergencyMemory() {
1158   // Page::Initialize makes the chunk into a real page and adds it to the
1159   // accounting for this space.  Unlike PagedSpace::Expand, we don't check
1160   // CanExpand first, so we can go over the limits a little here.  That's OK,
1161   // because we are in the process of compacting which will free up at least as
1162   // much memory as it allocates.
1163   Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
1164   page->InsertAfter(anchor_.prev_page());
1165   emergency_memory_ = NULL;
1166 }
1167
1168
1169 #ifdef DEBUG
1170 void PagedSpace::Print() {}
1171 #endif
1172
1173 #ifdef VERIFY_HEAP
1174 void PagedSpace::Verify(ObjectVisitor* visitor) {
1175   bool allocation_pointer_found_in_space =
1176       (allocation_info_.top() == allocation_info_.limit());
1177   PageIterator page_iterator(this);
1178   while (page_iterator.has_next()) {
1179     Page* page = page_iterator.next();
1180     CHECK(page->owner() == this);
1181     if (page == Page::FromAllocationTop(allocation_info_.top())) {
1182       allocation_pointer_found_in_space = true;
1183     }
1184     CHECK(page->WasSwept());
1185     HeapObjectIterator it(page, NULL);
1186     Address end_of_previous_object = page->area_start();
1187     Address top = page->area_end();
1188     int black_size = 0;
1189     for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1190       CHECK(end_of_previous_object <= object->address());
1191
1192       // The first word should be a map, and we expect all map pointers to
1193       // be in map space.
1194       Map* map = object->map();
1195       CHECK(map->IsMap());
1196       CHECK(heap()->map_space()->Contains(map));
1197
1198       // Perform space-specific object verification.
1199       VerifyObject(object);
1200
1201       // The object itself should look OK.
1202       object->ObjectVerify();
1203
1204       // All the interior pointers should be contained in the heap.
1205       int size = object->Size();
1206       object->IterateBody(map->instance_type(), size, visitor);
1207       if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1208         black_size += size;
1209       }
1210
1211       CHECK(object->address() + size <= top);
1212       end_of_previous_object = object->address() + size;
1213     }
1214     CHECK_LE(black_size, page->LiveBytes());
1215   }
1216   CHECK(allocation_pointer_found_in_space);
1217 }
1218 #endif  // VERIFY_HEAP
1219
1220 // -----------------------------------------------------------------------------
1221 // NewSpace implementation
1222
1223
1224 bool NewSpace::SetUp(int reserved_semispace_capacity,
1225                      int maximum_semispace_capacity) {
1226   // Set up new space based on the preallocated memory block defined by
1227   // start and size. The provided space is divided into two semi-spaces.
1228   // To support fast containment testing in the new space, the size of
1229   // this chunk must be a power of two and it must be aligned to its size.
1230   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1231
1232   int target_semispace_capacity = heap()->TargetSemiSpaceSize();
1233
1234   size_t size = 2 * reserved_semispace_capacity;
1235   Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
1236       size, size, &reservation_);
1237   if (base == NULL) return false;
1238
1239   chunk_base_ = base;
1240   chunk_size_ = static_cast<uintptr_t>(size);
1241   LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1242
1243   DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
1244   DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
1245
1246   // Allocate and set up the histogram arrays if necessary.
1247   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1248   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1249
1250 #define SET_NAME(name)                        \
1251   allocated_histogram_[name].set_name(#name); \
1252   promoted_histogram_[name].set_name(#name);
1253   INSTANCE_TYPE_LIST(SET_NAME)
1254 #undef SET_NAME
1255
1256   DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1257   DCHECK(static_cast<intptr_t>(chunk_size_) >=
1258          2 * heap()->ReservedSemiSpaceSize());
1259   DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1260
1261   to_space_.SetUp(chunk_base_, initial_semispace_capacity,
1262                   target_semispace_capacity, maximum_semispace_capacity);
1263   from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1264                     initial_semispace_capacity, target_semispace_capacity,
1265                     maximum_semispace_capacity);
1266   if (!to_space_.Commit()) {
1267     return false;
1268   }
1269   DCHECK(!from_space_.is_committed());  // No need to use memory yet.
1270
1271   start_ = chunk_base_;
1272   address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1273   object_mask_ = address_mask_ | kHeapObjectTagMask;
1274   object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1275
1276   ResetAllocationInfo();
1277
1278   return true;
1279 }
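// The masks computed above make new-space containment checks a single
// mask-and-compare.  Sketch (the actual predicates live in the header, not in
// this file):
//
//   bool contains = (reinterpret_cast<uintptr_t>(addr) & address_mask_) ==
//                   reinterpret_cast<uintptr_t>(start_);
//
// This only works because the 2 * reserved_semispace_capacity chunk is a power
// of two in size and aligned to its size, as the comment and DCHECKs above
// require.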
1280
1281
1282 void NewSpace::TearDown() {
1283   if (allocated_histogram_) {
1284     DeleteArray(allocated_histogram_);
1285     allocated_histogram_ = NULL;
1286   }
1287   if (promoted_histogram_) {
1288     DeleteArray(promoted_histogram_);
1289     promoted_histogram_ = NULL;
1290   }
1291
1292   start_ = NULL;
1293   allocation_info_.set_top(NULL);
1294   allocation_info_.set_limit(NULL);
1295
1296   to_space_.TearDown();
1297   from_space_.TearDown();
1298
1299   LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1300
1301   DCHECK(reservation_.IsReserved());
1302   heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1303                                                     NOT_EXECUTABLE);
1304   chunk_base_ = NULL;
1305   chunk_size_ = 0;
1306 }
1307
1308
1309 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
1310
1311
1312 void NewSpace::Grow() {
313   // Grow the semispace size by the growth factor, but only up to maximum capacity.
1314   DCHECK(TotalCapacity() < MaximumCapacity());
1315   int new_capacity =
1316       Min(MaximumCapacity(),
1317           FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
1318   if (to_space_.GrowTo(new_capacity)) {
1319     // Only grow from space if we managed to grow to-space.
1320     if (!from_space_.GrowTo(new_capacity)) {
1321       // If we managed to grow to-space but couldn't grow from-space,
1322       // attempt to shrink to-space.
1323       if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
1324         // We are in an inconsistent state because we could not
1325         // commit/uncommit memory from new space.
1326         CHECK(false);
1327       }
1328     }
1329   }
1330   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1331 }
1332
1333
1334 bool NewSpace::GrowOnePage() {
1335   if (TotalCapacity() == MaximumCapacity()) return false;
1336   int new_capacity = static_cast<int>(TotalCapacity()) + Page::kPageSize;
1337   if (to_space_.GrowTo(new_capacity)) {
1338     // Only grow from space if we managed to grow to-space and the from space
1339     // is actually committed.
1340     if (from_space_.is_committed()) {
1341       if (!from_space_.GrowTo(new_capacity)) {
1342         // If we managed to grow to-space but couldn't grow from-space,
1343         // attempt to shrink to-space.
1344         if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
1345           // We are in an inconsistent state because we could not
1346           // commit/uncommit memory from new space.
1347           CHECK(false);
1348         }
1349         return false;
1350       }
1351     } else {
1352       if (!from_space_.SetTotalCapacity(new_capacity)) {
1353         // Can't really happen, but better safe than sorry.
1354         CHECK(false);
1355       }
1356     }
1357     DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1358     return true;
1359   }
1360   return false;
1361 }
1362
1363
1364 void NewSpace::Shrink() {
1365   int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
1366   int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1367   if (rounded_new_capacity < TotalCapacity() &&
1368       to_space_.ShrinkTo(rounded_new_capacity)) {
1369     // Only shrink from-space if we managed to shrink to-space.
1370     from_space_.Reset();
1371     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1372       // If we managed to shrink to-space but couldn't shrink from
1373       // space, attempt to grow to-space again.
1374       if (!to_space_.GrowTo(from_space_.TotalCapacity())) {
1375         // We are in an inconsistent state because we could not
1376         // commit/uncommit memory from new space.
1377         CHECK(false);
1378       }
1379     }
1380   }
1381   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1382 }
1383
1384
1385 void NewSpace::UpdateAllocationInfo() {
1386   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1387   allocation_info_.set_top(to_space_.page_low());
1388   allocation_info_.set_limit(to_space_.page_high());
1389   UpdateInlineAllocationLimit(0);
1390   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1391 }
1392
1393
1394 void NewSpace::ResetAllocationInfo() {
1395   to_space_.Reset();
1396   UpdateAllocationInfo();
1397   pages_used_ = 0;
1398   // Clear all mark-bits in the to-space.
1399   NewSpacePageIterator it(&to_space_);
1400   while (it.has_next()) {
1401     Bitmap::Clear(it.next());
1402   }
1403 }
1404
1405
1406 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1407   if (heap()->inline_allocation_disabled()) {
1408     // Lowest limit when linear allocation was disabled.
1409     Address high = to_space_.page_high();
1410     Address new_top = allocation_info_.top() + size_in_bytes;
1411     allocation_info_.set_limit(Min(new_top, high));
1412   } else if (inline_allocation_limit_step() == 0) {
1413     // Normal limit is the end of the current page.
1414     allocation_info_.set_limit(to_space_.page_high());
1415   } else {
1416     // Lower limit during incremental marking.
1417     Address high = to_space_.page_high();
1418     Address new_top = allocation_info_.top() + size_in_bytes;
1419     Address new_limit = new_top + inline_allocation_limit_step_;
1420     allocation_info_.set_limit(Min(new_limit, high));
1421   }
1422   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1423 }
1424
1425
1426 bool NewSpace::AddFreshPage() {
1427   Address top = allocation_info_.top();
1428   if (NewSpacePage::IsAtStart(top)) {
1429     // The current page is already empty. Don't try to make another.
1430
1431     // We should only get here if someone asks to allocate more
1432     // than what can be stored in a single page.
1433     // TODO(gc): Change the limit on new-space allocation to prevent this
1434     // from happening (all such allocations should go directly to LOSpace).
1435     return false;
1436   }
1437   if (!to_space_.AdvancePage()) {
1438     // Check if we reached the target capacity yet. If not, try to commit a page
1439     // and continue.
1440     if ((to_space_.TotalCapacity() < to_space_.TargetCapacity()) &&
1441         GrowOnePage()) {
1442       if (!to_space_.AdvancePage()) {
1443         // It doesn't make sense that we managed to commit a page, but can't use
1444         // it.
1445         CHECK(false);
1446       }
1447     } else {
1448       // Failed to get a new page in to-space.
1449       return false;
1450     }
1451   }
1452
1453   // Clear remainder of current page.
1454   Address limit = NewSpacePage::FromLimit(top)->area_end();
1455   if (heap()->gc_state() == Heap::SCAVENGE) {
1456     heap()->promotion_queue()->SetNewLimit(limit);
1457   }
1458
1459   int remaining_in_page = static_cast<int>(limit - top);
1460   heap()->CreateFillerObjectAt(top, remaining_in_page);
1461   pages_used_++;
1462   UpdateAllocationInfo();
1463
1464   return true;
1465 }
1466
1467
1468 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes,
1469                                            AllocationAlignment alignment) {
1470   Address old_top = allocation_info_.top();
1471   Address high = to_space_.page_high();
1472   if (allocation_info_.limit() < high) {
1473     int alignment_size = Heap::GetFillToAlign(old_top, alignment);
1474     int aligned_size_in_bytes = size_in_bytes + alignment_size;
1475
1476     // Either the limit has been lowered because linear allocation was disabled
1477     // or because incremental marking wants to get a chance to do a step. Set
1478     // the new limit accordingly.
1479     Address new_top = old_top + aligned_size_in_bytes;
1480     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1481     heap()->incremental_marking()->Step(bytes_allocated,
1482                                         IncrementalMarking::GC_VIA_STACK_GUARD);
1483     UpdateInlineAllocationLimit(aligned_size_in_bytes);
1484     top_on_previous_step_ = new_top;
1485     if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
1486     return AllocateRawAligned(size_in_bytes, alignment);
1487   } else if (AddFreshPage()) {
1488     // Switched to new page. Try allocating again.
1489     int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1490     heap()->incremental_marking()->Step(bytes_allocated,
1491                                         IncrementalMarking::GC_VIA_STACK_GUARD);
1492     top_on_previous_step_ = to_space_.page_low();
1493     if (alignment == kWordAligned) return AllocateRawUnaligned(size_in_bytes);
1494     return AllocateRawAligned(size_in_bytes, alignment);
1495   } else {
1496     return AllocationResult::Retry();
1497   }
1498 }
1499
1500
1501 #ifdef VERIFY_HEAP
1502 // We do not use the SemiSpaceIterator because verification doesn't assume
1503 // that it works (it depends on the invariants we are checking).
1504 void NewSpace::Verify() {
1505   // The allocation pointer should be in the space or at the very end.
1506   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1507
1508   // There should be objects packed in from the low address up to the
1509   // allocation pointer.
1510   Address current = to_space_.first_page()->area_start();
1511   CHECK_EQ(current, to_space_.space_start());
1512
1513   while (current != top()) {
1514     if (!NewSpacePage::IsAtEnd(current)) {
1515       // The allocation pointer should not be in the middle of an object.
1516       CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1517             current < top());
1518
1519       HeapObject* object = HeapObject::FromAddress(current);
1520
1521       // The first word should be a map, and we expect all map pointers to
1522       // be in map space.
1523       Map* map = object->map();
1524       CHECK(map->IsMap());
1525       CHECK(heap()->map_space()->Contains(map));
1526
1527       // The object should not be code or a map.
1528       CHECK(!object->IsMap());
1529       CHECK(!object->IsCode());
1530
1531       // The object itself should look OK.
1532       object->ObjectVerify();
1533
1534       // All the interior pointers should be contained in the heap.
1535       VerifyPointersVisitor visitor;
1536       int size = object->Size();
1537       object->IterateBody(map->instance_type(), size, &visitor);
1538
1539       current += size;
1540     } else {
1541       // At end of page, switch to next page.
1542       NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1543       // Next page should be valid.
1544       CHECK(!page->is_anchor());
1545       current = page->area_start();
1546     }
1547   }
1548
1549   // Check semi-spaces.
1550   CHECK_EQ(from_space_.id(), kFromSpace);
1551   CHECK_EQ(to_space_.id(), kToSpace);
1552   from_space_.Verify();
1553   to_space_.Verify();
1554 }
1555 #endif
1556
1557 // -----------------------------------------------------------------------------
1558 // SemiSpace implementation
1559
1560 void SemiSpace::SetUp(Address start, int initial_capacity, int target_capacity,
1561                       int maximum_capacity) {
1562   // Creates a space in the young generation. The constructor does not
1563   // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
1564   // memory of size 'capacity' when set up, and does not grow or shrink
1565   // otherwise.  In the mark-compact collector, the memory region of the from
1566   // space is used as the marking stack. It requires contiguous memory
1567   // addresses.
1568   DCHECK(maximum_capacity >= Page::kPageSize);
1569   DCHECK(initial_capacity <= target_capacity);
1570   DCHECK(target_capacity <= maximum_capacity);
1571   initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1572   total_capacity_ = initial_capacity;
1573   target_capacity_ = RoundDown(target_capacity, Page::kPageSize);
1574   maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1575   maximum_committed_ = 0;
1576   committed_ = false;
1577   start_ = start;
1578   address_mask_ = ~(maximum_capacity - 1);
1579   object_mask_ = address_mask_ | kHeapObjectTagMask;
1580   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
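  // Note (informal; the actual predicates live in spaces.h): these masks are
  // meant for cheap containment checks roughly of the form
  //   (reinterpret_cast<uintptr_t>(a) & address_mask_) == start_
  // which assumes the semispace reservation is aligned to maximum_capacity,
  // a power of two.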
1581   age_mark_ = start_ + NewSpacePage::kObjectStartOffset;
1582 }
1583
1584
1585 void SemiSpace::TearDown() {
1586   start_ = NULL;
1587   total_capacity_ = 0;
1588 }
1589
1590
1591 bool SemiSpace::Commit() {
1592   DCHECK(!is_committed());
1593   int pages = total_capacity_ / Page::kPageSize;
1594   if (!heap()->isolate()->memory_allocator()->CommitBlock(
1595           start_, total_capacity_, executable())) {
1596     return false;
1597   }
1598
1599   NewSpacePage* current = anchor();
1600   for (int i = 0; i < pages; i++) {
1601     NewSpacePage* new_page =
1602         NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
1603     new_page->InsertAfter(current);
1604     current = new_page;
1605   }
1606
1607   SetCapacity(total_capacity_);
1608   committed_ = true;
1609   Reset();
1610   return true;
1611 }
1612
1613
1614 bool SemiSpace::Uncommit() {
1615   DCHECK(is_committed());
1616   Address start = start_ + maximum_total_capacity_ - total_capacity_;
1617   if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
1618                                                             total_capacity_)) {
1619     return false;
1620   }
1621   anchor()->set_next_page(anchor());
1622   anchor()->set_prev_page(anchor());
1623
1624   committed_ = false;
1625   return true;
1626 }
1627
1628
1629 size_t SemiSpace::CommittedPhysicalMemory() {
1630   if (!is_committed()) return 0;
1631   size_t size = 0;
1632   NewSpacePageIterator it(this);
1633   while (it.has_next()) {
1634     size += it.next()->CommittedPhysicalMemory();
1635   }
1636   return size;
1637 }
1638
1639
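// GrowTo commits only the delta between the current and the requested
// capacity: the range [start_ + total_capacity_, start_ + new_capacity) is
// committed, and the corresponding NewSpacePages are linked in after the
// current last page, inheriting its to-space/from-space flags.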
1640 bool SemiSpace::GrowTo(int new_capacity) {
1641   if (!is_committed()) {
1642     if (!Commit()) return false;
1643   }
1644   DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1645   DCHECK(new_capacity <= maximum_total_capacity_);
1646   DCHECK(new_capacity > total_capacity_);
1647   int pages_before = total_capacity_ / Page::kPageSize;
1648   int pages_after = new_capacity / Page::kPageSize;
1649
1650   size_t delta = new_capacity - total_capacity_;
1651
1652   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1653   if (!heap()->isolate()->memory_allocator()->CommitBlock(
1654           start_ + total_capacity_, delta, executable())) {
1655     return false;
1656   }
1657   SetCapacity(new_capacity);
1658   NewSpacePage* last_page = anchor()->prev_page();
1659   DCHECK(last_page != anchor());
1660   for (int i = pages_before; i < pages_after; i++) {
1661     Address page_address = start_ + i * Page::kPageSize;
1662     NewSpacePage* new_page =
1663         NewSpacePage::Initialize(heap(), page_address, this);
1664     new_page->InsertAfter(last_page);
1665     Bitmap::Clear(new_page);
1666     // Duplicate the flags that were set on the old page.
1667     new_page->SetFlags(last_page->GetFlags(),
1668                        NewSpacePage::kCopyOnFlipFlagsMask);
1669     last_page = new_page;
1670   }
1671   return true;
1672 }
1673
1674
1675 bool SemiSpace::ShrinkTo(int new_capacity) {
1676   DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1677   DCHECK(new_capacity >= initial_total_capacity_);
1678   DCHECK(new_capacity < total_capacity_);
1679   if (is_committed()) {
1680     size_t delta = total_capacity_ - new_capacity;
1681     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1682
1683     MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1684     if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
1685       return false;
1686     }
1687
1688     int pages_after = new_capacity / Page::kPageSize;
1689     NewSpacePage* new_last_page =
1690         NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1691     new_last_page->set_next_page(anchor());
1692     anchor()->set_prev_page(new_last_page);
1693     DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
1694   }
1695
1696   SetCapacity(new_capacity);
1697
1698   return true;
1699 }
1700
1701
1702 bool SemiSpace::SetTotalCapacity(int new_capacity) {
1703   CHECK(!is_committed());
1704   if (new_capacity >= initial_total_capacity_ &&
1705       new_capacity <= maximum_total_capacity_) {
1706     total_capacity_ = new_capacity;
1707     return true;
1708   }
1709   return false;
1710 }
1711
1712
1713 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1714   anchor_.set_owner(this);
1715   // Fix up back-pointers to the anchor. The anchor's address changes
1716   // when we swap.
1717   anchor_.prev_page()->set_next_page(&anchor_);
1718   anchor_.next_page()->set_prev_page(&anchor_);
1719
1720   bool becomes_to_space = (id_ == kFromSpace);
1721   id_ = becomes_to_space ? kToSpace : kFromSpace;
1722   NewSpacePage* page = anchor_.next_page();
1723   while (page != &anchor_) {
1724     page->set_owner(this);
1725     page->SetFlags(flags, mask);
1726     if (becomes_to_space) {
1727       page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1728       page->SetFlag(MemoryChunk::IN_TO_SPACE);
1729       page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1730       page->ResetLiveBytes();
1731     } else {
1732       page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1733       page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1734     }
1735     DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1736     DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1737            page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1738     page = page->next_page();
1739   }
1740 }
1741
1742
1743 void SemiSpace::Reset() {
1744   DCHECK(anchor_.next_page() != &anchor_);
1745   current_page_ = anchor_.next_page();
1746 }
1747
1748
1749 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1750   // We won't be swapping semispaces without data in them.
1751   DCHECK(from->anchor_.next_page() != &from->anchor_);
1752   DCHECK(to->anchor_.next_page() != &to->anchor_);
1753
1754   // Swap bits.
1755   SemiSpace tmp = *from;
1756   *from = *to;
1757   *to = tmp;
1758
1759   // Fix up back-pointers to the page list anchor now that its address
1760   // has changed.
1761   // Swap to/from-space bits on pages.
1762   // Copy GC flags from old active space (from-space) to new (to-space).
1763   intptr_t flags = from->current_page()->GetFlags();
1764   to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1765
1766   from->FlipPages(0, 0);
1767 }
1768
1769
1770 void SemiSpace::SetCapacity(int new_capacity) {
1771   total_capacity_ = new_capacity;
1772   if (total_capacity_ > maximum_committed_) {
1773     maximum_committed_ = total_capacity_;
1774   }
1775 }
1776
1777
1778 void SemiSpace::set_age_mark(Address mark) {
1779   DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
1780   age_mark_ = mark;
1781   // Mark all pages up to the one containing mark.
1782   NewSpacePageIterator it(space_start(), mark);
1783   while (it.has_next()) {
1784     it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1785   }
1786 }
1787
1788
1789 #ifdef DEBUG
1790 void SemiSpace::Print() {}
1791 #endif
1792
1793 #ifdef VERIFY_HEAP
1794 void SemiSpace::Verify() {
1795   bool is_from_space = (id_ == kFromSpace);
1796   NewSpacePage* page = anchor_.next_page();
1797   CHECK(anchor_.semi_space() == this);
1798   while (page != &anchor_) {
1799     CHECK(page->semi_space() == this);
1800     CHECK(page->InNewSpace());
1801     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1802                                         : MemoryChunk::IN_TO_SPACE));
1803     CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1804                                          : MemoryChunk::IN_FROM_SPACE));
1805     CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1806     if (!is_from_space) {
1807       // The pointers-from-here-are-interesting flag isn't updated dynamically
1808       // on from-space pages, so it might be out of sync with the marking state.
1809       if (page->heap()->incremental_marking()->IsMarking()) {
1810         CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1811       } else {
1812         CHECK(
1813             !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1814       }
1815       // TODO(gc): Check that the live_bytes_count_ field matches the
1816       // black marking on the page (if we make it match in new-space).
1817     }
1818     CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1819     CHECK(page->prev_page()->next_page() == page);
1820     page = page->next_page();
1821   }
1822 }
1823 #endif
1824
1825 #ifdef DEBUG
1826 void SemiSpace::AssertValidRange(Address start, Address end) {
1827   // Both addresses must belong to the same semi-space.
1828   NewSpacePage* page = NewSpacePage::FromLimit(start);
1829   NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1830   SemiSpace* space = page->semi_space();
1831   CHECK_EQ(space, end_page->semi_space());
1832   // The start address must not come after the end address: either both are
1833   // on the same page, or the end address is on a later page in the linked
1834   // list of semi-space pages.
1835   if (page == end_page) {
1836     CHECK(start <= end);
1837   } else {
1838     while (page != end_page) {
1839       page = page->next_page();
1840       CHECK_NE(page, space->anchor());
1841     }
1842   }
1843 }
1844 #endif
1845
1846
1847 // -----------------------------------------------------------------------------
1848 // SemiSpaceIterator implementation.
1849 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1850   Initialize(space->bottom(), space->top(), NULL);
1851 }
1852
1853
1854 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1855                                      HeapObjectCallback size_func) {
1856   Initialize(space->bottom(), space->top(), size_func);
1857 }
1858
1859
1860 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1861   Initialize(start, space->top(), NULL);
1862 }
1863
1864
1865 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1866   Initialize(from, to, NULL);
1867 }
1868
1869
1870 void SemiSpaceIterator::Initialize(Address start, Address end,
1871                                    HeapObjectCallback size_func) {
1872   SemiSpace::AssertValidRange(start, end);
1873   current_ = start;
1874   limit_ = end;
1875   size_func_ = size_func;
1876 }
1877
1878
1879 #ifdef DEBUG
1880 // heap_histograms is shared, so always clear it before using it.
1881 static void ClearHistograms(Isolate* isolate) {
1882 // We reset the name each time, though it hasn't changed.
1883 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1884   INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1885 #undef DEF_TYPE_NAME
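// INSTANCE_TYPE_LIST is an X-macro list: INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
// expands to one set_name(#name) call per instance type, and the same pattern
// clears each histogram entry below.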
1886
1887 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1888   INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1889 #undef CLEAR_HISTOGRAM
1890
1891   isolate->js_spill_information()->Clear();
1892 }
1893
1894
1895 static void ClearCodeKindStatistics(int* code_kind_statistics) {
1896   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1897     code_kind_statistics[i] = 0;
1898   }
1899 }
1900
1901
1902 static void ReportCodeKindStatistics(int* code_kind_statistics) {
1903   PrintF("\n   Code kind histograms: \n");
1904   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1905     if (code_kind_statistics[i] > 0) {
1906       PrintF("     %-20s: %10d bytes\n",
1907              Code::Kind2String(static_cast<Code::Kind>(i)),
1908              code_kind_statistics[i]);
1909     }
1910   }
1911   PrintF("\n");
1912 }
1913
1914
1915 static int CollectHistogramInfo(HeapObject* obj) {
1916   Isolate* isolate = obj->GetIsolate();
1917   InstanceType type = obj->map()->instance_type();
1918   DCHECK(0 <= type && type <= LAST_TYPE);
1919   DCHECK(isolate->heap_histograms()[type].name() != NULL);
1920   isolate->heap_histograms()[type].increment_number(1);
1921   isolate->heap_histograms()[type].increment_bytes(obj->Size());
1922
1923   if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1924     JSObject::cast(obj)
1925         ->IncrementSpillStatistics(isolate->js_spill_information());
1926   }
1927
1928   return obj->Size();
1929 }
1930
1931
1932 static void ReportHistogram(Isolate* isolate, bool print_spill) {
1933   PrintF("\n  Object Histogram:\n");
1934   for (int i = 0; i <= LAST_TYPE; i++) {
1935     if (isolate->heap_histograms()[i].number() > 0) {
1936       PrintF("    %-34s%10d (%10d bytes)\n",
1937              isolate->heap_histograms()[i].name(),
1938              isolate->heap_histograms()[i].number(),
1939              isolate->heap_histograms()[i].bytes());
1940     }
1941   }
1942   PrintF("\n");
1943
1944   // Summarize string types.
1945   int string_number = 0;
1946   int string_bytes = 0;
1947 #define INCREMENT(type, size, name, camel_name)               \
1948   string_number += isolate->heap_histograms()[type].number(); \
1949   string_bytes += isolate->heap_histograms()[type].bytes();
1950   STRING_TYPE_LIST(INCREMENT)
1951 #undef INCREMENT
1952   if (string_number > 0) {
1953     PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1954            string_bytes);
1955   }
1956
1957   if (FLAG_collect_heap_spill_statistics && print_spill) {
1958     isolate->js_spill_information()->Print();
1959   }
1960 }
1961 #endif  // DEBUG
1962
1963
1964 // Support for statistics gathering for --heap-stats and --log-gc.
1965 void NewSpace::ClearHistograms() {
1966   for (int i = 0; i <= LAST_TYPE; i++) {
1967     allocated_histogram_[i].clear();
1968     promoted_histogram_[i].clear();
1969   }
1970 }
1971
1972
1973 // Because the copying collector does not touch garbage objects, we iterate
1974 // the new space before a collection to get a histogram of allocated objects.
1975 // This only happens when the --log-gc flag is set.
1976 void NewSpace::CollectStatistics() {
1977   ClearHistograms();
1978   SemiSpaceIterator it(this);
1979   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1980     RecordAllocation(obj);
1981 }
1982
1983
1984 static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
1985                                const char* description) {
1986   LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1987   // Lump all the string types together.
1988   int string_number = 0;
1989   int string_bytes = 0;
1990 #define INCREMENT(type, size, name, camel_name) \
1991   string_number += info[type].number();         \
1992   string_bytes += info[type].bytes();
1993   STRING_TYPE_LIST(INCREMENT)
1994 #undef INCREMENT
1995   if (string_number > 0) {
1996     LOG(isolate,
1997         HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1998   }
1999
2000   // Then do the other types.
2001   for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
2002     if (info[i].number() > 0) {
2003       LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
2004                                        info[i].bytes()));
2005     }
2006   }
2007   LOG(isolate, HeapSampleEndEvent("NewSpace", description));
2008 }
2009
2010
2011 void NewSpace::ReportStatistics() {
2012 #ifdef DEBUG
2013   if (FLAG_heap_stats) {
2014     float pct = static_cast<float>(Available()) / TotalCapacity();
2015     PrintF("  capacity: %" V8_PTR_PREFIX
2016            "d"
2017            ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2018            TotalCapacity(), Available(), static_cast<int>(pct * 100));
2019     PrintF("\n  Object Histogram:\n");
2020     for (int i = 0; i <= LAST_TYPE; i++) {
2021       if (allocated_histogram_[i].number() > 0) {
2022         PrintF("    %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
2023                allocated_histogram_[i].number(),
2024                allocated_histogram_[i].bytes());
2025       }
2026     }
2027     PrintF("\n");
2028   }
2029 #endif  // DEBUG
2030
2031   if (FLAG_log_gc) {
2032     Isolate* isolate = heap()->isolate();
2033     DoReportStatistics(isolate, allocated_histogram_, "allocated");
2034     DoReportStatistics(isolate, promoted_histogram_, "promoted");
2035   }
2036 }
2037
2038
2039 void NewSpace::RecordAllocation(HeapObject* obj) {
2040   InstanceType type = obj->map()->instance_type();
2041   DCHECK(0 <= type && type <= LAST_TYPE);
2042   allocated_histogram_[type].increment_number(1);
2043   allocated_histogram_[type].increment_bytes(obj->Size());
2044 }
2045
2046
2047 void NewSpace::RecordPromotion(HeapObject* obj) {
2048   InstanceType type = obj->map()->instance_type();
2049   DCHECK(0 <= type && type <= LAST_TYPE);
2050   promoted_histogram_[type].increment_number(1);
2051   promoted_histogram_[type].increment_bytes(obj->Size());
2052 }
2053
2054
2055 size_t NewSpace::CommittedPhysicalMemory() {
2056   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2057   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2058   size_t size = to_space_.CommittedPhysicalMemory();
2059   if (from_space_.is_committed()) {
2060     size += from_space_.CommittedPhysicalMemory();
2061   }
2062   return size;
2063 }
2064
2065
2066 // -----------------------------------------------------------------------------
2067 // Free lists for old object spaces implementation
2068
2069 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
2070   intptr_t free_bytes = 0;
2071   if (category->top() != NULL) {
2072     // This is safe (it cannot deadlock) because Concatenate operations are
2073     // never performed concurrently on the same pair of free lists in
2074     // reverse order.
2075     base::LockGuard<base::Mutex> target_lock_guard(mutex());
2076     base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
2077     DCHECK(category->end_ != NULL);
2078     free_bytes = category->available();
2079     if (end_ == NULL) {
2080       end_ = category->end();
2081     } else {
2082       category->end()->set_next(top());
2083     }
2084     set_top(category->top());
2085     base::NoBarrier_Store(&top_, category->top_);
2086     available_ += category->available();
2087     category->Reset();
2088   }
2089   return free_bytes;
2090 }
2091
2092
2093 void FreeListCategory::Reset() {
2094   set_top(NULL);
2095   set_end(NULL);
2096   set_available(0);
2097 }
2098
2099
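// EvictFreeListItemsInList unlinks every node that lives on page p. 'n'
// always points at the link slot leading to the node under inspection
// (initially a local copy of the top pointer), so removing a node is just a
// matter of overwriting that slot with the node's successor; nodes on other
// pages advance 'n' to their own 'next' slot instead.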
2100 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
2101   int sum = 0;
2102   FreeSpace* t = top();
2103   FreeSpace** n = &t;
2104   while (*n != NULL) {
2105     if (Page::FromAddress((*n)->address()) == p) {
2106       FreeSpace* free_space = *n;
2107       sum += free_space->Size();
2108       *n = (*n)->next();
2109     } else {
2110       n = (*n)->next_address();
2111     }
2112   }
2113   set_top(t);
2114   if (top() == NULL) {
2115     set_end(NULL);
2116   }
2117   available_ -= sum;
2118   return sum;
2119 }
2120
2121
2122 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
2123   FreeSpace* node = top();
2124   while (node != NULL) {
2125     if (Page::FromAddress(node->address()) == p) return true;
2126     node = node->next();
2127   }
2128   return false;
2129 }
2130
2131
2132 FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
2133   FreeSpace* node = top();
2134
2135   if (node == NULL) return NULL;
2136
2137   while (node != NULL &&
2138          Page::FromAddress(node->address())->IsEvacuationCandidate()) {
2139     available_ -= node->Size();
2140     node = node->next();
2141   }
2142
2143   if (node != NULL) {
2144     set_top(node->next());
2145     *node_size = node->Size();
2146     available_ -= *node_size;
2147   } else {
2148     set_top(NULL);
2149   }
2150
2151   if (top() == NULL) {
2152     set_end(NULL);
2153   }
2154
2155   return node;
2156 }
2157
2158
2159 FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
2160                                               int* node_size) {
2161   FreeSpace* node = PickNodeFromList(node_size);
2162   if (node != NULL && *node_size < size_in_bytes) {
2163     Free(node, *node_size);
2164     *node_size = 0;
2165     return NULL;
2166   }
2167   return node;
2168 }
2169
2170
2171 void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
2172   DCHECK_LE(FreeList::kSmallListMin, size_in_bytes);
2173   free_space->set_next(top());
2174   set_top(free_space);
2175   if (end_ == NULL) {
2176     end_ = free_space;
2177   }
2178   available_ += size_in_bytes;
2179 }
2180
2181
2182 void FreeListCategory::RepairFreeList(Heap* heap) {
2183   FreeSpace* n = top();
2184   while (n != NULL) {
2185     Map** map_location = reinterpret_cast<Map**>(n->address());
2186     if (*map_location == NULL) {
2187       *map_location = heap->free_space_map();
2188     } else {
2189       DCHECK(*map_location == heap->free_space_map());
2190     }
2191     n = n->next();
2192   }
2193 }
2194
2195
2196 FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
2197   Reset();
2198 }
2199
2200
2201 intptr_t FreeList::Concatenate(FreeList* free_list) {
2202   intptr_t free_bytes = 0;
2203   free_bytes += small_list_.Concatenate(free_list->small_list());
2204   free_bytes += medium_list_.Concatenate(free_list->medium_list());
2205   free_bytes += large_list_.Concatenate(free_list->large_list());
2206   free_bytes += huge_list_.Concatenate(free_list->huge_list());
2207   return free_bytes;
2208 }
2209
2210
2211 void FreeList::Reset() {
2212   small_list_.Reset();
2213   medium_list_.Reset();
2214   large_list_.Reset();
2215   huge_list_.Reset();
2216 }
2217
2218
2219 int FreeList::Free(Address start, int size_in_bytes) {
2220   if (size_in_bytes == 0) return 0;
2221
2222   heap_->CreateFillerObjectAt(start, size_in_bytes);
2223
2224   Page* page = Page::FromAddress(start);
2225
2226   // Early return to drop too-small blocks on the floor.
2227   if (size_in_bytes < kSmallListMin) {
2228     page->add_non_available_small_blocks(size_in_bytes);
2229     return size_in_bytes;
2230   }
2231
2232   FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
2233   // Insert other blocks at the head of a free list of the appropriate
2234   // magnitude.
2235   if (size_in_bytes <= kSmallListMax) {
2236     small_list_.Free(free_space, size_in_bytes);
2237     page->add_available_in_small_free_list(size_in_bytes);
2238   } else if (size_in_bytes <= kMediumListMax) {
2239     medium_list_.Free(free_space, size_in_bytes);
2240     page->add_available_in_medium_free_list(size_in_bytes);
2241   } else if (size_in_bytes <= kLargeListMax) {
2242     large_list_.Free(free_space, size_in_bytes);
2243     page->add_available_in_large_free_list(size_in_bytes);
2244   } else {
2245     huge_list_.Free(free_space, size_in_bytes);
2246     page->add_available_in_huge_free_list(size_in_bytes);
2247   }
2248
2249   DCHECK(IsVeryLong() || available() == SumFreeLists());
2250   return 0;
2251 }
2252
2253
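// FindNodeFor relies on the invariant (see the DCHECKs below) that every node
// stored in a category is at least as large as that category's allocation
// maximum, so small enough requests can take the head of the matching list
// without an explicit size check. Larger requests fall through to a linear
// search of the huge list (dropping nodes that sit on evacuation candidates
// along the way), and finally to a size-checked pick from the category that
// matches the request.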
2254 FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2255   FreeSpace* node = NULL;
2256   Page* page = NULL;
2257
2258   if (size_in_bytes <= kSmallAllocationMax) {
2259     node = small_list_.PickNodeFromList(node_size);
2260     if (node != NULL) {
2261       DCHECK(size_in_bytes <= *node_size);
2262       page = Page::FromAddress(node->address());
2263       page->add_available_in_small_free_list(-(*node_size));
2264       DCHECK(IsVeryLong() || available() == SumFreeLists());
2265       return node;
2266     }
2267   }
2268
2269   if (size_in_bytes <= kMediumAllocationMax) {
2270     node = medium_list_.PickNodeFromList(node_size);
2271     if (node != NULL) {
2272       DCHECK(size_in_bytes <= *node_size);
2273       page = Page::FromAddress(node->address());
2274       page->add_available_in_medium_free_list(-(*node_size));
2275       DCHECK(IsVeryLong() || available() == SumFreeLists());
2276       return node;
2277     }
2278   }
2279
2280   if (size_in_bytes <= kLargeAllocationMax) {
2281     node = large_list_.PickNodeFromList(node_size);
2282     if (node != NULL) {
2283       DCHECK(size_in_bytes <= *node_size);
2284       page = Page::FromAddress(node->address());
2285       page->add_available_in_large_free_list(-(*node_size));
2286       DCHECK(IsVeryLong() || available() == SumFreeLists());
2287       return node;
2288     }
2289   }
2290
2291   int huge_list_available = huge_list_.available();
2292   FreeSpace* top_node = huge_list_.top();
2293   for (FreeSpace** cur = &top_node; *cur != NULL;
2294        cur = (*cur)->next_address()) {
2295     FreeSpace* cur_node = *cur;
2296     while (cur_node != NULL &&
2297            Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
2298       int size = cur_node->Size();
2299       huge_list_available -= size;
2300       page = Page::FromAddress(cur_node->address());
2301       page->add_available_in_huge_free_list(-size);
2302       cur_node = cur_node->next();
2303     }
2304
2305     *cur = cur_node;
2306     if (cur_node == NULL) {
2307       huge_list_.set_end(NULL);
2308       break;
2309     }
2310
2311     int size = cur_node->Size();
2312     if (size >= size_in_bytes) {
2313       // Large enough node found.  Unlink it from the list.
2314       node = *cur;
2315       *cur = node->next();
2316       *node_size = size;
2317       huge_list_available -= size;
2318       page = Page::FromAddress(node->address());
2319       page->add_available_in_huge_free_list(-size);
2320       break;
2321     }
2322   }
2323
2324   huge_list_.set_top(top_node);
2325   if (huge_list_.top() == NULL) {
2326     huge_list_.set_end(NULL);
2327   }
2328   huge_list_.set_available(huge_list_available);
2329
2330   if (node != NULL) {
2331     DCHECK(IsVeryLong() || available() == SumFreeLists());
2332     return node;
2333   }
2334
2335   if (size_in_bytes <= kSmallListMax) {
2336     node = small_list_.PickNodeFromList(size_in_bytes, node_size);
2337     if (node != NULL) {
2338       DCHECK(size_in_bytes <= *node_size);
2339       page = Page::FromAddress(node->address());
2340       page->add_available_in_small_free_list(-(*node_size));
2341     }
2342   } else if (size_in_bytes <= kMediumListMax) {
2343     node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
2344     if (node != NULL) {
2345       DCHECK(size_in_bytes <= *node_size);
2346       page = Page::FromAddress(node->address());
2347       page->add_available_in_medium_free_list(-(*node_size));
2348     }
2349   } else if (size_in_bytes <= kLargeListMax) {
2350     node = large_list_.PickNodeFromList(size_in_bytes, node_size);
2351     if (node != NULL) {
2352       DCHECK(size_in_bytes <= *node_size);
2353       page = Page::FromAddress(node->address());
2354       page->add_available_in_large_free_list(-(*node_size));
2355     }
2356   }
2357
2358   DCHECK(IsVeryLong() || available() == SumFreeLists());
2359   return node;
2360 }
2361
2362
2363 // Allocation on the old space free list.  If it succeeds then a new linear
2364 // allocation space has been set up with the top and limit of the space.  If
2365 // the allocation fails then NULL is returned, and the caller can perform a GC
2366 // or allocate a new page before retrying.
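// The steps below are: return the remainder of the current linear allocation
// area to the free list, pick a node of at least size_in_bytes, count the
// node as allocated, and turn whatever is left of it into the new linear
// allocation area (kept empty, or capped, while incremental marking wants
// regular steps).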
2367 HeapObject* FreeList::Allocate(int size_in_bytes) {
2368   DCHECK(0 < size_in_bytes);
2369   DCHECK(size_in_bytes <= kMaxBlockSize);
2370   DCHECK(IsAligned(size_in_bytes, kPointerSize));
2371   // Don't free list allocate if there is linear space available.
2372   DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
2373
2374   int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2375   // Mark the old linear allocation area with a free space map so it can be
2376   // skipped when scanning the heap.  This also puts it back in the free list
2377   // if it is big enough.
2378   owner_->Free(owner_->top(), old_linear_size);
2379
2380   owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
2381                                                       old_linear_size);
2382
2383   int new_node_size = 0;
2384   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2385   if (new_node == NULL) {
2386     owner_->SetTopAndLimit(NULL, NULL);
2387     return NULL;
2388   }
2389
2390   int bytes_left = new_node_size - size_in_bytes;
2391   DCHECK(bytes_left >= 0);
2392
2393 #ifdef DEBUG
2394   for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2395     reinterpret_cast<Object**>(new_node->address())[i] =
2396         Smi::FromInt(kCodeZapValue);
2397   }
2398 #endif
2399
2400   // The old-space-step might have finished sweeping and restarted marking.
2401   // Verify that it did not turn the page of the new node into an evacuation
2402   // candidate.
2403   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2404
2405   const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2406
2407   // Memory in the linear allocation area is counted as allocated.  We may free
2408   // a little of this again immediately; see below.
2409   owner_->Allocate(new_node_size);
2410
2411   if (owner_->heap()->inline_allocation_disabled()) {
2412     // Keep the linear allocation area empty if requested to do so; just
2413     // return the remaining area to the free list instead.
2414     owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2415     DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
2416   } else if (bytes_left > kThreshold &&
2417              owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2418              FLAG_incremental_marking_steps) {
2419     int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2420     // We don't want to give too large linear areas to the allocator while
2421     // incremental marking is going on, because we won't check again whether
2422     // we want to do another increment until the linear area is used up.
2423     owner_->Free(new_node->address() + size_in_bytes + linear_size,
2424                  new_node_size - size_in_bytes - linear_size);
2425     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2426                            new_node->address() + size_in_bytes + linear_size);
2427   } else if (bytes_left > 0) {
2428     // Normally we give the rest of the node to the allocator as its new
2429     // linear allocation area.
2430     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2431                            new_node->address() + new_node_size);
2432   } else {
2433     // TODO(gc): Try not freeing the linear allocation region when
2434     // bytes_left is zero.
2435     owner_->SetTopAndLimit(NULL, NULL);
2436   }
2437
2438   return new_node;
2439 }
2440
2441
2442 intptr_t FreeList::EvictFreeListItems(Page* p) {
2443   intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
2444   p->set_available_in_huge_free_list(0);
2445
2446   if (sum < p->area_size()) {
2447     sum += small_list_.EvictFreeListItemsInList(p) +
2448            medium_list_.EvictFreeListItemsInList(p) +
2449            large_list_.EvictFreeListItemsInList(p);
2450     p->set_available_in_small_free_list(0);
2451     p->set_available_in_medium_free_list(0);
2452     p->set_available_in_large_free_list(0);
2453   }
2454
2455   return sum;
2456 }
2457
2458
2459 bool FreeList::ContainsPageFreeListItems(Page* p) {
2460   return huge_list_.ContainsPageFreeListItemsInList(p) ||
2461          small_list_.ContainsPageFreeListItemsInList(p) ||
2462          medium_list_.ContainsPageFreeListItemsInList(p) ||
2463          large_list_.ContainsPageFreeListItemsInList(p);
2464 }
2465
2466
2467 void FreeList::RepairLists(Heap* heap) {
2468   small_list_.RepairFreeList(heap);
2469   medium_list_.RepairFreeList(heap);
2470   large_list_.RepairFreeList(heap);
2471   huge_list_.RepairFreeList(heap);
2472 }
2473
2474
2475 #ifdef DEBUG
2476 intptr_t FreeListCategory::SumFreeList() {
2477   intptr_t sum = 0;
2478   FreeSpace* cur = top();
2479   while (cur != NULL) {
2480     DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
2481     sum += cur->nobarrier_size();
2482     cur = cur->next();
2483   }
2484   return sum;
2485 }
2486
2487
2488 static const int kVeryLongFreeList = 500;
2489
2490
2491 int FreeListCategory::FreeListLength() {
2492   int length = 0;
2493   FreeSpace* cur = top();
2494   while (cur != NULL) {
2495     length++;
2496     cur = cur->next();
2497     if (length == kVeryLongFreeList) return length;
2498   }
2499   return length;
2500 }
2501
2502
2503 bool FreeList::IsVeryLong() {
2504   if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
2505   if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
2506   if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
2507   if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
2508   return false;
2509 }
2510
2511
2512 // This can take a very long time because it is linear in the number of entries
2513 // on the free list, so it should not be called if FreeListLength returns
2514 // kVeryLongFreeList.
2515 intptr_t FreeList::SumFreeLists() {
2516   intptr_t sum = small_list_.SumFreeList();
2517   sum += medium_list_.SumFreeList();
2518   sum += large_list_.SumFreeList();
2519   sum += huge_list_.SumFreeList();
2520   return sum;
2521 }
2522 #endif
2523
2524
2525 // -----------------------------------------------------------------------------
2526 // OldSpace implementation
2527
2528 void PagedSpace::PrepareForMarkCompact() {
2529   // We don't have a linear allocation area while sweeping.  It will be restored
2530   // on the first allocation after the sweep.
2531   EmptyAllocationInfo();
2532
2533   // This counter will be increased for pages which will be swept by the
2534   // sweeper threads.
2535   unswept_free_bytes_ = 0;
2536
2537   // Clear the free list before a full GC; it will be rebuilt afterward.
2538   free_list_.Reset();
2539 }
2540
2541
2542 intptr_t PagedSpace::SizeOfObjects() {
2543   DCHECK(!FLAG_concurrent_sweeping ||
2544          heap()->mark_compact_collector()->sweeping_in_progress() ||
2545          (unswept_free_bytes_ == 0));
2546   return Size() - unswept_free_bytes_ - (limit() - top());
2547 }
2548
2549
2550 // After we have booted, we have created a map which represents free space
2551 // on the heap.  If there was already a free list then the elements on it
2552 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
2553 // fix them.
2554 void PagedSpace::RepairFreeListsAfterDeserialization() {
2555   free_list_.RepairLists(heap());
2556   // Each page may have a small free space that is not tracked by a free list.
2557   // Update the maps for those free space objects.
2558   PageIterator iterator(this);
2559   while (iterator.has_next()) {
2560     Page* page = iterator.next();
2561     int size = static_cast<int>(page->non_available_small_blocks());
2562     if (size == 0) continue;
2563     Address address = page->OffsetToAddress(Page::kPageSize - size);
2564     heap()->CreateFillerObjectAt(address, size);
2565   }
2566 }
2567
2568
2569 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2570   if (allocation_info_.top() >= allocation_info_.limit()) return;
2571
2572   if (Page::FromAllocationTop(allocation_info_.top())
2573           ->IsEvacuationCandidate()) {
2574     // Create filler object to keep page iterable if it was iterable.
2575     int remaining =
2576         static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2577     heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
2578
2579     allocation_info_.set_top(NULL);
2580     allocation_info_.set_limit(NULL);
2581   }
2582 }
2583
2584
2585 HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
2586     int size_in_bytes) {
2587   MarkCompactCollector* collector = heap()->mark_compact_collector();
2588   if (collector->sweeping_in_progress()) {
2589     // Wait for the sweeper threads here and complete the sweeping phase.
2590     collector->EnsureSweepingCompleted();
2591
2592     // After waiting for the sweeper threads, there may be new free-list
2593     // entries.
2594     return free_list_.Allocate(size_in_bytes);
2595   }
2596   return NULL;
2597 }
2598
2599
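// The slow path below tries, in order: refilling the free list from the
// concurrent sweeper, sweeping pages in parallel on the main thread, waiting
// for sweeping to complete (only once the old-generation allocation limit has
// been reached), expanding the space by a page, and finally waiting for the
// sweeper as a last resort.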
2600 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2601   // Allocation in this space has failed.
2602
2603   MarkCompactCollector* collector = heap()->mark_compact_collector();
2604   // Sweeping is still in progress.
2605   if (collector->sweeping_in_progress()) {
2606     // First try to refill the free-list; concurrent sweeper threads
2607     // may have freed some objects in the meantime.
2608     collector->RefillFreeList(this);
2609
2610     // Retry the free list allocation.
2611     HeapObject* object = free_list_.Allocate(size_in_bytes);
2612     if (object != NULL) return object;
2613
2614     // If sweeping is still in progress, try to sweep pages on the main thread.
2615     int free_chunk = collector->SweepInParallel(this, size_in_bytes);
2616     collector->RefillFreeList(this);
2617     if (free_chunk >= size_in_bytes) {
2618       HeapObject* object = free_list_.Allocate(size_in_bytes);
2619       // We should be able to allocate an object here since we just freed that
2620       // much memory.
2621       DCHECK(object != NULL);
2622       if (object != NULL) return object;
2623     }
2624   }
2625
2626   // Free list allocation failed and there is no next page.  Fail if we have
2627   // hit the old generation size limit that should cause a garbage
2628   // collection.
2629   if (!heap()->always_allocate() &&
2630       heap()->OldGenerationAllocationLimitReached()) {
2631     // If sweeper threads are active, wait for them at that point and steal
2632     // elements from their free-lists.
2633     HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2634     return object;
2635   }
2636
2637   // Try to expand the space and allocate in the new next page.
2638   if (Expand()) {
2639     DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
2640     return free_list_.Allocate(size_in_bytes);
2641   }
2642
2643   // If sweeper threads are active, wait for them at that point and steal
2644   // elements from their free-lists. Allocation may still fail then, which
2645   // would indicate that there is not enough memory for the given allocation.
2646   return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2647 }
2648
2649
2650 #ifdef DEBUG
2651 void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2652   CommentStatistic* comments_statistics =
2653       isolate->paged_space_comments_statistics();
2654   ReportCodeKindStatistics(isolate->code_kind_statistics());
2655   PrintF(
2656       "Code comment statistics (\"   [ comment-txt   :    size/   "
2657       "count  (average)\"):\n");
2658   for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2659     const CommentStatistic& cs = comments_statistics[i];
2660     if (cs.size > 0) {
2661       PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
2662              cs.size / cs.count);
2663     }
2664   }
2665   PrintF("\n");
2666 }
2667
2668
2669 void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2670   CommentStatistic* comments_statistics =
2671       isolate->paged_space_comments_statistics();
2672   ClearCodeKindStatistics(isolate->code_kind_statistics());
2673   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2674     comments_statistics[i].Clear();
2675   }
2676   comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2677   comments_statistics[CommentStatistic::kMaxComments].size = 0;
2678   comments_statistics[CommentStatistic::kMaxComments].count = 0;
2679 }
2680
2681
2682 // Adds a comment to the 'comments_statistics' table. Performance is OK as
2683 // long as 'kMaxComments' is small.
2684 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2685   CommentStatistic* comments_statistics =
2686       isolate->paged_space_comments_statistics();
2687   // Do not count empty comments
2688   if (delta <= 0) return;
2689   CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2690   // Search for a free or matching entry in 'comments_statistics'; 'cs'
2691   // points to the result.
2692   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2693     if (comments_statistics[i].comment == NULL) {
2694       cs = &comments_statistics[i];
2695       cs->comment = comment;
2696       break;
2697     } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2698       cs = &comments_statistics[i];
2699       break;
2700     }
2701   }
2702   // Update entry for 'comment'
2703   cs->size += delta;
2704   cs->count += 1;
2705 }
2706
2707
2708 // Called for each nested comment start (start marked with '[ xxx', end
2709 // marked with ']').  RelocIterator 'it' must point to a comment reloc info.
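// For example (illustrative), a stream of reloc comments
//   [ outer ... [ inner ... ] ... ]
// attributes the bytes covered by the inner pair to 'inner' and only the
// remaining, uncovered bytes to 'outer'.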
2710 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2711   DCHECK(!it->done());
2712   DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
2713   const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2714   if (tmp[0] != '[') {
2715     // Not a nested comment; skip
2716     return;
2717   }
2718
2719   // Search for end of nested comment or a new nested comment
2720   const char* const comment_txt =
2721       reinterpret_cast<const char*>(it->rinfo()->data());
2722   const byte* prev_pc = it->rinfo()->pc();
2723   int flat_delta = 0;
2724   it->next();
2725   while (true) {
2726     // All nested comments must be terminated properly, so the loop is
2727     // guaranteed to exit.
2728     DCHECK(!it->done());
2729     if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2730       const char* const txt =
2731           reinterpret_cast<const char*>(it->rinfo()->data());
2732       flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2733       if (txt[0] == ']') break;  // End of nested comment
2734       // A new comment
2735       CollectCommentStatistics(isolate, it);
2736       // Skip code that was covered by the previous comment
2737       prev_pc = it->rinfo()->pc();
2738     }
2739     it->next();
2740   }
2741   EnterComment(isolate, comment_txt, flat_delta);
2742 }
2743
2744
2745 // Collects code size statistics:
2746 // - by code kind
2747 // - by code comment
2748 void PagedSpace::CollectCodeStatistics() {
2749   Isolate* isolate = heap()->isolate();
2750   HeapObjectIterator obj_it(this);
2751   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2752     if (obj->IsCode()) {
2753       Code* code = Code::cast(obj);
2754       isolate->code_kind_statistics()[code->kind()] += code->Size();
2755       RelocIterator it(code);
2756       int delta = 0;
2757       const byte* prev_pc = code->instruction_start();
2758       while (!it.done()) {
2759         if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2760           delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2761           CollectCommentStatistics(isolate, &it);
2762           prev_pc = it.rinfo()->pc();
2763         }
2764         it.next();
2765       }
2766
2767       DCHECK(code->instruction_start() <= prev_pc &&
2768              prev_pc <= code->instruction_end());
2769       delta += static_cast<int>(code->instruction_end() - prev_pc);
2770       EnterComment(isolate, "NoComment", delta);
2771     }
2772   }
2773 }
2774
2775
2776 void PagedSpace::ReportStatistics() {
2777   int pct = static_cast<int>(Available() * 100 / Capacity());
2778   PrintF("  capacity: %" V8_PTR_PREFIX
2779          "d"
2780          ", waste: %" V8_PTR_PREFIX
2781          "d"
2782          ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2783          Capacity(), Waste(), Available(), pct);
2784
2785   if (heap()->mark_compact_collector()->sweeping_in_progress()) {
2786     heap()->mark_compact_collector()->EnsureSweepingCompleted();
2787   }
2788   ClearHistograms(heap()->isolate());
2789   HeapObjectIterator obj_it(this);
2790   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2791     CollectHistogramInfo(obj);
2792   ReportHistogram(heap()->isolate(), true);
2793 }
2794 #endif
2795
2796
2797 // -----------------------------------------------------------------------------
2798 // MapSpace implementation
2799 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2800 // there is at least one non-inlined virtual function. I would prefer to hide
2801 // the VerifyObject definition behind VERIFY_HEAP.
2802
2803 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
2804
2805
2806 // -----------------------------------------------------------------------------
2807 // LargeObjectIterator
2808
2809 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2810   current_ = space->first_page_;
2811   size_func_ = NULL;
2812 }
2813
2814
2815 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2816                                          HeapObjectCallback size_func) {
2817   current_ = space->first_page_;
2818   size_func_ = size_func;
2819 }
2820
2821
2822 HeapObject* LargeObjectIterator::Next() {
2823   if (current_ == NULL) return NULL;
2824
2825   HeapObject* object = current_->GetObject();
2826   current_ = current_->next_page();
2827   return object;
2828 }
2829
2830
2831 // -----------------------------------------------------------------------------
2832 // LargeObjectSpace
2833 static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
2834
2835
2836 LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
2837                                    AllocationSpace id)
2838     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
2839       max_capacity_(max_capacity),
2840       first_page_(NULL),
2841       size_(0),
2842       page_count_(0),
2843       objects_size_(0),
2844       chunk_map_(ComparePointers, 1024) {}
2845
2846
2847 bool LargeObjectSpace::SetUp() {
2848   first_page_ = NULL;
2849   size_ = 0;
2850   maximum_committed_ = 0;
2851   page_count_ = 0;
2852   objects_size_ = 0;
2853   chunk_map_.Clear();
2854   return true;
2855 }
2856
2857
2858 void LargeObjectSpace::TearDown() {
2859   while (first_page_ != NULL) {
2860     LargePage* page = first_page_;
2861     first_page_ = first_page_->next_page();
2862     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2863
2864     ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2865     heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2866         space, kAllocationActionFree, page->size());
2867     heap()->isolate()->memory_allocator()->Free(page);
2868   }
2869   SetUp();
2870 }
2871
2872
2873 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
2874                                                Executability executable) {
2875   // Check if we want to force a GC before growing the old space further.
2876   // If so, fail the allocation.
2877   if (!heap()->always_allocate() &&
2878       heap()->OldGenerationAllocationLimitReached()) {
2879     return AllocationResult::Retry(identity());
2880   }
2881
2882   if (!CanAllocateSize(object_size)) return AllocationResult::Retry(identity());
2883
2884   LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
2885       object_size, this, executable);
2886   if (page == NULL) return AllocationResult::Retry(identity());
2887   DCHECK(page->area_size() >= object_size);
2888
2889   size_ += static_cast<int>(page->size());
2890   objects_size_ += object_size;
2891   page_count_++;
2892   page->set_next_page(first_page_);
2893   first_page_ = page;
2894
2895   if (size_ > maximum_committed_) {
2896     maximum_committed_ = size_;
2897   }
2898
2899   // Register all MemoryChunk::kAlignment-aligned chunks covered by
2900   // this large page in the chunk map.
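  // Illustrative example: a large page spanning three alignment units gets
  // the keys base, base + 1 and base + 2, so FindPage() can later resolve any
  // interior address with a single hash lookup.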
2901   uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2902   uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2903   for (uintptr_t key = base; key <= limit; key++) {
2904     HashMap::Entry* entry = chunk_map_.LookupOrInsert(
2905         reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
2906     DCHECK(entry != NULL);
2907     entry->value = page;
2908   }
2909
2910   HeapObject* object = page->GetObject();
2911
2912   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
2913
2914   if (Heap::ShouldZapGarbage()) {
2915     // Make the object consistent so the heap can be verified in OldSpaceStep.
2916     // We only need to do this in debug builds or if verify_heap is on.
2917     reinterpret_cast<Object**>(object->address())[0] =
2918         heap()->fixed_array_map();
2919     reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2920   }
2921
2922   heap()->incremental_marking()->OldSpaceStep(object_size);
2923   return object;
2924 }
2925
2926
2927 size_t LargeObjectSpace::CommittedPhysicalMemory() {
2928   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2929   size_t size = 0;
2930   LargePage* current = first_page_;
2931   while (current != NULL) {
2932     size += current->CommittedPhysicalMemory();
2933     current = current->next_page();
2934   }
2935   return size;
2936 }
2937
2938
2939 // GC support
2940 Object* LargeObjectSpace::FindObject(Address a) {
2941   LargePage* page = FindPage(a);
2942   if (page != NULL) {
2943     return page->GetObject();
2944   }
2945   return Smi::FromInt(0);  // Signaling not found.
2946 }
2947
2948
2949 LargePage* LargeObjectSpace::FindPage(Address a) {
2950   uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
2951   HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2952                                         static_cast<uint32_t>(key));
2953   if (e != NULL) {
2954     DCHECK(e->value != NULL);
2955     LargePage* page = reinterpret_cast<LargePage*>(e->value);
2956     DCHECK(LargePage::IsValid(page));
2957     if (page->Contains(a)) {
2958       return page;
2959     }
2960   }
2961   return NULL;
2962 }
2963
2964
2965 void LargeObjectSpace::FreeUnmarkedObjects() {
2966   LargePage* previous = NULL;
2967   LargePage* current = first_page_;
2968   while (current != NULL) {
2969     HeapObject* object = current->GetObject();
2970     // Can this large page contain pointers to non-trivial objects?  No other
2971     // pointer object is this big.
2972     bool is_pointer_object = object->IsFixedArray();
2973     MarkBit mark_bit = Marking::MarkBitFrom(object);
2974     if (Marking::IsBlackOrGrey(mark_bit)) {
2975       Marking::BlackToWhite(mark_bit);
2976       Page::FromAddress(object->address())->ResetProgressBar();
2977       Page::FromAddress(object->address())->ResetLiveBytes();
2978       previous = current;
2979       current = current->next_page();
2980     } else {
2981       LargePage* page = current;
2982       // Cut the chunk out from the chunk list.
2983       current = current->next_page();
2984       if (previous == NULL) {
2985         first_page_ = current;
2986       } else {
2987         previous->set_next_page(current);
2988       }
2989
2990       // Free the chunk.
2991       heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
2992                                                              heap()->isolate());
2993       size_ -= static_cast<int>(page->size());
2994       objects_size_ -= object->Size();
2995       page_count_--;
2996
2997       // Remove entries belonging to this page.
2998       // Use variable alignment to help pass length check (<= 80 characters)
2999       // of single line in tools/presubmit.py.
3000       const intptr_t alignment = MemoryChunk::kAlignment;
3001       uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
3002       uintptr_t limit = base + (page->size() - 1) / alignment;
3003       for (uintptr_t key = base; key <= limit; key++) {
3004         chunk_map_.Remove(reinterpret_cast<void*>(key),
3005                           static_cast<uint32_t>(key));
3006       }
3007
3008       if (is_pointer_object) {
3009         heap()->QueueMemoryChunkForFree(page);
3010       } else {
3011         heap()->isolate()->memory_allocator()->Free(page);
3012       }
3013     }
3014   }
3015   heap()->FreeQueuedChunks();
3016 }
3017
3018
3019 bool LargeObjectSpace::Contains(HeapObject* object) {
3020   Address address = object->address();
3021   MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3022
3023   bool owned = (chunk->owner() == this);
3024
3025   SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
3026
3027   return owned;
3028 }
3029
3030
3031 #ifdef VERIFY_HEAP
3032 // We do not assume that the large object iterator works, because it depends
3033 // on the invariants we are checking during verification.
3034 void LargeObjectSpace::Verify() {
3035   for (LargePage* chunk = first_page_; chunk != NULL;
3036        chunk = chunk->next_page()) {
3037     // Each chunk contains an object that starts at the large object page's
3038     // object area start.
3039     HeapObject* object = chunk->GetObject();
3040     Page* page = Page::FromAddress(object->address());
3041     CHECK(object->address() == page->area_start());
3042
3043     // The first word should be a map, and we expect all map pointers to be
3044     // in map space.
3045     Map* map = object->map();
3046     CHECK(map->IsMap());
3047     CHECK(heap()->map_space()->Contains(map));
3048
3049     // We have only code, sequential strings, external strings
3050     // (sequential strings that have been morphed into external
3051     // strings), fixed arrays, fixed double arrays, and byte arrays in the
3052     // large object space.
3053     CHECK(object->IsCode() || object->IsSeqString() ||
3054           object->IsExternalString() || object->IsFixedArray() ||
3055           object->IsFixedDoubleArray() || object->IsByteArray());
3056
3057     // The object itself should look OK.
3058     object->ObjectVerify();
3059
3060     // Byte arrays and strings don't have interior pointers.
3061     if (object->IsCode()) {
3062       VerifyPointersVisitor code_visitor;
3063       object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
3064     } else if (object->IsFixedArray()) {
3065       FixedArray* array = FixedArray::cast(object);
3066       for (int j = 0; j < array->length(); j++) {
3067         Object* element = array->get(j);
3068         if (element->IsHeapObject()) {
3069           HeapObject* element_object = HeapObject::cast(element);
3070           CHECK(heap()->Contains(element_object));
3071           CHECK(element_object->map()->IsMap());
3072         }
3073       }
3074     }
3075   }
3076 }
3077 #endif
3078
3079
3080 #ifdef DEBUG
3081 void LargeObjectSpace::Print() {
3082   OFStream os(stdout);
3083   LargeObjectIterator it(this);
3084   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3085     obj->Print(os);
3086   }
3087 }
3088
3089
3090 void LargeObjectSpace::ReportStatistics() {
3091   PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
3092   int num_objects = 0;
3093   ClearHistograms(heap()->isolate());
3094   LargeObjectIterator it(this);
3095   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3096     num_objects++;
3097     CollectHistogramInfo(obj);
3098   }
3099
3100   PrintF(
3101       "  number of objects %d, "
3102       "size of objects %" V8_PTR_PREFIX "d\n",
3103       num_objects, objects_size_);
3104   if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3105 }
3106
3107
3108 void LargeObjectSpace::CollectCodeStatistics() {
3109   Isolate* isolate = heap()->isolate();
3110   LargeObjectIterator obj_it(this);
3111   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
3112     if (obj->IsCode()) {
3113       Code* code = Code::cast(obj);
3114       isolate->code_kind_statistics()[code->kind()] += code->Size();
3115     }
3116   }
3117 }
3118
3119
3120 void Page::Print() {
3121   // Make a best effort to print the objects in the page.
3122   PrintF("Page@%p in %s\n", this->address(),
3123          AllocationSpaceName(this->owner()->identity()));
3124   printf(" --------------------------------------\n");
3125   HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
3126   unsigned mark_size = 0;
3127   for (HeapObject* object = objects.Next(); object != NULL;
3128        object = objects.Next()) {
3129     bool is_marked = Marking::IsBlackOrGrey(Marking::MarkBitFrom(object));
3130     PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
3131     if (is_marked) {
3132       mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
3133     }
3134     object->ShortPrint();
3135     PrintF("\n");
3136   }
3137   printf(" --------------------------------------\n");
3138   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3139 }
3140
3141 #endif  // DEBUG
3142 }  // namespace internal
3143 }  // namespace v8