deps: update v8 to 4.3.61.21
[platform/upstream/nodejs.git] deps/v8/src/heap/spaces.cc
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #include "src/base/bits.h"
8 #include "src/base/platform/platform.h"
9 #include "src/full-codegen.h"
10 #include "src/heap/mark-compact.h"
11 #include "src/macro-assembler.h"
12 #include "src/msan.h"
13 #include "src/snapshot/snapshot.h"
14
15 namespace v8 {
16 namespace internal {
17
18
19 // ----------------------------------------------------------------------------
20 // HeapObjectIterator
21
22 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
23   // You can't actually iterate over the anchor page.  It is not a real page,
24   // just an anchor for the doubly linked page list.  Initialize as if we have
25   // reached the end of the anchor page, then the first iteration will move on
26   // to the first page.
27   Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
28 }
29
30
31 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
32                                        HeapObjectCallback size_func) {
33   // You can't actually iterate over the anchor page.  It is not a real page,
34   // just an anchor for the doubly linked page list.  Initialize the current
35   // address and end as NULL, then the first iteration will move on
36   // to the first page.
37   Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
38 }
39
40
41 HeapObjectIterator::HeapObjectIterator(Page* page,
42                                        HeapObjectCallback size_func) {
43   Space* owner = page->owner();
44   DCHECK(owner == page->heap()->old_pointer_space() ||
45          owner == page->heap()->old_data_space() ||
46          owner == page->heap()->map_space() ||
47          owner == page->heap()->cell_space() ||
48          owner == page->heap()->code_space());
49   Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
50              page->area_end(), kOnePageOnly, size_func);
51   DCHECK(page->WasSwept() || page->SweepingCompleted());
52 }
53
54
55 void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
56                                     HeapObjectIterator::PageMode mode,
57                                     HeapObjectCallback size_f) {
58   space_ = space;
59   cur_addr_ = cur;
60   cur_end_ = end;
61   page_mode_ = mode;
62   size_func_ = size_f;
63 }
64
65
66 // We have hit the end of the current page's objects and should advance to
67 // the next page.
68 bool HeapObjectIterator::AdvanceToNextPage() {
69   DCHECK(cur_addr_ == cur_end_);
70   if (page_mode_ == kOnePageOnly) return false;
71   Page* cur_page;
72   if (cur_addr_ == NULL) {
73     cur_page = space_->anchor();
74   } else {
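    // cur_addr_ is the area_end() of the page we just finished; that address
    // can coincide with the base of the following page, so step back one byte
    // before mapping it back to its page.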
75     cur_page = Page::FromAddress(cur_addr_ - 1);
76     DCHECK(cur_addr_ == cur_page->area_end());
77   }
78   cur_page = cur_page->next_page();
79   if (cur_page == space_->anchor()) return false;
80   cur_addr_ = cur_page->area_start();
81   cur_end_ = cur_page->area_end();
82   DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
83   return true;
84 }
85
86
87 // -----------------------------------------------------------------------------
88 // CodeRange
89
90
91 CodeRange::CodeRange(Isolate* isolate)
92     : isolate_(isolate),
93       code_range_(NULL),
94       free_list_(0),
95       allocation_list_(0),
96       current_allocation_block_index_(0),
97       emergency_block_() {}
98
99
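// Reserves one contiguous range of virtual addresses for all code objects.
// Apart from a few pages reserved at the start on some targets (Win64), the
// memory is only reserved here; individual blocks are committed later through
// AllocateRawMemory and CommitRawMemory.  Keeping code in a single range lets
// generated code reach other code with near calls on targets that require it.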
100 bool CodeRange::SetUp(size_t requested) {
101   DCHECK(code_range_ == NULL);
102
103   if (requested == 0) {
104     // When a target requires the code range feature, we put all code objects
105     // in a kMaximalCodeRangeSize range of virtual address space, so that
106     // they can call each other with near calls.
107     if (kRequiresCodeRange) {
108       requested = kMaximalCodeRangeSize;
109     } else {
110       return true;
111     }
112   }
113
114   if (requested <= kMinimumCodeRangeSize) {
115     requested = kMinimumCodeRangeSize;
116   }
117
118   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
119   code_range_ = new base::VirtualMemory(requested);
120   CHECK(code_range_ != NULL);
121   if (!code_range_->IsReserved()) {
122     delete code_range_;
123     code_range_ = NULL;
124     return false;
125   }
126
127   // We are sure that we have mapped a block of the requested size.
128   DCHECK(code_range_->size() == requested);
129   Address base = reinterpret_cast<Address>(code_range_->address());
130
131   // On some platforms, specifically Win64, we need to reserve some pages at
132   // the beginning of an executable space.
133   if (kReservedCodeRangePages) {
134     if (!code_range_->Commit(
135             base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
136       delete code_range_;
137       code_range_ = NULL;
138       return false;
139     }
140     base += kReservedCodeRangePages * base::OS::CommitPageSize();
141   }
142   Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
143   size_t size = code_range_->size() - (aligned_base - base) -
144                 kReservedCodeRangePages * base::OS::CommitPageSize();
145   allocation_list_.Add(FreeBlock(aligned_base, size));
146   current_allocation_block_index_ = 0;
147
148   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
149   ReserveEmergencyBlock();
150   return true;
151 }
152
153
154 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
155                                        const FreeBlock* right) {
156   // The entire point of CodeRange is that the difference between two
157   // addresses in the range can be represented as a signed 32-bit int,
158   // so the cast is semantically correct.
159   return static_cast<int>(left->start - right->start);
160 }
161
162
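// Advances current_allocation_block_index_ to a block that can hold the
// requested number of bytes.  If none of the remaining blocks is large
// enough, the free list and the allocation list are merged, adjacent blocks
// are coalesced, and the search restarts from the beginning.  Returns false
// if the code range is exhausted or too fragmented.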
163 bool CodeRange::GetNextAllocationBlock(size_t requested) {
164   for (current_allocation_block_index_++;
165        current_allocation_block_index_ < allocation_list_.length();
166        current_allocation_block_index_++) {
167     if (requested <= allocation_list_[current_allocation_block_index_].size) {
168       return true;  // Found a large enough allocation block.
169     }
170   }
171
172   // Sort and merge the free blocks on the free list and the allocation list.
173   free_list_.AddAll(allocation_list_);
174   allocation_list_.Clear();
175   free_list_.Sort(&CompareFreeBlockAddress);
176   for (int i = 0; i < free_list_.length();) {
177     FreeBlock merged = free_list_[i];
178     i++;
179     // Add adjacent free blocks to the current merged block.
180     while (i < free_list_.length() &&
181            free_list_[i].start == merged.start + merged.size) {
182       merged.size += free_list_[i].size;
183       i++;
184     }
185     if (merged.size > 0) {
186       allocation_list_.Add(merged);
187     }
188   }
189   free_list_.Clear();
190
191   for (current_allocation_block_index_ = 0;
192        current_allocation_block_index_ < allocation_list_.length();
193        current_allocation_block_index_++) {
194     if (requested <= allocation_list_[current_allocation_block_index_].size) {
195       return true;  // Found a large enough allocation block.
196     }
197   }
198   current_allocation_block_index_ = 0;
199   // Code range is full or too fragmented.
200   return false;
201 }
202
203
204 Address CodeRange::AllocateRawMemory(const size_t requested_size,
205                                      const size_t commit_size,
206                                      size_t* allocated) {
207   DCHECK(commit_size <= requested_size);
208   FreeBlock current;
209   if (!ReserveBlock(requested_size, &current)) {
210     *allocated = 0;
211     return NULL;
212   }
213   *allocated = current.size;
214   DCHECK(*allocated <= current.size);
215   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
216   if (!isolate_->memory_allocator()->CommitExecutableMemory(
217           code_range_, current.start, commit_size, *allocated)) {
218     *allocated = 0;
219     ReleaseBlock(&current);
220     return NULL;
221   }
222   return current.start;
223 }
224
225
226 bool CodeRange::CommitRawMemory(Address start, size_t length) {
227   return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
228 }
229
230
231 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
232   return code_range_->Uncommit(start, length);
233 }
234
235
236 void CodeRange::FreeRawMemory(Address address, size_t length) {
237   DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
238   free_list_.Add(FreeBlock(address, length));
239   code_range_->Uncommit(address, length);
240 }
241
242
243 void CodeRange::TearDown() {
244   delete code_range_;  // Frees all memory in the virtual memory range.
245   code_range_ = NULL;
246   free_list_.Free();
247   allocation_list_.Free();
248 }
249
250
251 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
252   DCHECK(allocation_list_.length() == 0 ||
253          current_allocation_block_index_ < allocation_list_.length());
254   if (allocation_list_.length() == 0 ||
255       requested_size > allocation_list_[current_allocation_block_index_].size) {
256     // Find an allocation block large enough.
257     if (!GetNextAllocationBlock(requested_size)) return false;
258   }
259   // Take the requested memory from the start of the current allocation block.
260   size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
261   *block = allocation_list_[current_allocation_block_index_];
262   // Don't leave a small free block; it is useless for a large object or chunk.
263   if (aligned_requested < (block->size - Page::kPageSize)) {
264     block->size = aligned_requested;
265   }
266   DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
267   allocation_list_[current_allocation_block_index_].start += block->size;
268   allocation_list_[current_allocation_block_index_].size -= block->size;
269   return true;
270 }
271
272
273 void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
274
275
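// Sets aside one code-page-sized block so that an emergency code page can
// still be created while compacting, even if the rest of the code range is
// exhausted (see PagedSpace::CreateEmergencyMemory).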
276 void CodeRange::ReserveEmergencyBlock() {
277   const size_t requested_size = MemoryAllocator::CodePageAreaSize();
278   if (emergency_block_.size == 0) {
279     ReserveBlock(requested_size, &emergency_block_);
280   } else {
281     DCHECK(emergency_block_.size >= requested_size);
282   }
283 }
284
285
286 void CodeRange::ReleaseEmergencyBlock() {
287   if (emergency_block_.size != 0) {
288     ReleaseBlock(&emergency_block_);
289     emergency_block_.size = 0;
290   }
291 }
292
293
294 // -----------------------------------------------------------------------------
295 // MemoryAllocator
296 //
297
298 MemoryAllocator::MemoryAllocator(Isolate* isolate)
299     : isolate_(isolate),
300       capacity_(0),
301       capacity_executable_(0),
302       size_(0),
303       size_executable_(0),
304       lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
305       highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
306
307
308 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
309   capacity_ = RoundUp(capacity, Page::kPageSize);
310   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
311   DCHECK_GE(capacity_, capacity_executable_);
312
313   size_ = 0;
314   size_executable_ = 0;
315
316   return true;
317 }
318
319
320 void MemoryAllocator::TearDown() {
321   // Check that spaces were torn down before MemoryAllocator.
322   DCHECK(size_ == 0);
323   // TODO(gc) this will be true again when we fix FreeMemory.
324   // DCHECK(size_executable_ == 0);
325   capacity_ = 0;
326   capacity_executable_ = 0;
327 }
328
329
330 bool MemoryAllocator::CommitMemory(Address base, size_t size,
331                                    Executability executable) {
332   if (!base::VirtualMemory::CommitRegion(base, size,
333                                          executable == EXECUTABLE)) {
334     return false;
335   }
336   UpdateAllocatedSpaceLimits(base, base + size);
337   return true;
338 }
339
340
341 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
342                                  Executability executable) {
343   // TODO(gc) make code_range part of memory allocator?
344   DCHECK(reservation->IsReserved());
345   size_t size = reservation->size();
346   DCHECK(size_ >= size);
347   size_ -= size;
348
349   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
350
351   if (executable == EXECUTABLE) {
352     DCHECK(size_executable_ >= size);
353     size_executable_ -= size;
354   }
355   // Code which is part of the code-range does not have its own VirtualMemory.
356   DCHECK(isolate_->code_range() == NULL ||
357          !isolate_->code_range()->contains(
358              static_cast<Address>(reservation->address())));
359   DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
360          !isolate_->code_range()->valid());
361   reservation->Release();
362 }
363
364
365 void MemoryAllocator::FreeMemory(Address base, size_t size,
366                                  Executability executable) {
367   // TODO(gc) make code_range part of memory allocator?
368   DCHECK(size_ >= size);
369   size_ -= size;
370
371   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
372
373   if (executable == EXECUTABLE) {
374     DCHECK(size_executable_ >= size);
375     size_executable_ -= size;
376   }
377   if (isolate_->code_range() != NULL &&
378       isolate_->code_range()->contains(static_cast<Address>(base))) {
379     DCHECK(executable == EXECUTABLE);
380     isolate_->code_range()->FreeRawMemory(base, size);
381   } else {
382     DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
383            !isolate_->code_range()->valid());
384     bool result = base::VirtualMemory::ReleaseRegion(base, size);
385     USE(result);
386     DCHECK(result);
387   }
388 }
389
390
391 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
392                                               base::VirtualMemory* controller) {
393   base::VirtualMemory reservation(size, alignment);
394
395   if (!reservation.IsReserved()) return NULL;
396   size_ += reservation.size();
397   Address base =
398       RoundUp(static_cast<Address>(reservation.address()), alignment);
399   controller->TakeControl(&reservation);
400   return base;
401 }
402
403
404 Address MemoryAllocator::AllocateAlignedMemory(
405     size_t reserve_size, size_t commit_size, size_t alignment,
406     Executability executable, base::VirtualMemory* controller) {
407   DCHECK(commit_size <= reserve_size);
408   base::VirtualMemory reservation;
409   Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
410   if (base == NULL) return NULL;
411
412   if (executable == EXECUTABLE) {
413     if (!CommitExecutableMemory(&reservation, base, commit_size,
414                                 reserve_size)) {
415       base = NULL;
416     }
417   } else {
418     if (reservation.Commit(base, commit_size, false)) {
419       UpdateAllocatedSpaceLimits(base, base + commit_size);
420     } else {
421       base = NULL;
422     }
423   }
424
425   if (base == NULL) {
426     // Failed to commit the body. Release the mapping and any partially
427     // committed regions inside it.
428     reservation.Release();
429     return NULL;
430   }
431
432   controller->TakeControl(&reservation);
433   return base;
434 }
435
436
437 void Page::InitializeAsAnchor(PagedSpace* owner) {
438   set_owner(owner);
439   set_prev_page(this);
440   set_next_page(this);
441 }
442
443
444 NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
445                                        SemiSpace* semi_space) {
446   Address area_start = start + NewSpacePage::kObjectStartOffset;
447   Address area_end = start + Page::kPageSize;
448
449   MemoryChunk* chunk =
450       MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
451                               area_end, NOT_EXECUTABLE, semi_space);
452   chunk->set_next_chunk(NULL);
453   chunk->set_prev_chunk(NULL);
454   chunk->initialize_scan_on_scavenge(true);
455   bool in_to_space = (semi_space->id() != kFromSpace);
456   chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
457                              : MemoryChunk::IN_FROM_SPACE);
458   DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
459                                        : MemoryChunk::IN_TO_SPACE));
460   NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
461   heap->incremental_marking()->SetNewSpacePageFlags(page);
462   return page;
463 }
464
465
466 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
467   set_owner(semi_space);
468   set_next_chunk(this);
469   set_prev_chunk(this);
470   // Clearing all flags marks this invalid page as not being in new-space;
471   // all real new-space pages have IN_TO_SPACE or IN_FROM_SPACE set.
472   SetFlags(0, ~0);
473 }
474
475
476 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
477                                      Address area_start, Address area_end,
478                                      Executability executable, Space* owner) {
479   MemoryChunk* chunk = FromAddress(base);
480
481   DCHECK(base == chunk->address());
482
483   chunk->heap_ = heap;
484   chunk->size_ = size;
485   chunk->area_start_ = area_start;
486   chunk->area_end_ = area_end;
487   chunk->flags_ = 0;
488   chunk->set_owner(owner);
489   chunk->InitializeReservedMemory();
490   chunk->slots_buffer_ = NULL;
491   chunk->skip_list_ = NULL;
492   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
493   chunk->progress_bar_ = 0;
494   chunk->high_water_mark_ = static_cast<int>(area_start - base);
495   chunk->set_parallel_sweeping(SWEEPING_DONE);
496   chunk->available_in_small_free_list_ = 0;
497   chunk->available_in_medium_free_list_ = 0;
498   chunk->available_in_large_free_list_ = 0;
499   chunk->available_in_huge_free_list_ = 0;
500   chunk->non_available_small_blocks_ = 0;
501   chunk->ResetLiveBytes();
502   Bitmap::Clear(chunk);
503   chunk->initialize_scan_on_scavenge(false);
504   chunk->SetFlag(WAS_SWEPT);
505
506   DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
507   DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
508
509   if (executable == EXECUTABLE) {
510     chunk->SetFlag(IS_EXECUTABLE);
511   }
512
513   if (owner == heap->old_data_space()) {
514     chunk->SetFlag(CONTAINS_ONLY_DATA);
515   }
516
517   return chunk;
518 }
519
520
521 // Commit MemoryChunk area to the requested size.
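// For executable chunks the area is bracketed by guard pages, so the header
// size and commit boundaries below are computed relative to that layout (see
// the MemoryChunk layout diagram in MemoryAllocator::AllocateChunk).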
522 bool MemoryChunk::CommitArea(size_t requested) {
523   size_t guard_size =
524       IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
525   size_t header_size = area_start() - address() - guard_size;
526   size_t commit_size =
527       RoundUp(header_size + requested, base::OS::CommitPageSize());
528   size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
529                                   base::OS::CommitPageSize());
530
531   if (commit_size > committed_size) {
532     // Commit size should be less than or equal to the reserved size.
533     DCHECK(commit_size <= size() - 2 * guard_size);
534     // Append the committed area.
535     Address start = address() + committed_size + guard_size;
536     size_t length = commit_size - committed_size;
537     if (reservation_.IsReserved()) {
538       Executability executable =
539           IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
540       if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
541                                                                executable)) {
542         return false;
543       }
544     } else {
545       CodeRange* code_range = heap_->isolate()->code_range();
546       DCHECK(code_range != NULL && code_range->valid() &&
547              IsFlagSet(IS_EXECUTABLE));
548       if (!code_range->CommitRawMemory(start, length)) return false;
549     }
550
551     if (Heap::ShouldZapGarbage()) {
552       heap_->isolate()->memory_allocator()->ZapBlock(start, length);
553     }
554   } else if (commit_size < committed_size) {
555     DCHECK(commit_size > 0);
556     // Shrink the committed area.
557     size_t length = committed_size - commit_size;
558     Address start = address() + committed_size + guard_size - length;
559     if (reservation_.IsReserved()) {
560       if (!reservation_.Uncommit(start, length)) return false;
561     } else {
562       CodeRange* code_range = heap_->isolate()->code_range();
563       DCHECK(code_range != NULL && code_range->valid() &&
564              IsFlagSet(IS_EXECUTABLE));
565       if (!code_range->UncommitRawMemory(start, length)) return false;
566     }
567   }
568
569   area_end_ = area_start_ + requested;
570   return true;
571 }
572
573
574 void MemoryChunk::InsertAfter(MemoryChunk* other) {
575   MemoryChunk* other_next = other->next_chunk();
576
577   set_next_chunk(other_next);
578   set_prev_chunk(other);
579   other_next->set_prev_chunk(this);
580   other->set_next_chunk(this);
581 }
582
583
584 void MemoryChunk::Unlink() {
585   MemoryChunk* next_element = next_chunk();
586   MemoryChunk* prev_element = prev_chunk();
587   next_element->set_prev_chunk(prev_element);
588   prev_element->set_next_chunk(next_element);
589   set_prev_chunk(NULL);
590   set_next_chunk(NULL);
591 }
592
593
594 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
595                                             intptr_t commit_area_size,
596                                             Executability executable,
597                                             Space* owner) {
598   DCHECK(commit_area_size <= reserve_area_size);
599
600   size_t chunk_size;
601   Heap* heap = isolate_->heap();
602   Address base = NULL;
603   base::VirtualMemory reservation;
604   Address area_start = NULL;
605   Address area_end = NULL;
606
607   //
608   // MemoryChunk layout:
609   //
610   //             Executable
611   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
612   // |           Header           |
613   // +----------------------------+<- base + CodePageGuardStartOffset
614   // |           Guard            |
615   // +----------------------------+<- area_start_
616   // |           Area             |
617   // +----------------------------+<- area_end_ (area_start + commit_area_size)
618   // |   Committed but not used   |
619   // +----------------------------+<- aligned at OS page boundary
620   // | Reserved but not committed |
621   // +----------------------------+<- aligned at OS page boundary
622   // |           Guard            |
623   // +----------------------------+<- base + chunk_size
624   //
625   //           Non-executable
626   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
627   // |          Header            |
628   // +----------------------------+<- area_start_ (base + kObjectStartOffset)
629   // |           Area             |
630   // +----------------------------+<- area_end_ (area_start + commit_area_size)
631   // |  Committed but not used    |
632   // +----------------------------+<- aligned at OS page boundary
633   // | Reserved but not committed |
634   // +----------------------------+<- base + chunk_size
635   //
636
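  // For executable chunks the reservation covers the header, a guard page
  // after it, the requested area rounded up to OS pages, and a trailing guard
  // page; only the header plus commit_area_size is committed up front.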
637   if (executable == EXECUTABLE) {
638     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
639                          base::OS::CommitPageSize()) +
640                  CodePageGuardSize();
641
642     // Check executable memory limit.
643     if (size_executable_ + chunk_size > capacity_executable_) {
644       LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
645                                 "V8 Executable Allocation capacity exceeded"));
646       return NULL;
647     }
648
649     // Size of header (not executable) plus area (executable).
650     size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
651                                  base::OS::CommitPageSize());
652     // Allocate executable memory either from code range or from the
653     // OS.
654     if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
655       base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
656                                                        &chunk_size);
657       DCHECK(
658           IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
659       if (base == NULL) return NULL;
660       size_ += chunk_size;
661       // Update executable memory size.
662       size_executable_ += chunk_size;
663     } else {
664       base = AllocateAlignedMemory(chunk_size, commit_size,
665                                    MemoryChunk::kAlignment, executable,
666                                    &reservation);
667       if (base == NULL) return NULL;
668       // Update executable memory size.
669       size_executable_ += reservation.size();
670     }
671
672     if (Heap::ShouldZapGarbage()) {
673       ZapBlock(base, CodePageGuardStartOffset());
674       ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
675     }
676
677     area_start = base + CodePageAreaStartOffset();
678     area_end = area_start + commit_area_size;
679   } else {
680     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
681                          base::OS::CommitPageSize());
682     size_t commit_size =
683         RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
684                 base::OS::CommitPageSize());
685     base =
686         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
687                               executable, &reservation);
688
689     if (base == NULL) return NULL;
690
691     if (Heap::ShouldZapGarbage()) {
692       ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
693     }
694
695     area_start = base + Page::kObjectStartOffset;
696     area_end = area_start + commit_area_size;
697   }
698
699   // Use chunk_size for statistics and callbacks because we assume that they
700   // treat reserved but not-yet committed memory regions of chunks as allocated.
701   isolate_->counters()->memory_allocated()->Increment(
702       static_cast<int>(chunk_size));
703
704   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
705   if (owner != NULL) {
706     ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
707     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
708   }
709
710   MemoryChunk* result = MemoryChunk::Initialize(
711       heap, base, chunk_size, area_start, area_end, executable, owner);
712   result->set_reserved_memory(&reservation);
713   return result;
714 }
715
716
717 void Page::ResetFreeListStatistics() {
718   non_available_small_blocks_ = 0;
719   available_in_small_free_list_ = 0;
720   available_in_medium_free_list_ = 0;
721   available_in_large_free_list_ = 0;
722   available_in_huge_free_list_ = 0;
723 }
724
725
726 Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
727                                     Executability executable) {
728   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
729
730   if (chunk == NULL) return NULL;
731
732   return Page::Initialize(isolate_->heap(), chunk, executable, owner);
733 }
734
735
736 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
737                                               Space* owner,
738                                               Executability executable) {
739   MemoryChunk* chunk =
740       AllocateChunk(object_size, object_size, executable, owner);
741   if (chunk == NULL) return NULL;
742   return LargePage::Initialize(isolate_->heap(), chunk);
743 }
744
745
746 void MemoryAllocator::Free(MemoryChunk* chunk) {
747   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
748   if (chunk->owner() != NULL) {
749     ObjectSpace space =
750         static_cast<ObjectSpace>(1 << chunk->owner()->identity());
751     PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
752   }
753
754   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
755                                          chunk->IsEvacuationCandidate());
756
757   delete chunk->slots_buffer();
758   delete chunk->skip_list();
759
760   base::VirtualMemory* reservation = chunk->reserved_memory();
761   if (reservation->IsReserved()) {
762     FreeMemory(reservation, chunk->executable());
763   } else {
764     FreeMemory(chunk->address(), chunk->size(), chunk->executable());
765   }
766 }
767
768
769 bool MemoryAllocator::CommitBlock(Address start, size_t size,
770                                   Executability executable) {
771   if (!CommitMemory(start, size, executable)) return false;
772
773   if (Heap::ShouldZapGarbage()) {
774     ZapBlock(start, size);
775   }
776
777   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
778   return true;
779 }
780
781
782 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
783   if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
784   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
785   return true;
786 }
787
788
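// Overwrites a block with kZapValue so that stale references into freed or
// uncommitted memory are easier to spot.  Callers only do this when
// Heap::ShouldZapGarbage() is true.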
789 void MemoryAllocator::ZapBlock(Address start, size_t size) {
790   for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
791     Memory::Address_at(start + s) = kZapValue;
792   }
793 }
794
795
796 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
797                                                 AllocationAction action,
798                                                 size_t size) {
799   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
800     MemoryAllocationCallbackRegistration registration =
801         memory_allocation_callbacks_[i];
802     if ((registration.space & space) == space &&
803         (registration.action & action) == action)
804       registration.callback(space, action, static_cast<int>(size));
805   }
806 }
807
808
809 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
810     MemoryAllocationCallback callback) {
811   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
812     if (memory_allocation_callbacks_[i].callback == callback) return true;
813   }
814   return false;
815 }
816
817
818 void MemoryAllocator::AddMemoryAllocationCallback(
819     MemoryAllocationCallback callback, ObjectSpace space,
820     AllocationAction action) {
821   DCHECK(callback != NULL);
822   MemoryAllocationCallbackRegistration registration(callback, space, action);
823   DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
824   return memory_allocation_callbacks_.Add(registration);
825 }
826
827
828 void MemoryAllocator::RemoveMemoryAllocationCallback(
829     MemoryAllocationCallback callback) {
830   DCHECK(callback != NULL);
831   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
832     if (memory_allocation_callbacks_[i].callback == callback) {
833       memory_allocation_callbacks_.Remove(i);
834       return;
835     }
836   }
837   UNREACHABLE();
838 }
839
840
841 #ifdef DEBUG
842 void MemoryAllocator::ReportStatistics() {
843   float pct = static_cast<float>(capacity_ - size_) / capacity_;
844   PrintF("  capacity: %" V8_PTR_PREFIX
845          "d"
846          ", used: %" V8_PTR_PREFIX
847          "d"
848          ", available: %%%d\n\n",
849          capacity_, size_, static_cast<int>(pct * 100));
850 }
851 #endif
852
853
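// Layout of an executable page, from low to high addresses: the page header
// (rounded up to an OS commit page), one non-writable guard page, the code
// area, and a final guard page at the very end of the page.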
854 int MemoryAllocator::CodePageGuardStartOffset() {
855   // We are guarding code pages: the first OS page after the header
856   // will be protected as non-writable.
857   return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
858 }
859
860
861 int MemoryAllocator::CodePageGuardSize() {
862   return static_cast<int>(base::OS::CommitPageSize());
863 }
864
865
866 int MemoryAllocator::CodePageAreaStartOffset() {
867   // We are guarding code pages: the first OS page after the header
868   // will be protected as non-writable.
869   return CodePageGuardStartOffset() + CodePageGuardSize();
870 }
871
872
873 int MemoryAllocator::CodePageAreaEndOffset() {
874   // We are guarding code pages: the last OS page will be protected as
875   // non-writable.
876   return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
877 }
878
879
880 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
881                                              Address start, size_t commit_size,
882                                              size_t reserved_size) {
883   // Commit page header (not executable).
884   if (!vm->Commit(start, CodePageGuardStartOffset(), false)) {
885     return false;
886   }
887
888   // Create guard page after the header.
889   if (!vm->Guard(start + CodePageGuardStartOffset())) {
890     return false;
891   }
892
893   // Commit page body (executable).
894   if (!vm->Commit(start + CodePageAreaStartOffset(),
895                   commit_size - CodePageGuardStartOffset(), true)) {
896     return false;
897   }
898
899   // Create guard page before the end.
900   if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
901     return false;
902   }
903
904   UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
905                                         commit_size -
906                                         CodePageGuardStartOffset());
907   return true;
908 }
909
910
911 // -----------------------------------------------------------------------------
912 // MemoryChunk implementation
913
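// Adjusts a page's live-byte count for a change made directly by the mutator
// (for example when an object is trimmed).  If the page has not been swept
// yet, the owning space's unswept-free-bytes counter is adjusted as well,
// since the sweeper has not yet accounted for that memory.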
914 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
915   MemoryChunk* chunk = MemoryChunk::FromAddress(address);
916   if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
917     static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
918   }
919   chunk->IncrementLiveBytes(by);
920 }
921
922
923 // -----------------------------------------------------------------------------
924 // PagedSpace implementation
925
926 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
927               ObjectSpace::kObjectSpaceNewSpace);
928 STATIC_ASSERT(static_cast<ObjectSpace>(1
929                                        << AllocationSpace::OLD_POINTER_SPACE) ==
930               ObjectSpace::kObjectSpaceOldPointerSpace);
931 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_DATA_SPACE) ==
932               ObjectSpace::kObjectSpaceOldDataSpace);
933 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
934               ObjectSpace::kObjectSpaceCodeSpace);
935 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CELL_SPACE) ==
936               ObjectSpace::kObjectSpaceCellSpace);
937 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
938               ObjectSpace::kObjectSpaceMapSpace);
939
940
941 PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace space,
942                        Executability executable)
943     : Space(heap, space, executable),
944       free_list_(this),
945       unswept_free_bytes_(0),
946       end_of_unswept_pages_(NULL),
947       emergency_memory_(NULL) {
948   area_size_ = MemoryAllocator::PageAreaSize(space);
949   max_capacity_ =
950       (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
951   accounting_stats_.Clear();
952
953   allocation_info_.set_top(NULL);
954   allocation_info_.set_limit(NULL);
955
956   anchor_.InitializeAsAnchor(this);
957 }
958
959
960 bool PagedSpace::SetUp() { return true; }
961
962
963 bool PagedSpace::HasBeenSetUp() { return true; }
964
965
966 void PagedSpace::TearDown() {
967   PageIterator iterator(this);
968   while (iterator.has_next()) {
969     heap()->isolate()->memory_allocator()->Free(iterator.next());
970   }
971   anchor_.set_next_page(&anchor_);
972   anchor_.set_prev_page(&anchor_);
973   accounting_stats_.Clear();
974 }
975
976
977 size_t PagedSpace::CommittedPhysicalMemory() {
978   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
979   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
980   size_t size = 0;
981   PageIterator it(this);
982   while (it.has_next()) {
983     size += it.next()->CommittedPhysicalMemory();
984   }
985   return size;
986 }
987
988
989 bool PagedSpace::ContainsSafe(Address addr) {
990   Page* p = Page::FromAddress(addr);
991   PageIterator iterator(this);
992   while (iterator.has_next()) {
993     if (iterator.next() == p) return true;
994   }
995   return false;
996 }
997
998
999 Object* PagedSpace::FindObject(Address addr) {
1000   // Note: this function can only be called on iterable spaces.
1001   DCHECK(!heap()->mark_compact_collector()->in_use());
1002
1003   if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.
1004
1005   Page* p = Page::FromAddress(addr);
1006   HeapObjectIterator it(p, NULL);
1007   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
1008     Address cur = obj->address();
1009     Address next = cur + obj->Size();
1010     if ((cur <= addr) && (addr < next)) return obj;
1011   }
1012
1013   UNREACHABLE();
1014   return Smi::FromInt(0);
1015 }
1016
1017
1018 bool PagedSpace::CanExpand() {
1019   DCHECK(max_capacity_ % AreaSize() == 0);
1020   DCHECK(heap()->mark_compact_collector()->is_compacting() ||
1021          Capacity() <= heap()->MaxOldGenerationSize());
1022   DCHECK(heap()->CommittedOldGenerationMemory() <=
1023          heap()->MaxOldGenerationSize() +
1024              PagedSpace::MaxEmergencyMemoryAllocated());
1025
1026   // Are we going to exceed capacity for this space?
1027   if (!heap()->CanExpandOldGeneration(Page::kPageSize)) return false;
1028
1029   return true;
1030 }
1031
1032
1033 bool PagedSpace::Expand() {
1034   if (!CanExpand()) return false;
1035
1036   intptr_t size = AreaSize();
1037
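  // If the space is still empty, the first page may be smaller than a full
  // page; Snapshot::SizeOfFirstPage decides how large it needs to be.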
1038   if (anchor_.next_page() == &anchor_) {
1039     size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
1040   }
1041
1042   Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
1043                                                                 executable());
1044   if (p == NULL) return false;
1045
1046   // Pages created during bootstrapping may contain immortal immovable objects.
1047   if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
1048
1049   DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
1050   DCHECK(heap()->CommittedOldGenerationMemory() <=
1051          heap()->MaxOldGenerationSize() +
1052              PagedSpace::MaxEmergencyMemoryAllocated());
1053
1054   p->InsertAfter(anchor_.prev_page());
1055
1056   return true;
1057 }
1058
1059
1060 int PagedSpace::CountTotalPages() {
1061   PageIterator it(this);
1062   int count = 0;
1063   while (it.has_next()) {
1064     it.next();
1065     count++;
1066   }
1067   return count;
1068 }
1069
1070
1071 void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
1072   sizes->huge_size_ = page->available_in_huge_free_list();
1073   sizes->small_size_ = page->available_in_small_free_list();
1074   sizes->medium_size_ = page->available_in_medium_free_list();
1075   sizes->large_size_ = page->available_in_large_free_list();
1076 }
1077
1078
1079 void PagedSpace::ResetFreeListStatistics() {
1080   PageIterator page_iterator(this);
1081   while (page_iterator.has_next()) {
1082     Page* page = page_iterator.next();
1083     page->ResetFreeListStatistics();
1084   }
1085 }
1086
1087
1088 void PagedSpace::IncreaseCapacity(int size) {
1089   accounting_stats_.ExpandSpace(size);
1090 }
1091
1092
1093 void PagedSpace::ReleasePage(Page* page) {
1094   DCHECK(page->LiveBytes() == 0);
1095   DCHECK(AreaSize() == page->area_size());
1096
1097   if (page->WasSwept()) {
1098     intptr_t size = free_list_.EvictFreeListItems(page);
1099     accounting_stats_.AllocateBytes(size);
1100     DCHECK_EQ(AreaSize(), static_cast<int>(size));
1101   } else {
1102     DecreaseUnsweptFreeBytes(page);
1103   }
1104
1105   if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
1106     heap()->decrement_scan_on_scavenge_pages();
1107     page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
1108   }
1109
1110   DCHECK(!free_list_.ContainsPageFreeListItems(page));
1111
1112   if (Page::FromAllocationTop(allocation_info_.top()) == page) {
1113     allocation_info_.set_top(NULL);
1114     allocation_info_.set_limit(NULL);
1115   }
1116
1117   // If page is still in a list, unlink it from that list.
1118   if (page->next_chunk() != NULL) {
1119     DCHECK(page->prev_chunk() != NULL);
1120     page->Unlink();
1121   }
1122
1123   if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
1124     heap()->isolate()->memory_allocator()->Free(page);
1125   } else {
1126     heap()->QueueMemoryChunkForFree(page);
1127   }
1128
1129   DCHECK(Capacity() > 0);
1130   accounting_stats_.ShrinkSpace(AreaSize());
1131 }
1132
1133
1134 intptr_t PagedSpace::MaxEmergencyMemoryAllocated() {
1135   // New space and large object space.
1136   static const int spaces_without_emergency_memory = 2;
1137   static const int spaces_with_emergency_memory =
1138       LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory;
1139   return Page::kPageSize * spaces_with_emergency_memory;
1140 }
1141
1142
1143 void PagedSpace::CreateEmergencyMemory() {
1144   if (identity() == CODE_SPACE) {
1145     // Make the emergency block available to the allocator.
1146     CodeRange* code_range = heap()->isolate()->code_range();
1147     if (code_range != NULL && code_range->valid()) {
1148       code_range->ReleaseEmergencyBlock();
1149     }
1150     DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
1151   }
1152   emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
1153       AreaSize(), AreaSize(), executable(), this);
1154 }
1155
1156
1157 void PagedSpace::FreeEmergencyMemory() {
1158   Page* page = static_cast<Page*>(emergency_memory_);
1159   DCHECK(page->LiveBytes() == 0);
1160   DCHECK(AreaSize() == page->area_size());
1161   DCHECK(!free_list_.ContainsPageFreeListItems(page));
1162   heap()->isolate()->memory_allocator()->Free(page);
1163   emergency_memory_ = NULL;
1164 }
1165
1166
1167 void PagedSpace::UseEmergencyMemory() {
1168   // Page::Initialize makes the chunk into a real page and adds it to the
1169   // accounting for this space.  Unlike PagedSpace::Expand, we don't check
1170   // CanExpand first, so we can go over the limits a little here.  That's OK,
1171   // because we are in the process of compacting which will free up at least as
1172   // much memory as it allocates.
1173   Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
1174   page->InsertAfter(anchor_.prev_page());
1175   emergency_memory_ = NULL;
1176 }
1177
1178
1179 #ifdef DEBUG
1180 void PagedSpace::Print() {}
1181 #endif
1182
1183 #ifdef VERIFY_HEAP
1184 void PagedSpace::Verify(ObjectVisitor* visitor) {
1185   bool allocation_pointer_found_in_space =
1186       (allocation_info_.top() == allocation_info_.limit());
1187   PageIterator page_iterator(this);
1188   while (page_iterator.has_next()) {
1189     Page* page = page_iterator.next();
1190     CHECK(page->owner() == this);
1191     if (page == Page::FromAllocationTop(allocation_info_.top())) {
1192       allocation_pointer_found_in_space = true;
1193     }
1194     CHECK(page->WasSwept());
1195     HeapObjectIterator it(page, NULL);
1196     Address end_of_previous_object = page->area_start();
1197     Address top = page->area_end();
1198     int black_size = 0;
1199     for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1200       CHECK(end_of_previous_object <= object->address());
1201
1202       // The first word should be a map, and we expect all map pointers to
1203       // be in map space.
1204       Map* map = object->map();
1205       CHECK(map->IsMap());
1206       CHECK(heap()->map_space()->Contains(map));
1207
1208       // Perform space-specific object verification.
1209       VerifyObject(object);
1210
1211       // The object itself should look OK.
1212       object->ObjectVerify();
1213
1214       // All the interior pointers should be contained in the heap.
1215       int size = object->Size();
1216       object->IterateBody(map->instance_type(), size, visitor);
1217       if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1218         black_size += size;
1219       }
1220
1221       CHECK(object->address() + size <= top);
1222       end_of_previous_object = object->address() + size;
1223     }
1224     CHECK_LE(black_size, page->LiveBytes());
1225   }
1226   CHECK(allocation_pointer_found_in_space);
1227 }
1228 #endif  // VERIFY_HEAP
1229
1230 // -----------------------------------------------------------------------------
1231 // NewSpace implementation
1232
1233
1234 bool NewSpace::SetUp(int reserved_semispace_capacity,
1235                      int maximum_semispace_capacity) {
1236   // Set up new space based on the preallocated memory block defined by
1237   // start and size. The provided space is divided into two semi-spaces.
1238   // To support fast containment testing in the new space, the size of
1239   // this chunk must be a power of two and it must be aligned to its size.
1240   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1241
1242   int target_semispace_capacity = heap()->TargetSemiSpaceSize();
1243
1244   size_t size = 2 * reserved_semispace_capacity;
1245   Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
1246       size, size, &reservation_);
1247   if (base == NULL) return false;
1248
1249   chunk_base_ = base;
1250   chunk_size_ = static_cast<uintptr_t>(size);
1251   LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1252
1253   DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
1254   DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
1255
1256   // Allocate and set up the histogram arrays if necessary.
1257   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1258   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1259
1260 #define SET_NAME(name)                        \
1261   allocated_histogram_[name].set_name(#name); \
1262   promoted_histogram_[name].set_name(#name);
1263   INSTANCE_TYPE_LIST(SET_NAME)
1264 #undef SET_NAME
1265
1266   DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1267   DCHECK(static_cast<intptr_t>(chunk_size_) >=
1268          2 * heap()->ReservedSemiSpaceSize());
1269   DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1270
1271   to_space_.SetUp(chunk_base_, initial_semispace_capacity,
1272                   target_semispace_capacity, maximum_semispace_capacity);
1273   from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1274                     initial_semispace_capacity, target_semispace_capacity,
1275                     maximum_semispace_capacity);
1276   if (!to_space_.Commit()) {
1277     return false;
1278   }
1279   DCHECK(!from_space_.is_committed());  // No need to use memory yet.
1280
1281   start_ = chunk_base_;
1282   address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1283   object_mask_ = address_mask_ | kHeapObjectTagMask;
1284   object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1285
1286   ResetAllocationInfo();
1287
1288   return true;
1289 }
1290
1291
1292 void NewSpace::TearDown() {
1293   if (allocated_histogram_) {
1294     DeleteArray(allocated_histogram_);
1295     allocated_histogram_ = NULL;
1296   }
1297   if (promoted_histogram_) {
1298     DeleteArray(promoted_histogram_);
1299     promoted_histogram_ = NULL;
1300   }
1301
1302   start_ = NULL;
1303   allocation_info_.set_top(NULL);
1304   allocation_info_.set_limit(NULL);
1305
1306   to_space_.TearDown();
1307   from_space_.TearDown();
1308
1309   LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1310
1311   DCHECK(reservation_.IsReserved());
1312   heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1313                                                     NOT_EXECUTABLE);
1314   chunk_base_ = NULL;
1315   chunk_size_ = 0;
1316 }
1317
1318
1319 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
1320
1321
1322 void NewSpace::Grow() {
1323   // Grow the semispace by the growth factor, but only up to maximum capacity.
1324   DCHECK(TotalCapacity() < MaximumCapacity());
1325   int new_capacity =
1326       Min(MaximumCapacity(),
1327           FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
1328   if (to_space_.GrowTo(new_capacity)) {
1329     // Only grow from space if we managed to grow to-space.
1330     if (!from_space_.GrowTo(new_capacity)) {
1331       // If we managed to grow to-space but couldn't grow from-space,
1332       // attempt to shrink to-space.
1333       if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
1334         // We are in an inconsistent state because we could not
1335         // commit/uncommit memory from new space.
1336         CHECK(false);
1337       }
1338     }
1339   }
1340   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1341 }
1342
1343
1344 bool NewSpace::GrowOnePage() {
1345   if (TotalCapacity() == MaximumCapacity()) return false;
1346   int new_capacity = static_cast<int>(TotalCapacity()) + Page::kPageSize;
1347   if (to_space_.GrowTo(new_capacity)) {
1348     // Only grow from space if we managed to grow to-space and the from space
1349     // is actually committed.
1350     if (from_space_.is_committed()) {
1351       if (!from_space_.GrowTo(new_capacity)) {
1352         // If we managed to grow to-space but couldn't grow from-space,
1353         // attempt to shrink to-space.
1354         if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
1355           // We are in an inconsistent state because we could not
1356           // commit/uncommit memory from new space.
1357           CHECK(false);
1358         }
1359         return false;
1360       }
1361     } else {
1362       if (!from_space_.SetTotalCapacity(new_capacity)) {
1363         // Can't really happen, but better safe than sorry.
1364         CHECK(false);
1365       }
1366     }
1367     DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1368     return true;
1369   }
1370   return false;
1371 }
1372
1373
1374 void NewSpace::Shrink() {
1375   int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
1376   int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1377   if (rounded_new_capacity < TotalCapacity() &&
1378       to_space_.ShrinkTo(rounded_new_capacity)) {
1379     // Only shrink from-space if we managed to shrink to-space.
1380     from_space_.Reset();
1381     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1382       // If we managed to shrink to-space but couldn't shrink from
1383       // space, attempt to grow to-space again.
1384       if (!to_space_.GrowTo(from_space_.TotalCapacity())) {
1385         // We are in an inconsistent state because we could not
1386         // commit/uncommit memory from new space.
1387         CHECK(false);
1388       }
1389     }
1390   }
1391   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1392 }
1393
1394
1395 void NewSpace::UpdateAllocationInfo() {
1396   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1397   allocation_info_.set_top(to_space_.page_low());
1398   allocation_info_.set_limit(to_space_.page_high());
1399   UpdateInlineAllocationLimit(0);
1400   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1401 }
1402
1403
1404 void NewSpace::ResetAllocationInfo() {
1405   to_space_.Reset();
1406   UpdateAllocationInfo();
1407   pages_used_ = 0;
1408   // Clear all mark-bits in the to-space.
1409   NewSpacePageIterator it(&to_space_);
1410   while (it.has_next()) {
1411     Bitmap::Clear(it.next());
1412   }
1413 }
1414
1415
1416 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1417   if (heap()->inline_allocation_disabled()) {
1418     // Lowest limit when linear allocation was disabled.
1419     Address high = to_space_.page_high();
1420     Address new_top = allocation_info_.top() + size_in_bytes;
1421     allocation_info_.set_limit(Min(new_top, high));
1422   } else if (inline_allocation_limit_step() == 0) {
1423     // Normal limit is the end of the current page.
1424     allocation_info_.set_limit(to_space_.page_high());
1425   } else {
1426     // Lower limit during incremental marking.
1427     Address high = to_space_.page_high();
1428     Address new_top = allocation_info_.top() + size_in_bytes;
1429     Address new_limit = new_top + inline_allocation_limit_step_;
1430     allocation_info_.set_limit(Min(new_limit, high));
1431   }
1432   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1433 }
1434
1435
1436 bool NewSpace::AddFreshPage() {
1437   Address top = allocation_info_.top();
1438   if (NewSpacePage::IsAtStart(top)) {
1439     // The current page is already empty. Don't try to make another.
1440
1441     // We should only get here if someone asks to allocate more
1442     // than what can be stored in a single page.
1443     // TODO(gc): Change the limit on new-space allocation to prevent this
1444     // from happening (all such allocations should go directly to LOSpace).
1445     return false;
1446   }
1447   if (!to_space_.AdvancePage()) {
1448     // Check if we reached the target capacity yet. If not, try to commit a page
1449     // and continue.
1450     if ((to_space_.TotalCapacity() < to_space_.TargetCapacity()) &&
1451         GrowOnePage()) {
1452       if (!to_space_.AdvancePage()) {
1453         // It doesn't make sense that we managed to commit a page, but can't use
1454         // it.
1455         CHECK(false);
1456       }
1457     } else {
1458       // Failed to get a new page in to-space.
1459       return false;
1460     }
1461   }
1462
1463   // Clear remainder of current page.
1464   Address limit = NewSpacePage::FromLimit(top)->area_end();
1465   if (heap()->gc_state() == Heap::SCAVENGE) {
1466     heap()->promotion_queue()->SetNewLimit(limit);
1467   }
1468
1469   int remaining_in_page = static_cast<int>(limit - top);
1470   heap()->CreateFillerObjectAt(top, remaining_in_page);
1471   pages_used_++;
1472   UpdateAllocationInfo();
1473
1474   return true;
1475 }
1476
1477
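// Reached when a bump-pointer allocation hits allocation_info_.limit().
// Either the limit was lowered artificially (inline allocation disabled or an
// incremental marking step is due) and just needs to be raised, or the current
// page really is full and a fresh page has to be added.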
1478 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
1479   Address old_top = allocation_info_.top();
1480   Address high = to_space_.page_high();
1481   if (allocation_info_.limit() < high) {
1482     // Either the limit has been lowered because linear allocation was disabled
1483     // or because incremental marking wants to get a chance to do a step. Set
1484     // the new limit accordingly.
1485     Address new_top = old_top + size_in_bytes;
1486     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1487     heap()->incremental_marking()->Step(bytes_allocated,
1488                                         IncrementalMarking::GC_VIA_STACK_GUARD);
1489     UpdateInlineAllocationLimit(size_in_bytes);
1490     top_on_previous_step_ = new_top;
1491     return AllocateRaw(size_in_bytes);
1492   } else if (AddFreshPage()) {
1493     // Switched to new page. Try allocating again.
1494     int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1495     heap()->incremental_marking()->Step(bytes_allocated,
1496                                         IncrementalMarking::GC_VIA_STACK_GUARD);
1497     top_on_previous_step_ = to_space_.page_low();
1498     return AllocateRaw(size_in_bytes);
1499   } else {
1500     return AllocationResult::Retry();
1501   }
1502 }
1503
1504
1505 #ifdef VERIFY_HEAP
1506 // We do not use the SemiSpaceIterator because verification doesn't assume
1507 // that it works (it depends on the invariants we are checking).
1508 void NewSpace::Verify() {
1509   // The allocation pointer should be in the space or at the very end.
1510   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1511
1512   // There should be objects packed in from the low address up to the
1513   // allocation pointer.
1514   Address current = to_space_.first_page()->area_start();
1515   CHECK_EQ(current, to_space_.space_start());
1516
1517   while (current != top()) {
1518     if (!NewSpacePage::IsAtEnd(current)) {
1519       // The allocation pointer should not be in the middle of an object.
1520       CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1521             current < top());
1522
1523       HeapObject* object = HeapObject::FromAddress(current);
1524
1525       // The first word should be a map, and we expect all map pointers to
1526       // be in map space.
1527       Map* map = object->map();
1528       CHECK(map->IsMap());
1529       CHECK(heap()->map_space()->Contains(map));
1530
1531       // The object should not be code or a map.
1532       CHECK(!object->IsMap());
1533       CHECK(!object->IsCode());
1534
1535       // The object itself should look OK.
1536       object->ObjectVerify();
1537
1538       // All the interior pointers should be contained in the heap.
1539       VerifyPointersVisitor visitor;
1540       int size = object->Size();
1541       object->IterateBody(map->instance_type(), size, &visitor);
1542
1543       current += size;
1544     } else {
1545       // At end of page, switch to next page.
1546       NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1547       // Next page should be valid.
1548       CHECK(!page->is_anchor());
1549       current = page->area_start();
1550     }
1551   }
1552
1553   // Check semi-spaces.
1554   CHECK_EQ(from_space_.id(), kFromSpace);
1555   CHECK_EQ(to_space_.id(), kToSpace);
1556   from_space_.Verify();
1557   to_space_.Verify();
1558 }
1559 #endif
1560
1561 // -----------------------------------------------------------------------------
1562 // SemiSpace implementation
1563
1564 void SemiSpace::SetUp(Address start, int initial_capacity, int target_capacity,
1565                       int maximum_capacity) {
1566   // Creates a space in the young generation. The constructor does not
1567   // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
1568   // memory of size 'capacity' when set up, and does not grow or shrink
1569   // otherwise.  In the mark-compact collector, the memory region of the from
1570   // space is used as the marking stack. It requires contiguous memory
1571   // addresses.
1572   DCHECK(maximum_capacity >= Page::kPageSize);
1573   DCHECK(initial_capacity <= target_capacity);
1574   DCHECK(target_capacity <= maximum_capacity);
1575   initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1576   total_capacity_ = initial_capacity;
1577   target_capacity_ = RoundDown(target_capacity, Page::kPageSize);
1578   maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1579   maximum_committed_ = 0;
1580   committed_ = false;
1581   start_ = start;
1582   address_mask_ = ~(maximum_capacity - 1);
1583   object_mask_ = address_mask_ | kHeapObjectTagMask;
1584   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1585   age_mark_ = start_;
1586 }
1587
1588
1589 void SemiSpace::TearDown() {
1590   start_ = NULL;
1591   total_capacity_ = 0;
1592 }
1593
1594
1595 bool SemiSpace::Commit() {
1596   DCHECK(!is_committed());
1597   int pages = total_capacity_ / Page::kPageSize;
1598   if (!heap()->isolate()->memory_allocator()->CommitBlock(
1599           start_, total_capacity_, executable())) {
1600     return false;
1601   }
1602
1603   NewSpacePage* current = anchor();
1604   for (int i = 0; i < pages; i++) {
1605     NewSpacePage* new_page =
1606         NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
1607     new_page->InsertAfter(current);
1608     current = new_page;
1609   }
1610
1611   SetCapacity(total_capacity_);
1612   committed_ = true;
1613   Reset();
1614   return true;
1615 }
1616
1617
1618 bool SemiSpace::Uncommit() {
1619   DCHECK(is_committed());
1620   Address start = start_ + maximum_total_capacity_ - total_capacity_;
1621   if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
1622                                                             total_capacity_)) {
1623     return false;
1624   }
1625   anchor()->set_next_page(anchor());
1626   anchor()->set_prev_page(anchor());
1627
1628   committed_ = false;
1629   return true;
1630 }
1631
1632
1633 size_t SemiSpace::CommittedPhysicalMemory() {
1634   if (!is_committed()) return 0;
1635   size_t size = 0;
1636   NewSpacePageIterator it(this);
1637   while (it.has_next()) {
1638     size += it.next()->CommittedPhysicalMemory();
1639   }
1640   return size;
1641 }
1642
1643
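// Commits the memory between the current and the requested capacity and
// appends one NewSpacePage per newly committed page to the page list.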
1644 bool SemiSpace::GrowTo(int new_capacity) {
1645   if (!is_committed()) {
1646     if (!Commit()) return false;
1647   }
1648   DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1649   DCHECK(new_capacity <= maximum_total_capacity_);
1650   DCHECK(new_capacity > total_capacity_);
1651   int pages_before = total_capacity_ / Page::kPageSize;
1652   int pages_after = new_capacity / Page::kPageSize;
1653
1654   size_t delta = new_capacity - total_capacity_;
1655
1656   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1657   if (!heap()->isolate()->memory_allocator()->CommitBlock(
1658           start_ + total_capacity_, delta, executable())) {
1659     return false;
1660   }
1661   SetCapacity(new_capacity);
1662   NewSpacePage* last_page = anchor()->prev_page();
1663   DCHECK(last_page != anchor());
1664   for (int i = pages_before; i < pages_after; i++) {
1665     Address page_address = start_ + i * Page::kPageSize;
1666     NewSpacePage* new_page =
1667         NewSpacePage::Initialize(heap(), page_address, this);
1668     new_page->InsertAfter(last_page);
1669     Bitmap::Clear(new_page);
1670     // Duplicate the flags that were set on the old page.
1671     new_page->SetFlags(last_page->GetFlags(),
1672                        NewSpacePage::kCopyOnFlipFlagsMask);
1673     last_page = new_page;
1674   }
1675   return true;
1676 }
1677
1678
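// Uncommits the tail of the semispace down to new_capacity and truncates the
// page list; only the capacity bookkeeping is updated if the space is not
// committed.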
1679 bool SemiSpace::ShrinkTo(int new_capacity) {
1680   DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1681   DCHECK(new_capacity >= initial_total_capacity_);
1682   DCHECK(new_capacity < total_capacity_);
1683   if (is_committed()) {
1684     size_t delta = total_capacity_ - new_capacity;
1685     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1686
1687     MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1688     if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
1689       return false;
1690     }
1691
1692     int pages_after = new_capacity / Page::kPageSize;
1693     NewSpacePage* new_last_page =
1694         NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1695     new_last_page->set_next_page(anchor());
1696     anchor()->set_prev_page(new_last_page);
1697     DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
1698   }
1699
1700   SetCapacity(new_capacity);
1701
1702   return true;
1703 }
1704
1705
1706 bool SemiSpace::SetTotalCapacity(int new_capacity) {
1707   CHECK(!is_committed());
1708   if (new_capacity >= initial_total_capacity_ &&
1709       new_capacity <= maximum_total_capacity_) {
1710     total_capacity_ = new_capacity;
1711     return true;
1712   }
1713   return false;
1714 }
1715
1716
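// Relabels all pages of this semispace after a flip: the pages take this
// space as their owner, the given flags are applied under the mask, and the
// IN_TO_SPACE/IN_FROM_SPACE bits are updated to match the new identity.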
1717 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1718   anchor_.set_owner(this);
1719   // Fix up back-pointers to the anchor. The anchor's address changes
1720   // when we swap.
1721   anchor_.prev_page()->set_next_page(&anchor_);
1722   anchor_.next_page()->set_prev_page(&anchor_);
1723
1724   bool becomes_to_space = (id_ == kFromSpace);
1725   id_ = becomes_to_space ? kToSpace : kFromSpace;
1726   NewSpacePage* page = anchor_.next_page();
1727   while (page != &anchor_) {
1728     page->set_owner(this);
1729     page->SetFlags(flags, mask);
1730     if (becomes_to_space) {
1731       page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1732       page->SetFlag(MemoryChunk::IN_TO_SPACE);
1733       page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1734       page->ResetLiveBytes();
1735     } else {
1736       page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1737       page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1738     }
1739     DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1740     DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1741            page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1742     page = page->next_page();
1743   }
1744 }
1745
1746
1747 void SemiSpace::Reset() {
1748   DCHECK(anchor_.next_page() != &anchor_);
1749   current_page_ = anchor_.next_page();
1750 }
1751
1752
1753 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1754   // We won't be swapping semispaces without data in them.
1755   DCHECK(from->anchor_.next_page() != &from->anchor_);
1756   DCHECK(to->anchor_.next_page() != &to->anchor_);
1757
1758   // Swap bits.
1759   SemiSpace tmp = *from;
1760   *from = *to;
1761   *to = tmp;
1762
1763   // Fix up back-pointers to the page list anchor now that its address
1764   // has changed.
1765   // Swap to/from-space bits on pages.
1766   // Copy GC flags from old active space (from-space) to new (to-space).
1767   intptr_t flags = from->current_page()->GetFlags();
1768   to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1769
1770   from->FlipPages(0, 0);
1771 }
1772
1773
1774 void SemiSpace::SetCapacity(int new_capacity) {
1775   total_capacity_ = new_capacity;
1776   if (total_capacity_ > maximum_committed_) {
1777     maximum_committed_ = total_capacity_;
1778   }
1779 }
1780
1781
1782 void SemiSpace::set_age_mark(Address mark) {
1783   DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
1784   age_mark_ = mark;
1785   // Mark all pages up to the one containing mark.
1786   NewSpacePageIterator it(space_start(), mark);
1787   while (it.has_next()) {
1788     it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1789   }
1790 }
1791
1792
1793 #ifdef DEBUG
1794 void SemiSpace::Print() {}
1795 #endif
1796
1797 #ifdef VERIFY_HEAP
1798 void SemiSpace::Verify() {
1799   bool is_from_space = (id_ == kFromSpace);
1800   NewSpacePage* page = anchor_.next_page();
1801   CHECK(anchor_.semi_space() == this);
1802   while (page != &anchor_) {
1803     CHECK(page->semi_space() == this);
1804     CHECK(page->InNewSpace());
1805     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1806                                         : MemoryChunk::IN_TO_SPACE));
1807     CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1808                                          : MemoryChunk::IN_FROM_SPACE));
1809     CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1810     if (!is_from_space) {
1811       // The pointers-from-here-are-interesting flag isn't updated dynamically
1812       // on from-space pages, so it might be out of sync with the marking state.
1813       if (page->heap()->incremental_marking()->IsMarking()) {
1814         CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1815       } else {
1816         CHECK(
1817             !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1818       }
1819       // TODO(gc): Check that the live_bytes_count_ field matches the
1820       // black marking on the page (if we make it match in new-space).
1821     }
1822     CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1823     CHECK(page->prev_page()->next_page() == page);
1824     page = page->next_page();
1825   }
1826 }
1827 #endif
1828
1829 #ifdef DEBUG
1830 void SemiSpace::AssertValidRange(Address start, Address end) {
1831   // Addresses must belong to the same semi-space.
1832   NewSpacePage* page = NewSpacePage::FromLimit(start);
1833   NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1834   SemiSpace* space = page->semi_space();
1835   CHECK_EQ(space, end_page->semi_space());
1836   // Start address is before end address, either on same page,
1837   // or end address is on a later page in the linked list of
1838   // semi-space pages.
1839   if (page == end_page) {
1840     CHECK(start <= end);
1841   } else {
1842     while (page != end_page) {
1843       page = page->next_page();
1844       CHECK_NE(page, space->anchor());
1845     }
1846   }
1847 }
1848 #endif
1849
1850
1851 // -----------------------------------------------------------------------------
1852 // SemiSpaceIterator implementation.
1853 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1854   Initialize(space->bottom(), space->top(), NULL);
1855 }
1856
1857
1858 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1859                                      HeapObjectCallback size_func) {
1860   Initialize(space->bottom(), space->top(), size_func);
1861 }
1862
1863
1864 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1865   Initialize(start, space->top(), NULL);
1866 }
1867
1868
1869 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1870   Initialize(from, to, NULL);
1871 }
1872
1873
1874 void SemiSpaceIterator::Initialize(Address start, Address end,
1875                                    HeapObjectCallback size_func) {
1876   SemiSpace::AssertValidRange(start, end);
1877   current_ = start;
1878   limit_ = end;
1879   size_func_ = size_func;
1880 }
1881
1882
1883 #ifdef DEBUG
1884 // heap_histograms is shared, always clear it before using it.
1885 static void ClearHistograms(Isolate* isolate) {
1886 // We reset the name each time, though it hasn't changed.
1887 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1888   INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1889 #undef DEF_TYPE_NAME
1890
1891 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1892   INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1893 #undef CLEAR_HISTOGRAM
1894
1895   isolate->js_spill_information()->Clear();
1896 }
1897
1898
1899 static void ClearCodeKindStatistics(int* code_kind_statistics) {
1900   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1901     code_kind_statistics[i] = 0;
1902   }
1903 }
1904
1905
1906 static void ReportCodeKindStatistics(int* code_kind_statistics) {
1907   PrintF("\n   Code kind histograms: \n");
1908   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1909     if (code_kind_statistics[i] > 0) {
1910       PrintF("     %-20s: %10d bytes\n",
1911              Code::Kind2String(static_cast<Code::Kind>(i)),
1912              code_kind_statistics[i]);
1913     }
1914   }
1915   PrintF("\n");
1916 }
1917
1918
1919 static int CollectHistogramInfo(HeapObject* obj) {
1920   Isolate* isolate = obj->GetIsolate();
1921   InstanceType type = obj->map()->instance_type();
1922   DCHECK(0 <= type && type <= LAST_TYPE);
1923   DCHECK(isolate->heap_histograms()[type].name() != NULL);
1924   isolate->heap_histograms()[type].increment_number(1);
1925   isolate->heap_histograms()[type].increment_bytes(obj->Size());
1926
1927   if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1928     JSObject::cast(obj)
1929         ->IncrementSpillStatistics(isolate->js_spill_information());
1930   }
1931
1932   return obj->Size();
1933 }
1934
1935
1936 static void ReportHistogram(Isolate* isolate, bool print_spill) {
1937   PrintF("\n  Object Histogram:\n");
1938   for (int i = 0; i <= LAST_TYPE; i++) {
1939     if (isolate->heap_histograms()[i].number() > 0) {
1940       PrintF("    %-34s%10d (%10d bytes)\n",
1941              isolate->heap_histograms()[i].name(),
1942              isolate->heap_histograms()[i].number(),
1943              isolate->heap_histograms()[i].bytes());
1944     }
1945   }
1946   PrintF("\n");
1947
1948   // Summarize string types.
1949   int string_number = 0;
1950   int string_bytes = 0;
1951 #define INCREMENT(type, size, name, camel_name)               \
1952   string_number += isolate->heap_histograms()[type].number(); \
1953   string_bytes += isolate->heap_histograms()[type].bytes();
1954   STRING_TYPE_LIST(INCREMENT)
1955 #undef INCREMENT
1956   if (string_number > 0) {
1957     PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1958            string_bytes);
1959   }
1960
1961   if (FLAG_collect_heap_spill_statistics && print_spill) {
1962     isolate->js_spill_information()->Print();
1963   }
1964 }
1965 #endif  // DEBUG
1966
1967
1968 // Support for statistics gathering for --heap-stats and --log-gc.
1969 void NewSpace::ClearHistograms() {
1970   for (int i = 0; i <= LAST_TYPE; i++) {
1971     allocated_histogram_[i].clear();
1972     promoted_histogram_[i].clear();
1973   }
1974 }
1975
1976
1977 // Because the copying collector does not touch garbage objects, we iterate
1978 // the new space before a collection to get a histogram of allocated objects.
1979 // This only happens when --log-gc flag is set.
1980 void NewSpace::CollectStatistics() {
1981   ClearHistograms();
1982   SemiSpaceIterator it(this);
1983   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1984     RecordAllocation(obj);
1985 }
1986
1987
1988 static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
1989                                const char* description) {
1990   LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1991   // Lump all the string types together.
1992   int string_number = 0;
1993   int string_bytes = 0;
1994 #define INCREMENT(type, size, name, camel_name) \
1995   string_number += info[type].number();         \
1996   string_bytes += info[type].bytes();
1997   STRING_TYPE_LIST(INCREMENT)
1998 #undef INCREMENT
1999   if (string_number > 0) {
2000     LOG(isolate,
2001         HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
2002   }
2003
2004   // Then do the other types.
2005   for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
2006     if (info[i].number() > 0) {
2007       LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
2008                                        info[i].bytes()));
2009     }
2010   }
2011   LOG(isolate, HeapSampleEndEvent("NewSpace", description));
2012 }
2013
2014
2015 void NewSpace::ReportStatistics() {
2016 #ifdef DEBUG
2017   if (FLAG_heap_stats) {
2018     float pct = static_cast<float>(Available()) / TotalCapacity();
2019     PrintF("  capacity: %" V8_PTR_PREFIX
2020            "d"
2021            ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2022            TotalCapacity(), Available(), static_cast<int>(pct * 100));
2023     PrintF("\n  Object Histogram:\n");
2024     for (int i = 0; i <= LAST_TYPE; i++) {
2025       if (allocated_histogram_[i].number() > 0) {
2026         PrintF("    %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
2027                allocated_histogram_[i].number(),
2028                allocated_histogram_[i].bytes());
2029       }
2030     }
2031     PrintF("\n");
2032   }
2033 #endif  // DEBUG
2034
2035   if (FLAG_log_gc) {
2036     Isolate* isolate = heap()->isolate();
2037     DoReportStatistics(isolate, allocated_histogram_, "allocated");
2038     DoReportStatistics(isolate, promoted_histogram_, "promoted");
2039   }
2040 }
2041
2042
2043 void NewSpace::RecordAllocation(HeapObject* obj) {
2044   InstanceType type = obj->map()->instance_type();
2045   DCHECK(0 <= type && type <= LAST_TYPE);
2046   allocated_histogram_[type].increment_number(1);
2047   allocated_histogram_[type].increment_bytes(obj->Size());
2048 }
2049
2050
2051 void NewSpace::RecordPromotion(HeapObject* obj) {
2052   InstanceType type = obj->map()->instance_type();
2053   DCHECK(0 <= type && type <= LAST_TYPE);
2054   promoted_histogram_[type].increment_number(1);
2055   promoted_histogram_[type].increment_bytes(obj->Size());
2056 }
2057
2058
2059 size_t NewSpace::CommittedPhysicalMemory() {
2060   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2061   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2062   size_t size = to_space_.CommittedPhysicalMemory();
2063   if (from_space_.is_committed()) {
2064     size += from_space_.CommittedPhysicalMemory();
2065   }
2066   return size;
2067 }
2068
2069
2070 // -----------------------------------------------------------------------------
2071 // Free lists for old object spaces implementation
2072
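// Moves all nodes of the given category to the front of this category's list
// (holding both mutexes) and returns the number of bytes transferred.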
2073 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
2074   intptr_t free_bytes = 0;
2075   if (category->top() != NULL) {
2076     // This is safe (not going to deadlock) since Concatenate operations
2077     // are never performed on the same free lists at the same time in
2078     // reverse order.
2079     base::LockGuard<base::Mutex> target_lock_guard(mutex());
2080     base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
2081     DCHECK(category->end_ != NULL);
2082     free_bytes = category->available();
2083     if (end_ == NULL) {
2084       end_ = category->end();
2085     } else {
2086       category->end()->set_next(top());
2087     }
2088     set_top(category->top());
2089     base::NoBarrier_Store(&top_, category->top_);
2090     available_ += category->available();
2091     category->Reset();
2092   }
2093   return free_bytes;
2094 }
2095
2096
2097 void FreeListCategory::Reset() {
2098   set_top(NULL);
2099   set_end(NULL);
2100   set_available(0);
2101 }
2102
2103
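// Unlinks every node of this category that lies on page p and returns the
// number of bytes removed.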
2104 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
2105   int sum = 0;
2106   FreeSpace* t = top();
2107   FreeSpace** n = &t;
2108   while (*n != NULL) {
2109     if (Page::FromAddress((*n)->address()) == p) {
2110       FreeSpace* free_space = *n;
2111       sum += free_space->Size();
2112       *n = (*n)->next();
2113     } else {
2114       n = (*n)->next_address();
2115     }
2116   }
2117   set_top(t);
2118   if (top() == NULL) {
2119     set_end(NULL);
2120   }
2121   available_ -= sum;
2122   return sum;
2123 }
2124
2125
2126 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
2127   FreeSpace* node = top();
2128   while (node != NULL) {
2129     if (Page::FromAddress(node->address()) == p) return true;
2130     node = node->next();
2131   }
2132   return false;
2133 }
2134
2135
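// Removes and returns the first node that does not lie on an evacuation
// candidate page; skipped nodes are dropped from the list and from the
// available byte count. Returns NULL if no such node exists.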
2136 FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
2137   FreeSpace* node = top();
2138
2139   if (node == NULL) return NULL;
2140
2141   while (node != NULL &&
2142          Page::FromAddress(node->address())->IsEvacuationCandidate()) {
2143     available_ -= node->Size();
2144     node = node->next();
2145   }
2146
2147   if (node != NULL) {
2148     set_top(node->next());
2149     *node_size = node->Size();
2150     available_ -= *node_size;
2151   } else {
2152     set_top(NULL);
2153   }
2154
2155   if (top() == NULL) {
2156     set_end(NULL);
2157   }
2158
2159   return node;
2160 }
2161
2162
2163 FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
2164                                               int* node_size) {
2165   FreeSpace* node = PickNodeFromList(node_size);
2166   if (node != NULL && *node_size < size_in_bytes) {
2167     Free(node, *node_size);
2168     *node_size = 0;
2169     return NULL;
2170   }
2171   return node;
2172 }
2173
2174
2175 void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
2176   DCHECK_LE(FreeList::kSmallListMin, size_in_bytes);
2177   free_space->set_next(top());
2178   set_top(free_space);
2179   if (end_ == NULL) {
2180     end_ = free_space;
2181   }
2182   available_ += size_in_bytes;
2183 }
2184
2185
2186 void FreeListCategory::RepairFreeList(Heap* heap) {
2187   FreeSpace* n = top();
2188   while (n != NULL) {
2189     Map** map_location = reinterpret_cast<Map**>(n->address());
2190     if (*map_location == NULL) {
2191       *map_location = heap->free_space_map();
2192     } else {
2193       DCHECK(*map_location == heap->free_space_map());
2194     }
2195     n = n->next();
2196   }
2197 }
2198
2199
2200 FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
2201   Reset();
2202 }
2203
2204
2205 intptr_t FreeList::Concatenate(FreeList* free_list) {
2206   intptr_t free_bytes = 0;
2207   free_bytes += small_list_.Concatenate(free_list->small_list());
2208   free_bytes += medium_list_.Concatenate(free_list->medium_list());
2209   free_bytes += large_list_.Concatenate(free_list->large_list());
2210   free_bytes += huge_list_.Concatenate(free_list->huge_list());
2211   return free_bytes;
2212 }
2213
2214
2215 void FreeList::Reset() {
2216   small_list_.Reset();
2217   medium_list_.Reset();
2218   large_list_.Reset();
2219   huge_list_.Reset();
2220 }
2221
2222
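// Turns the block at [start, start + size_in_bytes) into a filler object and
// links it into the free list matching its size class. Returns the number of
// bytes that were too small to be added to any list (such blocks are only
// recorded as wasted on their page).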
2223 int FreeList::Free(Address start, int size_in_bytes) {
2224   if (size_in_bytes == 0) return 0;
2225
2226   heap_->CreateFillerObjectAt(start, size_in_bytes);
2227
2228   Page* page = Page::FromAddress(start);
2229
2230   // Early return to drop too-small blocks on the floor.
2231   if (size_in_bytes < kSmallListMin) {
2232     page->add_non_available_small_blocks(size_in_bytes);
2233     return size_in_bytes;
2234   }
2235
2236   FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
2237   // Insert other blocks at the head of a free list of the appropriate
2238   // magnitude.
2239   if (size_in_bytes <= kSmallListMax) {
2240     small_list_.Free(free_space, size_in_bytes);
2241     page->add_available_in_small_free_list(size_in_bytes);
2242   } else if (size_in_bytes <= kMediumListMax) {
2243     medium_list_.Free(free_space, size_in_bytes);
2244     page->add_available_in_medium_free_list(size_in_bytes);
2245   } else if (size_in_bytes <= kLargeListMax) {
2246     large_list_.Free(free_space, size_in_bytes);
2247     page->add_available_in_large_free_list(size_in_bytes);
2248   } else {
2249     huge_list_.Free(free_space, size_in_bytes);
2250     page->add_available_in_huge_free_list(size_in_bytes);
2251   }
2252
2253   DCHECK(IsVeryLong() || available() == SumFreeLists());
2254   return 0;
2255 }
2256
2257
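// Looks for a node of at least size_in_bytes: first in the small/medium/large
// lists whose size class guarantees a large enough node, then by scanning the
// huge list (dropping nodes on evacuation candidate pages), and finally in the
// list matching the request's own size class.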
2258 FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2259   FreeSpace* node = NULL;
2260   Page* page = NULL;
2261
2262   if (size_in_bytes <= kSmallAllocationMax) {
2263     node = small_list_.PickNodeFromList(node_size);
2264     if (node != NULL) {
2265       DCHECK(size_in_bytes <= *node_size);
2266       page = Page::FromAddress(node->address());
2267       page->add_available_in_small_free_list(-(*node_size));
2268       DCHECK(IsVeryLong() || available() == SumFreeLists());
2269       return node;
2270     }
2271   }
2272
2273   if (size_in_bytes <= kMediumAllocationMax) {
2274     node = medium_list_.PickNodeFromList(node_size);
2275     if (node != NULL) {
2276       DCHECK(size_in_bytes <= *node_size);
2277       page = Page::FromAddress(node->address());
2278       page->add_available_in_medium_free_list(-(*node_size));
2279       DCHECK(IsVeryLong() || available() == SumFreeLists());
2280       return node;
2281     }
2282   }
2283
2284   if (size_in_bytes <= kLargeAllocationMax) {
2285     node = large_list_.PickNodeFromList(node_size);
2286     if (node != NULL) {
2287       DCHECK(size_in_bytes <= *node_size);
2288       page = Page::FromAddress(node->address());
2289       page->add_available_in_large_free_list(-(*node_size));
2290       DCHECK(IsVeryLong() || available() == SumFreeLists());
2291       return node;
2292     }
2293   }
2294
2295   int huge_list_available = huge_list_.available();
2296   FreeSpace* top_node = huge_list_.top();
2297   for (FreeSpace** cur = &top_node; *cur != NULL;
2298        cur = (*cur)->next_address()) {
2299     FreeSpace* cur_node = *cur;
2300     while (cur_node != NULL &&
2301            Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
2302       int size = cur_node->Size();
2303       huge_list_available -= size;
2304       page = Page::FromAddress(cur_node->address());
2305       page->add_available_in_huge_free_list(-size);
2306       cur_node = cur_node->next();
2307     }
2308
2309     *cur = cur_node;
2310     if (cur_node == NULL) {
2311       huge_list_.set_end(NULL);
2312       break;
2313     }
2314
2315     int size = cur_node->Size();
2316     if (size >= size_in_bytes) {
2317       // Large enough node found.  Unlink it from the list.
2318       node = *cur;
2319       *cur = node->next();
2320       *node_size = size;
2321       huge_list_available -= size;
2322       page = Page::FromAddress(node->address());
2323       page->add_available_in_huge_free_list(-size);
2324       break;
2325     }
2326   }
2327
2328   huge_list_.set_top(top_node);
2329   if (huge_list_.top() == NULL) {
2330     huge_list_.set_end(NULL);
2331   }
2332   huge_list_.set_available(huge_list_available);
2333
2334   if (node != NULL) {
2335     DCHECK(IsVeryLong() || available() == SumFreeLists());
2336     return node;
2337   }
2338
2339   if (size_in_bytes <= kSmallListMax) {
2340     node = small_list_.PickNodeFromList(size_in_bytes, node_size);
2341     if (node != NULL) {
2342       DCHECK(size_in_bytes <= *node_size);
2343       page = Page::FromAddress(node->address());
2344       page->add_available_in_small_free_list(-(*node_size));
2345     }
2346   } else if (size_in_bytes <= kMediumListMax) {
2347     node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
2348     if (node != NULL) {
2349       DCHECK(size_in_bytes <= *node_size);
2350       page = Page::FromAddress(node->address());
2351       page->add_available_in_medium_free_list(-(*node_size));
2352     }
2353   } else if (size_in_bytes <= kLargeListMax) {
2354     node = large_list_.PickNodeFromList(size_in_bytes, node_size);
2355     if (node != NULL) {
2356       DCHECK(size_in_bytes <= *node_size);
2357       page = Page::FromAddress(node->address());
2358       page->add_available_in_large_free_list(-(*node_size));
2359     }
2360   }
2361
2362   DCHECK(IsVeryLong() || available() == SumFreeLists());
2363   return node;
2364 }
2365
2366
2367 // Allocation on the old space free list.  If it succeeds then a new linear
2368 // allocation space has been set up with the top and limit of the space.  If
2369 // the allocation fails then NULL is returned, and the caller can perform a GC
2370 // or allocate a new page before retrying.
2371 HeapObject* FreeList::Allocate(int size_in_bytes) {
2372   DCHECK(0 < size_in_bytes);
2373   DCHECK(size_in_bytes <= kMaxBlockSize);
2374   DCHECK(IsAligned(size_in_bytes, kPointerSize));
2375   // Don't free list allocate if there is linear space available.
2376   DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
2377
2378   int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2379   // Mark the old linear allocation area with a free space map so it can be
2380   // skipped when scanning the heap.  This also puts it back in the free list
2381   // if it is big enough.
2382   owner_->Free(owner_->top(), old_linear_size);
2383
2384   owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
2385                                                       old_linear_size);
2386
2387   int new_node_size = 0;
2388   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2389   if (new_node == NULL) {
2390     owner_->SetTopAndLimit(NULL, NULL);
2391     return NULL;
2392   }
2393
2394   int bytes_left = new_node_size - size_in_bytes;
2395   DCHECK(bytes_left >= 0);
2396
2397 #ifdef DEBUG
2398   for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2399     reinterpret_cast<Object**>(new_node->address())[i] =
2400         Smi::FromInt(kCodeZapValue);
2401   }
2402 #endif
2403
2404   // The old-space-step might have finished sweeping and restarted marking.
2405   // Verify that it did not turn the page of the new node into an evacuation
2406   // candidate.
2407   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2408
2409   const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2410
2411   // Memory in the linear allocation area is counted as allocated.  We may free
2412   // a little of this again immediately - see below.
2413   owner_->Allocate(new_node_size);
2414
2415   if (owner_->heap()->inline_allocation_disabled()) {
2416     // Keep the linear allocation area empty if requested to do so; just
2417     // return the area to the free list instead.
2418     owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2419     DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
2420   } else if (bytes_left > kThreshold &&
2421              owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2422              FLAG_incremental_marking_steps) {
2423     int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2424     // We don't want to give too large linear areas to the allocator while
2425     // incremental marking is going on, because we won't check again whether
2426     // we want to do another increment until the linear area is used up.
2427     owner_->Free(new_node->address() + size_in_bytes + linear_size,
2428                  new_node_size - size_in_bytes - linear_size);
2429     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2430                            new_node->address() + size_in_bytes + linear_size);
2431   } else if (bytes_left > 0) {
2432     // Normally we give the rest of the node to the allocator as its new
2433     // linear allocation area.
2434     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2435                            new_node->address() + new_node_size);
2436   } else {
2437     // TODO(gc): Try not freeing the linear allocation region when bytes_left
2438     // is zero.
2439     owner_->SetTopAndLimit(NULL, NULL);
2440   }
2441
2442   return new_node;
2443 }
2444
2445
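// Removes all free-list entries on page p from every category and returns the
// number of bytes evicted; the smaller lists are only scanned if the huge
// list alone did not account for the page's whole area.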
2446 intptr_t FreeList::EvictFreeListItems(Page* p) {
2447   intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
2448   p->set_available_in_huge_free_list(0);
2449
2450   if (sum < p->area_size()) {
2451     sum += small_list_.EvictFreeListItemsInList(p) +
2452            medium_list_.EvictFreeListItemsInList(p) +
2453            large_list_.EvictFreeListItemsInList(p);
2454     p->set_available_in_small_free_list(0);
2455     p->set_available_in_medium_free_list(0);
2456     p->set_available_in_large_free_list(0);
2457   }
2458
2459   return sum;
2460 }
2461
2462
2463 bool FreeList::ContainsPageFreeListItems(Page* p) {
2464   return huge_list_.ContainsPageFreeListItemsInList(p) ||
2465          small_list_.ContainsPageFreeListItemsInList(p) ||
2466          medium_list_.ContainsPageFreeListItemsInList(p) ||
2467          large_list_.ContainsPageFreeListItemsInList(p);
2468 }
2469
2470
2471 void FreeList::RepairLists(Heap* heap) {
2472   small_list_.RepairFreeList(heap);
2473   medium_list_.RepairFreeList(heap);
2474   large_list_.RepairFreeList(heap);
2475   huge_list_.RepairFreeList(heap);
2476 }
2477
2478
2479 #ifdef DEBUG
2480 intptr_t FreeListCategory::SumFreeList() {
2481   intptr_t sum = 0;
2482   FreeSpace* cur = top();
2483   while (cur != NULL) {
2484     DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
2485     sum += cur->nobarrier_size();
2486     cur = cur->next();
2487   }
2488   return sum;
2489 }
2490
2491
2492 static const int kVeryLongFreeList = 500;
2493
2494
2495 int FreeListCategory::FreeListLength() {
2496   int length = 0;
2497   FreeSpace* cur = top();
2498   while (cur != NULL) {
2499     length++;
2500     cur = cur->next();
2501     if (length == kVeryLongFreeList) return length;
2502   }
2503   return length;
2504 }
2505
2506
2507 bool FreeList::IsVeryLong() {
2508   if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
2509   if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
2510   if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
2511   if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
2512   return false;
2513 }
2514
2515
2516 // This can take a very long time because it is linear in the number of entries
2517 // on the free list, so it should not be called if FreeListLength returns
2518 // kVeryLongFreeList.
2519 intptr_t FreeList::SumFreeLists() {
2520   intptr_t sum = small_list_.SumFreeList();
2521   sum += medium_list_.SumFreeList();
2522   sum += large_list_.SumFreeList();
2523   sum += huge_list_.SumFreeList();
2524   return sum;
2525 }
2526 #endif
2527
2528
2529 // -----------------------------------------------------------------------------
2530 // OldSpace implementation
2531
2532 void PagedSpace::PrepareForMarkCompact() {
2533   // We don't have a linear allocation area while sweeping.  It will be restored
2534   // on the first allocation after the sweep.
2535   EmptyAllocationInfo();
2536
2537   // This counter will be increased for pages which will be swept by the
2538   // sweeper threads.
2539   unswept_free_bytes_ = 0;
2540
2541   // Clear the free list before a full GC---it will be rebuilt afterward.
2542   free_list_.Reset();
2543 }
2544
2545
2546 intptr_t PagedSpace::SizeOfObjects() {
2547   DCHECK(!FLAG_concurrent_sweeping ||
2548          heap()->mark_compact_collector()->sweeping_in_progress() ||
2549          (unswept_free_bytes_ == 0));
2550   return Size() - unswept_free_bytes_ - (limit() - top());
2551 }
2552
2553
2554 // After we have booted, we have created a map which represents free space
2555 // on the heap.  If there was already a free list then the elements on it
2556 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
2557 // fix them.
2558 void PagedSpace::RepairFreeListsAfterDeserialization() {
2559   free_list_.RepairLists(heap());
2560   // Each page may have a small free space that is not tracked by a free list.
2561   // Update the maps for those free space objects.
2562   PageIterator iterator(this);
2563   while (iterator.has_next()) {
2564     Page* page = iterator.next();
2565     int size = static_cast<int>(page->non_available_small_blocks());
2566     if (size == 0) continue;
2567     Address address = page->OffsetToAddress(Page::kPageSize - size);
2568     heap()->CreateFillerObjectAt(address, size);
2569   }
2570 }
2571
2572
2573 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2574   if (allocation_info_.top() >= allocation_info_.limit()) return;
2575
2576   if (Page::FromAllocationTop(allocation_info_.top())
2577           ->IsEvacuationCandidate()) {
2578     // Create filler object to keep page iterable if it was iterable.
2579     int remaining =
2580         static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2581     heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
2582
2583     allocation_info_.set_top(NULL);
2584     allocation_info_.set_limit(NULL);
2585   }
2586 }
2587
2588
2589 HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
2590     int size_in_bytes) {
2591   MarkCompactCollector* collector = heap()->mark_compact_collector();
2592   if (collector->sweeping_in_progress()) {
2593     // Wait for the sweeper threads here and complete the sweeping phase.
2594     collector->EnsureSweepingCompleted();
2595
2596     // After waiting for the sweeper threads, there may be new free-list
2597     // entries.
2598     return free_list_.Allocate(size_in_bytes);
2599   }
2600   return NULL;
2601 }
2602
2603
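// Slow path of paged-space allocation: refill the free list from concurrent
// sweeping, sweep pages on the main thread if needed, and otherwise wait for
// the sweeper threads or expand the space before giving up.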
2604 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2605   // Allocation in this space has failed.
2606
2607   MarkCompactCollector* collector = heap()->mark_compact_collector();
2608   // Sweeping is still in progress.
2609   if (collector->sweeping_in_progress()) {
2610     // First try to refill the free-list; concurrent sweeper threads
2611     // may have freed some objects in the meantime.
2612     collector->RefillFreeList(this);
2613
2614     // Retry the free list allocation.
2615     HeapObject* object = free_list_.Allocate(size_in_bytes);
2616     if (object != NULL) return object;
2617
2618     // If sweeping is still in progress try to sweep pages on the main thread.
2619     int free_chunk = collector->SweepInParallel(this, size_in_bytes);
2620     collector->RefillFreeList(this);
2621     if (free_chunk >= size_in_bytes) {
2622       HeapObject* object = free_list_.Allocate(size_in_bytes);
2623       // We should be able to allocate an object here since we just freed that
2624       // much memory.
2625       DCHECK(object != NULL);
2626       if (object != NULL) return object;
2627     }
2628   }
2629
2630   // Free list allocation failed and there is no next page.  Fail if we have
2631   // hit the old generation size limit that should cause a garbage
2632   // collection.
2633   if (!heap()->always_allocate() &&
2634       heap()->OldGenerationAllocationLimitReached()) {
2635     // If sweeper threads are active, wait for them at that point and steal
2636     // elements from their free-lists.
2637     HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2638     return object;
2639   }
2640
2641   // Try to expand the space and allocate in the new next page.
2642   if (Expand()) {
2643     DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
2644     return free_list_.Allocate(size_in_bytes);
2645   }
2646
2647   // If sweeper threads are active, wait for them at that point and steal
2648   // elements from their free-lists. Allocation may still fail, which
2649   // would indicate that there is not enough memory for the given allocation.
2650   return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2651 }
2652
2653
2654 #ifdef DEBUG
2655 void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2656   CommentStatistic* comments_statistics =
2657       isolate->paged_space_comments_statistics();
2658   ReportCodeKindStatistics(isolate->code_kind_statistics());
2659   PrintF(
2660       "Code comment statistics (\"   [ comment-txt   :    size/   "
2661       "count  (average)\"):\n");
2662   for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2663     const CommentStatistic& cs = comments_statistics[i];
2664     if (cs.size > 0) {
2665       PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
2666              cs.size / cs.count);
2667     }
2668   }
2669   PrintF("\n");
2670 }
2671
2672
2673 void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2674   CommentStatistic* comments_statistics =
2675       isolate->paged_space_comments_statistics();
2676   ClearCodeKindStatistics(isolate->code_kind_statistics());
2677   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2678     comments_statistics[i].Clear();
2679   }
2680   comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2681   comments_statistics[CommentStatistic::kMaxComments].size = 0;
2682   comments_statistics[CommentStatistic::kMaxComments].count = 0;
2683 }
2684
2685
2686 // Adds a comment to the 'comment_statistics' table. Performance OK as long as
2687 // 'kMaxComments' is small.
2688 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2689   CommentStatistic* comments_statistics =
2690       isolate->paged_space_comments_statistics();
2691   // Do not count empty comments
2692   if (delta <= 0) return;
2693   CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2694   // Search for a free or matching entry in 'comments_statistics': 'cs'
2695   // points to result.
2696   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2697     if (comments_statistics[i].comment == NULL) {
2698       cs = &comments_statistics[i];
2699       cs->comment = comment;
2700       break;
2701     } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2702       cs = &comments_statistics[i];
2703       break;
2704     }
2705   }
2706   // Update entry for 'comment'
2707   cs->size += delta;
2708   cs->count += 1;
2709 }
2710
2711
2712 // Call for each nested comment start (start marked with '[ xxx', end marked
2713 // with ']').  RelocIterator 'it' must point to a comment reloc info.
2714 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2715   DCHECK(!it->done());
2716   DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
2717   const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2718   if (tmp[0] != '[') {
2719     // Not a nested comment; skip
2720     return;
2721   }
2722
2723   // Search for end of nested comment or a new nested comment
2724   const char* const comment_txt =
2725       reinterpret_cast<const char*>(it->rinfo()->data());
2726   const byte* prev_pc = it->rinfo()->pc();
2727   int flat_delta = 0;
2728   it->next();
2729   while (true) {
2730     // All nested comments must be terminated properly, and therefore we exit
2731     // from the loop.
2732     DCHECK(!it->done());
2733     if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2734       const char* const txt =
2735           reinterpret_cast<const char*>(it->rinfo()->data());
2736       flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2737       if (txt[0] == ']') break;  // End of nested comment
2738       // A new comment
2739       CollectCommentStatistics(isolate, it);
2740       // Skip code that was covered with previous comment
2741       prev_pc = it->rinfo()->pc();
2742     }
2743     it->next();
2744   }
2745   EnterComment(isolate, comment_txt, flat_delta);
2746 }
2747
2748
2749 // Collects code size statistics:
2750 // - by code kind
2751 // - by code comment
2752 void PagedSpace::CollectCodeStatistics() {
2753   Isolate* isolate = heap()->isolate();
2754   HeapObjectIterator obj_it(this);
2755   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2756     if (obj->IsCode()) {
2757       Code* code = Code::cast(obj);
2758       isolate->code_kind_statistics()[code->kind()] += code->Size();
2759       RelocIterator it(code);
2760       int delta = 0;
2761       const byte* prev_pc = code->instruction_start();
2762       while (!it.done()) {
2763         if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2764           delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2765           CollectCommentStatistics(isolate, &it);
2766           prev_pc = it.rinfo()->pc();
2767         }
2768         it.next();
2769       }
2770
2771       DCHECK(code->instruction_start() <= prev_pc &&
2772              prev_pc <= code->instruction_end());
2773       delta += static_cast<int>(code->instruction_end() - prev_pc);
2774       EnterComment(isolate, "NoComment", delta);
2775     }
2776   }
2777 }
2778
2779
2780 void PagedSpace::ReportStatistics() {
2781   int pct = static_cast<int>(Available() * 100 / Capacity());
2782   PrintF("  capacity: %" V8_PTR_PREFIX
2783          "d"
2784          ", waste: %" V8_PTR_PREFIX
2785          "d"
2786          ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2787          Capacity(), Waste(), Available(), pct);
2788
2789   if (heap()->mark_compact_collector()->sweeping_in_progress()) {
2790     heap()->mark_compact_collector()->EnsureSweepingCompleted();
2791   }
2792   ClearHistograms(heap()->isolate());
2793   HeapObjectIterator obj_it(this);
2794   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2795     CollectHistogramInfo(obj);
2796   ReportHistogram(heap()->isolate(), true);
2797 }
2798 #endif
2799
2800
2801 // -----------------------------------------------------------------------------
2802 // MapSpace implementation
2803 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2804 // there is at least one non-inlined virtual function. I would prefer to hide
2805 // the VerifyObject definition behind VERIFY_HEAP.
2806
2807 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
2808
2809
2810 // -----------------------------------------------------------------------------
2811 // CellSpace implementation
2812 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2813 // there is at least one non-inlined virtual function. I would prefer to hide
2814 // the VerifyObject definition behind VERIFY_HEAP.
2815
2816 void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); }
2817
2818
2819 // -----------------------------------------------------------------------------
2820 // LargeObjectIterator
2821
2822 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2823   current_ = space->first_page_;
2824   size_func_ = NULL;
2825 }
2826
2827
2828 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2829                                          HeapObjectCallback size_func) {
2830   current_ = space->first_page_;
2831   size_func_ = size_func;
2832 }
2833
2834
2835 HeapObject* LargeObjectIterator::Next() {
2836   if (current_ == NULL) return NULL;
2837
2838   HeapObject* object = current_->GetObject();
2839   current_ = current_->next_page();
2840   return object;
2841 }
2842
2843
2844 // -----------------------------------------------------------------------------
2845 // LargeObjectSpace
2846 static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
2847
2848
2849 LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
2850                                    AllocationSpace id)
2851     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
2852       max_capacity_(max_capacity),
2853       first_page_(NULL),
2854       size_(0),
2855       page_count_(0),
2856       objects_size_(0),
2857       chunk_map_(ComparePointers, 1024) {}
2858
2859
2860 bool LargeObjectSpace::SetUp() {
2861   first_page_ = NULL;
2862   size_ = 0;
2863   maximum_committed_ = 0;
2864   page_count_ = 0;
2865   objects_size_ = 0;
2866   chunk_map_.Clear();
2867   return true;
2868 }
2869
2870
2871 void LargeObjectSpace::TearDown() {
2872   while (first_page_ != NULL) {
2873     LargePage* page = first_page_;
2874     first_page_ = first_page_->next_page();
2875     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2876
2877     ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2878     heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2879         space, kAllocationActionFree, page->size());
2880     heap()->isolate()->memory_allocator()->Free(page);
2881   }
2882   SetUp();
2883 }
2884
2885
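// Allocates a dedicated large page for an object of object_size bytes and
// registers every MemoryChunk::kAlignment-aligned address covered by the page
// in chunk_map_ so that FindPage() can locate it.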
2886 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
2887                                                Executability executable) {
2888   // Check if we want to force a GC before growing the old space further.
2889   // If so, fail the allocation.
2890   if (!heap()->always_allocate() &&
2891       heap()->OldGenerationAllocationLimitReached()) {
2892     return AllocationResult::Retry(identity());
2893   }
2894
2895   if (!CanAllocateSize(object_size)) return AllocationResult::Retry(identity());
2896
2897   LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
2898       object_size, this, executable);
2899   if (page == NULL) return AllocationResult::Retry(identity());
2900   DCHECK(page->area_size() >= object_size);
2901
2902   size_ += static_cast<int>(page->size());
2903   objects_size_ += object_size;
2904   page_count_++;
2905   page->set_next_page(first_page_);
2906   first_page_ = page;
2907
2908   if (size_ > maximum_committed_) {
2909     maximum_committed_ = size_;
2910   }
2911
2912   // Register all MemoryChunk::kAlignment-aligned chunks covered by
2913   // this large page in the chunk map.
2914   uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2915   uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2916   for (uintptr_t key = base; key <= limit; key++) {
2917     HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2918                                               static_cast<uint32_t>(key), true);
2919     DCHECK(entry != NULL);
2920     entry->value = page;
2921   }
2922
2923   HeapObject* object = page->GetObject();
2924
2925   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
2926
2927   if (Heap::ShouldZapGarbage()) {
2928     // Make the object consistent so the heap can be verified in OldSpaceStep.
2929     // We only need to do this in debug builds or if verify_heap is on.
2930     reinterpret_cast<Object**>(object->address())[0] =
2931         heap()->fixed_array_map();
2932     reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2933   }
2934
2935   heap()->incremental_marking()->OldSpaceStep(object_size);
2936   return object;
2937 }
2938
2939
2940 size_t LargeObjectSpace::CommittedPhysicalMemory() {
2941   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2942   size_t size = 0;
2943   LargePage* current = first_page_;
2944   while (current != NULL) {
2945     size += current->CommittedPhysicalMemory();
2946     current = current->next_page();
2947   }
2948   return size;
2949 }
2950
2951
2952 // GC support
2953 Object* LargeObjectSpace::FindObject(Address a) {
2954   LargePage* page = FindPage(a);
2955   if (page != NULL) {
2956     return page->GetObject();
2957   }
2958   return Smi::FromInt(0);  // Signaling not found.
2959 }
2960
2961
2962 LargePage* LargeObjectSpace::FindPage(Address a) {
2963   uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
2964   HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2965                                         static_cast<uint32_t>(key), false);
2966   if (e != NULL) {
2967     DCHECK(e->value != NULL);
2968     LargePage* page = reinterpret_cast<LargePage*>(e->value);
2969     DCHECK(page->is_valid());
2970     if (page->Contains(a)) {
2971       return page;
2972     }
2973   }
2974   return NULL;
2975 }
2976
2977
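// Sweeps the large object space: unmarked pages are unlinked from the page
// list, their entries are removed from chunk_map_, and the memory is released
// (pages that may contain pointers are queued and freed via FreeQueuedChunks).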
2978 void LargeObjectSpace::FreeUnmarkedObjects() {
2979   LargePage* previous = NULL;
2980   LargePage* current = first_page_;
2981   while (current != NULL) {
2982     HeapObject* object = current->GetObject();
2983     // Can this large page contain pointers to non-trivial objects?  No other
2984     // pointer object is this big.
2985     bool is_pointer_object = object->IsFixedArray();
2986     MarkBit mark_bit = Marking::MarkBitFrom(object);
2987     if (mark_bit.Get()) {
2988       mark_bit.Clear();
2989       Page::FromAddress(object->address())->ResetProgressBar();
2990       Page::FromAddress(object->address())->ResetLiveBytes();
2991       previous = current;
2992       current = current->next_page();
2993     } else {
2994       LargePage* page = current;
2995       // Cut the chunk out from the chunk list.
2996       current = current->next_page();
2997       if (previous == NULL) {
2998         first_page_ = current;
2999       } else {
3000         previous->set_next_page(current);
3001       }
3002
3003       // Free the chunk.
3004       heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
3005                                                              heap()->isolate());
3006       size_ -= static_cast<int>(page->size());
3007       objects_size_ -= object->Size();
3008       page_count_--;
3009
3010       // Remove entries belonging to this page.
3011       // Use variable alignment to help pass length check (<= 80 characters)
3012       // of single line in tools/presubmit.py.
3013       const intptr_t alignment = MemoryChunk::kAlignment;
3014       uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
3015       uintptr_t limit = base + (page->size() - 1) / alignment;
3016       for (uintptr_t key = base; key <= limit; key++) {
3017         chunk_map_.Remove(reinterpret_cast<void*>(key),
3018                           static_cast<uint32_t>(key));
3019       }
3020
3021       if (is_pointer_object) {
3022         heap()->QueueMemoryChunkForFree(page);
3023       } else {
3024         heap()->isolate()->memory_allocator()->Free(page);
3025       }
3026     }
3027   }
3028   heap()->FreeQueuedChunks();
3029 }
3030
3031
3032 bool LargeObjectSpace::Contains(HeapObject* object) {
3033   Address address = object->address();
3034   MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3035
3036   bool owned = (chunk->owner() == this);
3037
3038   SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
3039
3040   return owned;
3041 }
3042
3043
3044 #ifdef VERIFY_HEAP
3045 // We do not assume that the large object iterator works, because it depends
3046 // on the invariants we are checking during verification.
3047 void LargeObjectSpace::Verify() {
3048   for (LargePage* chunk = first_page_; chunk != NULL;
3049        chunk = chunk->next_page()) {
3050     // Each chunk contains an object that starts at the large object page's
3051     // object area start.
3052     HeapObject* object = chunk->GetObject();
3053     Page* page = Page::FromAddress(object->address());
3054     CHECK(object->address() == page->area_start());
3055
3056     // The first word should be a map, and we expect all map pointers to be
3057     // in map space.
3058     Map* map = object->map();
3059     CHECK(map->IsMap());
3060     CHECK(heap()->map_space()->Contains(map));
3061
3062     // We have only code, sequential strings, external strings
3063     // (sequential strings that have been morphed into external
3064     // strings), fixed arrays, fixed double arrays, byte arrays, and
3065     // constant pool arrays in the large object space.
3066     CHECK(object->IsCode() || object->IsSeqString() ||
3067           object->IsExternalString() || object->IsFixedArray() ||
3068           object->IsFixedDoubleArray() || object->IsByteArray() ||
3069           object->IsConstantPoolArray());
3070
3071     // The object itself should look OK.
3072     object->ObjectVerify();
3073
3074     // Byte arrays and strings don't have interior pointers.
3075     if (object->IsCode()) {
3076       VerifyPointersVisitor code_visitor;
3077       object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
3078     } else if (object->IsFixedArray()) {
3079       FixedArray* array = FixedArray::cast(object);
3080       for (int j = 0; j < array->length(); j++) {
3081         Object* element = array->get(j);
3082         if (element->IsHeapObject()) {
3083           HeapObject* element_object = HeapObject::cast(element);
3084           CHECK(heap()->Contains(element_object));
3085           CHECK(element_object->map()->IsMap());
3086         }
3087       }
3088     }
3089   }
3090 }
3091 #endif
3092
3093
3094 #ifdef DEBUG
3095 void LargeObjectSpace::Print() {
3096   OFStream os(stdout);
3097   LargeObjectIterator it(this);
3098   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3099     obj->Print(os);
3100   }
3101 }
3102
3103
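     // Prints the total size of the space and a per-type histogram of the
     // objects it currently holds.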
3104 void LargeObjectSpace::ReportStatistics() {
3105   PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
3106   int num_objects = 0;
3107   ClearHistograms(heap()->isolate());
3108   LargeObjectIterator it(this);
3109   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3110     num_objects++;
3111     CollectHistogramInfo(obj);
3112   }
3113
3114   PrintF(
3115       "  number of objects %d, "
3116       "size of objects %" V8_PTR_PREFIX "d\n",
3117       num_objects, objects_size_);
3118   if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3119 }
3120
3121
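     // Adds the size of every large Code object to the isolate's per-kind
     // code statistics.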
3122 void LargeObjectSpace::CollectCodeStatistics() {
3123   Isolate* isolate = heap()->isolate();
3124   LargeObjectIterator obj_it(this);
3125   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
3126     if (obj->IsCode()) {
3127       Code* code = Code::cast(obj);
3128       isolate->code_kind_statistics()[code->kind()] += code->Size();
3129     }
3130   }
3131 }
3132
3133
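     // Dumps every object on the page, prefixing marked objects with '!', and
     // then prints the accumulated marked size next to the page's live bytes.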
3134 void Page::Print() {
3135   // Make a best-effort attempt to print the objects in the page.
3136   PrintF("Page@%p in %s\n", this->address(),
3137          AllocationSpaceName(this->owner()->identity()));
3138   printf(" --------------------------------------\n");
3139   HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
3140   unsigned mark_size = 0;
3141   for (HeapObject* object = objects.Next(); object != NULL;
3142        object = objects.Next()) {
3143     bool is_marked = Marking::MarkBitFrom(object).Get();
3144     PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
3145     if (is_marked) {
3146       mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
3147     }
3148     object->ShortPrint();
3149     PrintF("\n");
3150   }
3151   printf(" --------------------------------------\n");
3152   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3153 }
3154
3155 #endif  // DEBUG
3156 }  // namespace internal
3157 }  // namespace v8