1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #include "src/base/platform/platform.h"
8 #include "src/full-codegen.h"
9 #include "src/heap/mark-compact.h"
10 #include "src/macro-assembler.h"
11 #include "src/msan.h"
12
13 namespace v8 {
14 namespace internal {
15
16
17 // ----------------------------------------------------------------------------
18 // HeapObjectIterator
19
20 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
21   // You can't actually iterate over the anchor page.  It is not a real page,
22   // just an anchor for the doubly-linked page list.  Initialize as if we have
23   // reached the end of the anchor page, then the first iteration will move on
24   // to the first page.
25   Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
26 }
27
28
29 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
30                                        HeapObjectCallback size_func) {
31   // You can't actually iterate over the anchor page.  It is not a real page,
32   // just an anchor for the doubly-linked page list.  Initialize the current
33   // address and end as NULL, then the first iteration will move on
34   // to the first page.
35   Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
36 }
37
38
39 HeapObjectIterator::HeapObjectIterator(Page* page,
40                                        HeapObjectCallback size_func) {
41   Space* owner = page->owner();
42   DCHECK(owner == page->heap()->old_pointer_space() ||
43          owner == page->heap()->old_data_space() ||
44          owner == page->heap()->map_space() ||
45          owner == page->heap()->cell_space() ||
46          owner == page->heap()->property_cell_space() ||
47          owner == page->heap()->code_space());
48   Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
49              page->area_end(), kOnePageOnly, size_func);
50   DCHECK(page->WasSweptPrecisely() ||
51          (static_cast<PagedSpace*>(owner)->swept_precisely() &&
52           page->SweepingCompleted()));
53 }
54
55
56 void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
57                                     HeapObjectIterator::PageMode mode,
58                                     HeapObjectCallback size_f) {
59   // Check that we actually can iterate this space.
60   DCHECK(space->swept_precisely());
61
62   space_ = space;
63   cur_addr_ = cur;
64   cur_end_ = end;
65   page_mode_ = mode;
66   size_func_ = size_f;
67 }
68
69
70 // We have hit the end of the current page's object area and should advance
71 // to the next page's block of objects.
72 bool HeapObjectIterator::AdvanceToNextPage() {
73   DCHECK(cur_addr_ == cur_end_);
74   if (page_mode_ == kOnePageOnly) return false;
75   Page* cur_page;
76   if (cur_addr_ == NULL) {
77     cur_page = space_->anchor();
78   } else {
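    // cur_addr_ is the finished page's area_end(), which can fall exactly on
    // a chunk-alignment boundary; stepping back one byte makes FromAddress()
    // map it to the page we just finished rather than to the next one.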
79     cur_page = Page::FromAddress(cur_addr_ - 1);
80     DCHECK(cur_addr_ == cur_page->area_end());
81   }
82   cur_page = cur_page->next_page();
83   if (cur_page == space_->anchor()) return false;
84   cur_addr_ = cur_page->area_start();
85   cur_end_ = cur_page->area_end();
86   DCHECK(cur_page->WasSweptPrecisely() ||
87          (static_cast<PagedSpace*>(cur_page->owner())->swept_precisely() &&
88           cur_page->SweepingCompleted()));
89   return true;
90 }
91
92
93 // -----------------------------------------------------------------------------
94 // CodeRange
95
96
97 CodeRange::CodeRange(Isolate* isolate)
98     : isolate_(isolate),
99       code_range_(NULL),
100       free_list_(0),
101       allocation_list_(0),
102       current_allocation_block_index_(0) {}
103
104
105 bool CodeRange::SetUp(size_t requested) {
106   DCHECK(code_range_ == NULL);
107
108   if (requested == 0) {
109     // When a target requires the code range feature, we put all code objects
110     // in a kMaximalCodeRangeSize range of virtual address space, so that
111     // they can call each other with near calls.
112     if (kRequiresCodeRange) {
113       requested = kMaximalCodeRangeSize;
114     } else {
115       return true;
116     }
117   }
118
119   DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
120   code_range_ = new base::VirtualMemory(requested);
121   CHECK(code_range_ != NULL);
122   if (!code_range_->IsReserved()) {
123     delete code_range_;
124     code_range_ = NULL;
125     return false;
126   }
127
128   // We are sure that we have mapped a block of requested addresses.
129   DCHECK(code_range_->size() == requested);
130   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
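  // The OS reservation is not necessarily aligned to MemoryChunk::kAlignment,
  // so round the base up and give up the slack (at most kAlignment - 1 bytes)
  // at the front of the range.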
131   Address base = reinterpret_cast<Address>(code_range_->address());
132   Address aligned_base =
133       RoundUp(reinterpret_cast<Address>(code_range_->address()),
134               MemoryChunk::kAlignment);
135   size_t size = code_range_->size() - (aligned_base - base);
136   allocation_list_.Add(FreeBlock(aligned_base, size));
137   current_allocation_block_index_ = 0;
138   return true;
139 }
140
141
142 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
143                                        const FreeBlock* right) {
144   // The entire point of CodeRange is that the difference between two
145   // addresses in the range can be represented as a signed 32-bit int,
146   // so the cast is semantically correct.
147   return static_cast<int>(left->start - right->start);
148 }
149
150
151 bool CodeRange::GetNextAllocationBlock(size_t requested) {
152   for (current_allocation_block_index_++;
153        current_allocation_block_index_ < allocation_list_.length();
154        current_allocation_block_index_++) {
155     if (requested <= allocation_list_[current_allocation_block_index_].size) {
156       return true;  // Found a large enough allocation block.
157     }
158   }
159
160   // Sort and merge the free blocks on the free list and the allocation list.
161   free_list_.AddAll(allocation_list_);
162   allocation_list_.Clear();
163   free_list_.Sort(&CompareFreeBlockAddress);
164   for (int i = 0; i < free_list_.length();) {
165     FreeBlock merged = free_list_[i];
166     i++;
167     // Add adjacent free blocks to the current merged block.
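    // For example, blocks {start = A, size = S1} and {start = A + S1,
    // size = S2} are coalesced into one block {start = A, size = S1 + S2}.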
168     while (i < free_list_.length() &&
169            free_list_[i].start == merged.start + merged.size) {
170       merged.size += free_list_[i].size;
171       i++;
172     }
173     if (merged.size > 0) {
174       allocation_list_.Add(merged);
175     }
176   }
177   free_list_.Clear();
178
179   for (current_allocation_block_index_ = 0;
180        current_allocation_block_index_ < allocation_list_.length();
181        current_allocation_block_index_++) {
182     if (requested <= allocation_list_[current_allocation_block_index_].size) {
183       return true;  // Found a large enough allocation block.
184     }
185   }
186   current_allocation_block_index_ = 0;
187   // Code range is full or too fragmented.
188   return false;
189 }
190
191
192 Address CodeRange::AllocateRawMemory(const size_t requested_size,
193                                      const size_t commit_size,
194                                      size_t* allocated) {
195   DCHECK(commit_size <= requested_size);
196   DCHECK(allocation_list_.length() == 0 ||
197          current_allocation_block_index_ < allocation_list_.length());
198   if (allocation_list_.length() == 0 ||
199       requested_size > allocation_list_[current_allocation_block_index_].size) {
200     // Find an allocation block large enough.
201     if (!GetNextAllocationBlock(requested_size)) return NULL;
202   }
203   // Commit the requested memory at the start of the current allocation block.
204   size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
205   FreeBlock current = allocation_list_[current_allocation_block_index_];
206   if (aligned_requested >= (current.size - Page::kPageSize)) {
207     // Don't leave a small free block, useless for a large object or chunk.
208     *allocated = current.size;
209   } else {
210     *allocated = aligned_requested;
211   }
212   DCHECK(*allocated <= current.size);
213   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
214   if (!isolate_->memory_allocator()->CommitExecutableMemory(
215           code_range_, current.start, commit_size, *allocated)) {
216     *allocated = 0;
217     return NULL;
218   }
219   allocation_list_[current_allocation_block_index_].start += *allocated;
220   allocation_list_[current_allocation_block_index_].size -= *allocated;
221   if (*allocated == current.size) {
222     // This block is used up, get the next one.
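    // If no block is available, GetNextAllocationBlock() merely resets the
    // index; the next call to AllocateRawMemory() will search again.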
223     GetNextAllocationBlock(0);
224   }
225   return current.start;
226 }
227
228
229 bool CodeRange::CommitRawMemory(Address start, size_t length) {
230   return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
231 }
232
233
234 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
235   return code_range_->Uncommit(start, length);
236 }
237
238
239 void CodeRange::FreeRawMemory(Address address, size_t length) {
240   DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
241   free_list_.Add(FreeBlock(address, length));
242   code_range_->Uncommit(address, length);
243 }
244
245
246 void CodeRange::TearDown() {
247   delete code_range_;  // Frees all memory in the virtual memory range.
248   code_range_ = NULL;
249   free_list_.Free();
250   allocation_list_.Free();
251 }
252
253
254 // -----------------------------------------------------------------------------
255 // MemoryAllocator
256 //
257
258 MemoryAllocator::MemoryAllocator(Isolate* isolate)
259     : isolate_(isolate),
260       capacity_(0),
261       capacity_executable_(0),
262       size_(0),
263       size_executable_(0),
264       lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
265       highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
266
267
268 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
269   capacity_ = RoundUp(capacity, Page::kPageSize);
270   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
271   DCHECK_GE(capacity_, capacity_executable_);
272
273   size_ = 0;
274   size_executable_ = 0;
275
276   return true;
277 }
278
279
280 void MemoryAllocator::TearDown() {
281   // Check that spaces were torn down before MemoryAllocator.
282   DCHECK(size_ == 0);
283   // TODO(gc) this will be true again when we fix FreeMemory.
284   // DCHECK(size_executable_ == 0);
285   capacity_ = 0;
286   capacity_executable_ = 0;
287 }
288
289
290 bool MemoryAllocator::CommitMemory(Address base, size_t size,
291                                    Executability executable) {
292   if (!base::VirtualMemory::CommitRegion(base, size,
293                                          executable == EXECUTABLE)) {
294     return false;
295   }
296   UpdateAllocatedSpaceLimits(base, base + size);
297   return true;
298 }
299
300
301 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
302                                  Executability executable) {
303   // TODO(gc) make code_range part of memory allocator?
304   DCHECK(reservation->IsReserved());
305   size_t size = reservation->size();
306   DCHECK(size_ >= size);
307   size_ -= size;
308
309   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
310
311   if (executable == EXECUTABLE) {
312     DCHECK(size_executable_ >= size);
313     size_executable_ -= size;
314   }
315   // Code which is part of the code-range does not have its own VirtualMemory.
316   DCHECK(isolate_->code_range() == NULL ||
317          !isolate_->code_range()->contains(
318              static_cast<Address>(reservation->address())));
319   DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
320          !isolate_->code_range()->valid());
321   reservation->Release();
322 }
323
324
325 void MemoryAllocator::FreeMemory(Address base, size_t size,
326                                  Executability executable) {
327   // TODO(gc) make code_range part of memory allocator?
328   DCHECK(size_ >= size);
329   size_ -= size;
330
331   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
332
333   if (executable == EXECUTABLE) {
334     DCHECK(size_executable_ >= size);
335     size_executable_ -= size;
336   }
337   if (isolate_->code_range() != NULL &&
338       isolate_->code_range()->contains(static_cast<Address>(base))) {
339     DCHECK(executable == EXECUTABLE);
340     isolate_->code_range()->FreeRawMemory(base, size);
341   } else {
342     DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
343            !isolate_->code_range()->valid());
344     bool result = base::VirtualMemory::ReleaseRegion(base, size);
345     USE(result);
346     DCHECK(result);
347   }
348 }
349
350
351 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
352                                               base::VirtualMemory* controller) {
353   base::VirtualMemory reservation(size, alignment);
354
355   if (!reservation.IsReserved()) return NULL;
356   size_ += reservation.size();
357   Address base =
358       RoundUp(static_cast<Address>(reservation.address()), alignment);
359   controller->TakeControl(&reservation);
360   return base;
361 }
362
363
364 Address MemoryAllocator::AllocateAlignedMemory(
365     size_t reserve_size, size_t commit_size, size_t alignment,
366     Executability executable, base::VirtualMemory* controller) {
367   DCHECK(commit_size <= reserve_size);
368   base::VirtualMemory reservation;
369   Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
370   if (base == NULL) return NULL;
371
372   if (executable == EXECUTABLE) {
373     if (!CommitExecutableMemory(&reservation, base, commit_size,
374                                 reserve_size)) {
375       base = NULL;
376     }
377   } else {
378     if (reservation.Commit(base, commit_size, false)) {
379       UpdateAllocatedSpaceLimits(base, base + commit_size);
380     } else {
381       base = NULL;
382     }
383   }
384
385   if (base == NULL) {
386     // Failed to commit the body. Release the mapping and any partially
387     // committed regions inside it.
388     reservation.Release();
389     return NULL;
390   }
391
392   controller->TakeControl(&reservation);
393   return base;
394 }
395
396
397 void Page::InitializeAsAnchor(PagedSpace* owner) {
398   set_owner(owner);
399   set_prev_page(this);
400   set_next_page(this);
401 }
402
403
404 NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
405                                        SemiSpace* semi_space) {
406   Address area_start = start + NewSpacePage::kObjectStartOffset;
407   Address area_end = start + Page::kPageSize;
408
409   MemoryChunk* chunk =
410       MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
411                               area_end, NOT_EXECUTABLE, semi_space);
412   chunk->set_next_chunk(NULL);
413   chunk->set_prev_chunk(NULL);
414   chunk->initialize_scan_on_scavenge(true);
415   bool in_to_space = (semi_space->id() != kFromSpace);
416   chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
417                              : MemoryChunk::IN_FROM_SPACE);
418   DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
419                                        : MemoryChunk::IN_TO_SPACE));
420   NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
421   heap->incremental_marking()->SetNewSpacePageFlags(page);
422   return page;
423 }
424
425
426 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
427   set_owner(semi_space);
428   set_next_chunk(this);
429   set_prev_chunk(this);
430   // Clearing all flags marks this invalid anchor page as not being in
431   // new-space.  All real new-space pages will be in new-space.
432   SetFlags(0, ~0);
433 }
434
435
436 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
437                                      Address area_start, Address area_end,
438                                      Executability executable, Space* owner) {
439   MemoryChunk* chunk = FromAddress(base);
440
441   DCHECK(base == chunk->address());
442
443   chunk->heap_ = heap;
444   chunk->size_ = size;
445   chunk->area_start_ = area_start;
446   chunk->area_end_ = area_end;
447   chunk->flags_ = 0;
448   chunk->set_owner(owner);
449   chunk->InitializeReservedMemory();
450   chunk->slots_buffer_ = NULL;
451   chunk->skip_list_ = NULL;
452   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
453   chunk->progress_bar_ = 0;
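  // high_water_mark_ is an offset from the chunk base; it starts at the
  // object area and is raised by UpdateHighWaterMark() as allocation
  // proceeds (see e.g. PagedSpace::CommittedPhysicalMemory below).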
454   chunk->high_water_mark_ = static_cast<int>(area_start - base);
455   chunk->set_parallel_sweeping(SWEEPING_DONE);
456   chunk->available_in_small_free_list_ = 0;
457   chunk->available_in_medium_free_list_ = 0;
458   chunk->available_in_large_free_list_ = 0;
459   chunk->available_in_huge_free_list_ = 0;
460   chunk->non_available_small_blocks_ = 0;
461   chunk->ResetLiveBytes();
462   Bitmap::Clear(chunk);
463   chunk->initialize_scan_on_scavenge(false);
464   chunk->SetFlag(WAS_SWEPT_PRECISELY);
465
466   DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
467   DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
468
469   if (executable == EXECUTABLE) {
470     chunk->SetFlag(IS_EXECUTABLE);
471   }
472
473   if (owner == heap->old_data_space()) {
474     chunk->SetFlag(CONTAINS_ONLY_DATA);
475   }
476
477   return chunk;
478 }
479
480
481 // Commit MemoryChunk area to the requested size.
482 bool MemoryChunk::CommitArea(size_t requested) {
483   size_t guard_size =
484       IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
485   size_t header_size = area_start() - address() - guard_size;
486   size_t commit_size =
487       RoundUp(header_size + requested, base::OS::CommitPageSize());
488   size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
489                                   base::OS::CommitPageSize());
490
491   if (commit_size > committed_size) {
492     // Commit size should be less than or equal to the reserved size.
493     DCHECK(commit_size <= size() - 2 * guard_size);
494     // Append the committed area.
495     Address start = address() + committed_size + guard_size;
496     size_t length = commit_size - committed_size;
497     if (reservation_.IsReserved()) {
498       Executability executable =
499           IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
500       if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
501                                                                executable)) {
502         return false;
503       }
504     } else {
505       CodeRange* code_range = heap_->isolate()->code_range();
506       DCHECK(code_range != NULL && code_range->valid() &&
507              IsFlagSet(IS_EXECUTABLE));
508       if (!code_range->CommitRawMemory(start, length)) return false;
509     }
510
511     if (Heap::ShouldZapGarbage()) {
512       heap_->isolate()->memory_allocator()->ZapBlock(start, length);
513     }
514   } else if (commit_size < committed_size) {
515     DCHECK(commit_size > 0);
516     // Shrink the committed area.
517     size_t length = committed_size - commit_size;
518     Address start = address() + committed_size + guard_size - length;
519     if (reservation_.IsReserved()) {
520       if (!reservation_.Uncommit(start, length)) return false;
521     } else {
522       CodeRange* code_range = heap_->isolate()->code_range();
523       DCHECK(code_range != NULL && code_range->valid() &&
524              IsFlagSet(IS_EXECUTABLE));
525       if (!code_range->UncommitRawMemory(start, length)) return false;
526     }
527   }
528
529   area_end_ = area_start_ + requested;
530   return true;
531 }
532
533
534 void MemoryChunk::InsertAfter(MemoryChunk* other) {
535   MemoryChunk* other_next = other->next_chunk();
536
537   set_next_chunk(other_next);
538   set_prev_chunk(other);
539   other_next->set_prev_chunk(this);
540   other->set_next_chunk(this);
541 }
542
543
544 void MemoryChunk::Unlink() {
545   MemoryChunk* next_element = next_chunk();
546   MemoryChunk* prev_element = prev_chunk();
547   next_element->set_prev_chunk(prev_element);
548   prev_element->set_next_chunk(next_element);
549   set_prev_chunk(NULL);
550   set_next_chunk(NULL);
551 }
552
553
554 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
555                                             intptr_t commit_area_size,
556                                             Executability executable,
557                                             Space* owner) {
558   DCHECK(commit_area_size <= reserve_area_size);
559
560   size_t chunk_size;
561   Heap* heap = isolate_->heap();
562   Address base = NULL;
563   base::VirtualMemory reservation;
564   Address area_start = NULL;
565   Address area_end = NULL;
566
567   //
568   // MemoryChunk layout:
569   //
570   //             Executable
571   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
572   // |           Header           |
573   // +----------------------------+<- base + CodePageGuardStartOffset
574   // |           Guard            |
575   // +----------------------------+<- area_start_
576   // |           Area             |
577   // +----------------------------+<- area_end_ (area_start + commit_area_size)
578   // |   Committed but not used   |
579   // +----------------------------+<- aligned at OS page boundary
580   // | Reserved but not committed |
581   // +----------------------------+<- aligned at OS page boundary
582   // |           Guard            |
583   // +----------------------------+<- base + chunk_size
584   //
585   //           Non-executable
586   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
587   // |          Header            |
588   // +----------------------------+<- area_start_ (base + kObjectStartOffset)
589   // |           Area             |
590   // +----------------------------+<- area_end_ (area_start + commit_area_size)
591   // |  Committed but not used    |
592   // +----------------------------+<- aligned at OS page boundary
593   // | Reserved but not committed |
594   // +----------------------------+<- base + chunk_size
595   //
596
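  // For executable chunks, chunk_size below therefore covers the header, the
  // leading guard page, the reserved area rounded up to the OS commit page
  // size, and the trailing guard page.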
597   if (executable == EXECUTABLE) {
598     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
599                          base::OS::CommitPageSize()) +
600                  CodePageGuardSize();
601
602     // Check executable memory limit.
603     if (size_executable_ + chunk_size > capacity_executable_) {
604       LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
605                                 "V8 Executable Allocation capacity exceeded"));
606       return NULL;
607     }
608
609     // Size of header (not executable) plus area (executable).
610     size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
611                                  base::OS::CommitPageSize());
612     // Allocate executable memory either from code range or from the
613     // OS.
614     if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
615       base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
616                                                        &chunk_size);
617       DCHECK(
618           IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
619       if (base == NULL) return NULL;
620       size_ += chunk_size;
621       // Update executable memory size.
622       size_executable_ += chunk_size;
623     } else {
624       base = AllocateAlignedMemory(chunk_size, commit_size,
625                                    MemoryChunk::kAlignment, executable,
626                                    &reservation);
627       if (base == NULL) return NULL;
628       // Update executable memory size.
629       size_executable_ += reservation.size();
630     }
631
632     if (Heap::ShouldZapGarbage()) {
633       ZapBlock(base, CodePageGuardStartOffset());
634       ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
635     }
636
637     area_start = base + CodePageAreaStartOffset();
638     area_end = area_start + commit_area_size;
639   } else {
640     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
641                          base::OS::CommitPageSize());
642     size_t commit_size =
643         RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
644                 base::OS::CommitPageSize());
645     base =
646         AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
647                               executable, &reservation);
648
649     if (base == NULL) return NULL;
650
651     if (Heap::ShouldZapGarbage()) {
652       ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
653     }
654
655     area_start = base + Page::kObjectStartOffset;
656     area_end = area_start + commit_area_size;
657   }
658
659   // Use chunk_size for statistics and callbacks because we assume that they
660   // treat reserved but not-yet committed memory regions of chunks as allocated.
661   isolate_->counters()->memory_allocated()->Increment(
662       static_cast<int>(chunk_size));
663
664   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
665   if (owner != NULL) {
666     ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
667     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
668   }
669
670   MemoryChunk* result = MemoryChunk::Initialize(
671       heap, base, chunk_size, area_start, area_end, executable, owner);
672   result->set_reserved_memory(&reservation);
673   MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size);
674   return result;
675 }
676
677
678 void Page::ResetFreeListStatistics() {
679   non_available_small_blocks_ = 0;
680   available_in_small_free_list_ = 0;
681   available_in_medium_free_list_ = 0;
682   available_in_large_free_list_ = 0;
683   available_in_huge_free_list_ = 0;
684 }
685
686
687 Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
688                                     Executability executable) {
689   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
690
691   if (chunk == NULL) return NULL;
692
693   return Page::Initialize(isolate_->heap(), chunk, executable, owner);
694 }
695
696
697 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
698                                               Space* owner,
699                                               Executability executable) {
700   MemoryChunk* chunk =
701       AllocateChunk(object_size, object_size, executable, owner);
702   if (chunk == NULL) return NULL;
703   return LargePage::Initialize(isolate_->heap(), chunk);
704 }
705
706
707 void MemoryAllocator::Free(MemoryChunk* chunk) {
708   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
709   if (chunk->owner() != NULL) {
710     ObjectSpace space =
711         static_cast<ObjectSpace>(1 << chunk->owner()->identity());
712     PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
713   }
714
715   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
716                                          chunk->IsEvacuationCandidate());
717
718   delete chunk->slots_buffer();
719   delete chunk->skip_list();
720
721   base::VirtualMemory* reservation = chunk->reserved_memory();
722   if (reservation->IsReserved()) {
723     FreeMemory(reservation, chunk->executable());
724   } else {
725     FreeMemory(chunk->address(), chunk->size(), chunk->executable());
726   }
727 }
728
729
730 bool MemoryAllocator::CommitBlock(Address start, size_t size,
731                                   Executability executable) {
732   if (!CommitMemory(start, size, executable)) return false;
733
734   if (Heap::ShouldZapGarbage()) {
735     ZapBlock(start, size);
736   }
737
738   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
739   return true;
740 }
741
742
743 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
744   if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
745   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
746   return true;
747 }
748
749
750 void MemoryAllocator::ZapBlock(Address start, size_t size) {
751   for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
752     Memory::Address_at(start + s) = kZapValue;
753   }
754 }
755
756
757 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
758                                                 AllocationAction action,
759                                                 size_t size) {
760   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
761     MemoryAllocationCallbackRegistration registration =
762         memory_allocation_callbacks_[i];
763     if ((registration.space & space) == space &&
764         (registration.action & action) == action)
765       registration.callback(space, action, static_cast<int>(size));
766   }
767 }
768
769
770 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
771     MemoryAllocationCallback callback) {
772   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
773     if (memory_allocation_callbacks_[i].callback == callback) return true;
774   }
775   return false;
776 }
777
778
779 void MemoryAllocator::AddMemoryAllocationCallback(
780     MemoryAllocationCallback callback, ObjectSpace space,
781     AllocationAction action) {
782   DCHECK(callback != NULL);
783   MemoryAllocationCallbackRegistration registration(callback, space, action);
784   DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
785   return memory_allocation_callbacks_.Add(registration);
786 }
787
788
789 void MemoryAllocator::RemoveMemoryAllocationCallback(
790     MemoryAllocationCallback callback) {
791   DCHECK(callback != NULL);
792   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
793     if (memory_allocation_callbacks_[i].callback == callback) {
794       memory_allocation_callbacks_.Remove(i);
795       return;
796     }
797   }
798   UNREACHABLE();
799 }
800
801
802 #ifdef DEBUG
803 void MemoryAllocator::ReportStatistics() {
804   float pct = static_cast<float>(capacity_ - size_) / capacity_;
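  // Note: "%%%d" in the format string prints a literal '%' followed by the
  // availability percentage.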
805   PrintF("  capacity: %" V8_PTR_PREFIX
806          "d"
807          ", used: %" V8_PTR_PREFIX
808          "d"
809          ", available: %%%d\n\n",
810          capacity_, size_, static_cast<int>(pct * 100));
811 }
812 #endif
813
814
815 int MemoryAllocator::CodePageGuardStartOffset() {
816   // We are guarding code pages: the first OS page after the header
817   // will be protected as non-writable.
818   return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
819 }
820
821
822 int MemoryAllocator::CodePageGuardSize() {
823   return static_cast<int>(base::OS::CommitPageSize());
824 }
825
826
827 int MemoryAllocator::CodePageAreaStartOffset() {
828   // We are guarding code pages: the first OS page after the header
829   // will be protected as non-writable.
830   return CodePageGuardStartOffset() + CodePageGuardSize();
831 }
832
833
834 int MemoryAllocator::CodePageAreaEndOffset() {
835   // We are guarding code pages: the last OS page will be protected as
836   // non-writable.
837   return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
838 }
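
// Worked example (a sketch, assuming a 4 KB OS commit page size and a chunk
// header smaller than one OS page): CodePageGuardStartOffset() rounds up to
// 4096, CodePageGuardSize() is 4096, CodePageAreaStartOffset() is 8192, and
// CodePageAreaEndOffset() is Page::kPageSize - 4096.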
839
840
841 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
842                                              Address start, size_t commit_size,
843                                              size_t reserved_size) {
844   // Commit page header (not executable).
845   if (!vm->Commit(start, CodePageGuardStartOffset(), false)) {
846     return false;
847   }
848
849   // Create guard page after the header.
850   if (!vm->Guard(start + CodePageGuardStartOffset())) {
851     return false;
852   }
853
854   // Commit page body (executable).
855   if (!vm->Commit(start + CodePageAreaStartOffset(),
856                   commit_size - CodePageGuardStartOffset(), true)) {
857     return false;
858   }
859
860   // Create guard page before the end.
861   if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
862     return false;
863   }
864
865   UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
866                                         commit_size -
867                                         CodePageGuardStartOffset());
868   return true;
869 }
870
871
872 // -----------------------------------------------------------------------------
873 // MemoryChunk implementation
874
875 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
876   MemoryChunk* chunk = MemoryChunk::FromAddress(address);
877   if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
878     static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
879   }
880   chunk->IncrementLiveBytes(by);
881 }
882
883
884 // -----------------------------------------------------------------------------
885 // PagedSpace implementation
886
887 PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
888                        Executability executable)
889     : Space(heap, id, executable),
890       free_list_(this),
891       swept_precisely_(true),
892       unswept_free_bytes_(0),
893       end_of_unswept_pages_(NULL),
894       emergency_memory_(NULL) {
895   if (id == CODE_SPACE) {
896     area_size_ = heap->isolate()->memory_allocator()->CodePageAreaSize();
897   } else {
898     area_size_ = Page::kPageSize - Page::kObjectStartOffset;
899   }
900   max_capacity_ =
901       (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
902   accounting_stats_.Clear();
903
904   allocation_info_.set_top(NULL);
905   allocation_info_.set_limit(NULL);
906
907   anchor_.InitializeAsAnchor(this);
908 }
909
910
911 bool PagedSpace::SetUp() { return true; }
912
913
914 bool PagedSpace::HasBeenSetUp() { return true; }
915
916
917 void PagedSpace::TearDown() {
918   PageIterator iterator(this);
919   while (iterator.has_next()) {
920     heap()->isolate()->memory_allocator()->Free(iterator.next());
921   }
922   anchor_.set_next_page(&anchor_);
923   anchor_.set_prev_page(&anchor_);
924   accounting_stats_.Clear();
925 }
926
927
928 size_t PagedSpace::CommittedPhysicalMemory() {
929   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
930   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
931   size_t size = 0;
932   PageIterator it(this);
933   while (it.has_next()) {
934     size += it.next()->CommittedPhysicalMemory();
935   }
936   return size;
937 }
938
939
940 Object* PagedSpace::FindObject(Address addr) {
941   // Note: this function can only be called on precisely swept spaces.
942   DCHECK(!heap()->mark_compact_collector()->in_use());
943
944   if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.
945
946   Page* p = Page::FromAddress(addr);
947   HeapObjectIterator it(p, NULL);
948   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
949     Address cur = obj->address();
950     Address next = cur + obj->Size();
951     if ((cur <= addr) && (addr < next)) return obj;
952   }
953
954   UNREACHABLE();
955   return Smi::FromInt(0);
956 }
957
958
959 bool PagedSpace::CanExpand() {
960   DCHECK(max_capacity_ % AreaSize() == 0);
961
962   if (Capacity() == max_capacity_) return false;
963
964   DCHECK(Capacity() < max_capacity_);
965
966   // Are we going to exceed capacity for this space?
967   if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
968
969   return true;
970 }
971
972
973 bool PagedSpace::Expand() {
974   if (!CanExpand()) return false;
975
976   intptr_t size = AreaSize();
977
978   if (anchor_.next_page() == &anchor_) {
979     size = SizeOfFirstPage();
980   }
981
982   Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
983                                                                 executable());
984   if (p == NULL) return false;
985
986   DCHECK(Capacity() <= max_capacity_);
987
988   p->InsertAfter(anchor_.prev_page());
989
990   return true;
991 }
992
993
994 intptr_t PagedSpace::SizeOfFirstPage() {
995   int size = 0;
996   switch (identity()) {
997     case OLD_POINTER_SPACE:
998       size = 112 * kPointerSize * KB;
999       break;
1000     case OLD_DATA_SPACE:
1001       size = 192 * KB;
1002       break;
1003     case MAP_SPACE:
1004       size = 16 * kPointerSize * KB;
1005       break;
1006     case CELL_SPACE:
1007       size = 16 * kPointerSize * KB;
1008       break;
1009     case PROPERTY_CELL_SPACE:
1010       size = 8 * kPointerSize * KB;
1011       break;
1012     case CODE_SPACE: {
1013       CodeRange* code_range = heap()->isolate()->code_range();
1014       if (code_range != NULL && code_range->valid()) {
1015         // When code range exists, code pages are allocated in a special way
1016         // (from the reserved code range). That part of the code is not yet
1017         // upgraded to handle small pages.
1018         size = AreaSize();
1019       } else {
1020         size =
1021             RoundUp(480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100,
1022                     kPointerSize);
1023       }
1024       break;
1025     }
1026     default:
1027       UNREACHABLE();
1028   }
1029   return Min(size, AreaSize());
1030 }
1031
1032
1033 int PagedSpace::CountTotalPages() {
1034   PageIterator it(this);
1035   int count = 0;
1036   while (it.has_next()) {
1037     it.next();
1038     count++;
1039   }
1040   return count;
1041 }
1042
1043
1044 void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
1045   sizes->huge_size_ = page->available_in_huge_free_list();
1046   sizes->small_size_ = page->available_in_small_free_list();
1047   sizes->medium_size_ = page->available_in_medium_free_list();
1048   sizes->large_size_ = page->available_in_large_free_list();
1049 }
1050
1051
1052 void PagedSpace::ResetFreeListStatistics() {
1053   PageIterator page_iterator(this);
1054   while (page_iterator.has_next()) {
1055     Page* page = page_iterator.next();
1056     page->ResetFreeListStatistics();
1057   }
1058 }
1059
1060
1061 void PagedSpace::IncreaseCapacity(int size) {
1062   accounting_stats_.ExpandSpace(size);
1063 }
1064
1065
1066 void PagedSpace::ReleasePage(Page* page) {
1067   DCHECK(page->LiveBytes() == 0);
1068   DCHECK(AreaSize() == page->area_size());
1069
1070   if (page->WasSwept()) {
1071     intptr_t size = free_list_.EvictFreeListItems(page);
1072     accounting_stats_.AllocateBytes(size);
1073     DCHECK_EQ(AreaSize(), static_cast<int>(size));
1074   } else {
1075     DecreaseUnsweptFreeBytes(page);
1076   }
1077
1078   if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
1079     heap()->decrement_scan_on_scavenge_pages();
1080     page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
1081   }
1082
1083   DCHECK(!free_list_.ContainsPageFreeListItems(page));
1084
1085   if (Page::FromAllocationTop(allocation_info_.top()) == page) {
1086     allocation_info_.set_top(NULL);
1087     allocation_info_.set_limit(NULL);
1088   }
1089
1090   page->Unlink();
1091   if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
1092     heap()->isolate()->memory_allocator()->Free(page);
1093   } else {
1094     heap()->QueueMemoryChunkForFree(page);
1095   }
1096
1097   DCHECK(Capacity() > 0);
1098   accounting_stats_.ShrinkSpace(AreaSize());
1099 }
1100
1101
1102 void PagedSpace::CreateEmergencyMemory() {
1103   emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
1104       AreaSize(), AreaSize(), executable(), this);
1105 }
1106
1107
1108 void PagedSpace::FreeEmergencyMemory() {
1109   Page* page = static_cast<Page*>(emergency_memory_);
1110   DCHECK(page->LiveBytes() == 0);
1111   DCHECK(AreaSize() == page->area_size());
1112   DCHECK(!free_list_.ContainsPageFreeListItems(page));
1113   heap()->isolate()->memory_allocator()->Free(page);
1114   emergency_memory_ = NULL;
1115 }
1116
1117
1118 void PagedSpace::UseEmergencyMemory() {
1119   Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
1120   page->InsertAfter(anchor_.prev_page());
1121   emergency_memory_ = NULL;
1122 }
1123
1124
1125 #ifdef DEBUG
1126 void PagedSpace::Print() {}
1127 #endif
1128
1129 #ifdef VERIFY_HEAP
1130 void PagedSpace::Verify(ObjectVisitor* visitor) {
1131   // We can only iterate over the pages if they were swept precisely.
1132   if (!swept_precisely_) return;
1133
1134   bool allocation_pointer_found_in_space =
1135       (allocation_info_.top() == allocation_info_.limit());
1136   PageIterator page_iterator(this);
1137   while (page_iterator.has_next()) {
1138     Page* page = page_iterator.next();
1139     CHECK(page->owner() == this);
1140     if (page == Page::FromAllocationTop(allocation_info_.top())) {
1141       allocation_pointer_found_in_space = true;
1142     }
1143     CHECK(page->WasSweptPrecisely());
1144     HeapObjectIterator it(page, NULL);
1145     Address end_of_previous_object = page->area_start();
1146     Address top = page->area_end();
1147     int black_size = 0;
1148     for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1149       CHECK(end_of_previous_object <= object->address());
1150
1151       // The first word should be a map, and we expect all map pointers to
1152       // be in map space.
1153       Map* map = object->map();
1154       CHECK(map->IsMap());
1155       CHECK(heap()->map_space()->Contains(map));
1156
1157       // Perform space-specific object verification.
1158       VerifyObject(object);
1159
1160       // The object itself should look OK.
1161       object->ObjectVerify();
1162
1163       // All the interior pointers should be contained in the heap.
1164       int size = object->Size();
1165       object->IterateBody(map->instance_type(), size, visitor);
1166       if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1167         black_size += size;
1168       }
1169
1170       CHECK(object->address() + size <= top);
1171       end_of_previous_object = object->address() + size;
1172     }
1173     CHECK_LE(black_size, page->LiveBytes());
1174   }
1175   CHECK(allocation_pointer_found_in_space);
1176 }
1177 #endif  // VERIFY_HEAP
1178
1179 // -----------------------------------------------------------------------------
1180 // NewSpace implementation
1181
1182
1183 bool NewSpace::SetUp(int reserved_semispace_capacity,
1184                      int maximum_semispace_capacity) {
1185   // Set up new space based on the preallocated memory block defined by
1186   // start and size. The provided space is divided into two semi-spaces.
1187   // To support fast containment testing in the new space, the size of
1188   // this chunk must be a power of two and it must be aligned to its size.
1189   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1190
1191   size_t size = 2 * reserved_semispace_capacity;
1192   Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
1193       size, size, &reservation_);
1194   if (base == NULL) return false;
1195
1196   chunk_base_ = base;
1197   chunk_size_ = static_cast<uintptr_t>(size);
1198   LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1199
1200   DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
1201   DCHECK(IsPowerOf2(maximum_semispace_capacity));
1202
1203   // Allocate and set up the histogram arrays if necessary.
1204   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1205   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1206
1207 #define SET_NAME(name)                        \
1208   allocated_histogram_[name].set_name(#name); \
1209   promoted_histogram_[name].set_name(#name);
1210   INSTANCE_TYPE_LIST(SET_NAME)
1211 #undef SET_NAME
1212
1213   DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1214   DCHECK(static_cast<intptr_t>(chunk_size_) >=
1215          2 * heap()->ReservedSemiSpaceSize());
1216   DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1217
1218   to_space_.SetUp(chunk_base_, initial_semispace_capacity,
1219                   maximum_semispace_capacity);
1220   from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1221                     initial_semispace_capacity, maximum_semispace_capacity);
1222   if (!to_space_.Commit()) {
1223     return false;
1224   }
1225   DCHECK(!from_space_.is_committed());  // No need to use memory yet.
1226
1227   start_ = chunk_base_;
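  // As noted at the top of this function, the chunk's size is a power of two
  // and the chunk is aligned to its size, so the masks set up below reduce
  // new-space containment and heap-object-tag checks to a single
  // mask-and-compare against start_ / object_expected_.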
1228   address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1229   object_mask_ = address_mask_ | kHeapObjectTagMask;
1230   object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1231
1232   ResetAllocationInfo();
1233
1234   return true;
1235 }
1236
1237
1238 void NewSpace::TearDown() {
1239   if (allocated_histogram_) {
1240     DeleteArray(allocated_histogram_);
1241     allocated_histogram_ = NULL;
1242   }
1243   if (promoted_histogram_) {
1244     DeleteArray(promoted_histogram_);
1245     promoted_histogram_ = NULL;
1246   }
1247
1248   start_ = NULL;
1249   allocation_info_.set_top(NULL);
1250   allocation_info_.set_limit(NULL);
1251
1252   to_space_.TearDown();
1253   from_space_.TearDown();
1254
1255   LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1256
1257   DCHECK(reservation_.IsReserved());
1258   heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1259                                                     NOT_EXECUTABLE);
1260   chunk_base_ = NULL;
1261   chunk_size_ = 0;
1262 }
1263
1264
1265 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
1266
1267
1268 void NewSpace::Grow() {
1269   // Double the semispace size but only up to maximum capacity.
1270   DCHECK(Capacity() < MaximumCapacity());
1271   int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
1272   if (to_space_.GrowTo(new_capacity)) {
1273     // Only grow from space if we managed to grow to-space.
1274     if (!from_space_.GrowTo(new_capacity)) {
1275       // If we managed to grow to-space but couldn't grow from-space,
1276       // attempt to shrink to-space.
1277       if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1278         // We are in an inconsistent state because we could not
1279         // commit/uncommit memory from new space.
1280         V8::FatalProcessOutOfMemory("Failed to grow new space.");
1281       }
1282     }
1283   }
1284   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1285 }
1286
1287
1288 void NewSpace::Shrink() {
1289   int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
1290   int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1291   if (rounded_new_capacity < Capacity() &&
1292       to_space_.ShrinkTo(rounded_new_capacity)) {
1293     // Only shrink from-space if we managed to shrink to-space.
1294     from_space_.Reset();
1295     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1296       // If we managed to shrink to-space but couldn't shrink from
1297       // space, attempt to grow to-space again.
1298       if (!to_space_.GrowTo(from_space_.Capacity())) {
1299         // We are in an inconsistent state because we could not
1300         // commit/uncommit memory from new space.
1301         V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1302       }
1303     }
1304   }
1305   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1306 }
1307
1308
1309 void NewSpace::UpdateAllocationInfo() {
1310   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1311   allocation_info_.set_top(to_space_.page_low());
1312   allocation_info_.set_limit(to_space_.page_high());
1313   UpdateInlineAllocationLimit(0);
1314   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1315 }
1316
1317
1318 void NewSpace::ResetAllocationInfo() {
1319   to_space_.Reset();
1320   UpdateAllocationInfo();
1321   pages_used_ = 0;
1322   // Clear all mark-bits in the to-space.
1323   NewSpacePageIterator it(&to_space_);
1324   while (it.has_next()) {
1325     Bitmap::Clear(it.next());
1326   }
1327 }
1328
1329
1330 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1331   if (heap()->inline_allocation_disabled()) {
1332     // Lowest limit when linear allocation was disabled.
1333     Address high = to_space_.page_high();
1334     Address new_top = allocation_info_.top() + size_in_bytes;
1335     allocation_info_.set_limit(Min(new_top, high));
1336   } else if (inline_allocation_limit_step() == 0) {
1337     // Normal limit is the end of the current page.
1338     allocation_info_.set_limit(to_space_.page_high());
1339   } else {
1340     // Lower limit during incremental marking.
1341     Address high = to_space_.page_high();
1342     Address new_top = allocation_info_.top() + size_in_bytes;
1343     Address new_limit = new_top + inline_allocation_limit_step_;
1344     allocation_info_.set_limit(Min(new_limit, high));
1345   }
1346   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1347 }
1348
1349
1350 bool NewSpace::AddFreshPage() {
1351   Address top = allocation_info_.top();
1352   if (NewSpacePage::IsAtStart(top)) {
1353     // The current page is already empty. Don't try to make another.
1354
1355     // We should only get here if someone asks to allocate more
1356     // than what can be stored in a single page.
1357     // TODO(gc): Change the limit on new-space allocation to prevent this
1358     // from happening (all such allocations should go directly to LOSpace).
1359     return false;
1360   }
1361   if (!to_space_.AdvancePage()) {
1362     // Failed to get a new page in to-space.
1363     return false;
1364   }
1365
1366   // Clear remainder of current page.
1367   Address limit = NewSpacePage::FromLimit(top)->area_end();
1368   if (heap()->gc_state() == Heap::SCAVENGE) {
1369     heap()->promotion_queue()->SetNewLimit(limit);
1370     heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
1371   }
1372
1373   int remaining_in_page = static_cast<int>(limit - top);
1374   heap()->CreateFillerObjectAt(top, remaining_in_page);
1375   pages_used_++;
1376   UpdateAllocationInfo();
1377
1378   return true;
1379 }
1380
1381
1382 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
1383   Address old_top = allocation_info_.top();
1384   Address high = to_space_.page_high();
1385   if (allocation_info_.limit() < high) {
1386     // Either the limit has been lowered because linear allocation was disabled
1387     // or because incremental marking wants to get a chance to do a step. Set
1388     // the new limit accordingly.
1389     Address new_top = old_top + size_in_bytes;
1390     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1391     heap()->incremental_marking()->Step(bytes_allocated,
1392                                         IncrementalMarking::GC_VIA_STACK_GUARD);
1393     UpdateInlineAllocationLimit(size_in_bytes);
1394     top_on_previous_step_ = new_top;
1395     return AllocateRaw(size_in_bytes);
1396   } else if (AddFreshPage()) {
1397     // Switched to new page. Try allocating again.
1398     int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1399     heap()->incremental_marking()->Step(bytes_allocated,
1400                                         IncrementalMarking::GC_VIA_STACK_GUARD);
1401     top_on_previous_step_ = to_space_.page_low();
1402     return AllocateRaw(size_in_bytes);
1403   } else {
1404     return AllocationResult::Retry();
1405   }
1406 }
1407
1408
1409 #ifdef VERIFY_HEAP
1410 // We do not use the SemiSpaceIterator because verification doesn't assume
1411 // that it works (it depends on the invariants we are checking).
1412 void NewSpace::Verify() {
1413   // The allocation pointer should be in the space or at the very end.
1414   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1415
1416   // There should be objects packed in from the low address up to the
1417   // allocation pointer.
1418   Address current = to_space_.first_page()->area_start();
1419   CHECK_EQ(current, to_space_.space_start());
1420
1421   while (current != top()) {
1422     if (!NewSpacePage::IsAtEnd(current)) {
1423       // The allocation pointer should not be in the middle of an object.
1424       CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1425             current < top());
1426
1427       HeapObject* object = HeapObject::FromAddress(current);
1428
1429       // The first word should be a map, and we expect all map pointers to
1430       // be in map space.
1431       Map* map = object->map();
1432       CHECK(map->IsMap());
1433       CHECK(heap()->map_space()->Contains(map));
1434
1435       // The object should not be code or a map.
1436       CHECK(!object->IsMap());
1437       CHECK(!object->IsCode());
1438
1439       // The object itself should look OK.
1440       object->ObjectVerify();
1441
1442       // All the interior pointers should be contained in the heap.
1443       VerifyPointersVisitor visitor;
1444       int size = object->Size();
1445       object->IterateBody(map->instance_type(), size, &visitor);
1446
1447       current += size;
1448     } else {
1449       // At end of page, switch to next page.
1450       NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1451       // Next page should be valid.
1452       CHECK(!page->is_anchor());
1453       current = page->area_start();
1454     }
1455   }
1456
1457   // Check semi-spaces.
1458   CHECK_EQ(from_space_.id(), kFromSpace);
1459   CHECK_EQ(to_space_.id(), kToSpace);
1460   from_space_.Verify();
1461   to_space_.Verify();
1462 }
1463 #endif
1464
1465 // -----------------------------------------------------------------------------
1466 // SemiSpace implementation
1467
1468 void SemiSpace::SetUp(Address start, int initial_capacity,
1469                       int maximum_capacity) {
1470   // Creates a space in the young generation. The constructor does not
1471   // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
1472   // memory of size 'capacity' when set up, and does not grow or shrink
1473   // otherwise.  In the mark-compact collector, the memory region of the from
1474   // space is used as the marking stack. It requires contiguous memory
1475   // addresses.
1476   DCHECK(maximum_capacity >= Page::kPageSize);
1477   initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1478   capacity_ = initial_capacity;
1479   maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1480   maximum_committed_ = 0;
1481   committed_ = false;
1482   start_ = start;
1483   address_mask_ = ~(maximum_capacity - 1);
1484   object_mask_ = address_mask_ | kHeapObjectTagMask;
1485   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1486   age_mark_ = start_;
1487 }
1488
1489
1490 void SemiSpace::TearDown() {
1491   start_ = NULL;
1492   capacity_ = 0;
1493 }
1494
1495
1496 bool SemiSpace::Commit() {
1497   DCHECK(!is_committed());
1498   int pages = capacity_ / Page::kPageSize;
1499   if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, capacity_,
1500                                                           executable())) {
1501     return false;
1502   }
1503
1504   NewSpacePage* current = anchor();
1505   for (int i = 0; i < pages; i++) {
1506     NewSpacePage* new_page =
1507         NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
1508     new_page->InsertAfter(current);
1509     current = new_page;
1510   }
1511
1512   SetCapacity(capacity_);
1513   committed_ = true;
1514   Reset();
1515   return true;
1516 }
1517
1518
1519 bool SemiSpace::Uncommit() {
1520   DCHECK(is_committed());
1521   Address start = start_ + maximum_capacity_ - capacity_;
1522   if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
1523     return false;
1524   }
1525   anchor()->set_next_page(anchor());
1526   anchor()->set_prev_page(anchor());
1527
1528   committed_ = false;
1529   return true;
1530 }
1531
1532
1533 size_t SemiSpace::CommittedPhysicalMemory() {
1534   if (!is_committed()) return 0;
1535   size_t size = 0;
1536   NewSpacePageIterator it(this);
1537   while (it.has_next()) {
1538     size += it.next()->CommittedPhysicalMemory();
1539   }
1540   return size;
1541 }
1542
1543
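// Grows the committed part of the semispace to new_capacity: commits the
// extra memory, then creates the additional pages and links them after the
// current last page, copying the flip flags from it. If nothing is committed
// yet, the whole semispace is committed first.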
1544 bool SemiSpace::GrowTo(int new_capacity) {
1545   if (!is_committed()) {
1546     if (!Commit()) return false;
1547   }
1548   DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1549   DCHECK(new_capacity <= maximum_capacity_);
1550   DCHECK(new_capacity > capacity_);
1551   int pages_before = capacity_ / Page::kPageSize;
1552   int pages_after = new_capacity / Page::kPageSize;
1553
1554   size_t delta = new_capacity - capacity_;
1555
1556   DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1557   if (!heap()->isolate()->memory_allocator()->CommitBlock(
1558           start_ + capacity_, delta, executable())) {
1559     return false;
1560   }
1561   SetCapacity(new_capacity);
1562   NewSpacePage* last_page = anchor()->prev_page();
1563   DCHECK(last_page != anchor());
1564   for (int i = pages_before; i < pages_after; i++) {
1565     Address page_address = start_ + i * Page::kPageSize;
1566     NewSpacePage* new_page =
1567         NewSpacePage::Initialize(heap(), page_address, this);
1568     new_page->InsertAfter(last_page);
1569     Bitmap::Clear(new_page);
1570     // Duplicate the flags that were set on the old page.
1571     new_page->SetFlags(last_page->GetFlags(),
1572                        NewSpacePage::kCopyOnFlipFlagsMask);
1573     last_page = new_page;
1574   }
1575   return true;
1576 }
1577
1578
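// Shrinks the committed part of the semispace to new_capacity: uncommits the
// tail of the memory region and truncates the page list at the new last page.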
1579 bool SemiSpace::ShrinkTo(int new_capacity) {
1580   DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1581   DCHECK(new_capacity >= initial_capacity_);
1582   DCHECK(new_capacity < capacity_);
1583   if (is_committed()) {
1584     size_t delta = capacity_ - new_capacity;
1585     DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1586
1587     MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1588     if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
1589       return false;
1590     }
1591
1592     int pages_after = new_capacity / Page::kPageSize;
1593     NewSpacePage* new_last_page =
1594         NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1595     new_last_page->set_next_page(anchor());
1596     anchor()->set_prev_page(new_last_page);
1597     DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
1598   }
1599
1600   SetCapacity(new_capacity);
1601
1602   return true;
1603 }
1604
1605
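// Called when this semispace changes role (from-space <-> to-space). Repairs
// the back-pointers to the anchor, flips the space id, and updates the owner,
// the IN_FROM_SPACE/IN_TO_SPACE flags and the requested GC flags on each page.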
1606 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1607   anchor_.set_owner(this);
1608   // Fixup back-pointers to anchor. Address of anchor changes
1609   // when we swap.
1610   anchor_.prev_page()->set_next_page(&anchor_);
1611   anchor_.next_page()->set_prev_page(&anchor_);
1612
1613   bool becomes_to_space = (id_ == kFromSpace);
1614   id_ = becomes_to_space ? kToSpace : kFromSpace;
1615   NewSpacePage* page = anchor_.next_page();
1616   while (page != &anchor_) {
1617     page->set_owner(this);
1618     page->SetFlags(flags, mask);
1619     if (becomes_to_space) {
1620       page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1621       page->SetFlag(MemoryChunk::IN_TO_SPACE);
1622       page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1623       page->ResetLiveBytes();
1624     } else {
1625       page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1626       page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1627     }
1628     DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1629     DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1630            page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1631     page = page->next_page();
1632   }
1633 }
1634
1635
1636 void SemiSpace::Reset() {
1637   DCHECK(anchor_.next_page() != &anchor_);
1638   current_page_ = anchor_.next_page();
1639 }
1640
1641
1642 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1643   // We won't be swapping semispaces without data in them.
1644   DCHECK(from->anchor_.next_page() != &from->anchor_);
1645   DCHECK(to->anchor_.next_page() != &to->anchor_);
1646
1647   // Swap bits.
1648   SemiSpace tmp = *from;
1649   *from = *to;
1650   *to = tmp;
1651
1652   // Fixup back-pointers to the page list anchor now that its address
1653   // has changed.
1654   // Swap to/from-space bits on pages.
1655   // Copy GC flags from old active space (from-space) to new (to-space).
1656   intptr_t flags = from->current_page()->GetFlags();
1657   to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1658
1659   from->FlipPages(0, 0);
1660 }
1661
1662
1663 void SemiSpace::SetCapacity(int new_capacity) {
1664   capacity_ = new_capacity;
1665   if (capacity_ > maximum_committed_) {
1666     maximum_committed_ = capacity_;
1667   }
1668 }
1669
1670
1671 void SemiSpace::set_age_mark(Address mark) {
1672   DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
1673   age_mark_ = mark;
1674   // Mark all pages up to the one containing mark.
1675   NewSpacePageIterator it(space_start(), mark);
1676   while (it.has_next()) {
1677     it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1678   }
1679 }
1680
1681
1682 #ifdef DEBUG
1683 void SemiSpace::Print() {}
1684 #endif
1685
1686 #ifdef VERIFY_HEAP
1687 void SemiSpace::Verify() {
1688   bool is_from_space = (id_ == kFromSpace);
1689   NewSpacePage* page = anchor_.next_page();
1690   CHECK(anchor_.semi_space() == this);
1691   while (page != &anchor_) {
1692     CHECK(page->semi_space() == this);
1693     CHECK(page->InNewSpace());
1694     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1695                                         : MemoryChunk::IN_TO_SPACE));
1696     CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1697                                          : MemoryChunk::IN_FROM_SPACE));
1698     CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1699     if (!is_from_space) {
1700       // The pointers-from-here-are-interesting flag isn't updated dynamically
1701       // on from-space pages, so it might be out of sync with the marking state.
1702       if (page->heap()->incremental_marking()->IsMarking()) {
1703         CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1704       } else {
1705         CHECK(
1706             !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1707       }
1708       // TODO(gc): Check that the live_bytes_count_ field matches the
1709       // black marking on the page (if we make it match in new-space).
1710     }
1711     CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1712     CHECK(page->prev_page()->next_page() == page);
1713     page = page->next_page();
1714   }
1715 }
1716 #endif
1717
1718 #ifdef DEBUG
1719 void SemiSpace::AssertValidRange(Address start, Address end) {
1720   // Addresses belong to the same semi-space.
1721   NewSpacePage* page = NewSpacePage::FromLimit(start);
1722   NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1723   SemiSpace* space = page->semi_space();
1724   CHECK_EQ(space, end_page->semi_space());
1725   // Start address is before end address, either on same page,
1726   // or end address is on a later page in the linked list of
1727   // semi-space pages.
1728   if (page == end_page) {
1729     CHECK(start <= end);
1730   } else {
1731     while (page != end_page) {
1732       page = page->next_page();
1733       CHECK_NE(page, space->anchor());
1734     }
1735   }
1736 }
1737 #endif
1738
1739
1740 // -----------------------------------------------------------------------------
1741 // SemiSpaceIterator implementation.
1742 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1743   Initialize(space->bottom(), space->top(), NULL);
1744 }
1745
1746
1747 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1748                                      HeapObjectCallback size_func) {
1749   Initialize(space->bottom(), space->top(), size_func);
1750 }
1751
1752
1753 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1754   Initialize(start, space->top(), NULL);
1755 }
1756
1757
1758 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1759   Initialize(from, to, NULL);
1760 }
1761
1762
1763 void SemiSpaceIterator::Initialize(Address start, Address end,
1764                                    HeapObjectCallback size_func) {
1765   SemiSpace::AssertValidRange(start, end);
1766   current_ = start;
1767   limit_ = end;
1768   size_func_ = size_func;
1769 }
1770
1771
1772 #ifdef DEBUG
1773 // heap_histograms is shared; always clear it before using it.
1774 static void ClearHistograms(Isolate* isolate) {
1775 // We reset the name each time, though it hasn't changed.
1776 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1777   INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1778 #undef DEF_TYPE_NAME
1779
1780 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1781   INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1782 #undef CLEAR_HISTOGRAM
1783
1784   isolate->js_spill_information()->Clear();
1785 }
1786
1787
1788 static void ClearCodeKindStatistics(int* code_kind_statistics) {
1789   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1790     code_kind_statistics[i] = 0;
1791   }
1792 }
1793
1794
1795 static void ReportCodeKindStatistics(int* code_kind_statistics) {
1796   PrintF("\n   Code kind histograms: \n");
1797   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1798     if (code_kind_statistics[i] > 0) {
1799       PrintF("     %-20s: %10d bytes\n",
1800              Code::Kind2String(static_cast<Code::Kind>(i)),
1801              code_kind_statistics[i]);
1802     }
1803   }
1804   PrintF("\n");
1805 }
1806
1807
1808 static int CollectHistogramInfo(HeapObject* obj) {
1809   Isolate* isolate = obj->GetIsolate();
1810   InstanceType type = obj->map()->instance_type();
1811   DCHECK(0 <= type && type <= LAST_TYPE);
1812   DCHECK(isolate->heap_histograms()[type].name() != NULL);
1813   isolate->heap_histograms()[type].increment_number(1);
1814   isolate->heap_histograms()[type].increment_bytes(obj->Size());
1815
1816   if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1817     JSObject::cast(obj)
1818         ->IncrementSpillStatistics(isolate->js_spill_information());
1819   }
1820
1821   return obj->Size();
1822 }
1823
1824
1825 static void ReportHistogram(Isolate* isolate, bool print_spill) {
1826   PrintF("\n  Object Histogram:\n");
1827   for (int i = 0; i <= LAST_TYPE; i++) {
1828     if (isolate->heap_histograms()[i].number() > 0) {
1829       PrintF("    %-34s%10d (%10d bytes)\n",
1830              isolate->heap_histograms()[i].name(),
1831              isolate->heap_histograms()[i].number(),
1832              isolate->heap_histograms()[i].bytes());
1833     }
1834   }
1835   PrintF("\n");
1836
1837   // Summarize string types.
1838   int string_number = 0;
1839   int string_bytes = 0;
1840 #define INCREMENT(type, size, name, camel_name)               \
1841   string_number += isolate->heap_histograms()[type].number(); \
1842   string_bytes += isolate->heap_histograms()[type].bytes();
1843   STRING_TYPE_LIST(INCREMENT)
1844 #undef INCREMENT
1845   if (string_number > 0) {
1846     PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1847            string_bytes);
1848   }
1849
1850   if (FLAG_collect_heap_spill_statistics && print_spill) {
1851     isolate->js_spill_information()->Print();
1852   }
1853 }
1854 #endif  // DEBUG
1855
1856
1857 // Support for statistics gathering for --heap-stats and --log-gc.
1858 void NewSpace::ClearHistograms() {
1859   for (int i = 0; i <= LAST_TYPE; i++) {
1860     allocated_histogram_[i].clear();
1861     promoted_histogram_[i].clear();
1862   }
1863 }
1864
1865
1866 // Because the copying collector does not touch garbage objects, we iterate
1867 // the new space before a collection to get a histogram of allocated objects.
1868 // This only happens when the --log-gc flag is set.
1869 void NewSpace::CollectStatistics() {
1870   ClearHistograms();
1871   SemiSpaceIterator it(this);
1872   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1873     RecordAllocation(obj);
1874 }
1875
1876
1877 static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
1878                                const char* description) {
1879   LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1880   // Lump all the string types together.
1881   int string_number = 0;
1882   int string_bytes = 0;
1883 #define INCREMENT(type, size, name, camel_name) \
1884   string_number += info[type].number();         \
1885   string_bytes += info[type].bytes();
1886   STRING_TYPE_LIST(INCREMENT)
1887 #undef INCREMENT
1888   if (string_number > 0) {
1889     LOG(isolate,
1890         HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1891   }
1892
1893   // Then do the other types.
1894   for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1895     if (info[i].number() > 0) {
1896       LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
1897                                        info[i].bytes()));
1898     }
1899   }
1900   LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1901 }
1902
1903
1904 void NewSpace::ReportStatistics() {
1905 #ifdef DEBUG
1906   if (FLAG_heap_stats) {
1907     float pct = static_cast<float>(Available()) / Capacity();
1908     PrintF("  capacity: %" V8_PTR_PREFIX
1909            "d"
1910            ", available: %" V8_PTR_PREFIX "d, %%%d\n",
1911            Capacity(), Available(), static_cast<int>(pct * 100));
1912     PrintF("\n  Object Histogram:\n");
1913     for (int i = 0; i <= LAST_TYPE; i++) {
1914       if (allocated_histogram_[i].number() > 0) {
1915         PrintF("    %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
1916                allocated_histogram_[i].number(),
1917                allocated_histogram_[i].bytes());
1918       }
1919     }
1920     PrintF("\n");
1921   }
1922 #endif  // DEBUG
1923
1924   if (FLAG_log_gc) {
1925     Isolate* isolate = heap()->isolate();
1926     DoReportStatistics(isolate, allocated_histogram_, "allocated");
1927     DoReportStatistics(isolate, promoted_histogram_, "promoted");
1928   }
1929 }
1930
1931
1932 void NewSpace::RecordAllocation(HeapObject* obj) {
1933   InstanceType type = obj->map()->instance_type();
1934   DCHECK(0 <= type && type <= LAST_TYPE);
1935   allocated_histogram_[type].increment_number(1);
1936   allocated_histogram_[type].increment_bytes(obj->Size());
1937 }
1938
1939
1940 void NewSpace::RecordPromotion(HeapObject* obj) {
1941   InstanceType type = obj->map()->instance_type();
1942   DCHECK(0 <= type && type <= LAST_TYPE);
1943   promoted_histogram_[type].increment_number(1);
1944   promoted_histogram_[type].increment_bytes(obj->Size());
1945 }
1946
1947
1948 size_t NewSpace::CommittedPhysicalMemory() {
1949   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
1950   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1951   size_t size = to_space_.CommittedPhysicalMemory();
1952   if (from_space_.is_committed()) {
1953     size += from_space_.CommittedPhysicalMemory();
1954   }
1955   return size;
1956 }
1957
1958
1959 // -----------------------------------------------------------------------------
1960 // Free lists for old object spaces implementation
1961
1962 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
1963   DCHECK(size_in_bytes > 0);
1964   DCHECK(IsAligned(size_in_bytes, kPointerSize));
1965
1966   // We write a map and possibly size information to the block.  If the block
1967   // is big enough to be a FreeSpace with at least one extra word (the next
1968   // pointer), we set its map to be the free space map and its size to an
1969   // appropriate array length for the desired size from HeapObject::Size().
1970   // If the block is too small (e.g., one or two words) to hold both a size
1971   // field and a next pointer, we give it a filler map that gives it the
1972   // correct size.
1973   if (size_in_bytes > FreeSpace::kHeaderSize) {
1974     // Can't use FreeSpace::cast because it fails during deserialization.
1975     // We have to set the size first with a release store before we store
1976     // the map because a concurrent store buffer scan on scavenge must not
1977     // observe a map with an invalid size.
1978     FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1979     this_as_free_space->nobarrier_set_size(size_in_bytes);
1980     synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
1981   } else if (size_in_bytes == kPointerSize) {
1982     set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
1983   } else if (size_in_bytes == 2 * kPointerSize) {
1984     set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
1985   } else {
1986     UNREACHABLE();
1987   }
1988   // We would like to DCHECK(Size() == size_in_bytes) but this would fail during
1989   // deserialization because the free space map is not done yet.
1990 }
1991
1992
1993 FreeListNode* FreeListNode::next() {
1994   DCHECK(IsFreeListNode(this));
1995   if (map() == GetHeap()->raw_unchecked_free_space_map()) {
1996     DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
1997     return reinterpret_cast<FreeListNode*>(
1998         Memory::Address_at(address() + kNextOffset));
1999   } else {
2000     return reinterpret_cast<FreeListNode*>(
2001         Memory::Address_at(address() + kPointerSize));
2002   }
2003 }
2004
2005
2006 FreeListNode** FreeListNode::next_address() {
2007   DCHECK(IsFreeListNode(this));
2008   if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2009     DCHECK(Size() >= kNextOffset + kPointerSize);
2010     return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
2011   } else {
2012     return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
2013   }
2014 }
2015
2016
2017 void FreeListNode::set_next(FreeListNode* next) {
2018   DCHECK(IsFreeListNode(this));
2019   // While we are booting the VM, the free space map will actually be null.  So
2020   // we have to make sure that we don't try to use it for anything at that
2021   // stage.
2022   if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2023     DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
2024     base::NoBarrier_Store(
2025         reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
2026         reinterpret_cast<base::AtomicWord>(next));
2027   } else {
2028     base::NoBarrier_Store(
2029         reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
2030         reinterpret_cast<base::AtomicWord>(next));
2031   }
2032 }
2033
2034
2035 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
2036   intptr_t free_bytes = 0;
2037   if (category->top() != NULL) {
2038     // This is safe (not going to deadlock) since Concatenate operations
2039     // are never performed on the same free lists at the same time in
2040     // reverse order.
2041     base::LockGuard<base::Mutex> target_lock_guard(mutex());
2042     base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
2043     DCHECK(category->end_ != NULL);
2044     free_bytes = category->available();
2045     if (end_ == NULL) {
2046       end_ = category->end();
2047     } else {
2048       category->end()->set_next(top());
2049     }
2050     set_top(category->top());
2051     base::NoBarrier_Store(&top_, category->top_);
2052     available_ += category->available();
2053     category->Reset();
2054   }
2055   return free_bytes;
2056 }
2057
2058
2059 void FreeListCategory::Reset() {
2060   set_top(NULL);
2061   set_end(NULL);
2062   set_available(0);
2063 }
2064
2065
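// Unlinks every node that lives on page p from this category and returns the
// number of bytes that were removed from the free list.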
2066 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
2067   int sum = 0;
2068   FreeListNode* t = top();
2069   FreeListNode** n = &t;
2070   while (*n != NULL) {
2071     if (Page::FromAddress((*n)->address()) == p) {
2072       FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
2073       sum += free_space->Size();
2074       *n = (*n)->next();
2075     } else {
2076       n = (*n)->next_address();
2077     }
2078   }
2079   set_top(t);
2080   if (top() == NULL) {
2081     set_end(NULL);
2082   }
2083   available_ -= sum;
2084   return sum;
2085 }
2086
2087
2088 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
2089   FreeListNode* node = top();
2090   while (node != NULL) {
2091     if (Page::FromAddress(node->address()) == p) return true;
2092     node = node->next();
2093   }
2094   return false;
2095 }
2096
2097
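// Removes and returns the first usable node, storing its size in *node_size.
// Nodes on evacuation-candidate pages are skipped and their bytes are dropped
// from the available count. Returns NULL if no usable node is left.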
2098 FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
2099   FreeListNode* node = top();
2100
2101   if (node == NULL) return NULL;
2102
2103   while (node != NULL &&
2104          Page::FromAddress(node->address())->IsEvacuationCandidate()) {
2105     available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
2106     node = node->next();
2107   }
2108
2109   if (node != NULL) {
2110     set_top(node->next());
2111     *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
2112     available_ -= *node_size;
2113   } else {
2114     set_top(NULL);
2115   }
2116
2117   if (top() == NULL) {
2118     set_end(NULL);
2119   }
2120
2121   return node;
2122 }
2123
2124
2125 FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
2126                                                  int* node_size) {
2127   FreeListNode* node = PickNodeFromList(node_size);
2128   if (node != NULL && *node_size < size_in_bytes) {
2129     Free(node, *node_size);
2130     *node_size = 0;
2131     return NULL;
2132   }
2133   return node;
2134 }
2135
2136
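// Pushes node onto the front of this category's list and credits
// size_in_bytes to its available byte count.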
2137 void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
2138   node->set_next(top());
2139   set_top(node);
2140   if (end_ == NULL) {
2141     end_ = node;
2142   }
2143   available_ += size_in_bytes;
2144 }
2145
2146
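// Patches nodes created during bootstrapping before the free space map
// existed: any node whose map word is still NULL gets the real free space map.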
2147 void FreeListCategory::RepairFreeList(Heap* heap) {
2148   FreeListNode* n = top();
2149   while (n != NULL) {
2150     Map** map_location = reinterpret_cast<Map**>(n->address());
2151     if (*map_location == NULL) {
2152       *map_location = heap->free_space_map();
2153     } else {
2154       DCHECK(*map_location == heap->free_space_map());
2155     }
2156     n = n->next();
2157   }
2158 }
2159
2160
2161 FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
2162   Reset();
2163 }
2164
2165
2166 intptr_t FreeList::Concatenate(FreeList* free_list) {
2167   intptr_t free_bytes = 0;
2168   free_bytes += small_list_.Concatenate(free_list->small_list());
2169   free_bytes += medium_list_.Concatenate(free_list->medium_list());
2170   free_bytes += large_list_.Concatenate(free_list->large_list());
2171   free_bytes += huge_list_.Concatenate(free_list->huge_list());
2172   return free_bytes;
2173 }
2174
2175
2176 void FreeList::Reset() {
2177   small_list_.Reset();
2178   medium_list_.Reset();
2179   large_list_.Reset();
2180   huge_list_.Reset();
2181 }
2182
2183
2184 int FreeList::Free(Address start, int size_in_bytes) {
2185   if (size_in_bytes == 0) return 0;
2186
2187   FreeListNode* node = FreeListNode::FromAddress(start);
2188   node->set_size(heap_, size_in_bytes);
2189   Page* page = Page::FromAddress(start);
2190
2191   // Early return to drop too-small blocks on the floor.
2192   if (size_in_bytes < kSmallListMin) {
2193     page->add_non_available_small_blocks(size_in_bytes);
2194     return size_in_bytes;
2195   }
2196
2197   // Insert other blocks at the head of a free list of the appropriate
2198   // magnitude.
2199   if (size_in_bytes <= kSmallListMax) {
2200     small_list_.Free(node, size_in_bytes);
2201     page->add_available_in_small_free_list(size_in_bytes);
2202   } else if (size_in_bytes <= kMediumListMax) {
2203     medium_list_.Free(node, size_in_bytes);
2204     page->add_available_in_medium_free_list(size_in_bytes);
2205   } else if (size_in_bytes <= kLargeListMax) {
2206     large_list_.Free(node, size_in_bytes);
2207     page->add_available_in_large_free_list(size_in_bytes);
2208   } else {
2209     huge_list_.Free(node, size_in_bytes);
2210     page->add_available_in_huge_free_list(size_in_bytes);
2211   }
2212
2213   DCHECK(IsVeryLong() || available() == SumFreeLists());
2214   return 0;
2215 }
2216
2217
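// Finds a node of at least size_in_bytes bytes and stores its size in
// *node_size. The class lists that can satisfy the request are tried first,
// then the huge list is scanned (dropping nodes on evacuation-candidate
// pages), and finally the request's own size class is retried, putting back
// nodes that turn out to be too small. Returns NULL if nothing suitable is
// found.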
2218 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2219   FreeListNode* node = NULL;
2220   Page* page = NULL;
2221
2222   if (size_in_bytes <= kSmallAllocationMax) {
2223     node = small_list_.PickNodeFromList(node_size);
2224     if (node != NULL) {
2225       DCHECK(size_in_bytes <= *node_size);
2226       page = Page::FromAddress(node->address());
2227       page->add_available_in_small_free_list(-(*node_size));
2228       DCHECK(IsVeryLong() || available() == SumFreeLists());
2229       return node;
2230     }
2231   }
2232
2233   if (size_in_bytes <= kMediumAllocationMax) {
2234     node = medium_list_.PickNodeFromList(node_size);
2235     if (node != NULL) {
2236       DCHECK(size_in_bytes <= *node_size);
2237       page = Page::FromAddress(node->address());
2238       page->add_available_in_medium_free_list(-(*node_size));
2239       DCHECK(IsVeryLong() || available() == SumFreeLists());
2240       return node;
2241     }
2242   }
2243
2244   if (size_in_bytes <= kLargeAllocationMax) {
2245     node = large_list_.PickNodeFromList(node_size);
2246     if (node != NULL) {
2247       DCHECK(size_in_bytes <= *node_size);
2248       page = Page::FromAddress(node->address());
2249       page->add_available_in_large_free_list(-(*node_size));
2250       DCHECK(IsVeryLong() || available() == SumFreeLists());
2251       return node;
2252     }
2253   }
2254
2255   int huge_list_available = huge_list_.available();
2256   FreeListNode* top_node = huge_list_.top();
2257   for (FreeListNode** cur = &top_node; *cur != NULL;
2258        cur = (*cur)->next_address()) {
2259     FreeListNode* cur_node = *cur;
2260     while (cur_node != NULL &&
2261            Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
2262       int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
2263       huge_list_available -= size;
2264       page = Page::FromAddress(cur_node->address());
2265       page->add_available_in_huge_free_list(-size);
2266       cur_node = cur_node->next();
2267     }
2268
2269     *cur = cur_node;
2270     if (cur_node == NULL) {
2271       huge_list_.set_end(NULL);
2272       break;
2273     }
2274
2275     DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map());
2276     FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
2277     int size = cur_as_free_space->Size();
2278     if (size >= size_in_bytes) {
2279       // Large enough node found.  Unlink it from the list.
2280       node = *cur;
2281       *cur = node->next();
2282       *node_size = size;
2283       huge_list_available -= size;
2284       page = Page::FromAddress(node->address());
2285       page->add_available_in_huge_free_list(-size);
2286       break;
2287     }
2288   }
2289
2290   huge_list_.set_top(top_node);
2291   if (huge_list_.top() == NULL) {
2292     huge_list_.set_end(NULL);
2293   }
2294   huge_list_.set_available(huge_list_available);
2295
2296   if (node != NULL) {
2297     DCHECK(IsVeryLong() || available() == SumFreeLists());
2298     return node;
2299   }
2300
2301   if (size_in_bytes <= kSmallListMax) {
2302     node = small_list_.PickNodeFromList(size_in_bytes, node_size);
2303     if (node != NULL) {
2304       DCHECK(size_in_bytes <= *node_size);
2305       page = Page::FromAddress(node->address());
2306       page->add_available_in_small_free_list(-(*node_size));
2307     }
2308   } else if (size_in_bytes <= kMediumListMax) {
2309     node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
2310     if (node != NULL) {
2311       DCHECK(size_in_bytes <= *node_size);
2312       page = Page::FromAddress(node->address());
2313       page->add_available_in_medium_free_list(-(*node_size));
2314     }
2315   } else if (size_in_bytes <= kLargeListMax) {
2316     node = large_list_.PickNodeFromList(size_in_bytes, node_size);
2317     if (node != NULL) {
2318       DCHECK(size_in_bytes <= *node_size);
2319       page = Page::FromAddress(node->address());
2320       page->add_available_in_large_free_list(-(*node_size));
2321     }
2322   }
2323
2324   DCHECK(IsVeryLong() || available() == SumFreeLists());
2325   return node;
2326 }
2327
2328
2329 // Allocation on the old space free list.  If it succeeds then a new linear
2330 // allocation space has been set up with the top and limit of the space.  If
2331 // the allocation fails then NULL is returned, and the caller can perform a GC
2332 // or allocate a new page before retrying.
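// A rough usage sketch (the caller and the request size below are only
// illustrative; they are not taken from this file):
//
//   int size_in_bytes = 4 * kPointerSize;  // pointer-aligned request
//   HeapObject* result = free_list_.Allocate(size_in_bytes);
//   if (result == NULL) {
//     // No suitable node: trigger a GC or add a page, then retry.
//   }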
2333 HeapObject* FreeList::Allocate(int size_in_bytes) {
2334   DCHECK(0 < size_in_bytes);
2335   DCHECK(size_in_bytes <= kMaxBlockSize);
2336   DCHECK(IsAligned(size_in_bytes, kPointerSize));
2337   // Don't free list allocate if there is linear space available.
2338   DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
2339
2340   int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2341   // Mark the old linear allocation area with a free space map so it can be
2342   // skipped when scanning the heap.  This also puts it back in the free list
2343   // if it is big enough.
2344   owner_->Free(owner_->top(), old_linear_size);
2345
2346   owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
2347                                                       old_linear_size);
2348
2349   int new_node_size = 0;
2350   FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2351   if (new_node == NULL) {
2352     owner_->SetTopAndLimit(NULL, NULL);
2353     return NULL;
2354   }
2355
2356   int bytes_left = new_node_size - size_in_bytes;
2357   DCHECK(bytes_left >= 0);
2358
2359 #ifdef DEBUG
2360   for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2361     reinterpret_cast<Object**>(new_node->address())[i] =
2362         Smi::FromInt(kCodeZapValue);
2363   }
2364 #endif
2365
2366   // The old-space-step might have finished sweeping and restarted marking.
2367   // Verify that it did not turn the page of the new node into an evacuation
2368   // candidate.
2369   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2370
2371   const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2372
2373   // Memory in the linear allocation area is counted as allocated.  We may free
2374   // a little of this again immediately - see below.
2375   owner_->Allocate(new_node_size);
2376
2377   if (owner_->heap()->inline_allocation_disabled()) {
2378     // Keep the linear allocation area empty if requested to do so, just
2379     // return area back to the free list instead.
2380     owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2381     DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
2382   } else if (bytes_left > kThreshold &&
2383              owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2384              FLAG_incremental_marking_steps) {
2385     int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2386     // We don't want to give too large linear areas to the allocator while
2387     // incremental marking is going on, because we won't check again whether
2388     // we want to do another increment until the linear area is used up.
2389     owner_->Free(new_node->address() + size_in_bytes + linear_size,
2390                  new_node_size - size_in_bytes - linear_size);
2391     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2392                            new_node->address() + size_in_bytes + linear_size);
2393   } else if (bytes_left > 0) {
2394     // Normally we give the rest of the node to the allocator as its new
2395     // linear allocation area.
2396     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2397                            new_node->address() + new_node_size);
2398   } else {
2399     // TODO(gc) Try not freeing the linear allocation region when bytes_left
2400     // is zero.
2401     owner_->SetTopAndLimit(NULL, NULL);
2402   }
2403
2404   return new_node;
2405 }
2406
2407
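// Evicts every free-list entry on page p and returns the number of bytes
// removed. The huge list is always scanned; the small, medium and large lists
// are only scanned if the huge entries did not already cover the page's whole
// area.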
2408 intptr_t FreeList::EvictFreeListItems(Page* p) {
2409   intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
2410   p->set_available_in_huge_free_list(0);
2411
2412   if (sum < p->area_size()) {
2413     sum += small_list_.EvictFreeListItemsInList(p) +
2414            medium_list_.EvictFreeListItemsInList(p) +
2415            large_list_.EvictFreeListItemsInList(p);
2416     p->set_available_in_small_free_list(0);
2417     p->set_available_in_medium_free_list(0);
2418     p->set_available_in_large_free_list(0);
2419   }
2420
2421   return sum;
2422 }
2423
2424
2425 bool FreeList::ContainsPageFreeListItems(Page* p) {
2426   return huge_list_.ContainsPageFreeListItemsInList(p) ||
2427          small_list_.ContainsPageFreeListItemsInList(p) ||
2428          medium_list_.ContainsPageFreeListItemsInList(p) ||
2429          large_list_.ContainsPageFreeListItemsInList(p);
2430 }
2431
2432
2433 void FreeList::RepairLists(Heap* heap) {
2434   small_list_.RepairFreeList(heap);
2435   medium_list_.RepairFreeList(heap);
2436   large_list_.RepairFreeList(heap);
2437   huge_list_.RepairFreeList(heap);
2438 }
2439
2440
2441 #ifdef DEBUG
2442 intptr_t FreeListCategory::SumFreeList() {
2443   intptr_t sum = 0;
2444   FreeListNode* cur = top();
2445   while (cur != NULL) {
2446     DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
2447     FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
2448     sum += cur_as_free_space->nobarrier_size();
2449     cur = cur->next();
2450   }
2451   return sum;
2452 }
2453
2454
2455 static const int kVeryLongFreeList = 500;
2456
2457
2458 int FreeListCategory::FreeListLength() {
2459   int length = 0;
2460   FreeListNode* cur = top();
2461   while (cur != NULL) {
2462     length++;
2463     cur = cur->next();
2464     if (length == kVeryLongFreeList) return length;
2465   }
2466   return length;
2467 }
2468
2469
2470 bool FreeList::IsVeryLong() {
2471   if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
2472   if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
2473   if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
2474   if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
2475   return false;
2476 }
2477
2478
2479 // This can take a very long time because it is linear in the number of entries
2480 // on the free list, so it should not be called if FreeListLength returns
2481 // kVeryLongFreeList.
2482 intptr_t FreeList::SumFreeLists() {
2483   intptr_t sum = small_list_.SumFreeList();
2484   sum += medium_list_.SumFreeList();
2485   sum += large_list_.SumFreeList();
2486   sum += huge_list_.SumFreeList();
2487   return sum;
2488 }
2489 #endif
2490
2491
2492 // -----------------------------------------------------------------------------
2493 // OldSpace implementation
2494
2495 void PagedSpace::PrepareForMarkCompact() {
2496   // We don't have a linear allocation area while sweeping.  It will be restored
2497   // on the first allocation after the sweep.
2498   EmptyAllocationInfo();
2499
2500   // This counter will be increased for pages which will be swept by the
2501   // sweeper threads.
2502   unswept_free_bytes_ = 0;
2503
2504   // Clear the free list before a full GC---it will be rebuilt afterward.
2505   free_list_.Reset();
2506 }
2507
2508
2509 intptr_t PagedSpace::SizeOfObjects() {
2510   DCHECK(heap()->mark_compact_collector()->sweeping_in_progress() ||
2511          (unswept_free_bytes_ == 0));
2512   return Size() - unswept_free_bytes_ - (limit() - top());
2513 }
2514
2515
2516 // After we have booted, we have created a map which represents free space
2517 // on the heap.  If there was already a free list then the elements on it
2518 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
2519 // fix them.
2520 void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); }
2521
2522
2523 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2524   if (allocation_info_.top() >= allocation_info_.limit()) return;
2525
2526   if (Page::FromAllocationTop(allocation_info_.top())
2527           ->IsEvacuationCandidate()) {
2528     // Create filler object to keep page iterable if it was iterable.
2529     int remaining =
2530         static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2531     heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
2532
2533     allocation_info_.set_top(NULL);
2534     allocation_info_.set_limit(NULL);
2535   }
2536 }
2537
2538
2539 HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
2540     int size_in_bytes) {
2541   MarkCompactCollector* collector = heap()->mark_compact_collector();
2542   if (collector->sweeping_in_progress()) {
2543     // Wait for the sweeper threads here and complete the sweeping phase.
2544     collector->EnsureSweepingCompleted();
2545
2546     // After waiting for the sweeper threads, there may be new free-list
2547     // entries.
2548     return free_list_.Allocate(size_in_bytes);
2549   }
2550   return NULL;
2551 }
2552
2553
2554 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2555   // Allocation in this space has failed.
2556
2557   MarkCompactCollector* collector = heap()->mark_compact_collector();
2558   // Sweeping is still in progress.
2559   if (collector->sweeping_in_progress()) {
2560     // First try to refill the free-list, concurrent sweeper threads
2561     // may have freed some objects in the meantime.
2562     collector->RefillFreeList(this);
2563
2564     // Retry the free list allocation.
2565     HeapObject* object = free_list_.Allocate(size_in_bytes);
2566     if (object != NULL) return object;
2567
2568     // If sweeping is still in progress try to sweep pages on the main thread.
2569     int free_chunk = collector->SweepInParallel(this, size_in_bytes);
2570     collector->RefillFreeList(this);
2571     if (free_chunk >= size_in_bytes) {
2572       HeapObject* object = free_list_.Allocate(size_in_bytes);
2573       // We should be able to allocate an object here since we just freed that
2574       // much memory.
2575       DCHECK(object != NULL);
2576       if (object != NULL) return object;
2577     }
2578   }
2579
2580   // Free list allocation failed and there is no next page.  Fail if we have
2581   // hit the old generation size limit that should cause a garbage
2582   // collection.
2583   if (!heap()->always_allocate() &&
2584       heap()->OldGenerationAllocationLimitReached()) {
2585     // If sweeper threads are active, wait for them at that point and steal
2586     // elements from their free-lists.
2587     HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2588     if (object != NULL) return object;
2589   }
2590
2591   // Try to expand the space and allocate in the new next page.
2592   if (Expand()) {
2593     DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
2594     return free_list_.Allocate(size_in_bytes);
2595   }
2596
2597   // If sweeper threads are active, wait for them at that point and steal
2598   // elements from their free-lists. Allocation may still fail here, which
2599   // would indicate that there is not enough memory for the given allocation.
2600   return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
2601 }
2602
2603
2604 #ifdef DEBUG
2605 void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2606   CommentStatistic* comments_statistics =
2607       isolate->paged_space_comments_statistics();
2608   ReportCodeKindStatistics(isolate->code_kind_statistics());
2609   PrintF(
2610       "Code comment statistics (\"   [ comment-txt   :    size/   "
2611       "count  (average)\"):\n");
2612   for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2613     const CommentStatistic& cs = comments_statistics[i];
2614     if (cs.size > 0) {
2615       PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
2616              cs.size / cs.count);
2617     }
2618   }
2619   PrintF("\n");
2620 }
2621
2622
2623 void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2624   CommentStatistic* comments_statistics =
2625       isolate->paged_space_comments_statistics();
2626   ClearCodeKindStatistics(isolate->code_kind_statistics());
2627   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2628     comments_statistics[i].Clear();
2629   }
2630   comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2631   comments_statistics[CommentStatistic::kMaxComments].size = 0;
2632   comments_statistics[CommentStatistic::kMaxComments].count = 0;
2633 }
2634
2635
2636 // Adds a comment to the 'comment_statistics' table. Performance OK as long as
2637 // 'kMaxComments' is small.
2638 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2639   CommentStatistic* comments_statistics =
2640       isolate->paged_space_comments_statistics();
2641   // Do not count empty comments
2642   if (delta <= 0) return;
2643   CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2644   // Search for a free or matching entry in 'comments_statistics': 'cs'
2645   // points to result.
2646   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2647     if (comments_statistics[i].comment == NULL) {
2648       cs = &comments_statistics[i];
2649       cs->comment = comment;
2650       break;
2651     } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2652       cs = &comments_statistics[i];
2653       break;
2654     }
2655   }
2656   // Update entry for 'comment'
2657   cs->size += delta;
2658   cs->count += 1;
2659 }
2660
2661
2662 // Call for each nested comment start (start marked with '[ xxx', end marked
2663 // with ']').  RelocIterator 'it' must point to a comment reloc info.
2664 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2665   DCHECK(!it->done());
2666   DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
2667   const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2668   if (tmp[0] != '[') {
2669     // Not a nested comment; skip
2670     return;
2671   }
2672
2673   // Search for end of nested comment or a new nested comment
2674   const char* const comment_txt =
2675       reinterpret_cast<const char*>(it->rinfo()->data());
2676   const byte* prev_pc = it->rinfo()->pc();
2677   int flat_delta = 0;
2678   it->next();
2679   while (true) {
2680     // All nested comments must be terminated properly, and therefore exit
2681     // from loop.
2682     DCHECK(!it->done());
2683     if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2684       const char* const txt =
2685           reinterpret_cast<const char*>(it->rinfo()->data());
2686       flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2687       if (txt[0] == ']') break;  // End of nested comment
2688       // A new comment
2689       CollectCommentStatistics(isolate, it);
2690       // Skip code that was covered with previous comment
2691       prev_pc = it->rinfo()->pc();
2692     }
2693     it->next();
2694   }
2695   EnterComment(isolate, comment_txt, flat_delta);
2696 }
2697
2698
2699 // Collects code size statistics:
2700 // - by code kind
2701 // - by code comment
2702 void PagedSpace::CollectCodeStatistics() {
2703   Isolate* isolate = heap()->isolate();
2704   HeapObjectIterator obj_it(this);
2705   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2706     if (obj->IsCode()) {
2707       Code* code = Code::cast(obj);
2708       isolate->code_kind_statistics()[code->kind()] += code->Size();
2709       RelocIterator it(code);
2710       int delta = 0;
2711       const byte* prev_pc = code->instruction_start();
2712       while (!it.done()) {
2713         if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2714           delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2715           CollectCommentStatistics(isolate, &it);
2716           prev_pc = it.rinfo()->pc();
2717         }
2718         it.next();
2719       }
2720
2721       DCHECK(code->instruction_start() <= prev_pc &&
2722              prev_pc <= code->instruction_end());
2723       delta += static_cast<int>(code->instruction_end() - prev_pc);
2724       EnterComment(isolate, "NoComment", delta);
2725     }
2726   }
2727 }
2728
2729
2730 void PagedSpace::ReportStatistics() {
2731   int pct = static_cast<int>(Available() * 100 / Capacity());
2732   PrintF("  capacity: %" V8_PTR_PREFIX
2733          "d"
2734          ", waste: %" V8_PTR_PREFIX
2735          "d"
2736          ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2737          Capacity(), Waste(), Available(), pct);
2738
2739   if (!swept_precisely_) return;
2740   ClearHistograms(heap()->isolate());
2741   HeapObjectIterator obj_it(this);
2742   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2743     CollectHistogramInfo(obj);
2744   ReportHistogram(heap()->isolate(), true);
2745 }
2746 #endif
2747
2748
2749 // -----------------------------------------------------------------------------
2750 // MapSpace implementation
2751 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2752 // there is at least one non-inlined virtual function. I would prefer to hide
2753 // the VerifyObject definition behind VERIFY_HEAP.
2754
2755 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
2756
2757
2758 // -----------------------------------------------------------------------------
2759 // CellSpace and PropertyCellSpace implementation
2760 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2761 // there is at least one non-inlined virtual function. I would prefer to hide
2762 // the VerifyObject definition behind VERIFY_HEAP.
2763
2764 void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); }
2765
2766
2767 void PropertyCellSpace::VerifyObject(HeapObject* object) {
2768   CHECK(object->IsPropertyCell());
2769 }
2770
2771
2772 // -----------------------------------------------------------------------------
2773 // LargeObjectIterator
2774
2775 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2776   current_ = space->first_page_;
2777   size_func_ = NULL;
2778 }
2779
2780
2781 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2782                                          HeapObjectCallback size_func) {
2783   current_ = space->first_page_;
2784   size_func_ = size_func;
2785 }
2786
2787
2788 HeapObject* LargeObjectIterator::Next() {
2789   if (current_ == NULL) return NULL;
2790
2791   HeapObject* object = current_->GetObject();
2792   current_ = current_->next_page();
2793   return object;
2794 }
2795
2796
2797 // -----------------------------------------------------------------------------
2798 // LargeObjectSpace
2799 static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
2800
2801
2802 LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
2803                                    AllocationSpace id)
2804     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
2805       max_capacity_(max_capacity),
2806       first_page_(NULL),
2807       size_(0),
2808       page_count_(0),
2809       objects_size_(0),
2810       chunk_map_(ComparePointers, 1024) {}
2811
2812
2813 bool LargeObjectSpace::SetUp() {
2814   first_page_ = NULL;
2815   size_ = 0;
2816   maximum_committed_ = 0;
2817   page_count_ = 0;
2818   objects_size_ = 0;
2819   chunk_map_.Clear();
2820   return true;
2821 }
2822
2823
2824 void LargeObjectSpace::TearDown() {
2825   while (first_page_ != NULL) {
2826     LargePage* page = first_page_;
2827     first_page_ = first_page_->next_page();
2828     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2829
2830     ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2831     heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2832         space, kAllocationActionFree, page->size());
2833     heap()->isolate()->memory_allocator()->Free(page);
2834   }
2835   SetUp();
2836 }
2837
2838
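// Allocates a large object on a page of its own. The allocation is retried
// (fails) if an old-generation GC is due or the space would exceed
// max_capacity_. On success the new page is prepended to the page list and
// every kAlignment-aligned chunk it covers is registered in chunk_map_ so
// that FindPage() can locate it.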
2839 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
2840                                                Executability executable) {
2841   // Check if we want to force a GC before growing the old space further.
2842   // If so, fail the allocation.
2843   if (!heap()->always_allocate() &&
2844       heap()->OldGenerationAllocationLimitReached()) {
2845     return AllocationResult::Retry(identity());
2846   }
2847
2848   if (Size() + object_size > max_capacity_) {
2849     return AllocationResult::Retry(identity());
2850   }
2851
2852   LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
2853       object_size, this, executable);
2854   if (page == NULL) return AllocationResult::Retry(identity());
2855   DCHECK(page->area_size() >= object_size);
2856
2857   size_ += static_cast<int>(page->size());
2858   objects_size_ += object_size;
2859   page_count_++;
2860   page->set_next_page(first_page_);
2861   first_page_ = page;
2862
2863   if (size_ > maximum_committed_) {
2864     maximum_committed_ = size_;
2865   }
2866
2867   // Register all MemoryChunk::kAlignment-aligned chunks covered by
2868   // this large page in the chunk map.
2869   uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2870   uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2871   for (uintptr_t key = base; key <= limit; key++) {
2872     HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2873                                               static_cast<uint32_t>(key), true);
2874     DCHECK(entry != NULL);
2875     entry->value = page;
2876   }
2877
2878   HeapObject* object = page->GetObject();
2879
2880   if (Heap::ShouldZapGarbage()) {
2881     // Make the object consistent so the heap can be verified in OldSpaceStep.
2882     // We only need to do this in debug builds or if verify_heap is on.
2883     reinterpret_cast<Object**>(object->address())[0] =
2884         heap()->fixed_array_map();
2885     reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2886   }
2887
2888   heap()->incremental_marking()->OldSpaceStep(object_size);
2889   return object;
2890 }
2891
2892
2893 size_t LargeObjectSpace::CommittedPhysicalMemory() {
2894   if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2895   size_t size = 0;
2896   LargePage* current = first_page_;
2897   while (current != NULL) {
2898     size += current->CommittedPhysicalMemory();
2899     current = current->next_page();
2900   }
2901   return size;
2902 }
2903
2904
2905 // GC support
2906 Object* LargeObjectSpace::FindObject(Address a) {
2907   LargePage* page = FindPage(a);
2908   if (page != NULL) {
2909     return page->GetObject();
2910   }
2911   return Smi::FromInt(0);  // Signaling not found.
2912 }
2913
2914
2915 LargePage* LargeObjectSpace::FindPage(Address a) {
2916   uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
2917   HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2918                                         static_cast<uint32_t>(key), false);
2919   if (e != NULL) {
2920     DCHECK(e->value != NULL);
2921     LargePage* page = reinterpret_cast<LargePage*>(e->value);
2922     DCHECK(page->is_valid());
2923     if (page->Contains(a)) {
2924       return page;
2925     }
2926   }
2927   return NULL;
2928 }
2929
2930
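// Sweeps the large object space. Marked pages survive and have their mark
// bit, progress bar and live-byte count reset; unmarked pages are unlinked
// from the page list, removed from chunk_map_ and released (pages that may
// contain pointers are queued via QueueMemoryChunkForFree instead of being
// freed directly).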
2931 void LargeObjectSpace::FreeUnmarkedObjects() {
2932   LargePage* previous = NULL;
2933   LargePage* current = first_page_;
2934   while (current != NULL) {
2935     HeapObject* object = current->GetObject();
2936     // Can this large page contain pointers to non-trivial objects?  No other
2937     // pointer object is this big.
2938     bool is_pointer_object = object->IsFixedArray();
2939     MarkBit mark_bit = Marking::MarkBitFrom(object);
2940     if (mark_bit.Get()) {
2941       mark_bit.Clear();
2942       Page::FromAddress(object->address())->ResetProgressBar();
2943       Page::FromAddress(object->address())->ResetLiveBytes();
2944       previous = current;
2945       current = current->next_page();
2946     } else {
2947       LargePage* page = current;
2948       // Cut the chunk out from the chunk list.
2949       current = current->next_page();
2950       if (previous == NULL) {
2951         first_page_ = current;
2952       } else {
2953         previous->set_next_page(current);
2954       }
2955
2956       // Free the chunk.
2957       heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
2958                                                              heap()->isolate());
2959       size_ -= static_cast<int>(page->size());
2960       objects_size_ -= object->Size();
2961       page_count_--;
2962
2963       // Remove entries belonging to this page.
2964       // Use variable alignment to help pass length check (<= 80 characters)
2965       // of single line in tools/presubmit.py.
2966       const intptr_t alignment = MemoryChunk::kAlignment;
2967       uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
2968       uintptr_t limit = base + (page->size() - 1) / alignment;
2969       for (uintptr_t key = base; key <= limit; key++) {
2970         chunk_map_.Remove(reinterpret_cast<void*>(key),
2971                           static_cast<uint32_t>(key));
2972       }
2973
2974       if (is_pointer_object) {
2975         heap()->QueueMemoryChunkForFree(page);
2976       } else {
2977         heap()->isolate()->memory_allocator()->Free(page);
2978       }
2979     }
2980   }
2981   heap()->FreeQueuedChunks();
2982 }
2983
2984
2985 bool LargeObjectSpace::Contains(HeapObject* object) {
2986   Address address = object->address();
2987   MemoryChunk* chunk = MemoryChunk::FromAddress(address);
2988
2989   bool owned = (chunk->owner() == this);
2990
2991   SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
2992
2993   return owned;
2994 }
2995
2996
2997 #ifdef VERIFY_HEAP
2998 // We do not assume that the large object iterator works, because it depends
2999 // on the invariants we are checking during verification.
3000 void LargeObjectSpace::Verify() {
3001   for (LargePage* chunk = first_page_; chunk != NULL;
3002        chunk = chunk->next_page()) {
3003     // Each chunk contains an object that starts at the large object page's
3004     // object area start.
3005     HeapObject* object = chunk->GetObject();
3006     Page* page = Page::FromAddress(object->address());
3007     CHECK(object->address() == page->area_start());
3008
3009     // The first word should be a map, and we expect all map pointers to be
3010     // in map space.
3011     Map* map = object->map();
3012     CHECK(map->IsMap());
3013     CHECK(heap()->map_space()->Contains(map));
3014
3015     // We have only code, sequential strings, external strings
3016     // (sequential strings that have been morphed into external
3017     // strings), fixed arrays, byte arrays, and constant pool arrays in the
3018     // large object space.
3019     CHECK(object->IsCode() || object->IsSeqString() ||
3020           object->IsExternalString() || object->IsFixedArray() ||
3021           object->IsFixedDoubleArray() || object->IsByteArray() ||
3022           object->IsConstantPoolArray());
3023
3024     // The object itself should look OK.
3025     object->ObjectVerify();
3026
3027     // Byte arrays and strings don't have interior pointers.
3028     if (object->IsCode()) {
3029       VerifyPointersVisitor code_visitor;
3030       object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
3031     } else if (object->IsFixedArray()) {
3032       FixedArray* array = FixedArray::cast(object);
3033       for (int j = 0; j < array->length(); j++) {
3034         Object* element = array->get(j);
3035         if (element->IsHeapObject()) {
3036           HeapObject* element_object = HeapObject::cast(element);
3037           CHECK(heap()->Contains(element_object));
3038           CHECK(element_object->map()->IsMap());
3039         }
3040       }
3041     }
3042   }
3043 }
3044 #endif
3045
3046
3047 #ifdef DEBUG
3048 void LargeObjectSpace::Print() {
3049   OFStream os(stdout);
3050   LargeObjectIterator it(this);
3051   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3052     obj->Print(os);
3053   }
3054 }
3055
3056
3057 void LargeObjectSpace::ReportStatistics() {
3058   PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
3059   int num_objects = 0;
3060   ClearHistograms(heap()->isolate());
3061   LargeObjectIterator it(this);
3062   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3063     num_objects++;
3064     CollectHistogramInfo(obj);
3065   }
3066
3067   PrintF(
3068       "  number of objects %d, "
3069       "size of objects %" V8_PTR_PREFIX "d\n",
3070       num_objects, objects_size_);
3071   if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3072 }
3073
3074
3075 void LargeObjectSpace::CollectCodeStatistics() {
3076   Isolate* isolate = heap()->isolate();
3077   LargeObjectIterator obj_it(this);
3078   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
3079     if (obj->IsCode()) {
3080       Code* code = Code::cast(obj);
3081       isolate->code_kind_statistics()[code->kind()] += code->Size();
3082     }
3083   }
3084 }
3085
3086
3087 void Page::Print() {
3088   // Make a best-effort to print the objects in the page.
3089   PrintF("Page@%p in %s\n", this->address(),
3090          AllocationSpaceName(this->owner()->identity()));
3091   printf(" --------------------------------------\n");
3092   HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
3093   unsigned mark_size = 0;
3094   for (HeapObject* object = objects.Next(); object != NULL;
3095        object = objects.Next()) {
3096     bool is_marked = Marking::MarkBitFrom(object).Get();
3097     PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
3098     if (is_marked) {
3099       mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
3100     }
3101     object->ShortPrint();
3102     PrintF("\n");
3103   }
3104   printf(" --------------------------------------\n");
3105   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3106 }
3107
3108 #endif  // DEBUG
3109 }
3110 }  // namespace v8::internal