1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "macro-assembler.h"
31 #include "mark-compact.h"
32 #include "msan.h"
33 #include "platform.h"
34
35 namespace v8 {
36 namespace internal {
37
38
39 // ----------------------------------------------------------------------------
40 // HeapObjectIterator
41
42 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
43   // You can't actually iterate over the anchor page.  It is not a real page,
44   // just an anchor for the doubly-linked page list.  Initialize as if we had
45   // reached the end of the anchor page, so that the first iteration moves on
46   // to the first real page.
47   Initialize(space,
48              NULL,
49              NULL,
50              kAllPagesInSpace,
51              NULL);
52 }
53
54
55 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
56                                        HeapObjectCallback size_func) {
57   // You can't actually iterate over the anchor page.  It is not a real page,
58   // just an anchor for the doubly-linked page list.  Initialize the current
59   // address and end as NULL, so that the first iteration moves on
60   // to the first real page.
61   Initialize(space,
62              NULL,
63              NULL,
64              kAllPagesInSpace,
65              size_func);
66 }
67
68
69 HeapObjectIterator::HeapObjectIterator(Page* page,
70                                        HeapObjectCallback size_func) {
71   Space* owner = page->owner();
72   ASSERT(owner == page->heap()->old_pointer_space() ||
73          owner == page->heap()->old_data_space() ||
74          owner == page->heap()->map_space() ||
75          owner == page->heap()->cell_space() ||
76          owner == page->heap()->property_cell_space() ||
77          owner == page->heap()->code_space());
78   Initialize(reinterpret_cast<PagedSpace*>(owner),
79              page->area_start(),
80              page->area_end(),
81              kOnePageOnly,
82              size_func);
83   ASSERT(page->WasSweptPrecisely());
84 }
85
86
87 void HeapObjectIterator::Initialize(PagedSpace* space,
88                                     Address cur, Address end,
89                                     HeapObjectIterator::PageMode mode,
90                                     HeapObjectCallback size_f) {
91   // Check that we actually can iterate this space.
92   ASSERT(!space->was_swept_conservatively());
93
94   space_ = space;
95   cur_addr_ = cur;
96   cur_end_ = end;
97   page_mode_ = mode;
98   size_func_ = size_f;
99 }
100
101
102 // We have hit the end of the current page and should advance to the next
103 // block of objects, i.e. to the next page of the space.
104 bool HeapObjectIterator::AdvanceToNextPage() {
105   ASSERT(cur_addr_ == cur_end_);
106   if (page_mode_ == kOnePageOnly) return false;
107   Page* cur_page;
108   if (cur_addr_ == NULL) {
109     cur_page = space_->anchor();
110   } else {
111     cur_page = Page::FromAddress(cur_addr_ - 1);
112     ASSERT(cur_addr_ == cur_page->area_end());
113   }
114   cur_page = cur_page->next_page();
115   if (cur_page == space_->anchor()) return false;
116   cur_addr_ = cur_page->area_start();
117   cur_end_ = cur_page->area_end();
118   ASSERT(cur_page->WasSweptPrecisely());
119   return true;
120 }
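
    // A minimal usage sketch (not from the original source), assuming 'heap'
    // is a Heap* for the current isolate: walking every object in a paged
    // space, the same pattern PagedSpace::FindObject uses further below.
    //
    //   HeapObjectIterator it(heap->old_pointer_space());
    //   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    //     // Inspect obj; the space must have been swept precisely.
    //   }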
121
122
123 // -----------------------------------------------------------------------------
124 // CodeRange
125
126
127 CodeRange::CodeRange(Isolate* isolate)
128     : isolate_(isolate),
129       code_range_(NULL),
130       free_list_(0),
131       allocation_list_(0),
132       current_allocation_block_index_(0) {
133 }
134
135
136 bool CodeRange::SetUp(const size_t requested) {
137   ASSERT(code_range_ == NULL);
138
139   code_range_ = new VirtualMemory(requested);
140   CHECK(code_range_ != NULL);
141   if (!code_range_->IsReserved()) {
142     delete code_range_;
143     code_range_ = NULL;
144     return false;
145   }
146
147   // We are sure that we have mapped a block of the requested size.
148   ASSERT(code_range_->size() == requested);
149   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
150   Address base = reinterpret_cast<Address>(code_range_->address());
151   Address aligned_base =
152       RoundUp(reinterpret_cast<Address>(code_range_->address()),
153               MemoryChunk::kAlignment);
154   size_t size = code_range_->size() - (aligned_base - base);
155   allocation_list_.Add(FreeBlock(aligned_base, size));
156   current_allocation_block_index_ = 0;
157   return true;
158 }
159
160
161 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
162                                        const FreeBlock* right) {
163   // The entire point of CodeRange is that the difference between two
164   // addresses in the range can be represented as a signed 32-bit int,
165   // so the cast is semantically correct.
166   return static_cast<int>(left->start - right->start);
167 }
168
169
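    // Advance current_allocation_block_index_ to a block that can hold at
    // least 'requested' bytes.  If no such block remains, merge the free list
    // back into the allocation list and search again; if that also fails, the
    // code range is exhausted and V8::FatalProcessOutOfMemory is called.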
170 void CodeRange::GetNextAllocationBlock(size_t requested) {
171   for (current_allocation_block_index_++;
172        current_allocation_block_index_ < allocation_list_.length();
173        current_allocation_block_index_++) {
174     if (requested <= allocation_list_[current_allocation_block_index_].size) {
175       return;  // Found a large enough allocation block.
176     }
177   }
178
179   // Sort and merge the free blocks on the free list and the allocation list.
180   free_list_.AddAll(allocation_list_);
181   allocation_list_.Clear();
182   free_list_.Sort(&CompareFreeBlockAddress);
183   for (int i = 0; i < free_list_.length();) {
184     FreeBlock merged = free_list_[i];
185     i++;
186     // Add adjacent free blocks to the current merged block.
187     while (i < free_list_.length() &&
188            free_list_[i].start == merged.start + merged.size) {
189       merged.size += free_list_[i].size;
190       i++;
191     }
192     if (merged.size > 0) {
193       allocation_list_.Add(merged);
194     }
195   }
196   free_list_.Clear();
197
198   for (current_allocation_block_index_ = 0;
199        current_allocation_block_index_ < allocation_list_.length();
200        current_allocation_block_index_++) {
201     if (requested <= allocation_list_[current_allocation_block_index_].size) {
202       return;  // Found a large enough allocation block.
203     }
204   }
205
206   // Code range is full or too fragmented.
207   V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
208 }
209
210
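    // Carve a chunk out of the current allocation block and commit its first
    // 'commit_size' bytes as executable memory.  The number of bytes actually
    // taken from the block is returned through 'allocated' and may exceed
    // 'requested_size' when only a small, unusable tail would otherwise
    // remain.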
211 Address CodeRange::AllocateRawMemory(const size_t requested_size,
212                                      const size_t commit_size,
213                                      size_t* allocated) {
214   ASSERT(commit_size <= requested_size);
215   ASSERT(current_allocation_block_index_ < allocation_list_.length());
216   if (requested_size > allocation_list_[current_allocation_block_index_].size) {
217     // Find an allocation block large enough.  GetNextAllocationBlock may
218     // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
219     GetNextAllocationBlock(requested_size);
220   }
221   // Commit the requested memory at the start of the current allocation block.
222   size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
223   FreeBlock current = allocation_list_[current_allocation_block_index_];
224   if (aligned_requested >= (current.size - Page::kPageSize)) {
225     // Don't leave a small free block, useless for a large object or chunk.
226     *allocated = current.size;
227   } else {
228     *allocated = aligned_requested;
229   }
230   ASSERT(*allocated <= current.size);
231   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
232   if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
233                                                             current.start,
234                                                             commit_size,
235                                                             *allocated)) {
236     *allocated = 0;
237     return NULL;
238   }
239   allocation_list_[current_allocation_block_index_].start += *allocated;
240   allocation_list_[current_allocation_block_index_].size -= *allocated;
241   if (*allocated == current.size) {
242     GetNextAllocationBlock(0);  // This block is used up, get the next one.
243   }
244   return current.start;
245 }
246
247
248 bool CodeRange::CommitRawMemory(Address start, size_t length) {
249   return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
250 }
251
252
253 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
254   return code_range_->Uncommit(start, length);
255 }
256
257
258 void CodeRange::FreeRawMemory(Address address, size_t length) {
259   ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
260   free_list_.Add(FreeBlock(address, length));
261   code_range_->Uncommit(address, length);
262 }
263
264
265 void CodeRange::TearDown() {
266   delete code_range_;  // Frees all memory in the virtual memory range.
267   code_range_ = NULL;
268   free_list_.Free();
269   allocation_list_.Free();
270 }
271
272
273 // -----------------------------------------------------------------------------
274 // MemoryAllocator
275 //
276
277 MemoryAllocator::MemoryAllocator(Isolate* isolate)
278     : isolate_(isolate),
279       capacity_(0),
280       capacity_executable_(0),
281       size_(0),
282       size_executable_(0),
283       lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
284       highest_ever_allocated_(reinterpret_cast<void*>(0)) {
285 }
286
287
288 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
289   capacity_ = RoundUp(capacity, Page::kPageSize);
290   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
291   ASSERT_GE(capacity_, capacity_executable_);
292
293   size_ = 0;
294   size_executable_ = 0;
295
296   return true;
297 }
298
299
300 void MemoryAllocator::TearDown() {
301   // Check that spaces were torn down before MemoryAllocator.
302   ASSERT(size_ == 0);
303   // TODO(gc) this will be true again when we fix FreeMemory.
304   // ASSERT(size_executable_ == 0);
305   capacity_ = 0;
306   capacity_executable_ = 0;
307 }
308
309
310 bool MemoryAllocator::CommitMemory(Address base,
311                                    size_t size,
312                                    Executability executable) {
313   if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
314     return false;
315   }
316   UpdateAllocatedSpaceLimits(base, base + size);
317   return true;
318 }
319
320
321 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
322                                  Executability executable) {
323   // TODO(gc) make code_range part of memory allocator?
324   ASSERT(reservation->IsReserved());
325   size_t size = reservation->size();
326   ASSERT(size_ >= size);
327   size_ -= size;
328
329   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
330
331   if (executable == EXECUTABLE) {
332     ASSERT(size_executable_ >= size);
333     size_executable_ -= size;
334   }
335   // Code which is part of the code-range does not have its own VirtualMemory.
336   ASSERT(!isolate_->code_range()->contains(
337       static_cast<Address>(reservation->address())));
338   ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
339   reservation->Release();
340 }
341
342
343 void MemoryAllocator::FreeMemory(Address base,
344                                  size_t size,
345                                  Executability executable) {
346   // TODO(gc) make code_range part of memory allocator?
347   ASSERT(size_ >= size);
348   size_ -= size;
349
350   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
351
352   if (executable == EXECUTABLE) {
353     ASSERT(size_executable_ >= size);
354     size_executable_ -= size;
355   }
356   if (isolate_->code_range()->contains(static_cast<Address>(base))) {
357     ASSERT(executable == EXECUTABLE);
358     isolate_->code_range()->FreeRawMemory(base, size);
359   } else {
360     ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
361     bool result = VirtualMemory::ReleaseRegion(base, size);
362     USE(result);
363     ASSERT(result);
364   }
365 }
366
367
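    // Reserve (but do not commit) 'size' bytes and hand ownership of the
    // reservation to 'controller'.  The returned base address is rounded up
    // to 'alignment'.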
368 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
369                                               size_t alignment,
370                                               VirtualMemory* controller) {
371   VirtualMemory reservation(size, alignment);
372
373   if (!reservation.IsReserved()) return NULL;
374   size_ += reservation.size();
375   Address base = RoundUp(static_cast<Address>(reservation.address()),
376                          alignment);
377   controller->TakeControl(&reservation);
378   return base;
379 }
380
381
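    // Reserve 'reserve_size' bytes and commit the first 'commit_size' bytes.
    // Executable mappings are committed through CommitExecutableMemory, which
    // also installs guard pages.  On failure the whole reservation is released
    // and NULL is returned.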
382 Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
383                                                size_t commit_size,
384                                                size_t alignment,
385                                                Executability executable,
386                                                VirtualMemory* controller) {
387   ASSERT(commit_size <= reserve_size);
388   VirtualMemory reservation;
389   Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
390   if (base == NULL) return NULL;
391
392   if (executable == EXECUTABLE) {
393     if (!CommitExecutableMemory(&reservation,
394                                 base,
395                                 commit_size,
396                                 reserve_size)) {
397       base = NULL;
398     }
399   } else {
400     if (reservation.Commit(base, commit_size, false)) {
401       UpdateAllocatedSpaceLimits(base, base + commit_size);
402     } else {
403       base = NULL;
404     }
405   }
406
407   if (base == NULL) {
408     // Failed to commit the body.  Release the mapping and any partially
409     // committed regions inside it.
410     reservation.Release();
411     return NULL;
412   }
413
414   controller->TakeControl(&reservation);
415   return base;
416 }
417
418
419 void Page::InitializeAsAnchor(PagedSpace* owner) {
420   set_owner(owner);
421   set_prev_page(this);
422   set_next_page(this);
423 }
424
425
426 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
427                                        Address start,
428                                        SemiSpace* semi_space) {
429   Address area_start = start + NewSpacePage::kObjectStartOffset;
430   Address area_end = start + Page::kPageSize;
431
432   MemoryChunk* chunk = MemoryChunk::Initialize(heap,
433                                                start,
434                                                Page::kPageSize,
435                                                area_start,
436                                                area_end,
437                                                NOT_EXECUTABLE,
438                                                semi_space);
439   chunk->set_next_chunk(NULL);
440   chunk->set_prev_chunk(NULL);
441   chunk->initialize_scan_on_scavenge(true);
442   bool in_to_space = (semi_space->id() != kFromSpace);
443   chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
444                              : MemoryChunk::IN_FROM_SPACE);
445   ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
446                                        : MemoryChunk::IN_TO_SPACE));
447   NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
448   heap->incremental_marking()->SetNewSpacePageFlags(page);
449   return page;
450 }
451
452
453 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
454   set_owner(semi_space);
455   set_next_chunk(this);
456   set_prev_chunk(this);
457   // Clearing all flags marks this invalid page as not being in new-space.
458   // All real new-space pages will be in new-space.
459   SetFlags(0, ~0);
460 }
461
462
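    // Fill in the header fields of a chunk whose memory has already been
    // reserved (and, as far as needed, committed) by the caller starting at
    // 'base'.  Only bookkeeping is initialized here.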
463 MemoryChunk* MemoryChunk::Initialize(Heap* heap,
464                                      Address base,
465                                      size_t size,
466                                      Address area_start,
467                                      Address area_end,
468                                      Executability executable,
469                                      Space* owner) {
470   MemoryChunk* chunk = FromAddress(base);
471
472   ASSERT(base == chunk->address());
473
474   chunk->heap_ = heap;
475   chunk->size_ = size;
476   chunk->area_start_ = area_start;
477   chunk->area_end_ = area_end;
478   chunk->flags_ = 0;
479   chunk->set_owner(owner);
480   chunk->InitializeReservedMemory();
481   chunk->slots_buffer_ = NULL;
482   chunk->skip_list_ = NULL;
483   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
484   chunk->progress_bar_ = 0;
485   chunk->high_water_mark_ = static_cast<int>(area_start - base);
486   chunk->parallel_sweeping_ = 0;
487   chunk->available_in_small_free_list_ = 0;
488   chunk->available_in_medium_free_list_ = 0;
489   chunk->available_in_large_free_list_ = 0;
490   chunk->available_in_huge_free_list_ = 0;
491   chunk->non_available_small_blocks_ = 0;
492   chunk->ResetLiveBytes();
493   Bitmap::Clear(chunk);
494   chunk->initialize_scan_on_scavenge(false);
495   chunk->SetFlag(WAS_SWEPT_PRECISELY);
496
497   ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
498   ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
499
500   if (executable == EXECUTABLE) {
501     chunk->SetFlag(IS_EXECUTABLE);
502   }
503
504   if (owner == heap->old_data_space()) {
505     chunk->SetFlag(CONTAINS_ONLY_DATA);
506   }
507
508   return chunk;
509 }
510
511
512 // Commit MemoryChunk area to the requested size.
513 bool MemoryChunk::CommitArea(size_t requested) {
514   size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
515                       MemoryAllocator::CodePageGuardSize() : 0;
516   size_t header_size = area_start() - address() - guard_size;
517   size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
518   size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
519                                   OS::CommitPageSize());
520
521   if (commit_size > committed_size) {
522     // Commit size should be less than or equal to the reserved size.
523     ASSERT(commit_size <= size() - 2 * guard_size);
524     // Append the committed area.
525     Address start = address() + committed_size + guard_size;
526     size_t length = commit_size - committed_size;
527     if (reservation_.IsReserved()) {
528       Executability executable = IsFlagSet(IS_EXECUTABLE)
529           ? EXECUTABLE : NOT_EXECUTABLE;
530       if (!heap()->isolate()->memory_allocator()->CommitMemory(
531               start, length, executable)) {
532         return false;
533       }
534     } else {
535       CodeRange* code_range = heap_->isolate()->code_range();
536       ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
537       if (!code_range->CommitRawMemory(start, length)) return false;
538     }
539
540     if (Heap::ShouldZapGarbage()) {
541       heap_->isolate()->memory_allocator()->ZapBlock(start, length);
542     }
543   } else if (commit_size < committed_size) {
544     ASSERT(commit_size > 0);
545     // Shrink the committed area.
546     size_t length = committed_size - commit_size;
547     Address start = address() + committed_size + guard_size - length;
548     if (reservation_.IsReserved()) {
549       if (!reservation_.Uncommit(start, length)) return false;
550     } else {
551       CodeRange* code_range = heap_->isolate()->code_range();
552       ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
553       if (!code_range->UncommitRawMemory(start, length)) return false;
554     }
555   }
556
557   area_end_ = area_start_ + requested;
558   return true;
559 }
560
561
562 void MemoryChunk::InsertAfter(MemoryChunk* other) {
563   next_chunk_ = other->next_chunk_;
564   prev_chunk_ = other;
565
566   // This memory barrier is needed since concurrent sweeper threads may iterate
567   // over the list of pages while a new page is inserted.
568   // TODO(hpayer): find a cleaner way to guarantee that the page list can be
569   // expanded concurrently.
570   MemoryBarrier();
571
572   // The following two write operations can take effect in arbitrary order
573   // since pages are always iterated by the sweeper threads in LIFO order,
574   // i.e., the inserted page becomes visible to the sweeper threads only after
575   // other->next_chunk_ = this;
576   other->next_chunk_->prev_chunk_ = this;
577   other->next_chunk_ = this;
578 }
579
580
581 void MemoryChunk::Unlink() {
582   if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
583     heap_->decrement_scan_on_scavenge_pages();
584     ClearFlag(SCAN_ON_SCAVENGE);
585   }
586   next_chunk_->prev_chunk_ = prev_chunk_;
587   prev_chunk_->next_chunk_ = next_chunk_;
588   prev_chunk_ = NULL;
589   next_chunk_ = NULL;
590 }
591
592
593 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
594                                             intptr_t commit_area_size,
595                                             Executability executable,
596                                             Space* owner) {
597   ASSERT(commit_area_size <= reserve_area_size);
598
599   size_t chunk_size;
600   Heap* heap = isolate_->heap();
601   Address base = NULL;
602   VirtualMemory reservation;
603   Address area_start = NULL;
604   Address area_end = NULL;
605
606   //
607   // MemoryChunk layout:
608   //
609   //             Executable
610   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
611   // |           Header           |
612   // +----------------------------+<- base + CodePageGuardStartOffset
613   // |           Guard            |
614   // +----------------------------+<- area_start_
615   // |           Area             |
616   // +----------------------------+<- area_end_ (area_start + commit_area_size)
617   // |   Committed but not used   |
618   // +----------------------------+<- aligned at OS page boundary
619   // | Reserved but not committed |
620   // +----------------------------+<- aligned at OS page boundary
621   // |           Guard            |
622   // +----------------------------+<- base + chunk_size
623   //
624   //           Non-executable
625   // +----------------------------+<- base aligned with MemoryChunk::kAlignment
626   // |          Header            |
627   // +----------------------------+<- area_start_ (base + kObjectStartOffset)
628   // |           Area             |
629   // +----------------------------+<- area_end_ (area_start + commit_area_size)
630   // |  Committed but not used    |
631   // +----------------------------+<- aligned at OS page boundary
632   // | Reserved but not committed |
633   // +----------------------------+<- base + chunk_size
634   //
635
636   if (executable == EXECUTABLE) {
637     chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
638                          OS::CommitPageSize()) + CodePageGuardSize();
639
640     // Check executable memory limit.
641     if (size_executable_ + chunk_size > capacity_executable_) {
642       LOG(isolate_,
643           StringEvent("MemoryAllocator::AllocateRawMemory",
644                       "V8 Executable Allocation capacity exceeded"));
645       return NULL;
646     }
647
648     // Size of header (not executable) plus area (executable).
649     size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
650                                  OS::CommitPageSize());
651     // Allocate executable memory either from code range or from the
652     // OS.
653     if (isolate_->code_range()->exists()) {
654       base = isolate_->code_range()->AllocateRawMemory(chunk_size,
655                                                        commit_size,
656                                                        &chunk_size);
657       ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
658                        MemoryChunk::kAlignment));
659       if (base == NULL) return NULL;
660       size_ += chunk_size;
661       // Update executable memory size.
662       size_executable_ += chunk_size;
663     } else {
664       base = AllocateAlignedMemory(chunk_size,
665                                    commit_size,
666                                    MemoryChunk::kAlignment,
667                                    executable,
668                                    &reservation);
669       if (base == NULL) return NULL;
670       // Update executable memory size.
671       size_executable_ += reservation.size();
672     }
673
674     if (Heap::ShouldZapGarbage()) {
675       ZapBlock(base, CodePageGuardStartOffset());
676       ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
677     }
678
679     area_start = base + CodePageAreaStartOffset();
680     area_end = area_start + commit_area_size;
681   } else {
682     chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
683                          OS::CommitPageSize());
684     size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
685                                  commit_area_size, OS::CommitPageSize());
686     base = AllocateAlignedMemory(chunk_size,
687                                  commit_size,
688                                  MemoryChunk::kAlignment,
689                                  executable,
690                                  &reservation);
691
692     if (base == NULL) return NULL;
693
694     if (Heap::ShouldZapGarbage()) {
695       ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
696     }
697
698     area_start = base + Page::kObjectStartOffset;
699     area_end = area_start + commit_area_size;
700   }
701
702   // Use chunk_size for statistics and callbacks because we assume that they
703   // treat reserved but not-yet committed memory regions of chunks as allocated.
704   isolate_->counters()->memory_allocated()->
705       Increment(static_cast<int>(chunk_size));
706
707   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
708   if (owner != NULL) {
709     ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
710     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
711   }
712
713   MemoryChunk* result = MemoryChunk::Initialize(heap,
714                                                 base,
715                                                 chunk_size,
716                                                 area_start,
717                                                 area_end,
718                                                 executable,
719                                                 owner);
720   result->set_reserved_memory(&reservation);
721   MSAN_MEMORY_IS_INITIALIZED(base, chunk_size);
722   return result;
723 }
724
725
726 void Page::ResetFreeListStatistics() {
727   non_available_small_blocks_ = 0;
728   available_in_small_free_list_ = 0;
729   available_in_medium_free_list_ = 0;
730   available_in_large_free_list_ = 0;
731   available_in_huge_free_list_ = 0;
732 }
733
734
735 Page* MemoryAllocator::AllocatePage(intptr_t size,
736                                     PagedSpace* owner,
737                                     Executability executable) {
738   MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
739
740   if (chunk == NULL) return NULL;
741
742   return Page::Initialize(isolate_->heap(), chunk, executable, owner);
743 }
744
745
746 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
747                                               Space* owner,
748                                               Executability executable) {
749   MemoryChunk* chunk = AllocateChunk(object_size,
750                                      object_size,
751                                      executable,
752                                      owner);
753   if (chunk == NULL) return NULL;
754   return LargePage::Initialize(isolate_->heap(), chunk);
755 }
756
757
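    // Release a chunk: notify the allocation callbacks, let the heap remember
    // the unmapped page, delete the slots buffer and skip list, and finally
    // return the memory either to its own VirtualMemory reservation or to the
    // code range.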
758 void MemoryAllocator::Free(MemoryChunk* chunk) {
759   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
760   if (chunk->owner() != NULL) {
761     ObjectSpace space =
762         static_cast<ObjectSpace>(1 << chunk->owner()->identity());
763     PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
764   }
765
766   isolate_->heap()->RememberUnmappedPage(
767       reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
768
769   delete chunk->slots_buffer();
770   delete chunk->skip_list();
771
772   VirtualMemory* reservation = chunk->reserved_memory();
773   if (reservation->IsReserved()) {
774     FreeMemory(reservation, chunk->executable());
775   } else {
776     FreeMemory(chunk->address(),
777                chunk->size(),
778                chunk->executable());
779   }
780 }
781
782
783 bool MemoryAllocator::CommitBlock(Address start,
784                                   size_t size,
785                                   Executability executable) {
786   if (!CommitMemory(start, size, executable)) return false;
787
788   if (Heap::ShouldZapGarbage()) {
789     ZapBlock(start, size);
790   }
791
792   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
793   return true;
794 }
795
796
797 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
798   if (!VirtualMemory::UncommitRegion(start, size)) return false;
799   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
800   return true;
801 }
802
803
804 void MemoryAllocator::ZapBlock(Address start, size_t size) {
805   for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
806     Memory::Address_at(start + s) = kZapValue;
807   }
808 }
809
810
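    // Invoke every registered callback whose space/action filter matches.  A
    // sketch of a matching callback (hypothetical name; the signature follows
    // from the invocation below):
    //
    //   static void CountChunks(ObjectSpace space, AllocationAction action,
    //                           int size) {
    //     if (action == kAllocationActionAllocate) { /* track growth */ }
    //   }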
811 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
812                                                 AllocationAction action,
813                                                 size_t size) {
814   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
815     MemoryAllocationCallbackRegistration registration =
816       memory_allocation_callbacks_[i];
817     if ((registration.space & space) == space &&
818         (registration.action & action) == action)
819       registration.callback(space, action, static_cast<int>(size));
820   }
821 }
822
823
824 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
825     MemoryAllocationCallback callback) {
826   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
827     if (memory_allocation_callbacks_[i].callback == callback) return true;
828   }
829   return false;
830 }
831
832
833 void MemoryAllocator::AddMemoryAllocationCallback(
834     MemoryAllocationCallback callback,
835     ObjectSpace space,
836     AllocationAction action) {
837   ASSERT(callback != NULL);
838   MemoryAllocationCallbackRegistration registration(callback, space, action);
839   ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
840   return memory_allocation_callbacks_.Add(registration);
841 }
842
843
844 void MemoryAllocator::RemoveMemoryAllocationCallback(
845      MemoryAllocationCallback callback) {
846   ASSERT(callback != NULL);
847   for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
848     if (memory_allocation_callbacks_[i].callback == callback) {
849       memory_allocation_callbacks_.Remove(i);
850       return;
851     }
852   }
853   UNREACHABLE();
854 }
855
856
857 #ifdef DEBUG
858 void MemoryAllocator::ReportStatistics() {
859   float pct = static_cast<float>(capacity_ - size_) / capacity_;
860   PrintF("  capacity: %" V8_PTR_PREFIX "d"
861              ", used: %" V8_PTR_PREFIX "d"
862              ", available: %%%d\n\n",
863          capacity_, size_, static_cast<int>(pct*100));
864 }
865 #endif
866
867
868 int MemoryAllocator::CodePageGuardStartOffset() {
869   // We are guarding code pages: the first OS page after the header
870   // will be protected as non-writable.
871   return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
872 }
873
874
875 int MemoryAllocator::CodePageGuardSize() {
876   return static_cast<int>(OS::CommitPageSize());
877 }
878
879
880 int MemoryAllocator::CodePageAreaStartOffset() {
881   // We are guarding code pages: the first OS page after the header
882   // will be protected as non-writable.
883   return CodePageGuardStartOffset() + CodePageGuardSize();
884 }
885
886
887 int MemoryAllocator::CodePageAreaEndOffset() {
888   // We are guarding code pages: the last OS page will be protected as
889   // non-writable.
890   return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
891 }
892
893
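    // Commit an executable chunk in the four steps that match the layout
    // pictured in AllocateChunk: the non-executable header, a guard page after
    // the header, the executable body, and a trailing guard page just before
    // the end of the reservation.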
894 bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
895                                              Address start,
896                                              size_t commit_size,
897                                              size_t reserved_size) {
898   // Commit page header (not executable).
899   if (!vm->Commit(start,
900                   CodePageGuardStartOffset(),
901                   false)) {
902     return false;
903   }
904
905   // Create guard page after the header.
906   if (!vm->Guard(start + CodePageGuardStartOffset())) {
907     return false;
908   }
909
910   // Commit page body (executable).
911   if (!vm->Commit(start + CodePageAreaStartOffset(),
912                   commit_size - CodePageGuardStartOffset(),
913                   true)) {
914     return false;
915   }
916
917   // Create guard page before the end.
918   if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
919     return false;
920   }
921
922   UpdateAllocatedSpaceLimits(start,
923                              start + CodePageAreaStartOffset() +
924                              commit_size - CodePageGuardStartOffset());
925   return true;
926 }
927
928
929 // -----------------------------------------------------------------------------
930 // MemoryChunk implementation
931
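    // Adjust live-byte counts for changes made by the mutator (e.g. when an
    // object is trimmed in place) rather than by the collector.  For pages
    // that have not been swept yet, the owning space's unswept-free-bytes
    // counter is kept in sync as well.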
932 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
933   MemoryChunk* chunk = MemoryChunk::FromAddress(address);
934   if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
935     static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
936   }
937   chunk->IncrementLiveBytes(by);
938 }
939
940
941 // -----------------------------------------------------------------------------
942 // PagedSpace implementation
943
944 PagedSpace::PagedSpace(Heap* heap,
945                        intptr_t max_capacity,
946                        AllocationSpace id,
947                        Executability executable)
948     : Space(heap, id, executable),
949       free_list_(this),
950       was_swept_conservatively_(false),
951       first_unswept_page_(Page::FromAddress(NULL)),
952       unswept_free_bytes_(0) {
953   if (id == CODE_SPACE) {
954     area_size_ = heap->isolate()->memory_allocator()->
955         CodePageAreaSize();
956   } else {
957     area_size_ = Page::kPageSize - Page::kObjectStartOffset;
958   }
959   max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
960       * AreaSize();
961   accounting_stats_.Clear();
962
963   allocation_info_.set_top(NULL);
964   allocation_info_.set_limit(NULL);
965
966   anchor_.InitializeAsAnchor(this);
967 }
968
969
970 bool PagedSpace::SetUp() {
971   return true;
972 }
973
974
975 bool PagedSpace::HasBeenSetUp() {
976   return true;
977 }
978
979
980 void PagedSpace::TearDown() {
981   PageIterator iterator(this);
982   while (iterator.has_next()) {
983     heap()->isolate()->memory_allocator()->Free(iterator.next());
984   }
985   anchor_.set_next_page(&anchor_);
986   anchor_.set_prev_page(&anchor_);
987   accounting_stats_.Clear();
988 }
989
990
991 size_t PagedSpace::CommittedPhysicalMemory() {
992   if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
993   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
994   size_t size = 0;
995   PageIterator it(this);
996   while (it.has_next()) {
997     size += it.next()->CommittedPhysicalMemory();
998   }
999   return size;
1000 }
1001
1002
1003 MaybeObject* PagedSpace::FindObject(Address addr) {
1004   // Note: this function can only be called on precisely swept spaces.
1005   ASSERT(!heap()->mark_compact_collector()->in_use());
1006
1007   if (!Contains(addr)) return Failure::Exception();
1008
1009   Page* p = Page::FromAddress(addr);
1010   HeapObjectIterator it(p, NULL);
1011   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
1012     Address cur = obj->address();
1013     Address next = cur + obj->Size();
1014     if ((cur <= addr) && (addr < next)) return obj;
1015   }
1016
1017   UNREACHABLE();
1018   return Failure::Exception();
1019 }
1020
1021
1022 bool PagedSpace::CanExpand() {
1023   ASSERT(max_capacity_ % AreaSize() == 0);
1024
1025   if (Capacity() == max_capacity_) return false;
1026
1027   ASSERT(Capacity() < max_capacity_);
1028
1029   // Are we going to exceed capacity for this space?
1030   if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
1031
1032   return true;
1033 }
1034
1035
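     // Add one more page to the space; the very first page uses the reduced
     // size returned by SizeOfFirstPage().  Returns false if the space is
     // already at capacity or the page could not be allocated.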
1036 bool PagedSpace::Expand() {
1037   if (!CanExpand()) return false;
1038
1039   intptr_t size = AreaSize();
1040
1041   if (anchor_.next_page() == &anchor_) {
1042     size = SizeOfFirstPage();
1043   }
1044
1045   Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
1046       size, this, executable());
1047   if (p == NULL) return false;
1048
1049   ASSERT(Capacity() <= max_capacity_);
1050
1051   p->InsertAfter(anchor_.prev_page());
1052
1053   return true;
1054 }
1055
1056
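     // The first page of each space gets a reduced area size (per-space
     // constants below), so that a mostly empty heap commits less memory up
     // front.  The value is capped at the regular page area size.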
1057 intptr_t PagedSpace::SizeOfFirstPage() {
1058   int size = 0;
1059   switch (identity()) {
1060     case OLD_POINTER_SPACE:
1061       size = 72 * kPointerSize * KB;
1062       break;
1063     case OLD_DATA_SPACE:
1064       size = 192 * KB;
1065       break;
1066     case MAP_SPACE:
1067       size = 16 * kPointerSize * KB;
1068       break;
1069     case CELL_SPACE:
1070       size = 16 * kPointerSize * KB;
1071       break;
1072     case PROPERTY_CELL_SPACE:
1073       size = 8 * kPointerSize * KB;
1074       break;
1075     case CODE_SPACE:
1076       if (heap()->isolate()->code_range()->exists()) {
1077         // When a code range exists, code pages are allocated in a special way
1078         // (from the reserved code range). That part of the code is not yet
1079         // upgraded to handle small pages.
1080         size = AreaSize();
1081       } else {
1082         size = 480 * KB;
1083       }
1084       break;
1085     default:
1086       UNREACHABLE();
1087   }
1088   return Min(size, AreaSize());
1089 }
1090
1091
1092 int PagedSpace::CountTotalPages() {
1093   PageIterator it(this);
1094   int count = 0;
1095   while (it.has_next()) {
1096     it.next();
1097     count++;
1098   }
1099   return count;
1100 }
1101
1102
1103 void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
1104   sizes->huge_size_ = page->available_in_huge_free_list();
1105   sizes->small_size_ = page->available_in_small_free_list();
1106   sizes->medium_size_ = page->available_in_medium_free_list();
1107   sizes->large_size_ = page->available_in_large_free_list();
1108 }
1109
1110
1111 void PagedSpace::ResetFreeListStatistics() {
1112   PageIterator page_iterator(this);
1113   while (page_iterator.has_next()) {
1114     Page* page = page_iterator.next();
1115     page->ResetFreeListStatistics();
1116   }
1117 }
1118
1119
1120 void PagedSpace::IncreaseCapacity(int size) {
1121   accounting_stats_.ExpandSpace(size);
1122 }
1123
1124
1125 void PagedSpace::ReleasePage(Page* page, bool unlink) {
1126   ASSERT(page->LiveBytes() == 0);
1127   ASSERT(AreaSize() == page->area_size());
1128
1129   // Adjust list of unswept pages if the page is the head of the list.
1130   if (first_unswept_page_ == page) {
1131     first_unswept_page_ = page->next_page();
1132     if (first_unswept_page_ == anchor()) {
1133       first_unswept_page_ = Page::FromAddress(NULL);
1134     }
1135   }
1136
1137   if (page->WasSwept()) {
1138     intptr_t size = free_list_.EvictFreeListItems(page);
1139     accounting_stats_.AllocateBytes(size);
1140     ASSERT_EQ(AreaSize(), static_cast<int>(size));
1141   } else {
1142     DecreaseUnsweptFreeBytes(page);
1143   }
1144
1145   // TODO(hpayer): This check is just used for debugging purposes and
1146   // should be removed or turned into an assert after investigating the
1147   // crash in concurrent sweeping.
1148   CHECK(!free_list_.ContainsPageFreeListItems(page));
1149
1150   if (Page::FromAllocationTop(allocation_info_.top()) == page) {
1151     allocation_info_.set_top(NULL);
1152     allocation_info_.set_limit(NULL);
1153   }
1154
1155   if (unlink) {
1156     page->Unlink();
1157   }
1158   if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
1159     heap()->isolate()->memory_allocator()->Free(page);
1160   } else {
1161     heap()->QueueMemoryChunkForFree(page);
1162   }
1163
1164   ASSERT(Capacity() > 0);
1165   accounting_stats_.ShrinkSpace(AreaSize());
1166 }
1167
1168
1169 #ifdef DEBUG
1170 void PagedSpace::Print() { }
1171 #endif
1172
1173 #ifdef VERIFY_HEAP
1174 void PagedSpace::Verify(ObjectVisitor* visitor) {
1175   // We can only iterate over the pages if they were swept precisely.
1176   if (was_swept_conservatively_) return;
1177
1178   bool allocation_pointer_found_in_space =
1179       (allocation_info_.top() == allocation_info_.limit());
1180   PageIterator page_iterator(this);
1181   while (page_iterator.has_next()) {
1182     Page* page = page_iterator.next();
1183     CHECK(page->owner() == this);
1184     if (page == Page::FromAllocationTop(allocation_info_.top())) {
1185       allocation_pointer_found_in_space = true;
1186     }
1187     CHECK(page->WasSweptPrecisely());
1188     HeapObjectIterator it(page, NULL);
1189     Address end_of_previous_object = page->area_start();
1190     Address top = page->area_end();
1191     int black_size = 0;
1192     for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1193       CHECK(end_of_previous_object <= object->address());
1194
1195       // The first word should be a map, and we expect all map pointers to
1196       // be in map space.
1197       Map* map = object->map();
1198       CHECK(map->IsMap());
1199       CHECK(heap()->map_space()->Contains(map));
1200
1201       // Perform space-specific object verification.
1202       VerifyObject(object);
1203
1204       // The object itself should look OK.
1205       object->Verify();
1206
1207       // All the interior pointers should be contained in the heap.
1208       int size = object->Size();
1209       object->IterateBody(map->instance_type(), size, visitor);
1210       if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1211         black_size += size;
1212       }
1213
1214       CHECK(object->address() + size <= top);
1215       end_of_previous_object = object->address() + size;
1216     }
1217     CHECK_LE(black_size, page->LiveBytes());
1218   }
1219   CHECK(allocation_pointer_found_in_space);
1220 }
1221 #endif  // VERIFY_HEAP
1222
1223 // -----------------------------------------------------------------------------
1224 // NewSpace implementation
1225
1226
1227 bool NewSpace::SetUp(int reserved_semispace_capacity,
1228                      int maximum_semispace_capacity) {
1229   // Set up the new space from one contiguous reserved memory chunk, which is
1230   // divided into two semi-spaces.
1231   // To support fast containment testing in the new space, the size of
1232   // this chunk must be a power of two and it must be aligned to its size.
1233   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1234
1235   size_t size = 2 * reserved_semispace_capacity;
1236   Address base =
1237       heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
1238           size, size, &reservation_);
1239   if (base == NULL) return false;
1240
1241   chunk_base_ = base;
1242   chunk_size_ = static_cast<uintptr_t>(size);
1243   LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1244
1245   ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
1246   ASSERT(IsPowerOf2(maximum_semispace_capacity));
1247
1248   // Allocate and set up the histogram arrays if necessary.
1249   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1250   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1251
1252 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
1253                        promoted_histogram_[name].set_name(#name);
1254   INSTANCE_TYPE_LIST(SET_NAME)
1255 #undef SET_NAME
1256
1257   ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1258   ASSERT(static_cast<intptr_t>(chunk_size_) >=
1259          2 * heap()->ReservedSemiSpaceSize());
1260   ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1261
1262   to_space_.SetUp(chunk_base_,
1263                   initial_semispace_capacity,
1264                   maximum_semispace_capacity);
1265   from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
1266                     initial_semispace_capacity,
1267                     maximum_semispace_capacity);
1268   if (!to_space_.Commit()) {
1269     return false;
1270   }
1271   ASSERT(!from_space_.is_committed());  // No need to use memory yet.
1272
1273   start_ = chunk_base_;
1274   address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1275   object_mask_ = address_mask_ | kHeapObjectTagMask;
1276   object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1277
1278   ResetAllocationInfo();
1279
1280   return true;
1281 }
1282
1283
1284 void NewSpace::TearDown() {
1285   if (allocated_histogram_) {
1286     DeleteArray(allocated_histogram_);
1287     allocated_histogram_ = NULL;
1288   }
1289   if (promoted_histogram_) {
1290     DeleteArray(promoted_histogram_);
1291     promoted_histogram_ = NULL;
1292   }
1293
1294   start_ = NULL;
1295   allocation_info_.set_top(NULL);
1296   allocation_info_.set_limit(NULL);
1297
1298   to_space_.TearDown();
1299   from_space_.TearDown();
1300
1301   LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1302
1303   ASSERT(reservation_.IsReserved());
1304   heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1305                                                     NOT_EXECUTABLE);
1306   chunk_base_ = NULL;
1307   chunk_size_ = 0;
1308 }
1309
1310
1311 void NewSpace::Flip() {
1312   SemiSpace::Swap(&from_space_, &to_space_);
1313 }
1314
1315
1316 void NewSpace::Grow() {
1317   // Double the semispace size but only up to maximum capacity.
1318   ASSERT(Capacity() < MaximumCapacity());
1319   int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
1320   if (to_space_.GrowTo(new_capacity)) {
1321     // Only grow from space if we managed to grow to-space.
1322     if (!from_space_.GrowTo(new_capacity)) {
1323       // If we managed to grow to-space but couldn't grow from-space,
1324       // attempt to shrink to-space.
1325       if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1326         // We are in an inconsistent state because we could not
1327         // commit/uncommit memory from new space.
1328         V8::FatalProcessOutOfMemory("Failed to grow new space.");
1329       }
1330     }
1331   }
1332   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1333 }
1334
1335
1336 void NewSpace::Shrink() {
1337   int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
1338   int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1339   if (rounded_new_capacity < Capacity() &&
1340       to_space_.ShrinkTo(rounded_new_capacity))  {
1341     // Only shrink from-space if we managed to shrink to-space.
1342     from_space_.Reset();
1343     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1344       // If we managed to shrink to-space but couldn't shrink from
1345       // space, attempt to grow to-space again.
1346       if (!to_space_.GrowTo(from_space_.Capacity())) {
1347         // We are in an inconsistent state because we could not
1348         // commit/uncommit memory from new space.
1349         V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1350       }
1351     }
1352   }
1353   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1354 }
1355
1356
1357 void NewSpace::UpdateAllocationInfo() {
1358   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1359   allocation_info_.set_top(to_space_.page_low());
1360   allocation_info_.set_limit(to_space_.page_high());
1361   UpdateInlineAllocationLimit(0);
1362   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1363 }
1364
1365
1366 void NewSpace::ResetAllocationInfo() {
1367   to_space_.Reset();
1368   UpdateAllocationInfo();
1369   pages_used_ = 0;
1370   // Clear all mark-bits in the to-space.
1371   NewSpacePageIterator it(&to_space_);
1372   while (it.has_next()) {
1373     Bitmap::Clear(it.next());
1374   }
1375 }
1376
1377
1378 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1379   if (heap()->inline_allocation_disabled()) {
1380     // Lowest limit when linear allocation was disabled.
1381     Address high = to_space_.page_high();
1382     Address new_top = allocation_info_.top() + size_in_bytes;
1383     allocation_info_.set_limit(Min(new_top, high));
1384   } else if (inline_allocation_limit_step() == 0) {
1385     // Normal limit is the end of the current page.
1386     allocation_info_.set_limit(to_space_.page_high());
1387   } else {
1388     // Lower limit during incremental marking.
1389     Address high = to_space_.page_high();
1390     Address new_top = allocation_info_.top() + size_in_bytes;
1391     Address new_limit = new_top + inline_allocation_limit_step_;
1392     allocation_info_.set_limit(Min(new_limit, high));
1393   }
1394   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1395 }
1396
1397
1398 bool NewSpace::AddFreshPage() {
1399   Address top = allocation_info_.top();
1400   if (NewSpacePage::IsAtStart(top)) {
1401     // The current page is already empty. Don't try to make another.
1402
1403     // We should only get here if someone asks to allocate more
1404     // than what can be stored in a single page.
1405     // TODO(gc): Change the limit on new-space allocation to prevent this
1406     // from happening (all such allocations should go directly to LOSpace).
1407     return false;
1408   }
1409   if (!to_space_.AdvancePage()) {
1410     // Failed to get a new page in to-space.
1411     return false;
1412   }
1413
1414   // Clear remainder of current page.
1415   Address limit = NewSpacePage::FromLimit(top)->area_end();
1416   if (heap()->gc_state() == Heap::SCAVENGE) {
1417     heap()->promotion_queue()->SetNewLimit(limit);
1418     heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
1419   }
1420
1421   int remaining_in_page = static_cast<int>(limit - top);
1422   heap()->CreateFillerObjectAt(top, remaining_in_page);
1423   pages_used_++;
1424   UpdateAllocationInfo();
1425
1426   return true;
1427 }
1428
1429
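     // Slow path of new-space allocation.  Either the inline allocation limit
     // was lowered (incremental marking, or inline allocation disabled), in
     // which case an incremental-marking step is taken and the allocation is
     // retried, or the current page is full and we try to advance to a fresh
     // page.  If neither helps, the caller has to retry after a GC.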
1430 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
1431   Address old_top = allocation_info_.top();
1432   Address high = to_space_.page_high();
1433   if (allocation_info_.limit() < high) {
1434     // Either the limit has been lowered because linear allocation was disabled
1435     // or because incremental marking wants to get a chance to do a step. Set
1436     // the new limit accordingly.
1437     Address new_top = old_top + size_in_bytes;
1438     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1439     heap()->incremental_marking()->Step(
1440         bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1441     UpdateInlineAllocationLimit(size_in_bytes);
1442     top_on_previous_step_ = new_top;
1443     return AllocateRaw(size_in_bytes);
1444   } else if (AddFreshPage()) {
1445     // Switched to new page. Try allocating again.
1446     int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1447     heap()->incremental_marking()->Step(
1448         bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1449     top_on_previous_step_ = to_space_.page_low();
1450     return AllocateRaw(size_in_bytes);
1451   } else {
1452     return Failure::RetryAfterGC();
1453   }
1454 }
1455
1456
1457 #ifdef VERIFY_HEAP
1458 // We do not use the SemiSpaceIterator because verification doesn't assume
1459 // that it works (it depends on the invariants we are checking).
1460 void NewSpace::Verify() {
1461   // The allocation pointer should be in the space or at the very end.
1462   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1463
1464   // There should be objects packed in from the low address up to the
1465   // allocation pointer.
1466   Address current = to_space_.first_page()->area_start();
1467   CHECK_EQ(current, to_space_.space_start());
1468
1469   while (current != top()) {
1470     if (!NewSpacePage::IsAtEnd(current)) {
1471       // The allocation pointer should not be in the middle of an object.
1472       CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1473             current < top());
1474
1475       HeapObject* object = HeapObject::FromAddress(current);
1476
1477       // The first word should be a map, and we expect all map pointers to
1478       // be in map space.
1479       Map* map = object->map();
1480       CHECK(map->IsMap());
1481       CHECK(heap()->map_space()->Contains(map));
1482
1483       // The object should not be code or a map.
1484       CHECK(!object->IsMap());
1485       CHECK(!object->IsCode());
1486
1487       // The object itself should look OK.
1488       object->Verify();
1489
1490       // All the interior pointers should be contained in the heap.
1491       VerifyPointersVisitor visitor;
1492       int size = object->Size();
1493       object->IterateBody(map->instance_type(), size, &visitor);
1494
1495       current += size;
1496     } else {
1497       // At end of page, switch to next page.
1498       NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1499       // Next page should be valid.
1500       CHECK(!page->is_anchor());
1501       current = page->area_start();
1502     }
1503   }
1504
1505   // Check semi-spaces.
1506   CHECK_EQ(from_space_.id(), kFromSpace);
1507   CHECK_EQ(to_space_.id(), kToSpace);
1508   from_space_.Verify();
1509   to_space_.Verify();
1510 }
1511 #endif
1512
1513 // -----------------------------------------------------------------------------
1514 // SemiSpace implementation
1515
1516 void SemiSpace::SetUp(Address start,
1517                       int initial_capacity,
1518                       int maximum_capacity) {
1519   // Creates a space in the young generation.  SetUp() itself does not commit
1520   // memory from the OS.  A SemiSpace is given a contiguous chunk of memory of
1521   // size 'capacity' when set up, and does not grow or shrink otherwise.  In
1522   // the mark-compact collector, the memory region of the from space is used
1523   // as the marking stack, which is why it requires contiguous memory
1524   // addresses.
1525   ASSERT(maximum_capacity >= Page::kPageSize);
1526   initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1527   capacity_ = initial_capacity;
1528   maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1529   maximum_committed_ = 0;
1530   committed_ = false;
1531   start_ = start;
1532   address_mask_ = ~(maximum_capacity - 1);
1533   object_mask_ = address_mask_ | kHeapObjectTagMask;
1534   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1535   age_mark_ = start_;
1536 }
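
// Illustrative sketch (the actual predicates are declared with SemiSpace in
// spaces.h): assuming 'maximum_capacity' is a power of two and 'start' is
// aligned to it, the masks computed in SetUp() allow O(1) containment tests
// of roughly this form:
//
//   (reinterpret_cast<uintptr_t>(addr) & address_mask_) ==
//       reinterpret_cast<uintptr_t>(start_)                  // plain address
//   (reinterpret_cast<uintptr_t>(obj) & object_mask_) == object_expected_
//
// Any address inside [start_, start_ + maximum_capacity) masks back to
// start_, and a properly tagged HeapObject pointer additionally carries
// kHeapObjectTag, matching object_expected_.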
1537
1538
1539 void SemiSpace::TearDown() {
1540   start_ = NULL;
1541   capacity_ = 0;
1542 }
1543
1544
1545 bool SemiSpace::Commit() {
1546   ASSERT(!is_committed());
1547   int pages = capacity_ / Page::kPageSize;
1548   if (!heap()->isolate()->memory_allocator()->CommitBlock(start_,
1549                                                           capacity_,
1550                                                           executable())) {
1551     return false;
1552   }
1553
1554   NewSpacePage* current = anchor();
1555   for (int i = 0; i < pages; i++) {
1556     NewSpacePage* new_page =
1557       NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
1558     new_page->InsertAfter(current);
1559     current = new_page;
1560   }
1561
1562   SetCapacity(capacity_);
1563   committed_ = true;
1564   Reset();
1565   return true;
1566 }
1567
1568
1569 bool SemiSpace::Uncommit() {
1570   ASSERT(is_committed());
1571   Address start = start_ + maximum_capacity_ - capacity_;
1572   if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
1573     return false;
1574   }
1575   anchor()->set_next_page(anchor());
1576   anchor()->set_prev_page(anchor());
1577
1578   committed_ = false;
1579   return true;
1580 }
1581
1582
1583 size_t SemiSpace::CommittedPhysicalMemory() {
1584   if (!is_committed()) return 0;
1585   size_t size = 0;
1586   NewSpacePageIterator it(this);
1587   while (it.has_next()) {
1588     size += it.next()->CommittedPhysicalMemory();
1589   }
1590   return size;
1591 }
1592
1593
1594 bool SemiSpace::GrowTo(int new_capacity) {
1595   if (!is_committed()) {
1596     if (!Commit()) return false;
1597   }
1598   ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1599   ASSERT(new_capacity <= maximum_capacity_);
1600   ASSERT(new_capacity > capacity_);
1601   int pages_before = capacity_ / Page::kPageSize;
1602   int pages_after = new_capacity / Page::kPageSize;
1603
1604   size_t delta = new_capacity - capacity_;
1605
1606   ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1607   if (!heap()->isolate()->memory_allocator()->CommitBlock(
1608       start_ + capacity_, delta, executable())) {
1609     return false;
1610   }
1611   SetCapacity(new_capacity);
1612   NewSpacePage* last_page = anchor()->prev_page();
1613   ASSERT(last_page != anchor());
1614   for (int i = pages_before; i < pages_after; i++) {
1615     Address page_address = start_ + i * Page::kPageSize;
1616     NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
1617                                                       page_address,
1618                                                       this);
1619     new_page->InsertAfter(last_page);
1620     Bitmap::Clear(new_page);
1621     // Duplicate the flags that were set on the old page.
1622     new_page->SetFlags(last_page->GetFlags(),
1623                        NewSpacePage::kCopyOnFlipFlagsMask);
1624     last_page = new_page;
1625   }
1626   return true;
1627 }
1628
1629
1630 bool SemiSpace::ShrinkTo(int new_capacity) {
1631   ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1632   ASSERT(new_capacity >= initial_capacity_);
1633   ASSERT(new_capacity < capacity_);
1634   if (is_committed()) {
1635     size_t delta = capacity_ - new_capacity;
1636     ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1637
1638     MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1639     if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
1640       return false;
1641     }
1642
1643     int pages_after = new_capacity / Page::kPageSize;
1644     NewSpacePage* new_last_page =
1645         NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1646     new_last_page->set_next_page(anchor());
1647     anchor()->set_prev_page(new_last_page);
1648     ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page));
1649   }
1650
1651   SetCapacity(new_capacity);
1652
1653   return true;
1654 }
1655
1656
1657 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1658   anchor_.set_owner(this);
1659   // Fixup back-pointers to anchor. Address of anchor changes
1660   // when we swap.
1661   anchor_.prev_page()->set_next_page(&anchor_);
1662   anchor_.next_page()->set_prev_page(&anchor_);
1663
1664   bool becomes_to_space = (id_ == kFromSpace);
1665   id_ = becomes_to_space ? kToSpace : kFromSpace;
1666   NewSpacePage* page = anchor_.next_page();
1667   while (page != &anchor_) {
1668     page->set_owner(this);
1669     page->SetFlags(flags, mask);
1670     if (becomes_to_space) {
1671       page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1672       page->SetFlag(MemoryChunk::IN_TO_SPACE);
1673       page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1674       page->ResetLiveBytes();
1675     } else {
1676       page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1677       page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1678     }
1679     ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1680     ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1681            page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1682     page = page->next_page();
1683   }
1684 }
1685
1686
1687 void SemiSpace::Reset() {
1688   ASSERT(anchor_.next_page() != &anchor_);
1689   current_page_ = anchor_.next_page();
1690 }
1691
1692
1693 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1694   // We won't be swapping semispaces without data in them.
1695   ASSERT(from->anchor_.next_page() != &from->anchor_);
1696   ASSERT(to->anchor_.next_page() != &to->anchor_);
1697
1698   // Swap bits.
1699   SemiSpace tmp = *from;
1700   *from = *to;
1701   *to = tmp;
1702
1703   // Fixup back-pointers to the page list anchor now that its address
1704   // has changed.
1705   // Swap to/from-space bits on pages.
1706   // Copy GC flags from old active space (from-space) to new (to-space).
1707   intptr_t flags = from->current_page()->GetFlags();
1708   to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1709
1710   from->FlipPages(0, 0);
1711 }
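
// Usage note: Swap() implements the semispace flip performed at the start of
// a scavenge.  The two SemiSpace objects exchange their fields by value, so
// the page-list anchors move with them; FlipPages() then repairs the anchor
// back-pointers and rewrites each page's IN_TO_SPACE / IN_FROM_SPACE flags,
// copying the GC-relevant flags of the old active space onto the new
// to-space pages.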
1712
1713
1714 void SemiSpace::SetCapacity(int new_capacity) {
1715   capacity_ = new_capacity;
1716   if (capacity_ > maximum_committed_) {
1717     maximum_committed_ = capacity_;
1718   }
1719 }
1720
1721
1722 void SemiSpace::set_age_mark(Address mark) {
1723   ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
1724   age_mark_ = mark;
1725   // Mark all pages up to the one containing mark.
1726   NewSpacePageIterator it(space_start(), mark);
1727   while (it.has_next()) {
1728     it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1729   }
1730 }
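
// Background: the age mark records how far allocation had progressed when
// the previous scavenge completed.  Objects below it, i.e. on pages flagged
// NEW_SPACE_BELOW_AGE_MARK, have already survived one scavenge, which is
// what makes them candidates for promotion to the old generation.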
1731
1732
1733 #ifdef DEBUG
1734 void SemiSpace::Print() { }
1735 #endif
1736
1737 #ifdef VERIFY_HEAP
1738 void SemiSpace::Verify() {
1739   bool is_from_space = (id_ == kFromSpace);
1740   NewSpacePage* page = anchor_.next_page();
1741   CHECK(anchor_.semi_space() == this);
1742   while (page != &anchor_) {
1743     CHECK(page->semi_space() == this);
1744     CHECK(page->InNewSpace());
1745     CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1746                                         : MemoryChunk::IN_TO_SPACE));
1747     CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1748                                          : MemoryChunk::IN_FROM_SPACE));
1749     CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1750     if (!is_from_space) {
1751       // The pointers-from-here-are-interesting flag isn't updated dynamically
1752       // on from-space pages, so it might be out of sync with the marking state.
1753       if (page->heap()->incremental_marking()->IsMarking()) {
1754         CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1755       } else {
1756         CHECK(!page->IsFlagSet(
1757             MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1758       }
1759       // TODO(gc): Check that the live_bytes_count_ field matches the
1760       // black marking on the page (if we make it match in new-space).
1761     }
1762     CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1763     CHECK(page->prev_page()->next_page() == page);
1764     page = page->next_page();
1765   }
1766 }
1767 #endif
1768
1769 #ifdef DEBUG
1770 void SemiSpace::AssertValidRange(Address start, Address end) {
1771   // The addresses must belong to the same semi-space.
1772   NewSpacePage* page = NewSpacePage::FromLimit(start);
1773   NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1774   SemiSpace* space = page->semi_space();
1775   CHECK_EQ(space, end_page->semi_space());
1776   // The start address must not be after the end address: either both are on
1777   // the same page, or the end address is on a later page in the linked list
1778   // of semi-space pages.
1779   if (page == end_page) {
1780     CHECK(start <= end);
1781   } else {
1782     while (page != end_page) {
1783       page = page->next_page();
1784       CHECK_NE(page, space->anchor());
1785     }
1786   }
1787 }
1788 #endif
1789
1790
1791 // -----------------------------------------------------------------------------
1792 // SemiSpaceIterator implementation.
1793 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1794   Initialize(space->bottom(), space->top(), NULL);
1795 }
1796
1797
1798 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1799                                      HeapObjectCallback size_func) {
1800   Initialize(space->bottom(), space->top(), size_func);
1801 }
1802
1803
1804 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1805   Initialize(start, space->top(), NULL);
1806 }
1807
1808
1809 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1810   Initialize(from, to, NULL);
1811 }
1812
1813
1814 void SemiSpaceIterator::Initialize(Address start,
1815                                    Address end,
1816                                    HeapObjectCallback size_func) {
1817   SemiSpace::AssertValidRange(start, end);
1818   current_ = start;
1819   limit_ = end;
1820   size_func_ = size_func;
1821 }
1822
1823
1824 #ifdef DEBUG
1825 // heap_histograms is shared; always clear it before using it.
1826 static void ClearHistograms(Isolate* isolate) {
1827   // We reset the name each time, though it hasn't changed.
1828 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1829   INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1830 #undef DEF_TYPE_NAME
1831
1832 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1833   INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1834 #undef CLEAR_HISTOGRAM
1835
1836   isolate->js_spill_information()->Clear();
1837 }
1838
1839
1840 static void ClearCodeKindStatistics(int* code_kind_statistics) {
1841   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1842     code_kind_statistics[i] = 0;
1843   }
1844 }
1845
1846
1847 static void ReportCodeKindStatistics(int* code_kind_statistics) {
1848   PrintF("\n   Code kind histograms: \n");
1849   for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1850     if (code_kind_statistics[i] > 0) {
1851       PrintF("     %-20s: %10d bytes\n",
1852              Code::Kind2String(static_cast<Code::Kind>(i)),
1853              code_kind_statistics[i]);
1854     }
1855   }
1856   PrintF("\n");
1857 }
1858
1859
1860 static int CollectHistogramInfo(HeapObject* obj) {
1861   Isolate* isolate = obj->GetIsolate();
1862   InstanceType type = obj->map()->instance_type();
1863   ASSERT(0 <= type && type <= LAST_TYPE);
1864   ASSERT(isolate->heap_histograms()[type].name() != NULL);
1865   isolate->heap_histograms()[type].increment_number(1);
1866   isolate->heap_histograms()[type].increment_bytes(obj->Size());
1867
1868   if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1869     JSObject::cast(obj)->IncrementSpillStatistics(
1870         isolate->js_spill_information());
1871   }
1872
1873   return obj->Size();
1874 }
1875
1876
1877 static void ReportHistogram(Isolate* isolate, bool print_spill) {
1878   PrintF("\n  Object Histogram:\n");
1879   for (int i = 0; i <= LAST_TYPE; i++) {
1880     if (isolate->heap_histograms()[i].number() > 0) {
1881       PrintF("    %-34s%10d (%10d bytes)\n",
1882              isolate->heap_histograms()[i].name(),
1883              isolate->heap_histograms()[i].number(),
1884              isolate->heap_histograms()[i].bytes());
1885     }
1886   }
1887   PrintF("\n");
1888
1889   // Summarize string types.
1890   int string_number = 0;
1891   int string_bytes = 0;
1892 #define INCREMENT(type, size, name, camel_name)      \
1893     string_number += isolate->heap_histograms()[type].number(); \
1894     string_bytes += isolate->heap_histograms()[type].bytes();
1895   STRING_TYPE_LIST(INCREMENT)
1896 #undef INCREMENT
1897   if (string_number > 0) {
1898     PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
1899            string_bytes);
1900   }
1901
1902   if (FLAG_collect_heap_spill_statistics && print_spill) {
1903     isolate->js_spill_information()->Print();
1904   }
1905 }
1906 #endif  // DEBUG
1907
1908
1909 // Support for statistics gathering for --heap-stats and --log-gc.
1910 void NewSpace::ClearHistograms() {
1911   for (int i = 0; i <= LAST_TYPE; i++) {
1912     allocated_histogram_[i].clear();
1913     promoted_histogram_[i].clear();
1914   }
1915 }
1916
1917
1918 // Because the copying collector does not touch garbage objects, we iterate
1919 // the new space before a collection to get a histogram of allocated objects.
1920 // This only happens when the --log-gc flag is set.
1921 void NewSpace::CollectStatistics() {
1922   ClearHistograms();
1923   SemiSpaceIterator it(this);
1924   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1925     RecordAllocation(obj);
1926 }
1927
1928
1929 static void DoReportStatistics(Isolate* isolate,
1930                                HistogramInfo* info, const char* description) {
1931   LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1932   // Lump all the string types together.
1933   int string_number = 0;
1934   int string_bytes = 0;
1935 #define INCREMENT(type, size, name, camel_name)       \
1936     string_number += info[type].number();             \
1937     string_bytes += info[type].bytes();
1938   STRING_TYPE_LIST(INCREMENT)
1939 #undef INCREMENT
1940   if (string_number > 0) {
1941     LOG(isolate,
1942         HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
1943   }
1944
1945   // Then do the other types.
1946   for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
1947     if (info[i].number() > 0) {
1948       LOG(isolate,
1949           HeapSampleItemEvent(info[i].name(), info[i].number(),
1950                               info[i].bytes()));
1951     }
1952   }
1953   LOG(isolate, HeapSampleEndEvent("NewSpace", description));
1954 }
1955
1956
1957 void NewSpace::ReportStatistics() {
1958 #ifdef DEBUG
1959   if (FLAG_heap_stats) {
1960     float pct = static_cast<float>(Available()) / Capacity();
1961     PrintF("  capacity: %" V8_PTR_PREFIX "d"
1962                ", available: %" V8_PTR_PREFIX "d, %%%d\n",
1963            Capacity(), Available(), static_cast<int>(pct*100));
1964     PrintF("\n  Object Histogram:\n");
1965     for (int i = 0; i <= LAST_TYPE; i++) {
1966       if (allocated_histogram_[i].number() > 0) {
1967         PrintF("    %-34s%10d (%10d bytes)\n",
1968                allocated_histogram_[i].name(),
1969                allocated_histogram_[i].number(),
1970                allocated_histogram_[i].bytes());
1971       }
1972     }
1973     PrintF("\n");
1974   }
1975 #endif  // DEBUG
1976
1977   if (FLAG_log_gc) {
1978     Isolate* isolate = heap()->isolate();
1979     DoReportStatistics(isolate, allocated_histogram_, "allocated");
1980     DoReportStatistics(isolate, promoted_histogram_, "promoted");
1981   }
1982 }
1983
1984
1985 void NewSpace::RecordAllocation(HeapObject* obj) {
1986   InstanceType type = obj->map()->instance_type();
1987   ASSERT(0 <= type && type <= LAST_TYPE);
1988   allocated_histogram_[type].increment_number(1);
1989   allocated_histogram_[type].increment_bytes(obj->Size());
1990 }
1991
1992
1993 void NewSpace::RecordPromotion(HeapObject* obj) {
1994   InstanceType type = obj->map()->instance_type();
1995   ASSERT(0 <= type && type <= LAST_TYPE);
1996   promoted_histogram_[type].increment_number(1);
1997   promoted_histogram_[type].increment_bytes(obj->Size());
1998 }
1999
2000
2001 size_t NewSpace::CommittedPhysicalMemory() {
2002   if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
2003   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2004   size_t size = to_space_.CommittedPhysicalMemory();
2005   if (from_space_.is_committed()) {
2006     size += from_space_.CommittedPhysicalMemory();
2007   }
2008   return size;
2009 }
2010
2011
2012 // -----------------------------------------------------------------------------
2013 // Free lists for old object spaces implementation
2014
2015 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
2016   ASSERT(size_in_bytes > 0);
2017   ASSERT(IsAligned(size_in_bytes, kPointerSize));
2018
2019   // We write a map and possibly size information to the block.  If the block
2020   // is big enough to be a FreeSpace with at least one extra word (the next
2021   // pointer), we set its map to the free space map and record its size so
2022   // that HeapObject::Size() reports the desired size.  If the block is too
2023   // small (e.g., one or two words) to hold both a size field and a next
2024   // pointer, we give it a one- or two-pointer filler map that implies the
2025   // correct size.
2026   if (size_in_bytes > FreeSpace::kHeaderSize) {
2027     set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
2028     // Can't use FreeSpace::cast because it fails during deserialization.
2029     FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
2030     this_as_free_space->set_size(size_in_bytes);
2031   } else if (size_in_bytes == kPointerSize) {
2032     set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
2033   } else if (size_in_bytes == 2 * kPointerSize) {
2034     set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
2035   } else {
2036     UNREACHABLE();
2037   }
2038   // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
2039   // deserialization because the free space map has not been set up yet.
2040 }
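
// Layout sketch of the three cases above (illustrative; one 'word' is
// kPointerSize bytes, offsets as implied by the FreeSpace and FreeListNode
// declarations):
//
//   >= 3 words:  [ free_space_map | size_in_bytes | next | ... unused ... ]
//   == 2 words:  [ two_pointer_filler_map | next ]
//   == 1 word:   [ one_pointer_filler_map ]
//
// The filler maps carry no explicit size field; the map itself implies the
// block's size, and next()/set_next() below store the link in the word after
// the map.  Blocks smaller than kSmallListMin are never linked into a free
// list (see FreeList::Free()), so the one-word case never needs a 'next'
// slot.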
2041
2042
2043 FreeListNode* FreeListNode::next() {
2044   ASSERT(IsFreeListNode(this));
2045   if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2046     ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
2047     return reinterpret_cast<FreeListNode*>(
2048         Memory::Address_at(address() + kNextOffset));
2049   } else {
2050     return reinterpret_cast<FreeListNode*>(
2051         Memory::Address_at(address() + kPointerSize));
2052   }
2053 }
2054
2055
2056 FreeListNode** FreeListNode::next_address() {
2057   ASSERT(IsFreeListNode(this));
2058   if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2059     ASSERT(Size() >= kNextOffset + kPointerSize);
2060     return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
2061   } else {
2062     return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
2063   }
2064 }
2065
2066
2067 void FreeListNode::set_next(FreeListNode* next) {
2068   ASSERT(IsFreeListNode(this));
2069   // While we are booting the VM, the free space map will actually be NULL,
2070   // so we have to make sure that we don't try to use it for anything at
2071   // that stage.
2072   if (map() == GetHeap()->raw_unchecked_free_space_map()) {
2073     ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
2074     Memory::Address_at(address() + kNextOffset) =
2075         reinterpret_cast<Address>(next);
2076   } else {
2077     Memory::Address_at(address() + kPointerSize) =
2078         reinterpret_cast<Address>(next);
2079   }
2080 }
2081
2082
2083 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
2084   intptr_t free_bytes = 0;
2085   if (category->top_ != NULL) {
2086     ASSERT(category->end_ != NULL);
2087     // This is safe (it cannot deadlock) because Concatenate operations are
2088     // never performed on the same pair of free lists in the opposite order
2089     // at the same time.
2090     LockGuard<Mutex> target_lock_guard(mutex());
2091     LockGuard<Mutex> source_lock_guard(category->mutex());
2092     free_bytes = category->available();
2093     if (end_ == NULL) {
2094       end_ = category->end();
2095     } else {
2096       category->end()->set_next(top_);
2097     }
2098     top_ = category->top();
2099     available_ += category->available();
2100     category->Reset();
2101   }
2102   return free_bytes;
2103 }
2104
2105
2106 void FreeListCategory::Reset() {
2107   top_ = NULL;
2108   end_ = NULL;
2109   available_ = 0;
2110 }
2111
2112
2113 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
2114   int sum = 0;
2115   FreeListNode** n = &top_;
2116   while (*n != NULL) {
2117     if (Page::FromAddress((*n)->address()) == p) {
2118       FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
2119       sum += free_space->Size();
2120       *n = (*n)->next();
2121     } else {
2122       n = (*n)->next_address();
2123     }
2124   }
2125   if (top_ == NULL) {
2126     end_ = NULL;
2127   }
2128   available_ -= sum;
2129   return sum;
2130 }
2131
2132
2133 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
2134   FreeListNode** n = &top_;
2135   while (*n != NULL) {
2136     if (Page::FromAddress((*n)->address()) == p) return true;
2137     n = (*n)->next_address();
2138   }
2139   return false;
2140 }
2141
2142
2143 FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
2144   FreeListNode* node = top_;
2145
2146   if (node == NULL) return NULL;
2147
2148   while (node != NULL &&
2149          Page::FromAddress(node->address())->IsEvacuationCandidate()) {
2150     available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
2151     node = node->next();
2152   }
2153
2154   if (node != NULL) {
2155     set_top(node->next());
2156     *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
2157     available_ -= *node_size;
2158   } else {
2159     set_top(NULL);
2160   }
2161
2162   if (top() == NULL) {
2163     set_end(NULL);
2164   }
2165
2166   return node;
2167 }
2168
2169
2170 FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
2171                                                  int *node_size) {
2172   FreeListNode* node = PickNodeFromList(node_size);
2173   if (node != NULL && *node_size < size_in_bytes) {
2174     Free(node, *node_size);
2175     *node_size = 0;
2176     return NULL;
2177   }
2178   return node;
2179 }
2180
2181
2182 void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
2183   node->set_next(top_);
2184   top_ = node;
2185   if (end_ == NULL) {
2186     end_ = node;
2187   }
2188   available_ += size_in_bytes;
2189 }
2190
2191
2192 void FreeListCategory::RepairFreeList(Heap* heap) {
2193   FreeListNode* n = top_;
2194   while (n != NULL) {
2195     Map** map_location = reinterpret_cast<Map**>(n->address());
2196     if (*map_location == NULL) {
2197       *map_location = heap->free_space_map();
2198     } else {
2199       ASSERT(*map_location == heap->free_space_map());
2200     }
2201     n = n->next();
2202   }
2203 }
2204
2205
2206 FreeList::FreeList(PagedSpace* owner)
2207     : owner_(owner), heap_(owner->heap()) {
2208   Reset();
2209 }
2210
2211
2212 intptr_t FreeList::Concatenate(FreeList* free_list) {
2213   intptr_t free_bytes = 0;
2214   free_bytes += small_list_.Concatenate(free_list->small_list());
2215   free_bytes += medium_list_.Concatenate(free_list->medium_list());
2216   free_bytes += large_list_.Concatenate(free_list->large_list());
2217   free_bytes += huge_list_.Concatenate(free_list->huge_list());
2218   return free_bytes;
2219 }
2220
2221
2222 void FreeList::Reset() {
2223   small_list_.Reset();
2224   medium_list_.Reset();
2225   large_list_.Reset();
2226   huge_list_.Reset();
2227 }
2228
2229
2230 int FreeList::Free(Address start, int size_in_bytes) {
2231   if (size_in_bytes == 0) return 0;
2232
2233   FreeListNode* node = FreeListNode::FromAddress(start);
2234   node->set_size(heap_, size_in_bytes);
2235   Page* page = Page::FromAddress(start);
2236
2237   // Early return to drop too-small blocks on the floor.
2238   if (size_in_bytes < kSmallListMin) {
2239     page->add_non_available_small_blocks(size_in_bytes);
2240     return size_in_bytes;
2241   }
2242
2243   // Insert other blocks at the head of a free list of the appropriate
2244   // magnitude.
2245   if (size_in_bytes <= kSmallListMax) {
2246     small_list_.Free(node, size_in_bytes);
2247     page->add_available_in_small_free_list(size_in_bytes);
2248   } else if (size_in_bytes <= kMediumListMax) {
2249     medium_list_.Free(node, size_in_bytes);
2250     page->add_available_in_medium_free_list(size_in_bytes);
2251   } else if (size_in_bytes <= kLargeListMax) {
2252     large_list_.Free(node, size_in_bytes);
2253     page->add_available_in_large_free_list(size_in_bytes);
2254   } else {
2255     huge_list_.Free(node, size_in_bytes);
2256     page->add_available_in_huge_free_list(size_in_bytes);
2257   }
2258
2259   ASSERT(IsVeryLong() || available() == SumFreeLists());
2260   return 0;
2261 }
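
// For orientation: the boundary constants used above are declared with
// FreeList in spaces.h, and Free() routes a block by size as
//
//   [0, kSmallListMin)              -> recorded as waste on the page
//   [kSmallListMin, kSmallListMax]  -> small_list_
//   (kSmallListMax, kMediumListMax] -> medium_list_
//   (kMediumListMax, kLargeListMax] -> large_list_
//   (kLargeListMax, ...)            -> huge_list_
//
// FindNodeFor() below consults the same categories, smallest first, when
// servicing an allocation request.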
2262
2263
2264 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2265   FreeListNode* node = NULL;
2266   Page* page = NULL;
2267
2268   if (size_in_bytes <= kSmallAllocationMax) {
2269     node = small_list_.PickNodeFromList(node_size);
2270     if (node != NULL) {
2271       ASSERT(size_in_bytes <= *node_size);
2272       page = Page::FromAddress(node->address());
2273       page->add_available_in_small_free_list(-(*node_size));
2274       ASSERT(IsVeryLong() || available() == SumFreeLists());
2275       return node;
2276     }
2277   }
2278
2279   if (size_in_bytes <= kMediumAllocationMax) {
2280     node = medium_list_.PickNodeFromList(node_size);
2281     if (node != NULL) {
2282       ASSERT(size_in_bytes <= *node_size);
2283       page = Page::FromAddress(node->address());
2284       page->add_available_in_medium_free_list(-(*node_size));
2285       ASSERT(IsVeryLong() || available() == SumFreeLists());
2286       return node;
2287     }
2288   }
2289
2290   if (size_in_bytes <= kLargeAllocationMax) {
2291     node = large_list_.PickNodeFromList(node_size);
2292     if (node != NULL) {
2293       ASSERT(size_in_bytes <= *node_size);
2294       page = Page::FromAddress(node->address());
2295       page->add_available_in_large_free_list(-(*node_size));
2296       ASSERT(IsVeryLong() || available() == SumFreeLists());
2297       return node;
2298     }
2299   }
2300
2301   int huge_list_available = huge_list_.available();
2302   for (FreeListNode** cur = huge_list_.GetTopAddress();
2303        *cur != NULL;
2304        cur = (*cur)->next_address()) {
2305     FreeListNode* cur_node = *cur;
2306     while (cur_node != NULL &&
2307            Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
2308       int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
2309       huge_list_available -= size;
2310       page = Page::FromAddress(cur_node->address());
2311       page->add_available_in_huge_free_list(-size);
2312       cur_node = cur_node->next();
2313     }
2314
2315     *cur = cur_node;
2316     if (cur_node == NULL) {
2317       huge_list_.set_end(NULL);
2318       break;
2319     }
2320
2321     ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map());
2322     FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
2323     int size = cur_as_free_space->Size();
2324     if (size >= size_in_bytes) {
2325       // Large enough node found.  Unlink it from the list.
2326       node = *cur;
2327       *cur = node->next();
2328       *node_size = size;
2329       huge_list_available -= size;
2330       page = Page::FromAddress(node->address());
2331       page->add_available_in_huge_free_list(-size);
2332       break;
2333     }
2334   }
2335
2336   if (huge_list_.top() == NULL) {
2337     huge_list_.set_end(NULL);
2338   }
2339   huge_list_.set_available(huge_list_available);
2340
2341   if (node != NULL) {
2342     ASSERT(IsVeryLong() || available() == SumFreeLists());
2343     return node;
2344   }
2345
2346   if (size_in_bytes <= kSmallListMax) {
2347     node = small_list_.PickNodeFromList(size_in_bytes, node_size);
2348     if (node != NULL) {
2349       ASSERT(size_in_bytes <= *node_size);
2350       page = Page::FromAddress(node->address());
2351       page->add_available_in_small_free_list(-(*node_size));
2352     }
2353   } else if (size_in_bytes <= kMediumListMax) {
2354     node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
2355     if (node != NULL) {
2356       ASSERT(size_in_bytes <= *node_size);
2357       page = Page::FromAddress(node->address());
2358       page->add_available_in_medium_free_list(-(*node_size));
2359     }
2360   } else if (size_in_bytes <= kLargeListMax) {
2361     node = large_list_.PickNodeFromList(size_in_bytes, node_size);
2362     if (node != NULL) {
2363       ASSERT(size_in_bytes <= *node_size);
2364       page = Page::FromAddress(node->address());
2365       page->add_available_in_large_free_list(-(*node_size));
2366     }
2367   }
2368
2369   ASSERT(IsVeryLong() || available() == SumFreeLists());
2370   return node;
2371 }
2372
2373
2374 // Allocation on the old space free list.  If it succeeds then a new linear
2375 // allocation space has been set up with the top and limit of the space.  If
2376 // the allocation fails then NULL is returned, and the caller can perform a GC
2377 // or allocate a new page before retrying.
2378 HeapObject* FreeList::Allocate(int size_in_bytes) {
2379   ASSERT(0 < size_in_bytes);
2380   ASSERT(size_in_bytes <= kMaxBlockSize);
2381   ASSERT(IsAligned(size_in_bytes, kPointerSize));
2382   // Don't free list allocate if there is linear space available.
2383   ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
2384
2385   int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2386   // Mark the old linear allocation area with a free space map so it can be
2387   // skipped when scanning the heap.  This also puts it back in the free list
2388   // if it is big enough.
2389   owner_->Free(owner_->top(), old_linear_size);
2390
2391   owner_->heap()->incremental_marking()->OldSpaceStep(
2392       size_in_bytes - old_linear_size);
2393
2394   int new_node_size = 0;
2395   FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2396   if (new_node == NULL) {
2397     owner_->SetTopAndLimit(NULL, NULL);
2398     return NULL;
2399   }
2400
2401   int bytes_left = new_node_size - size_in_bytes;
2402   ASSERT(bytes_left >= 0);
2403
2404 #ifdef DEBUG
2405   for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2406     reinterpret_cast<Object**>(new_node->address())[i] =
2407         Smi::FromInt(kCodeZapValue);
2408   }
2409 #endif
2410
2411   // The old-space-step might have finished sweeping and restarted marking.
2412   // Verify that it did not turn the page of the new node into an evacuation
2413   // candidate.
2414   ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2415
2416   const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2417
2418   // Memory in the linear allocation area is counted as allocated.  We may free
2419   // a little of this again immediately - see below.
2420   owner_->Allocate(new_node_size);
2421
2422   if (owner_->heap()->inline_allocation_disabled()) {
2423     // Keep the linear allocation area empty if requested to do so; just
2424     // return the area to the free list instead.
2425     owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2426     ASSERT(owner_->top() == NULL && owner_->limit() == NULL);
2427   } else if (bytes_left > kThreshold &&
2428              owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2429              FLAG_incremental_marking_steps) {
2430     int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2431     // We don't want to give too large linear areas to the allocator while
2432     // incremental marking is going on, because we won't check again whether
2433     // we want to do another increment until the linear area is used up.
2434     owner_->Free(new_node->address() + size_in_bytes + linear_size,
2435                  new_node_size - size_in_bytes - linear_size);
2436     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2437                            new_node->address() + size_in_bytes + linear_size);
2438   } else if (bytes_left > 0) {
2439     // Normally we give the rest of the node to the allocator as its new
2440     // linear allocation area.
2441     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2442                            new_node->address() + new_node_size);
2443   } else {
2444     // TODO(gc) Try not freeing the linear allocation region when bytes_left
2445     // is zero.
2446     owner_->SetTopAndLimit(NULL, NULL);
2447   }
2448
2449   return new_node;
2450 }
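
// Design note: two of the branches above deliberately give the allocator a
// smaller linear area than the chosen node could provide.  While incremental
// marking is incomplete, capping the linear area at kThreshold bounds how
// many bytes can be handed out before allocation comes back through this
// path and gives OldSpaceStep() another chance to run.  When inline
// allocation is disabled the linear area is kept empty entirely, so every
// allocation takes this slow path.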
2451
2452
2453 intptr_t FreeList::EvictFreeListItems(Page* p) {
2454   intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
2455   p->set_available_in_huge_free_list(0);
2456
2457   if (sum < p->area_size()) {
2458     sum += small_list_.EvictFreeListItemsInList(p) +
2459         medium_list_.EvictFreeListItemsInList(p) +
2460         large_list_.EvictFreeListItemsInList(p);
2461     p->set_available_in_small_free_list(0);
2462     p->set_available_in_medium_free_list(0);
2463     p->set_available_in_large_free_list(0);
2464   }
2465
2466   return sum;
2467 }
2468
2469
2470 bool FreeList::ContainsPageFreeListItems(Page* p) {
2471   return huge_list_.EvictFreeListItemsInList(p) ||
2472          small_list_.EvictFreeListItemsInList(p) ||
2473          medium_list_.EvictFreeListItemsInList(p) ||
2474          large_list_.EvictFreeListItemsInList(p);
2475 }
2476
2477
2478 void FreeList::RepairLists(Heap* heap) {
2479   small_list_.RepairFreeList(heap);
2480   medium_list_.RepairFreeList(heap);
2481   large_list_.RepairFreeList(heap);
2482   huge_list_.RepairFreeList(heap);
2483 }
2484
2485
2486 #ifdef DEBUG
2487 intptr_t FreeListCategory::SumFreeList() {
2488   intptr_t sum = 0;
2489   FreeListNode* cur = top_;
2490   while (cur != NULL) {
2491     ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
2492     FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
2493     sum += cur_as_free_space->Size();
2494     cur = cur->next();
2495   }
2496   return sum;
2497 }
2498
2499
2500 static const int kVeryLongFreeList = 500;
2501
2502
2503 int FreeListCategory::FreeListLength() {
2504   int length = 0;
2505   FreeListNode* cur = top_;
2506   while (cur != NULL) {
2507     length++;
2508     cur = cur->next();
2509     if (length == kVeryLongFreeList) return length;
2510   }
2511   return length;
2512 }
2513
2514
2515 bool FreeList::IsVeryLong() {
2516   if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
2517   if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
2518   if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
2519   if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
2520   return false;
2521 }
2522
2523
2524 // This can take a very long time because it is linear in the number of entries
2525 // on the free list, so it should not be called if FreeListLength returns
2526 // kVeryLongFreeList.
2527 intptr_t FreeList::SumFreeLists() {
2528   intptr_t sum = small_list_.SumFreeList();
2529   sum += medium_list_.SumFreeList();
2530   sum += large_list_.SumFreeList();
2531   sum += huge_list_.SumFreeList();
2532   return sum;
2533 }
2534 #endif
2535
2536
2537 // -----------------------------------------------------------------------------
2538 // OldSpace implementation
2539
2540 void PagedSpace::PrepareForMarkCompact() {
2541   // We don't have a linear allocation area while sweeping.  It will be restored
2542   // on the first allocation after the sweep.
2543   EmptyAllocationInfo();
2544
2545   // Stop lazy sweeping and clear marking bits for unswept pages.
2546   if (first_unswept_page_ != NULL) {
2547     Page* p = first_unswept_page_;
2548     do {
2549       // Do not use the ShouldBeSweptLazily predicate here: new evacuation
2550       // candidates were selected, but they still have to be swept before
2551       // collection starts.
2552       if (!p->WasSwept()) {
2553         Bitmap::Clear(p);
2554         if (FLAG_gc_verbose) {
2555           PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
2556                  reinterpret_cast<intptr_t>(p));
2557         }
2558       }
2559       p = p->next_page();
2560     } while (p != anchor());
2561   }
2562   first_unswept_page_ = Page::FromAddress(NULL);
2563   unswept_free_bytes_ = 0;
2564
2565   // Clear the free list before a full GC---it will be rebuilt afterward.
2566   free_list_.Reset();
2567 }
2568
2569
2570 intptr_t PagedSpace::SizeOfObjects() {
2571   ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
2572   return Size() - unswept_free_bytes_ - (limit() - top());
2573 }
2574
2575
2576 // After we have booted, we have created a map which represents free space
2577 // on the heap.  If there was already a free list then the elements on it
2578 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
2579 // fix them.
2580 void PagedSpace::RepairFreeListsAfterBoot() {
2581   free_list_.RepairLists(heap());
2582 }
2583
2584
2585 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
2586   if (IsLazySweepingComplete()) return true;
2587
2588   intptr_t freed_bytes = 0;
2589   Page* p = first_unswept_page_;
2590   do {
2591     Page* next_page = p->next_page();
2592     if (ShouldBeSweptLazily(p)) {
2593       if (FLAG_gc_verbose) {
2594         PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
2595                reinterpret_cast<intptr_t>(p));
2596       }
2597       DecreaseUnsweptFreeBytes(p);
2598       freed_bytes +=
2599           MarkCompactCollector::
2600               SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
2601                   this, NULL, p);
2602     }
2603     p = next_page;
2604   } while (p != anchor() && freed_bytes < bytes_to_sweep);
2605
2606   if (p == anchor()) {
2607     first_unswept_page_ = Page::FromAddress(NULL);
2608   } else {
2609     first_unswept_page_ = p;
2610   }
2611
2612   heap()->FreeQueuedChunks();
2613
2614   return IsLazySweepingComplete();
2615 }
2616
2617
2618 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2619   if (allocation_info_.top() >= allocation_info_.limit()) return;
2620
2621   if (Page::FromAllocationTop(allocation_info_.top())->
2622       IsEvacuationCandidate()) {
2623     // Create filler object to keep page iterable if it was iterable.
2624     int remaining =
2625         static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2626     heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
2627
2628     allocation_info_.set_top(NULL);
2629     allocation_info_.set_limit(NULL);
2630   }
2631 }
2632
2633
2634 bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
2635   MarkCompactCollector* collector = heap()->mark_compact_collector();
2636   if (collector->AreSweeperThreadsActivated()) {
2637     if (collector->IsConcurrentSweepingInProgress()) {
2638       if (collector->RefillFreeLists(this) < size_in_bytes) {
2639         if (!collector->sequential_sweeping()) {
2640           collector->WaitUntilSweepingCompleted();
2641           return true;
2642         }
2643       }
2644       return false;
2645     }
2646     return true;
2647   } else {
2648     return AdvanceSweeper(size_in_bytes);
2649   }
2650 }
2651
2652
2653 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2654   // Allocation in this space has failed.
2655
2656   // If there are unswept pages, advance the lazy sweeper a bounded number
2657   // of times until we find a contiguous piece of memory of size_in_bytes.
2658   const int kMaxSweepingTries = 5;
2659   bool sweeping_complete = false;
2660
2661   for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
2662     sweeping_complete = EnsureSweeperProgress(size_in_bytes);
2663
2664     // Retry the free list allocation.
2665     HeapObject* object = free_list_.Allocate(size_in_bytes);
2666     if (object != NULL) return object;
2667   }
2668
2669   // Free list allocation failed and there is no next page.  Fail if we have
2670   // hit the old generation size limit that should cause a garbage
2671   // collection.
2672   if (!heap()->always_allocate() &&
2673       heap()->OldGenerationAllocationLimitReached()) {
2674     return NULL;
2675   }
2676
2677   // Try to expand the space and allocate in the new next page.
2678   if (Expand()) {
2679     ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
2680     return free_list_.Allocate(size_in_bytes);
2681   }
2682
2683   // As a last resort, sweep all the remaining pages to try to find space.
2684   // This may cause a pause.
2685   if (!IsLazySweepingComplete()) {
2686     EnsureSweeperProgress(kMaxInt);
2687
2688     // Retry the free list allocation.
2689     HeapObject* object = free_list_.Allocate(size_in_bytes);
2690     if (object != NULL) return object;
2691   }
2692
2693   // Finally, fail.
2694   return NULL;
2695 }
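
// In summary, the slow path above: (1) lets the lazy sweeper advance up to
// kMaxSweepingTries times, retrying the free list after each round; (2) gives
// up without expanding if the old-generation limit says a collection is due;
// (3) otherwise tries to grow the space via Expand(); (4) as a last resort
// finishes sweeping everything and retries once more; and (5) returns NULL so
// the caller can trigger a GC.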
2696
2697
2698 #ifdef DEBUG
2699 void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2700   CommentStatistic* comments_statistics =
2701       isolate->paged_space_comments_statistics();
2702   ReportCodeKindStatistics(isolate->code_kind_statistics());
2703   PrintF("Code comment statistics (\"   [ comment-txt   :    size/   "
2704          "count  (average)\"):\n");
2705   for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2706     const CommentStatistic& cs = comments_statistics[i];
2707     if (cs.size > 0) {
2708       PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
2709              cs.size/cs.count);
2710     }
2711   }
2712   PrintF("\n");
2713 }
2714
2715
2716 void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2717   CommentStatistic* comments_statistics =
2718       isolate->paged_space_comments_statistics();
2719   ClearCodeKindStatistics(isolate->code_kind_statistics());
2720   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2721     comments_statistics[i].Clear();
2722   }
2723   comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2724   comments_statistics[CommentStatistic::kMaxComments].size = 0;
2725   comments_statistics[CommentStatistic::kMaxComments].count = 0;
2726 }
2727
2728
2729 // Adds a comment to the 'comments_statistics' table.  Performance is OK as
2730 // long as 'kMaxComments' is small.
2731 static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2732   CommentStatistic* comments_statistics =
2733       isolate->paged_space_comments_statistics();
2734   // Do not count empty comments
2735   if (delta <= 0) return;
2736   CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2737   // Search for a free or matching entry in 'comments_statistics': 'cs'
2738   // points to result.
2739   for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2740     if (comments_statistics[i].comment == NULL) {
2741       cs = &comments_statistics[i];
2742       cs->comment = comment;
2743       break;
2744     } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2745       cs = &comments_statistics[i];
2746       break;
2747     }
2748   }
2749   // Update entry for 'comment'
2750   cs->size += delta;
2751   cs->count += 1;
2752 }
2753
2754
2755 // Called for each nested comment start (a start is marked with '[ xxx', the
2756 // end with ']').  RelocIterator 'it' must point to a comment reloc info.
2757 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2758   ASSERT(!it->done());
2759   ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
2760   const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2761   if (tmp[0] != '[') {
2762     // Not a nested comment; skip
2763     return;
2764   }
2765
2766   // Search for end of nested comment or a new nested comment
2767   const char* const comment_txt =
2768       reinterpret_cast<const char*>(it->rinfo()->data());
2769   const byte* prev_pc = it->rinfo()->pc();
2770   int flat_delta = 0;
2771   it->next();
2772   while (true) {
2773     // All nested comments must be terminated properly, so the loop is
2774     // guaranteed to exit.
2775     ASSERT(!it->done());
2776     if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2777       const char* const txt =
2778           reinterpret_cast<const char*>(it->rinfo()->data());
2779       flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2780       if (txt[0] == ']') break;  // End of nested comment
2781       // A new comment
2782       CollectCommentStatistics(isolate, it);
2783       // Skip code that was covered with previous comment
2784       prev_pc = it->rinfo()->pc();
2785     }
2786     it->next();
2787   }
2788   EnterComment(isolate, comment_txt, flat_delta);
2789 }
2790
2791
2792 // Collects code size statistics:
2793 // - by code kind
2794 // - by code comment
2795 void PagedSpace::CollectCodeStatistics() {
2796   Isolate* isolate = heap()->isolate();
2797   HeapObjectIterator obj_it(this);
2798   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2799     if (obj->IsCode()) {
2800       Code* code = Code::cast(obj);
2801       isolate->code_kind_statistics()[code->kind()] += code->Size();
2802       RelocIterator it(code);
2803       int delta = 0;
2804       const byte* prev_pc = code->instruction_start();
2805       while (!it.done()) {
2806         if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2807           delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2808           CollectCommentStatistics(isolate, &it);
2809           prev_pc = it.rinfo()->pc();
2810         }
2811         it.next();
2812       }
2813
2814       ASSERT(code->instruction_start() <= prev_pc &&
2815              prev_pc <= code->instruction_end());
2816       delta += static_cast<int>(code->instruction_end() - prev_pc);
2817       EnterComment(isolate, "NoComment", delta);
2818     }
2819   }
2820 }
2821
2822
2823 void PagedSpace::ReportStatistics() {
2824   int pct = static_cast<int>(Available() * 100 / Capacity());
2825   PrintF("  capacity: %" V8_PTR_PREFIX "d"
2826              ", waste: %" V8_PTR_PREFIX "d"
2827              ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2828          Capacity(), Waste(), Available(), pct);
2829
2830   if (was_swept_conservatively_) return;
2831   ClearHistograms(heap()->isolate());
2832   HeapObjectIterator obj_it(this);
2833   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2834     CollectHistogramInfo(obj);
2835   ReportHistogram(heap()->isolate(), true);
2836 }
2837 #endif
2838
2839
2840 // -----------------------------------------------------------------------------
2841 // MapSpace implementation
2842 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2843 // there is at least one non-inlined virtual function. I would prefer to hide
2844 // the VerifyObject definition behind VERIFY_HEAP.
2845
2846 void MapSpace::VerifyObject(HeapObject* object) {
2847   CHECK(object->IsMap());
2848 }
2849
2850
2851 // -----------------------------------------------------------------------------
2852 // CellSpace and PropertyCellSpace implementation
2853 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless
2854 // there is at least one non-inlined virtual function. I would prefer to hide
2855 // the VerifyObject definition behind VERIFY_HEAP.
2856
2857 void CellSpace::VerifyObject(HeapObject* object) {
2858   CHECK(object->IsCell());
2859 }
2860
2861
2862 void PropertyCellSpace::VerifyObject(HeapObject* object) {
2863   CHECK(object->IsPropertyCell());
2864 }
2865
2866
2867 // -----------------------------------------------------------------------------
2868 // LargeObjectIterator
2869
2870 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2871   current_ = space->first_page_;
2872   size_func_ = NULL;
2873 }
2874
2875
2876 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2877                                          HeapObjectCallback size_func) {
2878   current_ = space->first_page_;
2879   size_func_ = size_func;
2880 }
2881
2882
2883 HeapObject* LargeObjectIterator::Next() {
2884   if (current_ == NULL) return NULL;
2885
2886   HeapObject* object = current_->GetObject();
2887   current_ = current_->next_page();
2888   return object;
2889 }
2890
2891
2892 // -----------------------------------------------------------------------------
2893 // LargeObjectSpace
2894 static bool ComparePointers(void* key1, void* key2) {
2895   return key1 == key2;
2896 }
2897
2898
2899 LargeObjectSpace::LargeObjectSpace(Heap* heap,
2900                                    intptr_t max_capacity,
2901                                    AllocationSpace id)
2902     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
2903       max_capacity_(max_capacity),
2904       first_page_(NULL),
2905       size_(0),
2906       page_count_(0),
2907       objects_size_(0),
2908       chunk_map_(ComparePointers, 1024) {}
2909
2910
2911 bool LargeObjectSpace::SetUp() {
2912   first_page_ = NULL;
2913   size_ = 0;
2914   maximum_committed_ = 0;
2915   page_count_ = 0;
2916   objects_size_ = 0;
2917   chunk_map_.Clear();
2918   return true;
2919 }
2920
2921
2922 void LargeObjectSpace::TearDown() {
2923   while (first_page_ != NULL) {
2924     LargePage* page = first_page_;
2925     first_page_ = first_page_->next_page();
2926     LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2927
2928     ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2929     heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2930         space, kAllocationActionFree, page->size());
2931     heap()->isolate()->memory_allocator()->Free(page);
2932   }
2933   SetUp();
2934 }
2935
2936
2937 MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
2938                                            Executability executable) {
2939   // Check if we want to force a GC before growing the old space further.
2940   // If so, fail the allocation.
2941   if (!heap()->always_allocate() &&
2942       heap()->OldGenerationAllocationLimitReached()) {
2943     return Failure::RetryAfterGC(identity());
2944   }
2945
2946   if (Size() + object_size > max_capacity_) {
2947     return Failure::RetryAfterGC(identity());
2948   }
2949
2950   LargePage* page = heap()->isolate()->memory_allocator()->
2951       AllocateLargePage(object_size, this, executable);
2952   if (page == NULL) return Failure::RetryAfterGC(identity());
2953   ASSERT(page->area_size() >= object_size);
2954
2955   size_ += static_cast<int>(page->size());
2956   objects_size_ += object_size;
2957   page_count_++;
2958   page->set_next_page(first_page_);
2959   first_page_ = page;
2960
2961   if (size_ > maximum_committed_) {
2962     maximum_committed_ = size_;
2963   }
2964
2965   // Register all MemoryChunk::kAlignment-aligned chunks covered by
2966   // this large page in the chunk map.
2967   uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2968   uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2969   for (uintptr_t key = base; key <= limit; key++) {
2970     HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2971                                               static_cast<uint32_t>(key),
2972                                               true);
2973     ASSERT(entry != NULL);
2974     entry->value = page;
2975   }
2976
2977   HeapObject* object = page->GetObject();
2978
2979   if (Heap::ShouldZapGarbage()) {
2980     // Make the object consistent so the heap can be verified in OldSpaceStep.
2981     // We only need to do this in debug builds or if verify_heap is on.
2982     reinterpret_cast<Object**>(object->address())[0] =
2983         heap()->fixed_array_map();
2984     reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2985   }
2986
2987   heap()->incremental_marking()->OldSpaceStep(object_size);
2988   return object;
2989 }
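
// Note on the chunk map: the keys registered above are addresses divided by
// MemoryChunk::kAlignment, so every aligned slot covered by the large page
// maps back to that page.  FindPage() below recomputes the same key from an
// arbitrary interior address, which lets FindObject() resolve a pointer into
// a large object without walking the page list; FreeUnmarkedObjects() removes
// the same key range when a page is released.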
2990
2991
2992 size_t LargeObjectSpace::CommittedPhysicalMemory() {
2993   if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
2994   size_t size = 0;
2995   LargePage* current = first_page_;
2996   while (current != NULL) {
2997     size += current->CommittedPhysicalMemory();
2998     current = current->next_page();
2999   }
3000   return size;
3001 }
3002
3003
3004 // GC support
3005 MaybeObject* LargeObjectSpace::FindObject(Address a) {
3006   LargePage* page = FindPage(a);
3007   if (page != NULL) {
3008     return page->GetObject();
3009   }
3010   return Failure::Exception();
3011 }
3012
3013
3014 LargePage* LargeObjectSpace::FindPage(Address a) {
3015   uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
3016   HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
3017                                         static_cast<uint32_t>(key),
3018                                         false);
3019   if (e != NULL) {
3020     ASSERT(e->value != NULL);
3021     LargePage* page = reinterpret_cast<LargePage*>(e->value);
3022     ASSERT(page->is_valid());
3023     if (page->Contains(a)) {
3024       return page;
3025     }
3026   }
3027   return NULL;
3028 }
3029
3030
3031 void LargeObjectSpace::FreeUnmarkedObjects() {
3032   LargePage* previous = NULL;
3033   LargePage* current = first_page_;
3034   while (current != NULL) {
3035     HeapObject* object = current->GetObject();
3036     // Can this large page contain pointers to non-trivial objects?  No other
3037     // pointer object is this big.
3038     bool is_pointer_object = object->IsFixedArray();
3039     MarkBit mark_bit = Marking::MarkBitFrom(object);
3040     if (mark_bit.Get()) {
3041       mark_bit.Clear();
3042       Page::FromAddress(object->address())->ResetProgressBar();
3043       Page::FromAddress(object->address())->ResetLiveBytes();
3044       previous = current;
3045       current = current->next_page();
3046     } else {
3047       LargePage* page = current;
3048       // Cut the chunk out from the chunk list.
3049       current = current->next_page();
3050       if (previous == NULL) {
3051         first_page_ = current;
3052       } else {
3053         previous->set_next_page(current);
3054       }
3055
3056       // Free the chunk.
3057       heap()->mark_compact_collector()->ReportDeleteIfNeeded(
3058           object, heap()->isolate());
3059       size_ -= static_cast<int>(page->size());
3060       objects_size_ -= object->Size();
3061       page_count_--;
3062
3063       // Remove entries belonging to this page.
3064       // Use the local variable 'alignment' to keep the lines below within
3065       // the single-line length check (<= 80 characters) in tools/presubmit.py.
      const intptr_t alignment = MemoryChunk::kAlignment;
      uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
      uintptr_t limit = base + (page->size() - 1) / alignment;
      for (uintptr_t key = base; key <= limit; key++) {
        chunk_map_.Remove(reinterpret_cast<void*>(key),
                          static_cast<uint32_t>(key));
      }

      if (is_pointer_object) {
        heap()->QueueMemoryChunkForFree(page);
      } else {
        heap()->isolate()->memory_allocator()->Free(page);
      }
    }
  }
  heap()->FreeQueuedChunks();
}
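

// A minimal self-contained sketch using demo types, not part of the upstream
// file: stripped of the accounting and chunk-map bookkeeping, the sweep above
// is the standard unlink-while-iterating pass over a singly linked list,
// tracking the previous live node so that a dead node can be bypassed.
struct SketchNode {
  SketchNode* next;
  bool marked;
};

static SketchNode* SketchSweepList(SketchNode* head) {
  SketchNode* previous = NULL;
  SketchNode* current = head;
  while (current != NULL) {
    SketchNode* next = current->next;
    if (current->marked) {
      previous = current;  // Keep the node; it becomes the new predecessor.
    } else {
      if (previous == NULL) {
        head = next;  // The dead node was the head of the list.
      } else {
        previous->next = next;  // Bypass the dead node.
      }
      // A real sweep would release 'current' here, as FreeUnmarkedObjects()
      // does via QueueMemoryChunkForFree() or MemoryAllocator::Free().
    }
    current = next;
  }
  return head;
}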


bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
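  // Recovering the chunk header by alignment masking works here because the
  // object's first word lies within the first kAlignment-sized slice of its
  // large page; arbitrary interior addresses of a larger-than-one-slice
  // object would not map back to the header this way and must go through
  // FindPage()'s chunk map instead.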
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);

  bool owned = (chunk->owner() == this);

  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());

  return owned;
}


#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargePage* chunk = first_page_;
       chunk != NULL;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    CHECK(object->address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    CHECK(map->IsMap());
    CHECK(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    CHECK(object->IsCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsFixedArray() ||
          object->IsFixedDoubleArray() || object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          CHECK(heap()->Contains(element_object));
          CHECK(element_object->map()->IsMap());
        }
      }
    }
  }
}
#endif


#ifdef DEBUG
void LargeObjectSpace::Print() {
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    obj->Print();
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms(heap()->isolate());
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF("  number of objects %d, "
         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}


void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}
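

// A minimal sketch with a hypothetical helper, not part of the upstream
// file: the statistics pass above is an iterate-and-filter accumulation over
// the space.  A variant that only needs the total size of the code objects
// living here reduces to:
static int SketchTotalLargeCodeSize(LargeObjectSpace* space) {
  int total = 0;
  LargeObjectIterator it(space);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (obj->IsCode()) {
      total += Code::cast(obj)->Size();
    }
  }
  return total;
}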


void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n",
         this->address(),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next();
       object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::MarkBitFrom(object).Get();
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}

#endif  // DEBUG

} }  // namespace v8::internal