v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
// All allocation spaces other than NEW_SPACE have the same effect.
- Heap::CollectGarbage(0, OLD_SPACE);
+ Heap::CollectGarbage(0, OLD_DATA_SPACE);
return v8::Undefined();
}
}
-// Symbols are created in the old generation (code space).
+// Symbols are created in the old generation (data space).
Handle<String> Factory::LookupSymbol(Vector<const char> string) {
CALL_HEAP_FUNCTION(Heap::LookupSymbol(string), String);
}
// NOTE: SpaceIterator depends on AllocationSpace enumeration values being
// consecutive.
enum AllocationSpace {
- NEW_SPACE,
- OLD_SPACE,
- CODE_SPACE,
- MAP_SPACE,
- LO_SPACE,
+ NEW_SPACE, // Semispaces collected with copying collector.
+ OLD_POINTER_SPACE, // Must be first of the paged spaces - see PagedSpaces.
+ OLD_DATA_SPACE, // May not have pointers to new space.
+ CODE_SPACE, // Also one of the old spaces. Marked executable.
+ MAP_SPACE, // Only map objects.
+ LO_SPACE, // Large objects.
FIRST_SPACE = NEW_SPACE,
- LAST_SPACE = LO_SPACE
+ LAST_SPACE = LO_SPACE // <= 5 (see kSpaceBits and kLOSpacePointer)
};
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
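With LAST_SPACE == LO_SPACE == 5, every space fits in a kSpaceTagSize == 3 bit tag, leaving two spare encodings that the serializer below uses for large code objects and large fixed arrays (kLOSpaceExecutable and kLOSpacePointer). A minimal sketch of packing and unpacking such a tag, using only the constants above (the helper names are hypothetical):

// Sketch only: pack a space tag into the low kSpaceTagSize bits of a word
// and recover it with kSpaceTagMask.
static inline int TagWithSpace(int payload, AllocationSpace space) {
  return (payload << kSpaceTagSize) | space;
}

static inline AllocationSpace SpaceFromTag(int tagged) {
  return static_cast<AllocationSpace>(tagged & kSpaceTagMask);
}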
+
// A flag that indicates whether objects should be pretenured when
// allocated (allocated directly into the old generation) or not
// (allocated in the young generation if the object size and type
enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
+enum Executability { NOT_EXECUTABLE, EXECUTABLE };
+
// A CodeDesc describes a buffer holding instructions and relocation
// information. The instructions start at the beginning of the buffer
}
-Object* Heap::AllocateRaw(int size_in_bytes, AllocationSpace space) {
+Object* Heap::AllocateRaw(int size_in_bytes,
+ AllocationSpace space) {
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
#ifdef DEBUG
if (FLAG_gc_interval >= 0 &&
}
Object* result;
- if (OLD_SPACE == space) {
- result = old_space_->AllocateRaw(size_in_bytes);
+ if (OLD_POINTER_SPACE == space) {
+ result = old_pointer_space_->AllocateRaw(size_in_bytes);
+ } else if (OLD_DATA_SPACE == space) {
+ result = old_data_space_->AllocateRaw(size_in_bytes);
} else if (CODE_SPACE == space) {
result = code_space_->AllocateRaw(size_in_bytes);
} else if (LO_SPACE == space) {
}
-Object* Heap::AllocateForDeserialization(int size_in_bytes,
- AllocationSpace space) {
- ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
- PagedSpace* where;
-
- switch (space) {
- case NEW_SPACE:
- return new_space_->AllocateRaw(size_in_bytes);
- case LO_SPACE:
- return lo_space_->AllocateRaw(size_in_bytes);
- case OLD_SPACE:
- where = old_space_;
- break;
- case CODE_SPACE:
- where = code_space_;
- break;
- case MAP_SPACE:
- where = map_space_;
- break;
- }
-
- // Only paged spaces fall through.
- return where->AllocateForDeserialization(size_in_bytes);
-}
-
-
Object* Heap::NumberFromInt32(int32_t value) {
if (Smi::IsValid(value)) return Smi::FromInt(value);
// Bypass NumberFromDouble to avoid various redundant checks.
}
-AllocationSpace Heap::TargetSpace(HeapObject* object) {
- // Heap numbers and sequential strings are promoted to code space, all
- // other object types are promoted to old space. We do not use
+OldSpace* Heap::TargetSpace(HeapObject* object) {
+ // Heap numbers and sequential strings are promoted to old data space, all
+ // other object types are promoted to old pointer space. We do not use
// object->IsHeapNumber() and object->IsSeqString() because we already
// know that object has the heap object tag.
InstanceType type = object->map()->instance_type();
bool has_pointers =
type != HEAP_NUMBER_TYPE &&
(type >= FIRST_NONSTRING_TYPE ||
String::cast(object)->representation_tag() != kSeqStringTag);
- return has_pointers ? OLD_SPACE : CODE_SPACE;
+ return has_pointers ? old_pointer_space_ : old_data_space_;
}
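A sketch of what the new policy decides for two representative objects (TargetSpaceExamples is a hypothetical helper, for illustration only):

// Illustration: heap numbers carry no pointers and are promoted to old
// data space; a JSObject can reference new space and is promoted to old
// pointer space.
static void TargetSpaceExamples(HeapObject* number, HeapObject* object) {
  ASSERT(number->IsHeapNumber());
  ASSERT(Heap::TargetSpace(number) == Heap::old_data_space());
  ASSERT(object->IsJSObject());
  ASSERT(Heap::TargetSpace(object) == Heap::old_pointer_space());
}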
NewSpace* Heap::new_space_ = NULL;
-OldSpace* Heap::old_space_ = NULL;
+OldSpace* Heap::old_pointer_space_ = NULL;
+OldSpace* Heap::old_data_space_ = NULL;
OldSpace* Heap::code_space_ = NULL;
MapSpace* Heap::map_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;
if (!HasBeenSetup()) return 0;
return new_space_->Capacity() +
- old_space_->Capacity() +
+ old_pointer_space_->Capacity() +
+ old_data_space_->Capacity() +
code_space_->Capacity() +
map_space_->Capacity();
}
if (!HasBeenSetup()) return 0;
return new_space_->Available() +
- old_space_->Available() +
+ old_pointer_space_->Available() +
+ old_data_space_->Available() +
code_space_->Available() +
map_space_->Available();
}
bool Heap::HasBeenSetup() {
return new_space_ != NULL &&
- old_space_ != NULL &&
- code_space_ != NULL &&
- map_space_ != NULL &&
- lo_space_ != NULL;
+ old_pointer_space_ != NULL &&
+ old_data_space_ != NULL &&
+ code_space_ != NULL &&
+ map_space_ != NULL &&
+ lo_space_ != NULL;
}
// Is there enough space left in OLD to guarantee that a scavenge can
// succeed?
//
- // Note that old_space_->MaxAvailable() undercounts the memory available
+ // Note that MemoryAllocator::MaxAvailable() undercounts the memory available
// for object promotion. It counts only the bytes that the memory
// allocator has not yet allocated from the OS and assigned to any space,
// and does not count available bytes already in the old space or code
// space. Undercounting is safe---we may get an unrequested full GC when
// a scavenge would have succeeded.
- if (old_space_->MaxAvailable() <= new_space_->Size()) {
+ if (MemoryAllocator::MaxAvailable() <= new_space_->Size()) {
Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
return MARK_COMPACTOR;
}
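Taken together, the selection policy reduces to roughly the following sketch (SelectCollectorSketch is hypothetical; it assumes, as in this patch, that only the promotion guarantee forces a full collection for new-space requests):

// Sketch only: old-space requests always take the mark-compact collector;
// new-space requests take it only when promotion of the entire new space
// cannot be guaranteed to succeed.
static GarbageCollector SelectCollectorSketch(AllocationSpace space) {
  if (space != NEW_SPACE) return MARK_COMPACTOR;
  if (MemoryAllocator::MaxAvailable() <= Heap::new_space()->Size()) {
    return MARK_COMPACTOR;
  }
  return SCAVENGER;
}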
if (FLAG_gc_verbose) Print();
if (FLAG_print_rset) {
- // By definition, code space does not have remembered set bits that we
- // care about.
- old_space_->PrintRSet();
+ // Not all spaces have remembered set bits that we care about.
+ old_pointer_space_->PrintRSet();
map_space_->PrintRSet();
lo_space_->PrintRSet();
}
}
int Heap::SizeOfObjects() {
- return new_space_->Size() +
- old_space_->Size() +
- code_space_->Size() +
- map_space_->Size() +
- lo_space_->Size();
+ int total = 0;
+ AllSpaces spaces;
+ while (Space* space = spaces.next()) total += space->Size();
+ return total;
}
void Heap::GarbageCollectionEpilogue() {
}
+void Heap::CollectAllGarbage() {
+ // Since we are ignoring the return value, the exact choice of space does
+ // not matter, so long as we do not specify NEW_SPACE, which would not
+ // cause a full GC.
+ CollectGarbage(0, OLD_POINTER_SPACE);
+}
+
+
bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
// The VM is in the GC state until exiting this function.
VMState state(GC);
switch (space) {
case NEW_SPACE:
return new_space_->Available() >= requested_size;
- case OLD_SPACE:
- return old_space_->Available() >= requested_size;
+ case OLD_POINTER_SPACE:
+ return old_pointer_space_->Available() >= requested_size;
+ case OLD_DATA_SPACE:
+ return old_data_space_->Available() >= requested_size;
case CODE_SPACE:
return code_space_->Available() >= requested_size;
case MAP_SPACE:
// If we have used the mark-compact collector to collect the new
// space, and it has not compacted the new space, we force a
- // separate scavenge collection. THIS IS A HACK. It covers the
+ // separate scavenge collection. This is a hack. It covers the
// case where (1) a new space collection was requested, (2) the
// collector selection policy selected the mark-compact collector,
// and (3) the mark-compact collector policy selected not to
#ifdef DEBUG
-// Visitor class to verify pointers in code space do not point into
+// Visitor class to verify pointers in code or data space do not point into
// new space.
-class VerifyCodeSpacePointersVisitor: public ObjectVisitor {
+class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
void Heap::Scavenge() {
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
- VerifyCodeSpacePointersVisitor v;
+ VerifyNonPointerSpacePointersVisitor v;
HeapObjectIterator it(code_space_);
while (it.has_next()) {
HeapObject* object = it.next();
IterateRoots(&copy_visitor);
// Copy objects reachable from the old generation. By definition, there
- // are no intergenerational pointers in code space.
- IterateRSet(old_space_, &CopyObject);
+ // are no intergenerational pointers in code or data spaces.
+ IterateRSet(old_pointer_space_, &CopyObject);
IterateRSet(map_space_, &CopyObject);
lo_space_->IterateRSet(&CopyObject);
void Heap::RebuildRSets() {
- // By definition, we do not care about remembered set bits in code space.
+ // By definition, we do not care about remembered set bits in code or data
+ // spaces.
map_space_->ClearRSet();
RebuildRSets(map_space_);
- old_space_->ClearRSet();
- RebuildRSets(old_space_);
+ old_pointer_space_->ClearRSet();
+ RebuildRSets(old_pointer_space_);
Heap::lo_space_->ClearRSet();
RebuildRSets(lo_space_);
// We use the first word (where the map pointer usually is) of a heap
// object to record the forwarding pointer. A forwarding pointer can
- // point to the old space, the code space, or the to space of the new
+ // point to an old space, the code space, or the to space of the new
// generation.
MapWord first_word = object->map_word();
Object* result;
// If the object should be promoted, we try to copy it to old space.
if (ShouldBePromoted(object->address(), object_size)) {
- AllocationSpace target_space = Heap::TargetSpace(object);
- if (target_space == OLD_SPACE) {
- result = old_space_->AllocateRaw(object_size);
- } else {
- ASSERT(target_space == CODE_SPACE);
- result = code_space_->AllocateRaw(object_size);
- }
+ OldSpace* target_space = Heap::TargetSpace(object);
+ ASSERT(target_space == Heap::old_pointer_space_ ||
+ target_space == Heap::old_data_space_);
+ result = target_space->AllocateRaw(object_size);
if (!result->IsFailure()) {
*p = MigrateObject(p, HeapObject::cast(result), object_size);
- if (target_space == OLD_SPACE) {
+ if (target_space == Heap::old_pointer_space_) {
// Record the object's address at the top of the to space, to allow
// it to be swept by the scavenger.
promoted_top -= kPointerSize;
Memory::Object_at(promoted_top) = *p;
} else {
#ifdef DEBUG
- // Objects promoted to the code space should not have pointers to
+ // Objects promoted to the data space should not have pointers to
// new space.
- VerifyCodeSpacePointersVisitor v;
+ VerifyNonPointerSpacePointersVisitor v;
(*p)->Iterate(&v);
#endif
}
if (obj->IsFailure()) return false;
empty_fixed_array_ = FixedArray::cast(obj);
- obj = Allocate(oddball_map(), CODE_SPACE);
+ obj = Allocate(oddball_map(), OLD_DATA_SPACE);
if (obj->IsFailure()) return false;
null_value_ = obj;
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
- AllocationSpace space = (pretenure == TENURED) ? CODE_SPACE : NEW_SPACE;
+ AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
Object* result = AllocateRaw(HeapNumber::kSize, space);
if (result->IsFailure()) return result;
Object* Heap::CreateOddball(Map* map,
const char* to_string,
Object* to_number) {
- Object* result = Allocate(map, CODE_SPACE);
+ Object* result = Allocate(map, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
return Oddball::cast(result)->Initialize(to_string, to_number);
}
if (obj->IsFailure()) return false;
nan_value_ = obj;
- obj = Allocate(oddball_map(), CODE_SPACE);
+ obj = Allocate(oddball_map(), OLD_DATA_SPACE);
if (obj->IsFailure()) return false;
undefined_value_ = obj;
ASSERT(!InNewSpace(undefined_value()));
Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate proxies in paged spaces.
STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
- AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
+ AllocationSpace space =
+ (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
Object* result = Allocate(proxy_map(), space);
if (result->IsFailure()) return result;
Object* Heap::AllocateByteArray(int length) {
int size = ByteArray::SizeFor(length);
- AllocationSpace space = size > MaxHeapObjectSize() ? LO_SPACE : NEW_SPACE;
+ AllocationSpace space =
+ size > MaxHeapObjectSize() ? LO_SPACE : NEW_SPACE;
Object* result = AllocateRaw(size, space);
+
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(byte_array_map());
int sinfo_size = 0;
if (sinfo != NULL) sinfo_size = sinfo->Serialize(NULL);
int obj_size = Code::SizeFor(body_size, sinfo_size);
- AllocationSpace space =
- (obj_size > MaxHeapObjectSize()) ? LO_SPACE : CODE_SPACE;
- Object* result = AllocateRaw(obj_size, space);
+ Object* result;
+ if (obj_size > MaxHeapObjectSize()) {
+ result = lo_space_->AllocateRawCode(obj_size);
+ } else {
+ result = code_space_->AllocateRaw(obj_size);
+ }
if (result->IsFailure()) return result;
// Initialize the object
Object* Heap::CopyCode(Code* code) {
// Allocate an object the same size as the code object.
int obj_size = code->Size();
- AllocationSpace space =
- (obj_size > MaxHeapObjectSize()) ? LO_SPACE : CODE_SPACE;
- Object* result = AllocateRaw(obj_size, space);
+ Object* result;
+ if (obj_size > MaxHeapObjectSize()) {
+ result = lo_space_->AllocateRawCode(obj_size);
+ } else {
+ result = code_space_->AllocateRaw(obj_size);
+ }
+
if (result->IsFailure()) return result;
// Copy code object.
Object* Heap::AllocateFunction(Map* function_map,
SharedFunctionInfo* shared,
Object* prototype) {
- Object* result = Allocate(function_map, OLD_SPACE);
+ Object* result = Allocate(function_map, OLD_POINTER_SPACE);
if (result->IsFailure()) return result;
return InitializeFunction(JSFunction::cast(result), shared, prototype);
}
if (properties->IsFailure()) return properties;
// Allocate the JSObject.
- AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
+ AllocationSpace space =
+ (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
if (map->instance_size() > MaxHeapObjectSize()) space = LO_SPACE;
Object* obj = Allocate(map, space);
if (obj->IsFailure()) return obj;
}
// Allocate string.
- AllocationSpace space = (size > MaxHeapObjectSize()) ? LO_SPACE : CODE_SPACE;
+ AllocationSpace space =
+ (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_DATA_SPACE;
Object* result = AllocateRaw(size, space);
if (result->IsFailure()) return result;
Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
- AllocationSpace space = (pretenure == TENURED) ? CODE_SPACE : NEW_SPACE;
+ AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
int size = AsciiString::SizeFor(length);
if (size > MaxHeapObjectSize()) {
space = LO_SPACE;
Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
- AllocationSpace space = (pretenure == TENURED) ? CODE_SPACE : NEW_SPACE;
+ AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
int size = TwoByteString::SizeFor(length);
if (size > MaxHeapObjectSize()) {
space = LO_SPACE;
Object* Heap::AllocateEmptyFixedArray() {
int size = FixedArray::SizeFor(0);
- Object* result = AllocateRaw(size, CODE_SPACE);
+ Object* result = AllocateRaw(size, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
// Initialize the object.
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
if (size > MaxHeapObjectSize()) {
result = lo_space_->AllocateRawFixedArray(size);
} else {
- AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
+ AllocationSpace space =
+ (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
result = AllocateRaw(size, space);
}
if (result->IsFailure()) return result;
}
int size = map->instance_size();
AllocationSpace space =
- (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_SPACE;
+ (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_POINTER_SPACE;
Object* result = Heap::Allocate(map, space);
if (result->IsFailure()) return result;
Struct::cast(result)->InitializeBody(size);
void Heap::Print() {
if (!HasBeenSetup()) return;
Top::PrintStack();
- new_space_->Print();
- old_space_->Print();
- code_space_->Print();
- map_space_->Print();
- lo_space_->Print();
+ AllSpaces spaces;
+ while (Space* space = spaces.next()) space->Print();
}
MemoryAllocator::ReportStatistics();
PrintF("To space : ");
new_space_->ReportStatistics();
- PrintF("Old space : ");
- old_space_->ReportStatistics();
+ PrintF("Old pointer space : ");
+ old_pointer_space_->ReportStatistics();
+ PrintF("Old data space : ");
+ old_data_space_->ReportStatistics();
PrintF("Code space : ");
code_space_->ReportStatistics();
PrintF("Map space : ");
if (OS::IsOutsideAllocatedSpace(addr)) return false;
return HasBeenSetup() &&
(new_space_->ToSpaceContains(addr) ||
- old_space_->Contains(addr) ||
+ old_pointer_space_->Contains(addr) ||
+ old_data_space_->Contains(addr) ||
code_space_->Contains(addr) ||
map_space_->Contains(addr) ||
lo_space_->SlowContains(addr));
switch (space) {
case NEW_SPACE:
return new_space_->ToSpaceContains(addr);
- case OLD_SPACE:
- return old_space_->Contains(addr);
+ case OLD_POINTER_SPACE:
+ return old_pointer_space_->Contains(addr);
+ case OLD_DATA_SPACE:
+ return old_data_space_->Contains(addr);
case CODE_SPACE:
return code_space_->Contains(addr);
case MAP_SPACE:
VerifyPointersVisitor visitor;
Heap::IterateRoots(&visitor);
- Heap::new_space_->Verify();
- Heap::old_space_->Verify();
- Heap::code_space_->Verify();
- Heap::map_space_->Verify();
- Heap::lo_space_->Verify();
+ AllSpaces spaces;
+ while (Space* space = spaces.next()) {
+ space->Verify();
+ }
}
#endif // DEBUG
void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
ASSERT(Page::is_rset_in_use());
- ASSERT(space == old_space_ || space == map_space_);
+ ASSERT(space == old_pointer_space_ || space == map_space_);
PageIterator it(space, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
int Heap::PromotedSpaceSize() {
- return old_space_->Size()
+ return old_pointer_space_->Size()
+ + old_data_space_->Size()
+ code_space_->Size()
+ map_space_->Size()
+ lo_space_->Size();
int old_space_size = new_space_start - old_space_start;
int code_space_size = young_generation_size_ - old_space_size;
- // Initialize new space. It will not contain code.
+ // Initialize new space.
new_space_ = new NewSpace(initial_semispace_size_,
semispace_size_,
- NEW_SPACE,
- false);
+ NEW_SPACE);
if (new_space_ == NULL) return false;
if (!new_space_->Setup(new_space_start, young_generation_size_)) return false;
// Initialize old space, set the maximum capacity to the old generation
// size. It will not contain code.
- old_space_ = new OldSpace(old_generation_size_, OLD_SPACE, false);
- if (old_space_ == NULL) return false;
- if (!old_space_->Setup(old_space_start, old_space_size)) return false;
+ old_pointer_space_ =
+ new OldSpace(old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
+ if (old_pointer_space_ == NULL) return false;
+ if (!old_pointer_space_->Setup(old_space_start, old_space_size >> 1)) {
+ return false;
+ }
+ old_data_space_ =
+ new OldSpace(old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
+ if (old_data_space_ == NULL) return false;
+ if (!old_data_space_->Setup(old_space_start + (old_space_size >> 1),
+ old_space_size >> 1)) {
+ return false;
+ }
// Initialize the code space, set its maximum capacity to the old
// generation size. It needs executable memory.
- code_space_ = new OldSpace(old_generation_size_, CODE_SPACE, true);
+ code_space_ =
+ new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
if (!code_space_->Setup(code_space_start, code_space_size)) return false;
// enough to hold at least a page will cause it to allocate.
if (!map_space_->Setup(NULL, 0)) return false;
- // The large object space may contain code, so it needs executable memory.
- lo_space_ = new LargeObjectSpace(LO_SPACE, true);
+ // The large object space may contain code or data. We set the memory
+ // to be non-executable here for safety, but this means we need to enable it
+ // explicitly when allocating large code objects.
+ lo_space_ = new LargeObjectSpace(LO_SPACE);
if (lo_space_ == NULL) return false;
if (!lo_space_->Setup()) return false;
new_space_ = NULL;
}
- if (old_space_ != NULL) {
- old_space_->TearDown();
- delete old_space_;
- old_space_ = NULL;
+ if (old_pointer_space_ != NULL) {
+ old_pointer_space_->TearDown();
+ delete old_pointer_space_;
+ old_pointer_space_ = NULL;
+ }
+
+ if (old_data_space_ != NULL) {
+ old_data_space_->TearDown();
+ delete old_data_space_;
+ old_data_space_ = NULL;
}
if (code_space_ != NULL) {
void Heap::Shrink() {
// Try to shrink map, old, and code spaces.
map_space_->Shrink();
- old_space_->Shrink();
+ old_pointer_space_->Shrink();
+ old_data_space_->Shrink();
code_space_->Shrink();
}
#endif
+Space* AllSpaces::next() {
+ switch (counter_++) {
+ case NEW_SPACE:
+ return Heap::new_space();
+ case OLD_POINTER_SPACE:
+ return Heap::old_pointer_space();
+ case OLD_DATA_SPACE:
+ return Heap::old_data_space();
+ case CODE_SPACE:
+ return Heap::code_space();
+ case MAP_SPACE:
+ return Heap::map_space();
+ case LO_SPACE:
+ return Heap::lo_space();
+ default:
+ return NULL;
+ }
+}
+
+
+PagedSpace* PagedSpaces::next() {
+ switch (counter_++) {
+ case OLD_POINTER_SPACE:
+ return Heap::old_pointer_space();
+ case OLD_DATA_SPACE:
+ return Heap::old_data_space();
+ case CODE_SPACE:
+ return Heap::code_space();
+ case MAP_SPACE:
+ return Heap::map_space();
+ default:
+ return NULL;
+ }
+}
+
+
+OldSpace* OldSpaces::next() {
+ switch (counter_++) {
+ case OLD_POINTER_SPACE:
+ return Heap::old_pointer_space();
+ case OLD_DATA_SPACE:
+ return Heap::old_data_space();
+ case CODE_SPACE:
+ return Heap::code_space();
+ default:
+ return NULL;
+ }
+}
+
+
SpaceIterator::SpaceIterator() : current_space_(FIRST_SPACE), iterator_(NULL) {
}
case NEW_SPACE:
iterator_ = new SemiSpaceIterator(Heap::new_space());
break;
- case OLD_SPACE:
- iterator_ = new HeapObjectIterator(Heap::old_space());
+ case OLD_POINTER_SPACE:
+ iterator_ = new HeapObjectIterator(Heap::old_pointer_space());
+ break;
+ case OLD_DATA_SPACE:
+ iterator_ = new HeapObjectIterator(Heap::old_data_space());
break;
case CODE_SPACE:
iterator_ = new HeapObjectIterator(Heap::code_space());
static Address NewSpaceTop() { return new_space_->top(); }
static NewSpace* new_space() { return new_space_; }
- static OldSpace* old_space() { return old_space_; }
+ static OldSpace* old_pointer_space() { return old_pointer_space_; }
+ static OldSpace* old_data_space() { return old_data_space_; }
static OldSpace* code_space() { return code_space_; }
static MapSpace* map_space() { return map_space_; }
static LargeObjectSpace* lo_space() { return lo_space_; }
static Object* AllocateExternalStringFromTwoByte(
ExternalTwoByteString::Resource* resource);
- // Allocates an uninitialized object.
+ // Allocates an uninitialized object. The memory is non-executable if the
+ // hardware and OS allow.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- static inline Object* AllocateRaw(int size_in_bytes, AllocationSpace space);
-
-
- // Allocate an unitialized object during deserialization. Performs linear
- // allocation (ie, guaranteed no free list allocation) and assumes the
- // spaces are all preexpanded so allocation should not fail.
- static inline Object* AllocateForDeserialization(int size_in_bytes,
- AllocationSpace space);
+ static inline Object* AllocateRaw(int size_in_bytes,
+ AllocationSpace space);
// Makes a new native code object
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// Returns whether required_space bytes are available after the collection.
static bool CollectGarbage(int required_space, AllocationSpace space);
+ // Performs a full garbage collection.
+ static void CollectAllGarbage();
+
// Utility to invoke the scavenger. This is needed in test code to
// ensure correct callback for weak global handles.
static void PerformScavenge();
static bool InSpace(HeapObject* value, AllocationSpace space);
// Finds out which space an object should get promoted to based on its type.
- static inline AllocationSpace TargetSpace(HeapObject* object);
+ static inline OldSpace* TargetSpace(HeapObject* object);
// Sets the stub_cache_ (only used when expanding the dictionary).
static void set_code_stubs(Dictionary* value) { code_stubs_ = value; }
static const int kMaxMapSpaceSize = 8*MB;
static NewSpace* new_space_;
- static OldSpace* old_space_;
+ static OldSpace* old_pointer_space_;
+ static OldSpace* old_data_space_;
static OldSpace* code_space_;
static MapSpace* map_space_;
static LargeObjectSpace* lo_space_;
bool new_object,
PretenureFlag pretenure = NOT_TENURED);
- // Allocate an uninitialized object in map space. The behavior is
- // identical to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that
- // (a) it doesn't have to test the allocation space argument and (b) can
- // reduce code size (since both AllocateRaw and AllocateRawMap are
- // inlined).
+ // Allocate an uninitialized object in map space. The behavior is identical
+ // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
+ // have to test the allocation space argument and (b) can reduce code size
+ // (since both AllocateRaw and AllocateRawMap are inlined).
static inline Object* AllocateRawMap(int size_in_bytes);
// Space iterator for iterating over all spaces of the heap.
+// Returns each space in turn, and null when it is done.
+class AllSpaces BASE_EMBEDDED {
+ public:
+ Space* next();
+ AllSpaces() { counter_ = FIRST_SPACE; }
+ private:
+ int counter_;
+};
+
+
+// Space iterator for iterating over all old spaces of the heap: Old pointer
+// space, old data space and code space.
+// Returns each space in turn, and null when it is done.
+class OldSpaces BASE_EMBEDDED {
+ public:
+ OldSpace* next();
+ OldSpaces() { counter_ = OLD_POINTER_SPACE; }
+ private:
+ int counter_;
+};
+
+
+// Space iterator for iterating over all the paged spaces of the heap:
+// Map space, old pointer space, old data space and code space.
+// Returns each space in turn, and null when it is done.
+class PagedSpaces BASE_EMBEDDED {
+ public:
+ PagedSpace* next();
+ PagedSpaces() { counter_ = OLD_POINTER_SPACE; }
+ private:
+ int counter_;
+};
+
+
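All three iterators support the same pattern, used throughout this patch: construct one, then call next() until it returns NULL. For example (OldGenerationSizeSketch is hypothetical):

// Sketch: sum the size of the old spaces without naming each one.
static int OldGenerationSizeSketch() {
  int total = 0;
  OldSpaces spaces;
  while (OldSpace* space = spaces.next()) total += space->Size();
  return total;
}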
+// Space iterator for iterating over all spaces of the heap.
// For each space an object iterator is provided. The deallocation of the
// returned object iterators is handled by the space iterator.
-
class SpaceIterator : public Malloced {
public:
SpaceIterator();
// collection.
int MarkCompactCollector::live_bytes_ = 0;
int MarkCompactCollector::live_young_objects_ = 0;
-int MarkCompactCollector::live_old_objects_ = 0;
-int MarkCompactCollector::live_immutable_objects_ = 0;
+int MarkCompactCollector::live_old_data_objects_ = 0;
+int MarkCompactCollector::live_old_pointer_objects_ = 0;
+int MarkCompactCollector::live_code_objects_ = 0;
int MarkCompactCollector::live_map_objects_ = 0;
int MarkCompactCollector::live_lo_objects_ = 0;
#endif
// because objects do not get promoted out of new space on non-compacting
// GCs.
if (!compacting_collection_) {
- int old_gen_recoverable = Heap::old_space()->Waste()
- + Heap::old_space()->AvailableFree()
- + Heap::code_space()->Waste()
- + Heap::code_space()->AvailableFree();
- int old_gen_used = old_gen_recoverable
- + Heap::old_space()->Size()
- + Heap::code_space()->Size();
- int old_gen_fragmentation = (old_gen_recoverable * 100) / old_gen_used;
+ int old_gen_recoverable = 0;
+ int old_gen_used = 0;
+
+ OldSpaces spaces;
+ while (OldSpace* space = spaces.next()) {
+ old_gen_recoverable += space->Waste() + space->AvailableFree();
+ old_gen_used += space->Size();
+ }
+ int old_gen_fragmentation =
+ static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
if (old_gen_fragmentation > kFragmentationLimit) {
compacting_collection_ = true;
}
}
#endif
- Heap::map_space()->PrepareForMarkCompact(compacting_collection_);
- Heap::old_space()->PrepareForMarkCompact(compacting_collection_);
- Heap::code_space()->PrepareForMarkCompact(compacting_collection_);
+ PagedSpaces spaces;
+ while (PagedSpace* space = spaces.next()) {
+ space->PrepareForMarkCompact(compacting_collection_);
+ }
Counters::global_objects.Set(0);
#ifdef DEBUG
live_bytes_ = 0;
live_young_objects_ = 0;
- live_old_objects_ = 0;
- live_immutable_objects_ = 0;
+ live_old_pointer_objects_ = 0;
+ live_old_data_objects_ = 0;
+ live_code_objects_ = 0;
live_map_objects_ = 0;
live_lo_objects_ = 0;
#endif
ScanOverflowedObjects(&new_it);
if (marking_stack.is_full()) return;
- HeapObjectIterator old_it(Heap::old_space(), &OverflowObjectSize);
- ScanOverflowedObjects(&old_it);
+ HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
+ &OverflowObjectSize);
+ ScanOverflowedObjects(&old_pointer_it);
+ if (marking_stack.is_full()) return;
+
+ HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
+ ScanOverflowedObjects(&old_data_it);
if (marking_stack.is_full()) return;
HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
} else if (Heap::map_space()->Contains(obj)) {
ASSERT(obj->IsMap());
live_map_objects_++;
- } else if (Heap::old_space()->Contains(obj)) {
- live_old_objects_++;
+ } else if (Heap::old_pointer_space()->Contains(obj)) {
+ live_old_pointer_objects_++;
+ } else if (Heap::old_data_space()->Contains(obj)) {
+ live_old_data_objects_++;
} else if (Heap::code_space()->Contains(obj)) {
- live_immutable_objects_++;
+ live_code_objects_++;
} else if (Heap::lo_space()->Contains(obj)) {
live_lo_objects_++;
} else {
void MarkCompactCollector::VerifyHeapAfterMarkingPhase() {
Heap::new_space()->Verify();
- Heap::old_space()->Verify();
+ Heap::old_pointer_space()->Verify();
+ Heap::old_data_space()->Verify();
Heap::code_space()->Verify();
Heap::map_space()->Verify();
SemiSpaceIterator new_it(Heap::new_space(), &CountMarkedCallback);
CHECK_LIVE_OBJECTS(new_it, live_young_objects_);
- HeapObjectIterator old_it(Heap::old_space(), &CountMarkedCallback);
- CHECK_LIVE_OBJECTS(old_it, live_old_objects_);
+ HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
+ &CountMarkedCallback);
+ CHECK_LIVE_OBJECTS(old_pointer_it, live_old_pointer_objects_);
+
+ HeapObjectIterator old_data_it(Heap::old_data_space(), &CountMarkedCallback);
+ CHECK_LIVE_OBJECTS(old_data_it, live_old_data_objects_);
HeapObjectIterator code_it(Heap::code_space(), &CountMarkedCallback);
- CHECK_LIVE_OBJECTS(code_it, live_immutable_objects_);
+ CHECK_LIVE_OBJECTS(code_it, live_code_objects_);
HeapObjectIterator map_it(Heap::map_space(), &CountMarkedCallback);
CHECK_LIVE_OBJECTS(map_it, live_map_objects_);
-// Try to promote all objects in new space. Heap numbers and sequential
-// strings are promoted to the code space, all others to the old space.
+// Try to promote all objects in new space. Heap numbers and sequential
+// strings are promoted to old data space, all others to old pointer space.
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
- AllocationSpace target_space = Heap::TargetSpace(object);
- Object* forwarded;
- if (target_space == OLD_SPACE) {
- forwarded = Heap::old_space()->MCAllocateRaw(object_size);
- } else {
- ASSERT(target_space == CODE_SPACE);
- forwarded = Heap::code_space()->MCAllocateRaw(object_size);
- }
+ OldSpace* target_space = Heap::TargetSpace(object);
+ ASSERT(target_space == Heap::old_pointer_space() ||
+ target_space == Heap::old_data_space());
+ Object* forwarded = target_space->MCAllocateRaw(object_size);
if (forwarded->IsFailure()) {
forwarded = Heap::new_space()->MCAllocateRaw(object_size);
// Allocation functions for the paged spaces call the space's MCAllocateRaw.
-inline Object* MCAllocateFromOldSpace(HeapObject* object, int object_size) {
- return Heap::old_space()->MCAllocateRaw(object_size);
+inline Object* MCAllocateFromOldPointerSpace(HeapObject* object,
+ int object_size) {
+ return Heap::old_pointer_space()->MCAllocateRaw(object_size);
+}
+
+
+inline Object* MCAllocateFromOldDataSpace(HeapObject* object, int object_size) {
+ return Heap::old_data_space()->MCAllocateRaw(object_size);
}
}
-void MarkCompactCollector::DeallocateOldBlock(Address start,
- int size_in_bytes) {
+void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
+ int size_in_bytes) {
Heap::ClearRSetRange(start, size_in_bytes);
- Heap::old_space()->Free(start, size_in_bytes);
+ Heap::old_pointer_space()->Free(start, size_in_bytes);
+}
+
+
+void MarkCompactCollector::DeallocateOldDataBlock(Address start,
+ int size_in_bytes) {
+ Heap::old_data_space()->Free(start, size_in_bytes);
}
Heap::new_space()->MCResetRelocationInfo();
// Compute the forwarding pointers in each space.
- EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldSpace,
+ EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
+ IgnoreNonLiveObject>(
+ Heap::old_pointer_space());
+
+ EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
IgnoreNonLiveObject>(
- Heap::old_space());
+ Heap::old_data_space());
EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
LogNonLiveCodeObject>(
// Write relocation info to the top page, so we can use it later. This is
// done after promoting objects from the new space so we get the correct
// allocation top.
- Heap::old_space()->MCWriteRelocationInfoToPage();
+ Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
+ Heap::old_data_space()->MCWriteRelocationInfoToPage();
Heap::code_space()->MCWriteRelocationInfoToPage();
Heap::map_space()->MCWriteRelocationInfoToPage();
}
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
- SweepSpace(Heap::old_space(), &DeallocateOldBlock);
+ SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
+ SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
SweepSpace(Heap::new_space());
SweepSpace(Heap::map_space(), &DeallocateMapBlock);
void MarkCompactCollector::VerifyHeapAfterEncodingForwardingAddresses() {
- Heap::new_space()->Verify();
- Heap::old_space()->Verify();
- Heap::code_space()->Verify();
- Heap::map_space()->Verify();
+ AllSpaces spaces;
+ while (Space* space = spaces.next()) space->Verify();
ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
int live_maps = IterateLiveObjects(Heap::map_space(), &VerifyMapObject);
ASSERT(live_maps == live_map_objects_);
// Verify page headers in paged spaces.
- VerifyPageHeaders(Heap::old_space());
- VerifyPageHeaders(Heap::code_space());
- VerifyPageHeaders(Heap::map_space());
+ PagedSpaces paged_spaces;
+ while (PagedSpace* space = paged_spaces.next()) VerifyPageHeaders(space);
}
new_addr = Memory::Address_at(f_addr);
#ifdef DEBUG
- ASSERT(Heap::old_space()->Contains(new_addr) ||
+ ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
+ Heap::old_data_space()->Contains(new_addr) ||
Heap::code_space()->Contains(new_addr) ||
Heap::new_space()->FromSpaceContains(new_addr));
return;
} else {
- ASSERT(Heap::old_space()->Contains(obj) ||
+ ASSERT(Heap::old_pointer_space()->Contains(obj) ||
+ Heap::old_data_space()->Contains(obj) ||
Heap::code_space()->Contains(obj) ||
Heap::map_space()->Contains(obj));
new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
- ASSERT(Heap::old_space()->Contains(new_addr) ||
+ ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
+ Heap::old_data_space()->Contains(new_addr) ||
Heap::code_space()->Contains(new_addr) ||
Heap::map_space()->Contains(new_addr));
#ifdef DEBUG
- if (Heap::old_space()->Contains(obj)) {
- ASSERT(Heap::old_space()->MCSpaceOffsetForAddress(new_addr) <=
- Heap::old_space()->MCSpaceOffsetForAddress(old_addr));
+ if (Heap::old_pointer_space()->Contains(obj)) {
+ ASSERT(Heap::old_pointer_space()->MCSpaceOffsetForAddress(new_addr) <=
+ Heap::old_pointer_space()->MCSpaceOffsetForAddress(old_addr));
+ } else if (Heap::old_data_space()->Contains(obj)) {
+ ASSERT(Heap::old_data_space()->MCSpaceOffsetForAddress(new_addr) <=
+ Heap::old_data_space()->MCSpaceOffsetForAddress(old_addr));
} else if (Heap::code_space()->Contains(obj)) {
ASSERT(Heap::code_space()->MCSpaceOffsetForAddress(new_addr) <=
Heap::code_space()->MCSpaceOffsetForAddress(old_addr));
int live_maps = IterateLiveObjects(Heap::map_space(),
&UpdatePointersInOldObject);
- int live_olds = IterateLiveObjects(Heap::old_space(),
- &UpdatePointersInOldObject);
- int live_immutables = IterateLiveObjects(Heap::code_space(),
- &UpdatePointersInOldObject);
+ int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
+ &UpdatePointersInOldObject);
+ int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
+ &UpdatePointersInOldObject);
+ int live_codes = IterateLiveObjects(Heap::code_space(),
+ &UpdatePointersInOldObject);
int live_news = IterateLiveObjects(Heap::new_space(),
&UpdatePointersInNewObject);
while (it.has_next()) UpdatePointersInNewObject(it.next());
USE(live_maps);
- USE(live_olds);
- USE(live_immutables);
+ USE(live_pointer_olds);
+ USE(live_data_olds);
+ USE(live_codes);
USE(live_news);
#ifdef DEBUG
ASSERT(live_maps == live_map_objects_);
- ASSERT(live_olds == live_old_objects_);
- ASSERT(live_immutables == live_immutable_objects_);
+ ASSERT(live_data_olds == live_old_data_objects_);
+ ASSERT(live_pointer_olds == live_old_pointer_objects_);
+ ASSERT(live_codes == live_code_objects_);
ASSERT(live_news == live_young_objects_);
if (FLAG_verify_global_gc) VerifyHeapAfterUpdatingPointers();
void MarkCompactCollector::VerifyHeapAfterUpdatingPointers() {
ASSERT(state_ == UPDATE_POINTERS);
- Heap::new_space()->Verify();
- Heap::old_space()->Verify();
- Heap::code_space()->Verify();
- Heap::map_space()->Verify();
-
- // We don't have object size info after updating pointers, not much we can
- // do here.
- VerifyPageHeaders(Heap::old_space());
- VerifyPageHeaders(Heap::code_space());
- VerifyPageHeaders(Heap::map_space());
+ AllSpaces spaces;
+ while (Space* space = spaces.next()) space->Verify();
+ PagedSpaces paged_spaces;
+ while (PagedSpace* space = paged_spaces.next()) VerifyPageHeaders(space);
}
#endif
// Relocates objects, always relocate map objects first. Relocating
// objects in other space relies on map objects to get object size.
int live_maps = IterateLiveObjects(Heap::map_space(), &RelocateMapObject);
- int live_olds = IterateLiveObjects(Heap::old_space(), &RelocateOldObject);
- int live_immutables =
- IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
+ int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
+ &RelocateOldPointerObject);
+ int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
+ &RelocateOldDataObject);
+ int live_codes = IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
int live_news = IterateLiveObjects(Heap::new_space(), &RelocateNewObject);
USE(live_maps);
- USE(live_olds);
- USE(live_immutables);
+ USE(live_data_olds);
+ USE(live_pointer_olds);
+ USE(live_codes);
USE(live_news);
#ifdef DEBUG
ASSERT(live_maps == live_map_objects_);
- ASSERT(live_olds == live_old_objects_);
- ASSERT(live_immutables == live_immutable_objects_);
+ ASSERT(live_data_olds == live_old_data_objects_);
+ ASSERT(live_pointer_olds == live_old_pointer_objects_);
+ ASSERT(live_codes == live_code_objects_);
ASSERT(live_news == live_young_objects_);
#endif
// page-by-page basis after committing the m-c forwarding pointer.
Page::set_rset_state(Page::IN_USE);
#endif
- Heap::map_space()->MCCommitRelocationInfo();
- Heap::old_space()->MCCommitRelocationInfo();
- Heap::code_space()->MCCommitRelocationInfo();
+ PagedSpaces spaces;
+ while (PagedSpace* space = spaces.next()) space->MCCommitRelocationInfo();
#ifdef DEBUG
if (FLAG_verify_global_gc) VerifyHeapAfterRelocatingObjects();
}
-int MarkCompactCollector::RelocateOldObject(HeapObject* obj) {
- // decode map pointer (forwarded address)
- MapWord encoding = obj->map_word();
- Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
- ASSERT(Heap::map_space()->Contains(map_addr));
-
- // Get forwarding address before resetting map pointer
- Address new_addr = GetForwardingAddressInOldSpace(obj);
-
+static inline int RelocateOldObject(HeapObject* obj,
+ OldSpace* space,
+ Address new_addr,
+ Address map_addr) {
// recover map pointer
obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
int obj_size = obj->Size();
ASSERT_OBJECT_SIZE(obj_size);
- Address old_addr = obj->address();
-
- ASSERT(Heap::old_space()->MCSpaceOffsetForAddress(new_addr) <=
- Heap::old_space()->MCSpaceOffsetForAddress(old_addr));
-
- Heap::old_space()->MCAdjustRelocationEnd(new_addr, obj_size);
-
- if (new_addr != old_addr) {
- memmove(new_addr, old_addr, obj_size); // copy contents
- }
- HeapObject* copied_to = HeapObject::FromAddress(new_addr);
- if (copied_to->IsCode()) {
- // may also update inline cache target.
- Code::cast(copied_to)->Relocate(new_addr - old_addr);
- // Notify the logger that compile code has moved.
- LOG(CodeMoveEvent(old_addr, new_addr));
- }
+ ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
+ space->MCSpaceOffsetForAddress(obj->address()));
+
+ space->MCAdjustRelocationEnd(new_addr, obj_size);
#ifdef DEBUG
if (FLAG_gc_verbose) {
- PrintF("relocate %p -> %p\n", old_addr, new_addr);
+ PrintF("relocate %p -> %p\n", obj->address(), new_addr);
}
#endif
}
-int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
+int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
+ OldSpace* space) {
// decode map pointer (forwarded address)
MapWord encoding = obj->map_word();
Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
- ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+ ASSERT(Heap::map_space()->Contains(map_addr));
// Get forwarding address before resetting map pointer
Address new_addr = GetForwardingAddressInOldSpace(obj);
- // recover map pointer
- obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
-
- // This is a non-map object, it relies on the assumption that the Map space
- // is compacted before the other spaces (see RelocateObjects).
- int obj_size = obj->Size();
- ASSERT_OBJECT_SIZE(obj_size);
+ int obj_size = RelocateOldObject(obj, space, new_addr, map_addr);
Address old_addr = obj->address();
- ASSERT(Heap::code_space()->MCSpaceOffsetForAddress(new_addr) <=
- Heap::code_space()->MCSpaceOffsetForAddress(old_addr));
- Heap::code_space()->MCAdjustRelocationEnd(new_addr, obj_size);
+ if (new_addr != old_addr) {
+ memmove(new_addr, old_addr, obj_size); // copy contents
+ }
+
+ ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
+
+ return obj_size;
+}
+
+
+int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
+ return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
+}
+
+
+int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
+ return RelocateOldNonCodeObject(obj, Heap::old_data_space());
+}
+
+
+int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
+ // decode map pointer (forwarded address)
+ MapWord encoding = obj->map_word();
+ Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
+ ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
+
+ // Get forwarding address before resetting map pointer
+ Address new_addr = GetForwardingAddressInOldSpace(obj);
+
+ int obj_size = RelocateOldObject(obj, Heap::code_space(), new_addr, map_addr);
// convert inline cache target to address using old address
if (obj->IsCode()) {
Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
}
+ Address old_addr = obj->address();
+
if (new_addr != old_addr) {
memmove(new_addr, old_addr, obj_size); // copy contents
}
LOG(CodeMoveEvent(old_addr, new_addr));
}
-#ifdef DEBUG
- if (FLAG_gc_verbose) {
- PrintF("relocate %p -> %p\n", old_addr, new_addr);
- }
-#endif
-
return obj_size;
}
ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
} else {
- AllocationSpace target_space = Heap::TargetSpace(obj);
- if (target_space == OLD_SPACE) {
- Heap::old_space()->MCAdjustRelocationEnd(new_addr, obj_size);
- } else {
- ASSERT(target_space == CODE_SPACE);
- Heap::code_space()->MCAdjustRelocationEnd(new_addr, obj_size);
- }
+ OldSpace* target_space = Heap::TargetSpace(obj);
+ ASSERT(target_space == Heap::old_pointer_space() ||
+ target_space == Heap::old_data_space());
+ target_space->MCAdjustRelocationEnd(new_addr, obj_size);
}
// New and old addresses cannot overlap.
ASSERT(state_ == RELOCATE_OBJECTS);
Heap::new_space()->Verify();
- Heap::old_space()->Verify();
- Heap::code_space()->Verify();
- Heap::map_space()->Verify();
-
- PageIterator old_it(Heap::old_space(), PageIterator::PAGES_IN_USE);
- while (old_it.has_next()) {
- Page* p = old_it.next();
- ASSERT_PAGE_OFFSET(p->Offset(p->AllocationTop()));
- }
-
- PageIterator code_it(Heap::code_space(), PageIterator::PAGES_IN_USE);
- while (code_it.has_next()) {
- Page* p = code_it.next();
- ASSERT_PAGE_OFFSET(p->Offset(p->AllocationTop()));
- }
-
- PageIterator map_it(Heap::map_space(), PageIterator::PAGES_IN_USE);
- while (map_it.has_next()) {
- Page* p = map_it.next();
- ASSERT_PAGE_OFFSET(p->Offset(p->AllocationTop()));
+ PagedSpaces spaces;
+ while (PagedSpace* space = spaces.next()) {
+ space->Verify();
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+ while (it.has_next()) {
+ Page* p = it.next();
+ ASSERT_PAGE_OFFSET(p->Offset(p->AllocationTop()));
+ }
}
}
#endif
// Callback functions for deallocating non-live blocks in the old
// generation.
- static void DeallocateOldBlock(Address start, int size_in_bytes);
+ static void DeallocateOldPointerBlock(Address start, int size_in_bytes);
+ static void DeallocateOldDataBlock(Address start, int size_in_bytes);
static void DeallocateCodeBlock(Address start, int size_in_bytes);
static void DeallocateMapBlock(Address start, int size_in_bytes);
static int RelocateMapObject(HeapObject* obj);
// Relocates an old object.
- static int RelocateOldObject(HeapObject* obj);
+ static int RelocateOldPointerObject(HeapObject* obj);
+ static int RelocateOldDataObject(HeapObject* obj);
- // Relocates an immutable object in the code space.
+ // Helper function.
+ static inline int RelocateOldNonCodeObject(HeapObject* obj, OldSpace* space);
+
+ // Relocates an object in the code space.
static int RelocateCodeObject(HeapObject* obj);
// Copy a new object.
// Number of live objects in Heap::to_space_.
static int live_young_objects_;
- // Number of live objects in Heap::old_space_.
- static int live_old_objects_;
+ // Number of live objects in Heap::old_pointer_space_.
+ static int live_old_pointer_objects_;
+
+ // Number of live objects in Heap::old_data_space_.
+ static int live_old_data_objects_;
// Number of live objects in Heap::code_space_.
- static int live_immutable_objects_;
+ static int live_code_objects_;
// Number of live objects in Heap::map_space_.
static int live_map_objects_;
i::Bootstrapper::NativesSourceLookup(i);
}
}
- // Get rid of unreferenced scripts.
- i::Heap::CollectGarbage(0, i::OLD_SPACE);
+ // Get rid of unreferenced scripts with a global GC.
+ i::Heap::CollectAllGarbage();
i::Serializer ser;
ser.Serialize();
char* str;
// Make the clone.
Object* clone = (pretenure == NOT_TENURED) ?
Heap::Allocate(map(), NEW_SPACE) :
- Heap::Allocate(map(), OLD_SPACE);
+ Heap::Allocate(map(), OLD_POINTER_SPACE);
if (clone->IsFailure()) return clone;
JSObject::cast(clone)->CopyBody(this);
// Proxy describes objects pointing from JavaScript to C structures.
+// Since they cannot contain references to JS HeapObjects, they can be
+// placed in old_data_space.
class Proxy: public HeapObject {
public:
// [proxy]: field containing the address.
static const int kMmapFdOffset = 0;
-VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
- address_ = mmap(address_hint, size, PROT_NONE,
+VirtualMemory::VirtualMemory(size_t size) {
+ address_ = mmap(NULL, size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
size_ = size;
static const int kMmapFdOffset = 0;
-VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
- address_ = mmap(address_hint, size, PROT_NONE,
+VirtualMemory::VirtualMemory(size_t size) {
+ address_ = mmap(NULL, size, PROT_NONE,
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
kMmapFd, kMmapFdOffset);
size_ = size;
}
-VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
- address_ =
- VirtualAlloc(address_hint, size, MEM_RESERVE, PAGE_NOACCESS);
+VirtualMemory::VirtualMemory(size_t size) {
+ address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
size_ = size;
}
class VirtualMemory {
public:
// Reserves virtual memory with size.
- VirtualMemory(size_t size, void* address_hint = 0);
+ explicit VirtualMemory(size_t size);
~VirtualMemory();
// Returns whether the memory has been reserved.
// Perform two GCs to get rid of all unreferenced scripts. The first GC gets
// rid of all the cached script wrappers and the second gets rid of the
// scripts which are no longer referenced.
- Heap::CollectGarbage(0, OLD_SPACE);
- Heap::CollectGarbage(0, OLD_SPACE);
+ Heap::CollectAllGarbage();
+ Heap::CollectAllGarbage();
// Get the number of scripts.
int count;
ASSERT(args.length() == 3);
// First perform a full GC in order to avoid references from dead objects.
- Heap::CollectGarbage(0, OLD_SPACE);
+ Heap::CollectAllGarbage();
// Check parameters.
CONVERT_CHECKED(JSObject, target, args[0]);
ASSERT(args.length() == 2);
// First perform a full GC in order to avoid dead objects.
- Heap::CollectGarbage(0, OLD_SPACE);
+ Heap::CollectAllGarbage();
// Check parameters.
CONVERT_CHECKED(JSFunction, constructor, args[0]);
// - MAP and OLD spaces: 16 bits of page number, 11 bits of word offset in page
// - NEW space: 27 bits of word offset
// - LO space: 27 bits of page number
-// 3 bits to encode the AllocationSpace
+// 3 bits to encode the AllocationSpace (special values for code and fixed
+// arrays in LO space)
// 2 bits identifying this as a HeapObject
const int kSpaceShift = kHeapObjectTagSize;
const int kSpaceBits = kSpaceTagSize;
const int kSpaceMask = kSpaceTagMask;
+// These values are used instead of space numbers when serializing/
+// deserializing. They indicate an object that is in large object space, but
+// should be treated specially.
+// Make the pages executable on platforms that support it:
+const int kLOSpaceExecutable = LAST_SPACE + 1;
+// Reserve space for write barrier bits (for objects that can contain
+// references to new space):
+const int kLOSpacePointer = LAST_SPACE + 2;
+
+
const int kOffsetShift = kSpaceShift + kSpaceBits;
const int kOffsetBits = 11;
const int kOffsetMask = (1 << kOffsetBits) - 1;
const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1;
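A worked check of the bit budget on a 32-bit word: 2 HeapObject tag bits + 3 space bits + 11 word-offset bits leave exactly the 16 page-number bits described above (2 + 3 + 11 + 16 = 32). The two special LO-space values also still fit in the 3-bit space field, since kLOSpacePointer == LAST_SPACE + 2 == 7 == kSpaceMask; a compile-time check in that spirit (assuming STATIC_ASSERT is usable at this scope):

// Sketch: both special serializer space values remain encodable in the
// kSpaceBits-wide field.
STATIC_ASSERT(kLOSpaceExecutable <= kSpaceMask);  // 6 <= 7.
STATIC_ASSERT(kLOSpacePointer <= kSpaceMask);     // 7 <= 7.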
-static inline AllocationSpace Space(Address addr) {
+static inline AllocationSpace GetSpace(Address addr) {
+ const int encoded = reinterpret_cast<int>(addr);
+ int space_number = ((encoded >> kSpaceShift) & kSpaceMask);
+ if (space_number == kLOSpaceExecutable ||
+ space_number == kLOSpacePointer) {
+ space_number = LO_SPACE;
+ }
+ return static_cast<AllocationSpace>(space_number);
+}
+
+
+static inline bool IsLargeExecutableObject(Address addr) {
const int encoded = reinterpret_cast<int>(addr);
- return static_cast<AllocationSpace>((encoded >> kSpaceShift) & kSpaceMask);
+ const int space_number = ((encoded >> kSpaceShift) & kSpaceMask);
+ if (space_number == kLOSpaceExecutable) return true;
+ return false;
+}
+
+
+static inline bool IsLargeFixedArray(Address addr) {
+ const int encoded = reinterpret_cast<int>(addr);
+ const int space_number = ((encoded >> kSpaceShift) & kSpaceMask);
+ if (space_number == kLOSpacePointer) return true;
+ return false;
}
class RelativeAddress {
public:
- RelativeAddress(AllocationSpace space, int page_index, int page_offset)
- : space_(space), page_index_(page_index), page_offset_(page_offset) {}
+ RelativeAddress(AllocationSpace space,
+ int page_index,
+ int page_offset)
+ : space_(space), page_index_(page_index), page_offset_(page_offset) {
+ ASSERT(space <= LAST_SPACE && space >= 0);
+ }
// Return the encoding of 'this' as an Address. Decode with constructor.
Address Encode() const;
- AllocationSpace space() const { return space_; }
+ AllocationSpace space() const {
+ if (space_ == kLOSpaceExecutable) return LO_SPACE;
+ if (space_ == kLOSpacePointer) return LO_SPACE;
+ return static_cast<AllocationSpace>(space_);
+ }
int page_index() const { return page_index_; }
int page_offset() const { return page_offset_; }
bool in_paged_space() const {
- return space_ == CODE_SPACE || space_ == OLD_SPACE || space_ == MAP_SPACE;
+ return space_ == CODE_SPACE ||
+ space_ == OLD_POINTER_SPACE ||
+ space_ == OLD_DATA_SPACE ||
+ space_ == MAP_SPACE;
}
void next_address(int offset) { page_offset_ += offset; }
void Verify();
#endif
+ void set_to_large_code_object() {
+ ASSERT(space_ == LO_SPACE);
+ space_ = kLOSpaceExecutable;
+ }
+ void set_to_large_fixed_array() {
+ ASSERT(space_ == LO_SPACE);
+ space_ = kLOSpacePointer;
+ }
+
private:
- AllocationSpace space_;
+ int space_;
int page_index_;
int page_offset_;
};
int result = 0;
switch (space_) {
case MAP_SPACE:
- case OLD_SPACE:
+ case OLD_POINTER_SPACE:
+ case OLD_DATA_SPACE:
case CODE_SPACE:
ASSERT_EQ(0, page_index_ & ~kPageMask);
word_offset = page_offset_ >> kObjectAlignmentBits;
result = word_offset << kPageAndOffsetShift;
break;
case LO_SPACE:
+ case kLOSpaceExecutable:
+ case kLOSpacePointer:
ASSERT_EQ(0, page_offset_);
ASSERT_EQ(0, page_index_ & ~kPageAndOffsetMask);
result = page_index_ << kPageAndOffsetShift;
ASSERT(page_offset_ >= 0 && page_index_ >= 0);
switch (space_) {
case MAP_SPACE:
- case OLD_SPACE:
+ case OLD_POINTER_SPACE:
+ case OLD_DATA_SPACE:
case CODE_SPACE:
ASSERT(Page::kObjectStartOffset <= page_offset_ &&
page_offset_ <= Page::kPageSize);
ASSERT(page_index_ == 0);
break;
case LO_SPACE:
+ case kLOSpaceExecutable:
+ case kLOSpacePointer:
ASSERT(page_offset_ == 0);
break;
}
}
#endif
+enum GCTreatment {
+ DataObject, // Object that cannot contain a reference to new space.
+ PointerObject, // Object that can contain a reference to new space.
+ CodeObject // Object that contains executable code.
+};
+
// A SimulatedHeapSpace simulates the allocation of objects in a page in
// the heap. It uses linear allocation - that is, it doesn't simulate the
// use of a free list. This simulated
// Returns the RelativeAddress where the next
// object of 'size' bytes will be allocated, and updates 'this' to
// point to the next free address beyond that object.
- RelativeAddress Allocate(int size);
+ RelativeAddress Allocate(int size, GCTreatment special_gc_treatment);
private:
RelativeAddress current_;
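Linear allocation here is a plain bump of the current relative address; a minimal sketch of the paged case (SimulatedBump is hypothetical and ignores the page-overflow and large-object handling that the real Allocate below performs):

// Sketch only: bump-pointer allocation within the current page.
static RelativeAddress SimulatedBump(RelativeAddress* current, int size) {
  RelativeAddress result = *current;  // object starts at the current top
  current->next_address(size);        // bump the top past the object
  return result;
}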
void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
switch (space) {
case MAP_SPACE:
- case OLD_SPACE:
+ case OLD_POINTER_SPACE:
+ case OLD_DATA_SPACE:
case CODE_SPACE:
current_ = RelativeAddress(space, 0, Page::kObjectStartOffset);
break;
void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) {
switch (space) {
case MAP_SPACE:
- case OLD_SPACE:
+ case OLD_POINTER_SPACE:
+ case OLD_DATA_SPACE:
case CODE_SPACE: {
PagedSpace* ps;
if (space == MAP_SPACE) {
ps = Heap::map_space();
- } else if (space == OLD_SPACE) {
- ps = Heap::old_space();
+ } else if (space == OLD_POINTER_SPACE) {
+ ps = Heap::old_pointer_space();
+ } else if (space == OLD_DATA_SPACE) {
+ ps = Heap::old_data_space();
} else {
ASSERT(space == CODE_SPACE);
ps = Heap::code_space();
if (it.next() == top_page) break;
page_index++;
}
- current_ = RelativeAddress(space, page_index, top_page->Offset(top));
+ current_ = RelativeAddress(space,
+ page_index,
+ top_page->Offset(top));
break;
}
case NEW_SPACE:
- current_ =
- RelativeAddress(space, 0, Heap::NewSpaceTop() - Heap::NewSpaceStart());
+ current_ = RelativeAddress(space,
+ 0,
+ Heap::NewSpaceTop() - Heap::NewSpaceStart());
break;
case LO_SPACE:
int page_index = 0;
}
-RelativeAddress SimulatedHeapSpace::Allocate(int size) {
+RelativeAddress SimulatedHeapSpace::Allocate(int size,
+ GCTreatment special_gc_treatment) {
#ifdef DEBUG
current_.Verify();
#endif
RelativeAddress result = current_;
if (current_.space() == LO_SPACE) {
current_.next_page();
+ if (special_gc_treatment == CodeObject) {
+ result.set_to_large_code_object();
+ } else if (special_gc_treatment == PointerObject) {
+ result.set_to_large_fixed_array();
+ }
} else {
current_.next_address(alloc_size);
}
// and code spaces, because objects in new space will be promoted to them.
writer_->PutC('S');
writer_->PutC('[');
- writer_->PutInt(Heap::old_space()->Size() + Heap::new_space()->Size());
+ writer_->PutInt(Heap::old_pointer_space()->Size() +
+ Heap::new_space()->Size());
+ writer_->PutC('|');
+ writer_->PutInt(Heap::old_data_space()->Size() + Heap::new_space()->Size());
writer_->PutC('|');
writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size());
writer_->PutC('|');
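// The snapshot header written above therefore begins (later fields lie
// outside this hunk):
//
//   S[<old-pointer + new>|<old-data + new>|<code + new>|...
//
// Each paged-space reservation is padded by the new-space size because
// new-space objects may be promoted into that space during
// deserialization.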
// Find out which AllocationSpace 'obj' is in.
AllocationSpace s;
bool found = false;
- for (int i = 0; !found && i <= LAST_SPACE; i++) {
+ for (int i = FIRST_SPACE; !found && i <= LAST_SPACE; i++) {
s = static_cast<AllocationSpace>(i);
found = Heap::InSpace(obj, s);
}
CHECK(found);
if (s == NEW_SPACE) {
- s = Heap::TargetSpace(obj);
+ Space* space = Heap::TargetSpace(obj);
+ ASSERT(space == Heap::old_pointer_space() ||
+ space == Heap::old_data_space());
+ s = (space == Heap::old_pointer_space()) ?
+ OLD_POINTER_SPACE :
+ OLD_DATA_SPACE;
}
int size = obj->Size();
- return allocator_[s]->Allocate(size);
+ GCTreatment gc_treatment = DataObject;
+ if (obj->IsFixedArray()) {
+ gc_treatment = PointerObject;
+ } else if (obj->IsCode()) {
+ gc_treatment = CodeObject;
+ }
+ return allocator_[s]->Allocate(size, gc_treatment);
}
Deserializer::Deserializer(const char* str, int len)
: reader_(str, len),
- map_pages_(kInitArraySize), old_pages_(kInitArraySize),
- code_pages_(kInitArraySize), large_objects_(kInitArraySize),
+ map_pages_(kInitArraySize),
+ old_pointer_pages_(kInitArraySize),
+ old_data_pages_(kInitArraySize),
+ code_pages_(kInitArraySize),
+ large_objects_(kInitArraySize),
global_handles_(4) {
root_ = true;
roots_ = 0;
// during deserialization.
reader_.ExpectC('S');
reader_.ExpectC('[');
- InitPagedSpace(Heap::old_space(), reader_.GetInt(), &old_pages_);
+ InitPagedSpace(Heap::old_pointer_space(),
+ reader_.GetInt(),
+ &old_pointer_pages_);
+ reader_.ExpectC('|');
+ InitPagedSpace(Heap::old_data_space(), reader_.GetInt(), &old_data_pages_);
reader_.ExpectC('|');
InitPagedSpace(Heap::code_space(), reader_.GetInt(), &code_pages_);
reader_.ExpectC('|');
Address a = GetEncodedAddress();
// Get a raw object of the right size in the right space.
- Object* o = Heap::AllocateRaw(size, Space(a));
+ AllocationSpace space = GetSpace(a);
+ Object* o;
+ if (IsLargeExecutableObject(a)) {
+ o = Heap::lo_space()->AllocateRawCode(size);
+ } else if (IsLargeFixedArray(a)) {
+ o = Heap::lo_space()->AllocateRawFixedArray(size);
+ } else {
+ o = Heap::AllocateRaw(size, space);
+ }
ASSERT(!o->IsFailure());
// Check that the simulation of heap allocation was correct.
ASSERT(o == Resolve(a));
// Encoded addresses of HeapObjects always have 'HeapObject' tags.
ASSERT(o->IsHeapObject());
- switch (Space(encoded)) {
- // For Map space and Old space, we cache the known Pages in
- // map_pages and old_pages respectively. Even though MapSpace
- // keeps a list of page addresses, we don't rely on it since
- // GetObject uses AllocateRaw, and that appears not to update
- // the page list.
+ switch (GetSpace(encoded)) {
+ // For Map space and Old space, we cache the known Pages in map_pages,
+ // old_pointer_pages and old_data_pages. Even though MapSpace keeps a list
+ // of page addresses, we don't rely on it since GetObject uses AllocateRaw,
+ // and that appears not to update the page list.
case MAP_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
Heap::map_space(), &map_pages_);
- case OLD_SPACE:
+ case OLD_POINTER_SPACE:
+ return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
+ Heap::old_pointer_space(), &old_pointer_pages_);
+ case OLD_DATA_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
- Heap::old_space(), &old_pages_);
+ Heap::old_data_space(), &old_data_pages_);
case CODE_SPACE:
return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
Heap::code_space(), &code_pages_);
bool has_log_; // The file has log information.
// Resolve caches the following:
- List<Page*> map_pages_; // All pages in the map space.
- List<Page*> old_pages_; // All pages in the old space.
+ List<Page*> map_pages_; // All pages in the map space.
+ List<Page*> old_pointer_pages_; // All pages in the old pointer space.
+ List<Page*> old_data_pages_; // All pages in the old data space.
List<Page*> code_pages_;
- List<Object*> large_objects_; // All known large objects.
+ List<Object*> large_objects_; // All known large objects.
// A list of global handles at deserialization time.
List<Object**> global_handles_;
Address Page::AllocationTop() {
PagedSpace* owner = MemoryAllocator::PageOwner(this);
- if (Heap::old_space() == owner) {
- return Heap::old_space()->PageAllocationTop(this);
- } else if (Heap::code_space() == owner) {
- return Heap::code_space()->PageAllocationTop(this);
- } else {
- ASSERT(Heap::map_space() == owner);
- return Heap::map_space()->PageAllocationTop(this);
- }
+ return owner->PageAllocationTop(this);
}
}
-// Allocating during deserialization. Always roll to the next page in the
-// space, which should be suitably expanded.
-Object* PagedSpace::AllocateForDeserialization(int size_in_bytes) {
- ASSERT(HasBeenSetup());
- ASSERT_OBJECT_SIZE(size_in_bytes);
- HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
- if (object != NULL) return object;
-
- // The space should be pre-expanded.
- Page* current_page = Page::FromAllocationTop(allocation_info_.top);
- ASSERT(current_page->next_page()->is_valid());
- object = AllocateInNextPage(current_page, size_in_bytes);
-
- ASSERT(object != NULL);
- return object;
-}
-
-
// -----------------------------------------------------------------------------
// LargeObjectChunk
void* MemoryAllocator::AllocateRawMemory(const size_t requested,
size_t* allocated,
- bool executable) {
+ Executability executable) {
if (size_ + static_cast<int>(requested) > capacity_) return NULL;
- void* mem = OS::Allocate(requested, allocated, executable);
+ void* mem = OS::Allocate(requested, allocated, executable == EXECUTABLE);
int alloced = *allocated;
size_ += alloced;
Counters::memory_allocated.Increment(alloced);
ASSERT(initial_chunk_->address() <= start);
ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address())
+ initial_chunk_->size());
- if (!initial_chunk_->Commit(start, size, owner->executable())) {
+ if (!initial_chunk_->Commit(start, size,
+ owner->executable() == EXECUTABLE)) {
return Page::FromAddress(NULL);
}
Counters::memory_allocated.Increment(size);
bool MemoryAllocator::CommitBlock(Address start,
size_t size,
- bool executable) {
+ Executability executable) {
ASSERT(start != NULL);
ASSERT(size > 0);
ASSERT(initial_chunk_ != NULL);
// -----------------------------------------------------------------------------
// PagedSpace implementation
-PagedSpace::PagedSpace(int max_capacity, AllocationSpace id, bool executable)
+PagedSpace::PagedSpace(int max_capacity,
+ AllocationSpace id,
+ Executability executable)
: Space(id, executable) {
max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
* Page::kObjectAreaSize;
int num_pages = 0;
// Try to use the virtual memory range passed to us. If it is too small to
// contain at least one page, ignore it and allocate instead.
- if (PagesInChunk(start, size) > 0) {
- first_page_ = MemoryAllocator::CommitPages(start, size, this, &num_pages);
+ int pages_in_chunk = PagesInChunk(start, size);
+ if (pages_in_chunk > 0) {
+ first_page_ = MemoryAllocator::CommitPages(RoundUp(start, Page::kPageSize),
+ Page::kPageSize * pages_in_chunk,
+ this, &num_pages);
} else {
int requested_pages = Min(MemoryAllocator::kPagesPerChunk,
max_capacity_ / Page::kObjectAreaSize);
NewSpace::NewSpace(int initial_semispace_capacity,
int maximum_semispace_capacity,
- AllocationSpace id,
- bool executable)
- : Space(id, executable) {
+ AllocationSpace id)
+ : Space(id, NOT_EXECUTABLE) {
ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
ASSERT(IsPowerOf2(maximum_semispace_capacity));
maximum_capacity_ = maximum_semispace_capacity;
capacity_ = initial_semispace_capacity;
- to_space_ = new SemiSpace(capacity_, maximum_capacity_, id, executable);
- from_space_ = new SemiSpace(capacity_, maximum_capacity_, id, executable);
+ to_space_ = new SemiSpace(capacity_, maximum_capacity_, id);
+ from_space_ = new SemiSpace(capacity_, maximum_capacity_, id);
// Allocate and setup the histogram arrays if necessary.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
SemiSpace::SemiSpace(int initial_capacity,
int maximum_capacity,
- AllocationSpace id,
- bool executable)
- : Space(id, executable), capacity_(initial_capacity),
+ AllocationSpace id)
+ : Space(id, NOT_EXECUTABLE), capacity_(initial_capacity),
maximum_capacity_(maximum_capacity), start_(NULL), age_mark_(NULL) {
}
#ifdef DEBUG
void SemiSpace::Print() { }
+
+
+void SemiSpace::Verify() { }
#endif
LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
size_t* chunk_size,
- bool executable) {
+ Executability executable) {
size_t requested = ChunkSizeFor(size_in_bytes);
void* mem = MemoryAllocator::AllocateRawMemory(requested,
chunk_size,
// -----------------------------------------------------------------------------
// LargeObjectSpace
-LargeObjectSpace::LargeObjectSpace(AllocationSpace id, bool executable)
- : Space(id, executable),
+LargeObjectSpace::LargeObjectSpace(AllocationSpace id)
+ : Space(id, NOT_EXECUTABLE), // Executability is chosen per allocation.
first_chunk_(NULL),
size_(0),
page_count_(0) {}
Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
- int object_size) {
+ int object_size,
+ Executability executable) {
ASSERT(0 < object_size && object_size <= requested_size);
size_t chunk_size;
LargeObjectChunk* chunk =
- LargeObjectChunk::New(requested_size, &chunk_size, executable());
+ LargeObjectChunk::New(requested_size, &chunk_size, executable);
if (chunk == NULL) {
return Failure::RetryAfterGC(requested_size, identity());
}
}
-Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
+Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
ASSERT(0 < size_in_bytes);
- return AllocateRawInternal(size_in_bytes, size_in_bytes);
+ return AllocateRawInternal(size_in_bytes,
+ size_in_bytes,
+ EXECUTABLE);
}
Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
+ ASSERT(0 < size_in_bytes);
int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
- return AllocateRawInternal(size_in_bytes + extra_rset_bytes, size_in_bytes);
+ return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
+ size_in_bytes,
+ NOT_EXECUTABLE);
+}
+
+
+Object* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
+ ASSERT(0 < size_in_bytes);
+ return AllocateRawInternal(size_in_bytes,
+ size_in_bytes,
+ NOT_EXECUTABLE);
}
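// A minimal usage sketch (assumed; AllocateLargeObject is illustrative,
// mirroring Deserializer::GetObject above): a caller holding a
// GCTreatment picks the matching entry point, since executability and
// remembered-set layout are fixed at allocation time.
Object* AllocateLargeObject(int size_in_bytes, GCTreatment treatment) {
  if (treatment == CodeObject) {
    return Heap::lo_space()->AllocateRawCode(size_in_bytes);
  }
  if (treatment == PointerObject) {
    return Heap::lo_space()->AllocateRawFixedArray(size_in_bytes);
  }
  return Heap::lo_space()->AllocateRaw(size_in_bytes);  // plain data
}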
// 8K bytes per page.
static const int kPageSizeBits = 13;
- // Page size in bytes.
+ // Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
// Page size mask.
//---------------------------------------------------------------------------
// Page header description.
//
- // If a page is not in a large object space, the first word,
+ // If a page is not in the large object space, the first word,
// opaque_header, encodes the next page address (aligned to kPageSize 8K)
// and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
// opaque_header. The value range of the opaque_header is [0..kPageSize[,
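// Worked example of the opaque_header encoding described above: with 8K
// pages, an 8K-aligned next-page address has its low 13 bits clear, so it
// can be combined with a chunk number in [0..8K-1]:
//
//   opaque_header = 0x00254000 | 3;           // next page, chunk 3
//   next_page     = opaque_header & ~0x1FFF;  // == 0x00254000
//   chunk_number  = opaque_header &  0x1FFF;  // == 3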
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
public:
- Space(AllocationSpace id, bool executable)
+ Space(AllocationSpace id, Executability executable)
: id_(id), executable_(executable) {}
+ virtual ~Space() {}
// Does the space need executable memory?
- bool executable() { return executable_; }
+ Executability executable() { return executable_; }
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
+ virtual int Size() = 0;
+#ifdef DEBUG
+ virtual void Verify() = 0;
+ virtual void Print() = 0;
+#endif
private:
AllocationSpace id_;
- bool executable_;
+ Executability executable_;
};
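// A hedged sketch (not taken from this patch) of how Heap is expected to
// wire up the spaces under the new Executability parameter; the capacity
// variable is illustrative:
OldSpace* old_pointer_space =
    new OldSpace(old_generation_size, OLD_POINTER_SPACE, NOT_EXECUTABLE);
OldSpace* old_data_space =
    new OldSpace(old_generation_size, OLD_DATA_SPACE, NOT_EXECUTABLE);
OldSpace* code_space =
    new OldSpace(old_generation_size, CODE_SPACE, EXECUTABLE);
LargeObjectSpace* lo_space = new LargeObjectSpace(LO_SPACE);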
// the address is not NULL, the size is greater than zero, and that the
// block is contained in the initial chunk. Returns true if it succeeded
// and false otherwise.
- static bool CommitBlock(Address start, size_t size, bool executable);
+ static bool CommitBlock(Address start,
+ size_t size,
+ Executability executable);
// Attempts to allocate the requested (non-zero) number of pages from the
// OS. Fewer pages might be allocated than requested. If it fails to
// but keep track of allocated bytes as part of heap.
static void* AllocateRawMemory(const size_t requested,
size_t* allocated,
- bool executable);
+ Executability executable);
static void FreeRawMemory(void* buf, size_t length);
// Returns the maximum available bytes of heaps.
static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
+ // Returns maximum available bytes that the old space can have.
+ static int MaxAvailable() {
+ return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
+ }
+
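// Worked example: assuming 8K pages of which roughly 256 bytes are
// reserved for the page header and remembered set (so kObjectAreaSize is
// about 7936 bytes), Available() == 1MB gives
//   1048576 / 8192 == 128 whole pages, and
//   MaxAvailable() == 128 * 7936 == 1015808 usable object bytes.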
// Links two pages.
static inline void SetNextPage(Page* prev, Page* next);
friend class PageIterator;
public:
// Creates a space with a maximum capacity, and an id.
- PagedSpace(int max_capacity, AllocationSpace id, bool executable);
+ PagedSpace(int max_capacity, AllocationSpace id, Executability executable);
virtual ~PagedSpace() {}
// Clears remembered sets of pages in this space.
void ClearRSet();
+ // Prepares for a mark-compact GC.
+ virtual void PrepareForMarkCompact(bool will_compact) = 0;
+
+ virtual Address PageAllocationTop(Page* page) = 0;
+
// Current capacity without growing (Size() + Available() + Waste()).
int Capacity() { return accounting_stats_.Capacity(); }
int Available() { return accounting_stats_.Available(); }
// Allocated bytes in this space.
- int Size() { return accounting_stats_.Size(); }
+ virtual int Size() { return accounting_stats_.Size(); }
// Wasted bytes due to fragmentation and not recoverable until the
// next GC of this space.
inline Object* MCAllocateRaw(int size_in_bytes);
- // Allocate the requested number of bytes during deserialization.
- inline Object* AllocateForDeserialization(int size_in_bytes);
-
// ---------------------------------------------------------------------------
// Mark-compact collection support functions
// of the space.
int MCSpaceOffsetForAddress(Address addr);
+ // Updates the allocation pointer to the relocation top after a mark-compact
+ // collection.
+ virtual void MCCommitRelocationInfo() = 0;
+
// Releases half of unused pages.
void Shrink();
#ifdef DEBUG
// Print meta info and objects in this space.
- void Print();
+ virtual void Print();
// Report code object related statistics
void CollectCodeStatistics();
// addresses.
SemiSpace(int initial_capacity,
int maximum_capacity,
- AllocationSpace id,
- bool executable);
+ AllocationSpace id);
+ virtual ~SemiSpace() {}
// Sets up the semispace using the given chunk.
bool Setup(Address start, int size);
// The offset of an address from the begining of the space.
int SpaceOffsetForAddress(Address addr) { return addr - low(); }
+ // Defining Size() here keeps SemiSpace from being abstract. However,
+ // it should never be called.
+ virtual int Size() {
+ UNREACHABLE();
+ return 0;
+ }
+
#ifdef DEBUG
- void Print();
+ virtual void Print();
+ virtual void Verify();
#endif
private:
// and it must be aligned to its size.
NewSpace(int initial_semispace_capacity,
int maximum_semispace_capacity,
- AllocationSpace id,
- bool executable);
+ AllocationSpace id);
+ virtual ~NewSpace() {}
// Sets up the new space using the given chunk.
bool Setup(Address start, int size);
}
// Return the allocated bytes in the active semispace.
- int Size() { return top() - bottom(); }
+ virtual int Size() { return top() - bottom(); }
// Return the current capacity of a semispace.
int Capacity() { return capacity_; }
// Return the available bytes without growing in the active semispace.
#ifdef DEBUG
// Verify the active semispace.
- void Verify();
+ virtual void Verify();
// Print the active semispace.
- void Print() { to_space_->Print(); }
+ virtual void Print() { to_space_->Print(); }
#endif
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
public:
// Creates an old space object with a given maximum capacity.
// The constructor does not allocate pages from OS.
- explicit OldSpace(int max_capacity, AllocationSpace id, bool executable)
+ explicit OldSpace(int max_capacity,
+ AllocationSpace id,
+ Executability executable)
: PagedSpace(max_capacity, id, executable), free_list_(id) {
}
- // Returns maximum available bytes that the old space can have.
- int MaxAvailable() {
- return (MemoryAllocator::Available() / Page::kPageSize)
- * Page::kObjectAreaSize;
- }
-
// The bytes available on the free list (ie, not above the linear allocation
// pointer).
int AvailableFree() { return free_list_.available(); }
// The top of allocation in a page in this space. Undefined if page is unused.
- Address PageAllocationTop(Page* page) {
+ virtual Address PageAllocationTop(Page* page) {
return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
}
// Prepare for full garbage collection. Resets the relocation pointer and
// clears the free list.
- void PrepareForMarkCompact(bool will_compact);
+ virtual void PrepareForMarkCompact(bool will_compact);
// Adjust the top of relocation pointer to point to the end of the object
// given by 'address' and 'size_in_bytes'. Move it to the next page if
// Updates the allocation pointer to the relocation top after a mark-compact
// collection.
- void MCCommitRelocationInfo();
+ virtual void MCCommitRelocationInfo();
#ifdef DEBUG
// Verify integrity of this space.
- void Verify();
+ virtual void Verify();
// Reports statistics for the space
void ReportStatistics();
public:
// Creates a map space object with a maximum capacity.
explicit MapSpace(int max_capacity, AllocationSpace id)
- : PagedSpace(max_capacity, id, false), free_list_(id) { }
-
- // The bytes available on the free list (ie, not above the linear allocation
- // pointer).
- int AvailableFree() { return free_list_.available(); }
+ : PagedSpace(max_capacity, id, NOT_EXECUTABLE), free_list_(id) { }
// The top of allocation in a page in this space. Undefined if page is unused.
- Address PageAllocationTop(Page* page) {
+ virtual Address PageAllocationTop(Page* page) {
return page == TopPageOf(allocation_info_) ? top()
: page->ObjectAreaEnd() - kPageExtra;
}
Address PageAddress(int page_index) { return page_addresses_[page_index]; }
// Prepares for a mark-compact GC.
- void PrepareForMarkCompact(bool will_compact);
+ virtual void PrepareForMarkCompact(bool will_compact);
// Updates the allocation pointer to the relocation top after a mark-compact
// collection.
- void MCCommitRelocationInfo();
+ virtual void MCCommitRelocationInfo();
#ifdef DEBUG
// Verify integrity of this space.
- void Verify();
+ virtual void Verify();
// Reports statistic info of the space
void ReportStatistics();
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at Page::kObjectStartOffset within a page.
// Large objects do not move during garbage collections.
-//
// A LargeObjectChunk holds exactly one large object page with exactly one
// large object.
// parameter chunk_size.
static LargeObjectChunk* New(int size_in_bytes,
size_t* chunk_size,
- bool executable);
+ Executability executable);
// Interpret a raw address as a large object chunk.
static LargeObjectChunk* FromAddress(Address address) {
class LargeObjectSpace : public Space {
friend class LargeObjectIterator;
public:
- explicit LargeObjectSpace(AllocationSpace id, bool executable);
+ explicit LargeObjectSpace(AllocationSpace id);
+ virtual ~LargeObjectSpace() {}
// Initializes internal data structures.
bool Setup();
// Releases internal resources, frees objects in this space.
void TearDown();
- // Allocates a (non-FixedArray) large object.
+ // Allocates a (non-FixedArray, non-Code) large object.
Object* AllocateRaw(int size_in_bytes);
+ // Allocates a large Code object.
+ Object* AllocateRawCode(int size_in_bytes);
// Allocates a large FixedArray.
Object* AllocateRawFixedArray(int size_in_bytes);
return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
}
- int Size() {
+ virtual int Size() {
return size_;
}
bool IsEmpty() { return first_chunk_ == NULL; }
#ifdef DEBUG
- void Verify();
- void Print();
+ virtual void Verify();
+ virtual void Print();
void ReportStatistics();
void CollectCodeStatistics();
// Dump the remembered sets in the space to stdout.
int page_count_; // number of chunks
- // Shared implementation of AllocateRaw and AllocateRawFixedArray.
- Object* AllocateRawInternal(int requested_size, int object_size);
+ // Shared implementation of AllocateRaw, AllocateRawCode and
+ // AllocateRawFixedArray.
+ Object* AllocateRawInternal(int requested_size,
+ int object_size,
+ Executability executable);
// Returns the number of extra bytes (rounded up to the nearest full word)
// required for extra_object_bytes of extra pointers (in bytes).
CHECK(source->IsExternal());
CHECK_EQ(resource,
static_cast<TestResource*>(source->GetExternalStringResource()));
- v8::internal::Heap::CollectGarbage(0, v8::internal::OLD_SPACE);
+ v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(0, TestResource::dispose_count);
}
- v8::internal::Heap::CollectGarbage(0, v8::internal::OLD_SPACE);
+ v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(1, TestResource::dispose_count);
}
Local<Value> value = script->Run();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value());
- v8::internal::Heap::CollectGarbage(0, v8::internal::OLD_SPACE);
+ v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(0, TestAsciiResource::dispose_count);
}
- v8::internal::Heap::CollectGarbage(0, v8::internal::OLD_SPACE);
+ v8::internal::Heap::CollectAllGarbage();
CHECK_EQ(1, TestAsciiResource::dispose_count);
}
CHECK_EQ(v8::Integer::New(3), args[2]);
CHECK_EQ(v8::Undefined(), args[3]);
v8::HandleScope scope;
- i::Heap::CollectGarbage(0, i::OLD_SPACE);
+ i::Heap::CollectAllGarbage();
return v8::Undefined();
}
static void EnsureNoSurvivingGlobalObjects() {
int count = 0;
- v8::internal::Heap::CollectGarbage(0, v8::internal::OLD_SPACE);
+ v8::internal::Heap::CollectAllGarbage();
v8::internal::HeapIterator it;
while (it.has_next()) {
v8::internal::HeapObject* object = it.next();
Heap::CollectGarbage(0, v8::internal::NEW_SPACE);
} else {
// Mark sweep (and perhaps compact).
- Heap::CollectGarbage(0, v8::internal::OLD_SPACE);
+ Heap::CollectAllGarbage();
}
}
}
CHECK_EQ(2 + i * 3, break_point_hit_count);
// Mark sweep (and perhaps compact) and call function.
- Heap::CollectGarbage(0, v8::internal::OLD_SPACE);
+ Heap::CollectAllGarbage();
f->Call(recv, 0, NULL);
CHECK_EQ(3 + i * 3, break_point_hit_count);
}
CHECK(Failure::RetryAfterGC(12, NEW_SPACE)->IsFailure());
CHECK_EQ(12, Failure::RetryAfterGC(12, NEW_SPACE)->requested());
CHECK_EQ(NEW_SPACE, Failure::RetryAfterGC(12, NEW_SPACE)->allocation_space());
- CHECK_EQ(OLD_SPACE, Failure::RetryAfterGC(12, OLD_SPACE)->allocation_space());
+ CHECK_EQ(OLD_POINTER_SPACE,
+ Failure::RetryAfterGC(12, OLD_POINTER_SPACE)->allocation_space());
CHECK(Failure::Exception()->IsFailure());
CHECK(Smi::FromInt(Smi::kMinValue)->IsSmi());
CHECK(Smi::FromInt(Smi::kMaxValue)->IsSmi());
Handle<Object> h1 = GlobalHandles::Create(i);
Handle<Object> h2 = GlobalHandles::Create(u);
- CHECK(Heap::CollectGarbage(0, OLD_SPACE));
+ CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
CHECK(Heap::CollectGarbage(0, NEW_SPACE));
// Make sure the object is promoted.
CHECK(!GlobalHandles::IsNearDeath(h1.location()));
CHECK(!GlobalHandles::IsNearDeath(h2.location()));
- CHECK(Heap::CollectGarbage(0, OLD_SPACE));
+ CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
CHECK((*h1)->IsString());
CHECK(!WeakPointerCleared);
// Mark-compact treats weak reference properly.
- CHECK(Heap::CollectGarbage(0, OLD_SPACE));
+ CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
CHECK(WeakPointerCleared);
}
Handle<Object> objs[objs_count];
int next_objs_index = 0;
- // Allocate a JS array to OLD_SPACE and NEW_SPACE
+ // Allocate a JS array to OLD_POINTER_SPACE and NEW_SPACE
objs[next_objs_index++] = Factory::NewJSArray(10);
objs[next_objs_index++] = Factory::NewJSArray(10, TENURED);
- // Allocate a small string to CODE_SPACE and NEW_SPACE
+ // Allocate a small string to OLD_DATA_SPACE and NEW_SPACE
objs[next_objs_index++] =
Factory::NewStringFromAscii(CStrVector("abcdefghij"));
objs[next_objs_index++] =
CHECK(Heap::InSpace(*array, NEW_SPACE));
// Call the m-c collector, so array becomes an old object.
- CHECK(Heap::CollectGarbage(0, OLD_SPACE));
+ CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
// Array now sits in the old space
- CHECK(Heap::InSpace(*array, OLD_SPACE));
+ CHECK(Heap::InSpace(*array, OLD_POINTER_SPACE));
}
v8::HandleScope sc;
// Do a mark compact GC to shrink the heap.
- CHECK(Heap::CollectGarbage(0, OLD_SPACE));
+ CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
// Allocate a big Fixed array in the new space.
int size = (Heap::MaxHeapObjectSize() - Array::kHeaderSize) / kPointerSize;
}
// Call mark compact GC, and it should pass.
- CHECK(Heap::CollectGarbage(0, OLD_SPACE));
+ CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
// array should not be promoted because the old space is full.
CHECK(Heap::InSpace(*array, NEW_SPACE));
v8::HandleScope sc;
// call mark-compact when heap is empty
- CHECK(Heap::CollectGarbage(0, OLD_SPACE));
+ CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
// keep allocating garbage in new space until it fails
const int ARRAY_SIZE = 100;
Top::context()->global()->SetProperty(func_name, function, NONE);
JSObject* obj = JSObject::cast(Heap::AllocateJSObject(function));
- CHECK(Heap::CollectGarbage(0, OLD_SPACE));
+ CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
func_name = String::cast(Heap::LookupAsciiSymbol("theFunction"));
CHECK(Top::context()->global()->HasLocalProperty(func_name));
String* prop_name = String::cast(Heap::LookupAsciiSymbol("theSlot"));
obj->SetProperty(prop_name, Smi::FromInt(23), NONE);
- CHECK(Heap::CollectGarbage(0, OLD_SPACE));
+ CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
obj_name = String::cast(Heap::LookupAsciiSymbol("theObject"));
CHECK(Top::context()->global()->HasLocalProperty(obj_name));
CHECK_EQ(0, gc_starts);
CHECK_EQ(gc_ends, gc_starts);
- CHECK(Heap::CollectGarbage(0, OLD_SPACE));
+ CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
CHECK_EQ(1, gc_starts);
CHECK_EQ(gc_ends, gc_starts);
}
GlobalHandles::AddToGroup(reinterpret_cast<void*>(2), g2s1.location());
GlobalHandles::AddToGroup(reinterpret_cast<void*>(2), g2s2.location());
// Do a full GC
- CHECK(Heap::CollectGarbage(0, OLD_SPACE));
+ CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
// All object should be alive.
CHECK_EQ(0, NumberOfWeakCalls);
GlobalHandles::AddToGroup(reinterpret_cast<void*>(2), g2s1.location());
GlobalHandles::AddToGroup(reinterpret_cast<void*>(2), g2s2.location());
- CHECK(Heap::CollectGarbage(0, OLD_SPACE));
+ CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
// All objects should be gone. 5 global handles in total.
CHECK_EQ(5, NumberOfWeakCalls);
CHECK(Heap::ConfigureHeapDefault());
CHECK(MemoryAllocator::Setup(Heap::MaxCapacity()));
- OldSpace faked_space(Heap::MaxCapacity(), OLD_SPACE, false);
+ OldSpace faked_space(Heap::MaxCapacity(), OLD_POINTER_SPACE, NOT_EXECUTABLE);
int total_pages = 0;
int requested = 2;
int allocated;
NewSpace* s = new NewSpace(Heap::InitialSemiSpaceSize(),
Heap::SemiSpaceSize(),
- NEW_SPACE,
- false);
+ NEW_SPACE);
CHECK(s != NULL);
void* chunk =
CHECK(Heap::ConfigureHeapDefault());
CHECK(MemoryAllocator::Setup(Heap::MaxCapacity()));
- OldSpace* s = new OldSpace(Heap::OldGenerationSize(), OLD_SPACE, false);
+ OldSpace* s = new OldSpace(Heap::OldGenerationSize(),
+ OLD_POINTER_SPACE,
+ NOT_EXECUTABLE);
CHECK(s != NULL);
void* chunk =
CHECK(Heap::ConfigureHeapDefault());
MemoryAllocator::Setup(Heap::MaxCapacity());
- LargeObjectSpace* lo = new LargeObjectSpace(LO_SPACE, false);
+ LargeObjectSpace* lo = new LargeObjectSpace(LO_SPACE);
CHECK(lo != NULL);
CHECK(lo->Setup());