AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result = nullptr;
- AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
+ AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
// Map::cast cannot be used due to uninitialized map field.
int instance_size,
ElementsKind elements_kind) {
HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
+ AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
result->set_map_no_write_barrier(meta_map());
HeapObject* obj = nullptr;
{
AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
- AllocationResult allocation = AllocateRaw(size, space, align);
+ AllocationResult allocation = AllocateRaw(size, space, space, align);
if (!allocation.To(&obj)) return allocation;
}
#ifdef DEBUG
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space, kDoubleUnaligned);
+ AllocationResult allocation =
+ AllocateRaw(size, space, OLD_SPACE, kDoubleUnaligned);
if (!allocation.To(&result)) return allocation;
}
HeapObject* result = nullptr; \
{ \
AllocationResult allocation = \
- AllocateRaw(size, space, kSimd128Unaligned); \
+ AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned); \
if (!allocation.To(&result)) return allocation; \
} \
\
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
result->set_map_no_write_barrier(cell_map());
STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
result->set_map_no_write_barrier(global_property_cell_map());
STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
result->set_map_no_write_barrier(weak_cell_map());
AllocationSpace space = SelectSpace(pretenure);
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space);
+ AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
int size = BytecodeArray::SizeFor(length);
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
AllocationSpace space = SelectSpace(pretenure);
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space);
+ AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
HeapObject* object = nullptr;
AllocationResult allocation = AllocateRaw(
- size, space,
+ size, space, OLD_SPACE,
array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
if (!allocation.To(&object)) return allocation;
AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
- AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
+ AllocationResult allocation =
+ AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
HeapObject* result = nullptr;
if (!allocation.To(&result)) return allocation;
HeapObject* result = nullptr;
// Allocate an object the same size as the code object.
int obj_size = code->Size();
- allocation = AllocateRaw(obj_size, CODE_SPACE);
+ allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
if (!allocation.To(&result)) return allocation;
// Copy code object.
static_cast<size_t>(code->instruction_end() - old_addr);
HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(new_obj_size, CODE_SPACE);
+ AllocationResult allocation =
+ AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
if (!allocation.To(&result)) return allocation;
// Copy code object.
AllocationSite* allocation_site) {
DCHECK(gc_state_ == NOT_IN_GC);
DCHECK(map->instance_type() != MAP_TYPE);
+ // If allocation failures are disallowed, we may allocate in a different
+ // space when new space is full and the object is not a large object.
+ AllocationSpace retry_space = (space != NEW_SPACE) ? space : OLD_SPACE;
int size = map->instance_size();
if (allocation_site != NULL) {
size += AllocationMemento::kSize;
}
HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(size, space);
+ AllocationResult allocation = AllocateRaw(size, space, retry_space);
if (!allocation.To(&result)) return allocation;
// No need for write barrier since object is white and map is in old space.
result->set_map_no_write_barrier(map);
DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
- int adjusted_object_size =
- site != NULL ? object_size + AllocationMemento::kSize : object_size;
- AllocationResult allocation = AllocateRaw(adjusted_object_size, NEW_SPACE);
- if (!allocation.To(&clone)) return allocation;
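+ // By default the clone's fields are recorded in the write barrier; this is
+ // relaxed to SKIP_WRITE_BARRIER below when the clone is known to live in
+ // new space.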
+ WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
+
+ // If we're forced to always allocate, we use the general allocation
+ // functions which may leave us with an object in old space.
+ if (always_allocate()) {
+ {
+ AllocationResult allocation =
+ AllocateRaw(object_size, NEW_SPACE, OLD_SPACE);
+ if (!allocation.To(&clone)) return allocation;
+ }
+ Address clone_address = clone->address();
+ CopyBlock(clone_address, source->address(), object_size);
- SLOW_DCHECK(InNewSpace(clone));
- // Since we know the clone is allocated in new space, we can copy
- // the contents without worrying about updating the write barrier.
- CopyBlock(clone->address(), source->address(), object_size);
+ // Update write barrier for all tagged fields that lie beyond the header.
+ const int start_offset = JSObject::kHeaderSize;
+ const int end_offset = object_size;
- if (site != NULL) {
- AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
- reinterpret_cast<Address>(clone) + object_size);
- InitializeAllocationMemento(alloc_memento, site);
+#if V8_DOUBLE_FIELDS_UNBOXING
+ LayoutDescriptorHelper helper(map);
+ bool has_only_tagged_fields = helper.all_fields_tagged();
+
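+ // With unboxed double fields, only the tagged regions of the clone need to
+ // be recorded in the write barrier; raw double regions are skipped.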
+ if (!has_only_tagged_fields) {
+ for (int offset = start_offset; offset < end_offset;) {
+ int end_of_region_offset;
+ if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
+ RecordWrites(clone_address, offset,
+ (end_of_region_offset - offset) / kPointerSize);
+ }
+ offset = end_of_region_offset;
+ }
+ } else {
+#endif
+ // Object has only tagged fields.
+ RecordWrites(clone_address, start_offset,
+ (end_offset - start_offset) / kPointerSize);
+#if V8_DOUBLE_FIELDS_UNBOXING
+ }
+#endif
+
+ } else {
+ wb_mode = SKIP_WRITE_BARRIER;
+
+ {
+ int adjusted_object_size =
+ site != NULL ? object_size + AllocationMemento::kSize : object_size;
+ AllocationResult allocation =
+ AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
+ if (!allocation.To(&clone)) return allocation;
+ }
+ SLOW_DCHECK(InNewSpace(clone));
+ // Since we know the clone is allocated in new space, we can copy
+ // the contents without worrying about updating the write barrier.
+ CopyBlock(clone->address(), source->address(), object_size);
+
+ if (site != NULL) {
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+ reinterpret_cast<Address>(clone) + object_size);
+ InitializeAllocationMemento(alloc_memento, site);
+ }
}
SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
}
if (!allocation.To(&elem)) return allocation;
}
- JSObject::cast(clone)->set_elements(elem, SKIP_WRITE_BARRIER);
+ JSObject::cast(clone)->set_elements(elem, wb_mode);
}
// Update properties if necessary.
if (properties->length() > 0) {
AllocationResult allocation = CopyFixedArray(properties);
if (!allocation.To(&prop)) return allocation;
}
- JSObject::cast(clone)->set_properties(prop, SKIP_WRITE_BARRIER);
+ JSObject::cast(clone)->set_properties(prop, wb_mode);
}
// Return the new clone.
return clone;
// Allocate string.
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space);
+ AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space);
+ AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
int size = FixedArray::SizeFor(0);
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
// Initialize the object.
int size = FixedArray::SizeFor(length);
AllocationSpace space = SelectSpace(pretenure);
- return AllocateRaw(size, space);
+ return AllocateRaw(size, space, OLD_SPACE);
}
HeapObject* object = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space, kDoubleAligned);
+ AllocationResult allocation =
+ AllocateRaw(size, space, OLD_SPACE, kDoubleAligned);
if (!allocation.To(&object)) return allocation;
}
STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
+ AllocationResult allocation =
+ AllocateRaw(Symbol::kSize, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
result->set_map_no_write_barrier(symbol_map());
}
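+// Returns the length of a FixedArray whose total allocation size is |size|
+// bytes (the inverse of FixedArray::SizeFor for pointer-sized elements).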
+static int LenFromSize(int size) {
+ return (size - FixedArray::kHeaderSize) / kPointerSize;
+}
+
+
+HEAP_TEST(Regression39128) {
+ // Test case for crbug.com/39128.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = CcTest::heap();
+
+ // Increase the chance of 'bump-the-pointer' allocation in old space.
+ heap->CollectAllGarbage();
+
+ v8::HandleScope scope(CcTest::isolate());
+
+ // The plan: create a JSObject that references objects in new space, then
+ // clone this object (forcing it to go into old space) and check that the
+ // old-to-new references in the clone are recorded correctly.
+
+ // Step 1: prepare a map for the object. We add a single in-object property
+ // to it.
+ Handle<Map> my_map = Map::Create(CcTest::i_isolate(), 1);
+ int n_properties = my_map->GetInObjectProperties();
+ CHECK_GT(n_properties, 0);
+
+ int object_size = my_map->instance_size();
+
+ // Step 2: allocate a lot of objects so as to almost fill new space: we need
+ // just enough room left to allocate the JSObject and thereby fill new space.
+
+ int allocation_amount = Min(FixedArray::kMaxSize,
+ Page::kMaxRegularHeapObjectSize + kPointerSize);
+ int allocation_len = LenFromSize(allocation_amount);
+ NewSpace* new_space = heap->new_space();
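+ // Inline allocation steps lower the allocation limit temporarily; disable
+ // them so that *limit_addr below reflects the real end of the space.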
+ DisableInlineAllocationSteps(new_space);
+ Address* top_addr = new_space->allocation_top_address();
+ Address* limit_addr = new_space->allocation_limit_address();
+ while ((*limit_addr - *top_addr) > allocation_amount) {
+ CHECK(!heap->always_allocate());
+ Object* array = heap->AllocateFixedArray(allocation_len).ToObjectChecked();
+ CHECK(new_space->Contains(array));
+ }
+
+ // Step 3: now allocate fixed array and JSObject to fill the whole new space.
+ int to_fill = static_cast<int>(*limit_addr - *top_addr - object_size);
+ int fixed_array_len = LenFromSize(to_fill);
+ CHECK(fixed_array_len < FixedArray::kMaxLength);
+
+ CHECK(!heap->always_allocate());
+ Object* array = heap->AllocateFixedArray(fixed_array_len).ToObjectChecked();
+ CHECK(new_space->Contains(array));
+
+ Object* object = heap->AllocateJSObjectFromMap(*my_map).ToObjectChecked();
+ CHECK(new_space->Contains(object));
+ JSObject* jsobject = JSObject::cast(object);
+ CHECK_EQ(0, FixedArray::cast(jsobject->elements())->length());
+ CHECK_EQ(0, jsobject->properties()->length());
+ // Create a reference to object in new space in jsobject.
+ FieldIndex index = FieldIndex::ForInObjectOffset(
+ JSObject::kHeaderSize - kPointerSize);
+ jsobject->FastPropertyAtPut(index, array);
+
+ CHECK_EQ(0, static_cast<int>(*limit_addr - *top_addr));
+
+ // Step 4: clone jsobject; enter an AlwaysAllocateScope first so that the
+ // clone is created in old space.
+ Address old_space_top = heap->old_space()->top();
+ AlwaysAllocateScope aa_scope(isolate);
+ Object* clone_obj = heap->CopyJSObject(jsobject).ToObjectChecked();
+ JSObject* clone = JSObject::cast(clone_obj);
+ if (clone->address() != old_space_top) {
+ // Alas, the clone got allocated from the free list; we cannot do the checks.
+ return;
+ }
+ CHECK(heap->old_space()->Contains(clone->address()));
+}
+
+
UNINITIALIZED_TEST(TestCodeFlushing) {
// If we do not flush code this test is invalid.
if (!FLAG_flush_code) return;
}
+TEST(Regress2237) {
+ i::FLAG_stress_compaction = false;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ v8::HandleScope scope(CcTest::isolate());
+ Handle<String> slice(CcTest::heap()->empty_string());
+
+ {
+ // Generate a parent that lives in new-space.
+ v8::HandleScope inner_scope(CcTest::isolate());
+ const char* c = "This text is long enough to trigger sliced strings.";
+ Handle<String> s = factory->NewStringFromAsciiChecked(c);
+ CHECK(s->IsSeqOneByteString());
+ CHECK(CcTest::heap()->InNewSpace(*s));
+
+ // Generate a sliced string that is based on the above parent and
+ // lives in old-space.
+ SimulateFullSpace(CcTest::heap()->new_space());
+ AlwaysAllocateScope always_allocate(isolate);
+ Handle<String> t = factory->NewProperSubString(s, 5, 35);
+ CHECK(t->IsSlicedString());
+ CHECK(!CcTest::heap()->InNewSpace(*t));
+ *slice.location() = *t.location();
+ }
+
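+ // The sliced string in old space now holds the only reference to its
+ // new-space parent; the recorded old-to-new slot must keep the parent alive
+ // and valid across a full GC.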
+ CHECK(SlicedString::cast(*slice)->parent()->IsSeqOneByteString());
+ CcTest::heap()->CollectAllGarbage();
+ CHECK(SlicedString::cast(*slice)->parent()->IsSeqOneByteString());
+}
+
+
#ifdef OBJECT_PRINT
TEST(PrintSharedFunctionInfo) {
CcTest::InitializeVM();
}
+static int LenFromSize(int size) {
+ return (size - FixedArray::kHeaderSize) / kPointerSize;
+}
+
+
+HEAP_TEST(WriteBarriersInCopyJSObject) {
+ FLAG_max_semi_space_size = 1; // Ensure new space is not growing.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = CcTest::heap();
+
+ v8::HandleScope scope(CcTest::isolate());
+
+ // The plan: create a JSObject that contains an unboxed double value which
+ // looks like a reference to an object in new space.
+ // Then clone this object (forcing it to go into old space) and check that
+ // the value of the unboxed double property of the cloned object was not
+ // corrupted by GC.
+
+ // Step 1: prepare a map for the object. We add a single in-object property
+ // with unboxed double representation to it.
+ Handle<Map> my_map = Map::Create(isolate, 1);
+ Handle<String> name = isolate->factory()->InternalizeUtf8String("foo");
+ my_map = Map::CopyWithField(my_map, name, HeapType::Any(isolate), NONE,
+ Representation::Double(),
+ INSERT_TRANSITION).ToHandleChecked();
+
+ int object_size = my_map->instance_size();
+
+ // Step 2: allocate a lot of objects so as to almost fill new space: we need
+ // just enough room left to allocate the JSObject and thereby fill new space.
+
+ int allocation_amount =
+ Min(FixedArray::kMaxSize, Page::kMaxRegularHeapObjectSize + kPointerSize);
+ int allocation_len = LenFromSize(allocation_amount);
+ NewSpace* new_space = heap->new_space();
+ DisableInlineAllocationSteps(new_space);
+ Address* top_addr = new_space->allocation_top_address();
+ Address* limit_addr = new_space->allocation_limit_address();
+ while ((*limit_addr - *top_addr) > allocation_amount) {
+ CHECK(!heap->always_allocate());
+ Object* array = heap->AllocateFixedArray(allocation_len).ToObjectChecked();
+ CHECK(new_space->Contains(array));
+ }
+
+ // Step 3: now allocate fixed array and JSObject to fill the whole new space.
+ int to_fill = static_cast<int>(*limit_addr - *top_addr - object_size);
+ int fixed_array_len = LenFromSize(to_fill);
+ CHECK(fixed_array_len < FixedArray::kMaxLength);
+
+ CHECK(!heap->always_allocate());
+ Object* array = heap->AllocateFixedArray(fixed_array_len).ToObjectChecked();
+ CHECK(new_space->Contains(array));
+
+ Object* object = heap->AllocateJSObjectFromMap(*my_map).ToObjectChecked();
+ CHECK(new_space->Contains(object));
+ JSObject* jsobject = JSObject::cast(object);
+ CHECK_EQ(0, FixedArray::cast(jsobject->elements())->length());
+ CHECK_EQ(0, jsobject->properties()->length());
+
+ // Construct a double value that looks like a pointer to the new-space object
+ // and store it into the object.
+ Address fake_object = reinterpret_cast<Address>(array) + kPointerSize;
+ double boom_value = bit_cast<double>(fake_object);
+ FieldIndex index = FieldIndex::ForDescriptor(*my_map, 0);
+ jsobject->RawFastDoublePropertyAtPut(index, boom_value);
+
+ CHECK_EQ(0, static_cast<int>(*limit_addr - *top_addr));
+
+ // Step 4: clone jsobject; enter an AlwaysAllocateScope first so that the
+ // clone is created in old space.
+ AlwaysAllocateScope aa_scope(isolate);
+ Object* clone_obj = heap->CopyJSObject(jsobject).ToObjectChecked();
+ Handle<JSObject> clone(JSObject::cast(clone_obj));
+ CHECK(heap->old_space()->Contains(clone->address()));
+
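+ // A new-space GC must not misinterpret the raw double bits as a pointer
+ // into new space and rewrite them.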
+ CcTest::heap()->CollectGarbage(NEW_SPACE, "boom");
+
+ // The value in cloned object should not be corrupted by GC.
+ CHECK_EQ(boom_value, clone->RawFastDoublePropertyAt(index));
+}
+
+
static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
int tagged_descriptor, int double_descriptor,
bool check_tagged_value = true) {