first = isolate->factory()->undefined_value();
}
- if (!heap->lo_space()->Contains(*elms_obj)) {
+ if (heap->CanMoveObjectStart(*elms_obj)) {
array->set_elements(LeftTrimFixedArray(heap, *elms_obj, 1));
} else {
// Shift the elements.
heap->MoveElements(*elms, delta, 0, actual_start);
}
- elms_obj = handle(LeftTrimFixedArray(heap, *elms_obj, delta));
-
+ if (heap->CanMoveObjectStart(*elms_obj)) {
+ // On the fast path we move the start of the object in memory.
+ elms_obj = handle(LeftTrimFixedArray(heap, *elms_obj, delta));
+ } else {
+ // This is the slow path. We are going to move the elements to the left
+ // by copying them. For trimmed values we store the hole.
+ if (elms_obj->IsFixedDoubleArray()) {
+ Handle<FixedDoubleArray> elms =
+ Handle<FixedDoubleArray>::cast(elms_obj);
+ MoveDoubleElements(*elms, 0, *elms, delta, len - delta);
+ FillWithHoles(*elms, len - delta, len);
+ } else {
+ Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
+ DisallowHeapAllocation no_gc;
+ heap->MoveElements(*elms, 0, delta, len - delta);
+ FillWithHoles(heap, *elms, len - delta, len);
+ }
+ }
elms_changed = true;
} else {
if (elms_obj->IsFixedDoubleArray()) {
}
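
For readers who don't know V8's left trimming, here is a hedged sketch of what the fast path above does (illustrative only; SketchLeftTrim is an invented name and the header rewriting done by the real LeftTrimFixedArray is elided): the object start is moved forward in memory and the vacated words become a filler object, so the trim is O(1), while the slow path pays an O(len - delta) copy.

static FixedArrayBase* SketchLeftTrim(Heap* heap, FixedArrayBase* elms,
                                      int delta, int entry_size) {
  Address old_start = elms->address();
  // The vacated prefix must stay iterable for the heap, so it becomes a
  // filler object (CreateFillerObjectAt is declared in the heap.h hunk
  // below).
  heap->CreateFillerObjectAt(old_start, delta * entry_size);
  // The object "moves": its map and length would be rewritten at the new
  // address. This in-place move of the object start is exactly what is
  // unsafe while a sweeper thread may still be scanning the page.
  Address new_start = old_start + delta * entry_size;
  return FixedArrayBase::cast(HeapObject::FromAddress(new_start));
}
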
+bool Heap::CanMoveObjectStart(HeapObject* object) {
+ Address address = object->address();
+ bool is_in_old_pointer_space = InOldPointerSpace(address);
+ bool is_in_old_data_space = InOldDataSpace(address);
+
+ if (lo_space()->Contains(object)) return false;
+
+ // We cannot move the object start if the given old space page is
+ // concurrently swept.
+ return (!is_in_old_pointer_space && !is_in_old_data_space) ||
+ Page::FromAddress(address)->parallel_sweeping() <=
+ MemoryChunk::PARALLEL_SWEEPING_FINALIZE;
+}
+
+
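
The return statement above leans on the declaration order of ParallelSweepingState (see the spaces.h hunk at the end of this change): parallel_sweeping() <= PARALLEL_SWEEPING_FINALIZE holds exactly for DONE and FINALIZE, the two states in which no sweeper thread will write to the page anymore. A hypothetical helper, not part of the CL, that spells the comparison out:

static bool SweeperIsDoneWithPage(Page* page) {
  MemoryChunk::ParallelSweepingState state = page->parallel_sweeping();
  // DONE: page was never queued for parallel sweeping, or was finalized.
  // FINALIZE: a sweeper thread finished and will not touch the memory.
  return state == MemoryChunk::PARALLEL_SWEEPING_DONE ||
         state == MemoryChunk::PARALLEL_SWEEPING_FINALIZE;
}
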
void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
if (incremental_marking()->IsMarking() &&
Marking::IsBlack(Marking::MarkBitFrom(address))) {
// Initialize a filler object to keep the ability to iterate over the heap
// when shortening objects.
void CreateFillerObjectAt(Address addr, int size);
+ bool CanMoveObjectStart(HeapObject* object);
+
enum InvocationMode { FROM_GC, FROM_MUTATOR };
// Maintain consistency of live bytes during incremental marking.
if (p->TryParallelSweeping()) {
SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
free_list->Concatenate(&private_free_list);
+ p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
}
}
}
PageIterator it(space);
while (it.has_next()) {
Page* p = it.next();
- if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_IN_PROGRESS) {
+ if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_FINALIZE) {
p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE);
p->MarkSweptConservatively();
}
+ ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
}
}
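
Taken together, the two mark-compact hunks implement a handoff: a sweeper thread advances a page from PENDING through IN_PROGRESS to FINALIZE, and only the main thread advances FINALIZE to DONE (calling MarkSweptConservatively). A self-contained mock of that protocol, with invented names and std::atomic standing in for V8's internals:

#include <atomic>

enum SweepState {
  SWEEP_DONE,         // main thread finalized the page
  SWEEP_FINALIZE,     // sweeper finished; page memory will not change
  SWEEP_IN_PROGRESS,  // a sweeper thread owns the page
  SWEEP_PENDING       // queued, waiting for a sweeper
};

struct PageMock {
  std::atomic<SweepState> state{SWEEP_PENDING};
};

// Sweeper thread: claim a pending page, sweep it, hand it off. After the
// store of SWEEP_FINALIZE the sweeper never writes to the page again,
// which is what makes moving an object start safe in that state.
void SweeperThread(PageMock* page) {
  SweepState expected = SWEEP_PENDING;
  if (page->state.compare_exchange_strong(expected, SWEEP_IN_PROGRESS)) {
    // ... sweep the page and build free lists ...
    page->state.store(SWEEP_FINALIZE);
  }
}

// Main thread: finalize pages the sweepers are done with, mirroring the
// PageIterator loop above.
void FinalizePage(PageMock* page) {
  if (page->state.load() == SWEEP_FINALIZE) {
    page->state.store(SWEEP_DONE);
    // ... MarkSweptConservatively()-style bookkeeping ...
  }
}
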
intptr_t GetFlags() { return flags_; }
- // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
- // PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept or was
- // swept by a sweeper thread.
// PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or
// sweeping must not be performed on that page.
+ // PARALLEL_SWEEPING_FINALIZE - A sweeper thread is done sweeping this
+ // page and will not touch the page memory anymore.
+ // PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept by a
+ // sweeper thread.
+ // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
enum ParallelSweepingState {
PARALLEL_SWEEPING_DONE,
+ PARALLEL_SWEEPING_FINALIZE,
PARALLEL_SWEEPING_IN_PROGRESS,
PARALLEL_SWEEPING_PENDING
};
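
Note that the numeric order of these states is load-bearing: CanMoveObjectStart() in the heap.cc hunk above tests parallel_sweeping() <= PARALLEL_SWEEPING_FINALIZE, so DONE and FINALIZE must remain the two smallest values. A compile-time guard along these lines (illustrative, not part of the CL) would pin the dependency down:

STATIC_ASSERT(MemoryChunk::PARALLEL_SWEEPING_DONE <
              MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
STATIC_ASSERT(MemoryChunk::PARALLEL_SWEEPING_FINALIZE <
              MemoryChunk::PARALLEL_SWEEPING_IN_PROGRESS);
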