# Enable disassembler for `--print-code` v8 options
'v8_enable_disassembler': 1,
+ # Disable support for postmortem debugging, continuously broken.
+ 'v8_postmortem_support%': 'false',
+
# Don't bake anything extra into the snapshot.
'v8_use_external_startup_data%': 0,
'conditions': [
['OS == "win"', {
'os_posix': 0,
- 'v8_postmortem_support%': 'false',
}, {
'os_posix': 1,
- 'v8_postmortem_support%': 'true',
}],
['GENERATOR == "ninja" or OS== "mac"', {
'OBJ_DIR': '<(PRODUCT_DIR)/obj',
Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
Jianghua Yang <jianghua.yjh@alibaba-inc.com>
-Joel Stanley <joel.stan@gmail.com>
+Joel Stanley <joel@jms.id.au>
Johan Bergström <johan@bergstroem.nu>
Jonathan Liu <net147@gmail.com>
Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
#define V8_MAJOR_VERSION 4
#define V8_MINOR_VERSION 2
#define V8_BUILD_NUMBER 77
-#define V8_PATCH_LEVEL 15
+#define V8_PATCH_LEVEL 18
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
internal::Object** escape_slot_;
};
+class V8_EXPORT SealHandleScope {
+ public:
+ SealHandleScope(Isolate* isolate);
+ ~SealHandleScope();
+
+ private:
+ // Make it hard to create heap-allocated or illegal handle scopes by
+ // disallowing certain operations.
+ SealHandleScope(const SealHandleScope&);
+ void operator=(const SealHandleScope&);
+ void* operator new(size_t size);
+ void operator delete(void*, size_t);
+
+ internal::Isolate* isolate_;
+ int prev_level_;
+ internal::Object** prev_limit_;
+};
+
/**
* A simple Maybe type, representing an object which may or may not have a
Handle<Integer> script_id_;
};
-class V8_EXPORT SealHandleScope {
- public:
- SealHandleScope(Isolate* isolate);
- ~SealHandleScope();
-
- private:
- // Make it hard to create heap-allocated or illegal handle scopes by
- // disallowing certain operations.
- SealHandleScope(const SealHandleScope&);
- void operator=(const SealHandleScope&);
- void* operator new(size_t size);
- void operator delete(void*, size_t);
-
- internal::Isolate* isolate_;
- int prev_level_;
- internal::Object** prev_limit_;
-};
-
/**
* A compiled JavaScript script, not yet tied to a Context.
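
For reference, a hypothetical usage sketch of the relocated SealHandleScope (not part of the patch), mirroring the test-api.cc tests further down in this diff. SealExample is an illustrative helper; it assumes an already-initialized Isolate.

#include "include/v8.h"

void SealExample(v8::Isolate* isolate) {
  v8::HandleScope handle_scope(isolate);
  v8::Local<v8::Context> context = v8::Context::New(isolate);
  v8::Context::Scope context_scope(context);

  v8::SealHandleScope seal(isolate);
  // Creating a handle here would trip the sealed scope in debug builds:
  //   v8::Local<v8::Object> obj = v8::Object::New(isolate);
  {
    v8::HandleScope nested(isolate);
    // Inside a nested HandleScope, handle creation works again.
    v8::Local<v8::Object> obj = v8::Object::New(isolate);
    (void)obj;
  }
}
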
Node* value = node->InputAt(0);
if (CanCover(node, value)) {
switch (value->opcode()) {
- case IrOpcode::kWord64Sar: {
+ case IrOpcode::kWord64Sar:
+ case IrOpcode::kWord64Shr: {
Int64BinopMatcher m(value);
- if (m.right().IsInRange(1, 32)) {
+ if (m.right().Is(32)) {
Emit(kX64Shr, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()),
- g.UseImmediate(m.right().node()));
- return;
- }
- break;
- }
- case IrOpcode::kWord64Shl: {
- Int64BinopMatcher m(value);
- if (m.right().IsInRange(1, 31)) {
- Emit(kX64Shl32, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()),
- g.UseImmediate(m.right().node()));
+ g.UseRegister(m.left().node()), g.TempImmediate(32));
return;
}
        break;
      }
      default:
        break;
    }
  }
- // Otherwise truncation from 64-bit to 32-bit is a no-nop, as 32-bit
- // operations just ignore the upper 64-bit.
- Emit(kArchNop, g.DefineAsRegister(node), g.Use(value));
+ Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
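
Not V8 code: a small self-contained check of the bit-level identity this hunk relies on — truncating (x >> 32) to 32 bits yields the upper half of x whether the 64-bit shift was arithmetic (Word64Sar) or logical (Word64Shr), so a single 64-bit shift whose low 32 bits are read back (kX64Shr with immediate 32) suffices. Assumes the usual two's-complement behaviour of signed right shift.

#include <cassert>
#include <cstdint>

static int32_t TruncateAfterSar(int64_t x) {
  return static_cast<int32_t>(x >> 32);  // TruncateInt64ToInt32(Word64Sar(x, 32))
}

static int32_t TruncateAfterShr(int64_t x) {
  return static_cast<int32_t>(static_cast<uint64_t>(x) >> 32);  // ...(Word64Shr(x, 32))
}

int main() {
  const int64_t samples[] = {INT64_MIN, -1, 0, 1, 0x123456789abcdef0LL, INT64_MAX};
  for (int64_t x : samples) {
    // The upper 32 bits of x, which is what both patterns compute.
    int32_t upper_half = static_cast<int32_t>(static_cast<uint64_t>(x) >> 32);
    assert(TruncateAfterSar(x) == upper_half);
    assert(TruncateAfterShr(x) == upper_half);
  }
  return 0;
}

The unit-test hunks near the end of this patch exercise exactly these selector patterns.
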
// That is the maximum idle time we will have during frame rendering.
static const size_t kMaxFrameRenderingIdleTime = 16;
+ // Minimum idle time to start incremental marking.
+ static const size_t kMinIdleTimeToStartIncrementalMarking = 10;
+
// If we haven't recorded any scavenger events yet, we use a conservative
// lower bound for the scavenger speed.
static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
allocation_timeout_(0),
#endif // DEBUG
old_generation_allocation_limit_(initial_old_generation_size_),
+ idle_old_generation_allocation_limit_(
+ kMinimumOldGenerationAllocationLimit),
old_gen_exhausted_(false),
inline_allocation_disabled_(false),
store_buffer_rebuilder_(store_buffer()),
// Temporarily set the limit for case when PostGarbageCollectionProcessing
// allocates and triggers GC. The real limit is set at after
// PostGarbageCollectionProcessing.
- old_generation_allocation_limit_ =
- OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
+ SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
old_gen_exhausted_ = false;
old_generation_size_configured_ = true;
} else {
// Register the amount of external allocated memory.
amount_of_external_allocated_memory_at_last_global_gc_ =
amount_of_external_allocated_memory_;
- old_generation_allocation_limit_ = OldGenerationAllocationLimit(
- PromotedSpaceSizeOfObjects(), freed_global_handles);
+ SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
+ freed_global_handles);
// We finished a marking cycle. We can uncommit the marking deque until
// we start marking again.
mark_compact_collector_.UncommitMarkingDeque();
bool Heap::WorthActivatingIncrementalMarking() {
return incremental_marking()->IsStopped() &&
- incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
+ incremental_marking()->ShouldActivate();
}
static_cast<double>(base::Time::kMillisecondsPerSecond);
HistogramTimerScope idle_notification_scope(
isolate_->counters()->gc_idle_notification());
+ double idle_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
GCIdleTimeHandler::HeapState heap_state;
heap_state.contexts_disposed = contexts_disposed_;
heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
// TODO(ulan): Start incremental marking only for large heaps.
+ intptr_t limit = old_generation_allocation_limit_;
+ if (static_cast<size_t>(idle_time_in_ms) >
+ GCIdleTimeHandler::kMinIdleTimeToStartIncrementalMarking) {
+ limit = idle_old_generation_allocation_limit_;
+ }
+
heap_state.can_start_incremental_marking =
- incremental_marking()->ShouldActivate() && FLAG_incremental_marking;
+ incremental_marking()->WorthActivating() &&
+ NextGCIsLikelyToBeFull(limit) && FLAG_incremental_marking;
heap_state.sweeping_in_progress =
mark_compact_collector()->sweeping_in_progress();
heap_state.mark_compact_speed_in_bytes_per_ms =
static_cast<size_t>(
tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
- double idle_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
GCIdleTimeAction action =
gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
}
-intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
- int freed_global_handles) {
+intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
+ intptr_t old_gen_size) {
+ CHECK(factor > 1.0);
+ CHECK(old_gen_size > 0);
+ intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
+ limit = Max(limit, kMinimumOldGenerationAllocationLimit);
+ limit += new_space_.Capacity();
+ intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+ return Min(limit, halfway_to_the_max);
+}
+
+
+void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
+ int freed_global_handles) {
const int kMaxHandles = 1000;
const int kMinHandles = 100;
- double min_factor = 1.1;
+ const double min_factor = 1.1;
double max_factor = 4;
+ const double idle_max_factor = 1.5;
// We set the old generation growing factor to 2 to grow the heap slower on
// memory-constrained devices.
if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
max_factor = 2;
}
+
// If there are many freed global handles, then the next full GC will
// likely collect a lot of garbage. Choose the heap growing factor
// depending on freed global handles.
// TODO(ulan, hpayer): Take into account mutator utilization.
+ // TODO(hpayer): The idle factor could make the handles heuristic obsolete.
+ // Look into that.
double factor;
if (freed_global_handles <= kMinHandles) {
factor = max_factor;
factor = min_factor;
}
- intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
- limit = Max(limit, kMinimumOldGenerationAllocationLimit);
- limit += new_space_.Capacity();
- intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
- return Min(limit, halfway_to_the_max);
+ old_generation_allocation_limit_ =
+ CalculateOldGenerationAllocationLimit(factor, old_gen_size);
+ idle_old_generation_allocation_limit_ = CalculateOldGenerationAllocationLimit(
+ Min(factor, idle_max_factor), old_gen_size);
}
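
To make the effect of the second limit concrete, here is a standalone sketch of the arithmetic in CalculateOldGenerationAllocationLimit. The constants (minimum limit, new-space capacity, maximum old-generation size) are assumed placeholder values for illustration, not V8's actual configuration.

#include <algorithm>
#include <cstdint>
#include <cstdio>

namespace {

const int64_t kMB = 1024 * 1024;
const int64_t kMinimumOldGenerationAllocationLimit = 2 * kMB;  // assumed
const int64_t kNewSpaceCapacity = 16 * kMB;                    // assumed
const int64_t kMaxOldGenerationSize = 700 * kMB;               // assumed

// Mirrors the limit calculation in the hunk above, with the assumed constants.
int64_t CalculateLimit(double factor, int64_t old_gen_size) {
  int64_t limit = static_cast<int64_t>(old_gen_size * factor);
  limit = std::max(limit, kMinimumOldGenerationAllocationLimit);
  limit += kNewSpaceCapacity;
  int64_t halfway_to_the_max = (old_gen_size + kMaxOldGenerationSize) / 2;
  return std::min(limit, halfway_to_the_max);
}

}  // namespace

int main() {
  // With few freed global handles the regular factor is 4 (2 on
  // memory-constrained devices), while the idle factor is capped at 1.5.
  int64_t old_gen_size = 100 * kMB;
  std::printf("regular limit: %lld MB\n",  // 400 MB with these assumptions
              static_cast<long long>(CalculateLimit(4.0, old_gen_size) / kMB));
  std::printf("idle limit:    %lld MB\n",  // 166 MB with these assumptions
              static_cast<long long>(CalculateLimit(1.5, old_gen_size) / kMB));
}

With these assumed numbers the idle limit sits well below the regular one, which is what lets IdleNotification start incremental marking sooner once at least kMinIdleTimeToStartIncrementalMarking milliseconds of idle time are available.
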
// Returns the size of all objects residing in the heap.
intptr_t SizeOfObjects();
+ intptr_t old_generation_allocation_limit() const {
+ return old_generation_allocation_limit_;
+ }
+
// Return the starting address and a mask for the new space. And-masking an
// address with the mask will result in the start address of the new space
// for all addresses in either semispace.
static const int kMaxExecutableSizeHugeMemoryDevice =
256 * kPointerMultiplier;
- intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
- int freed_global_handles);
+ // Calculates the allocation limit based on a given growing factor and a
+ // given old generation size.
+ intptr_t CalculateOldGenerationAllocationLimit(double factor,
+ intptr_t old_gen_size);
+
+ // Sets the allocation limit to trigger the next full garbage collection.
+ void SetOldGenerationAllocationLimit(intptr_t old_gen_size,
+ int freed_global_handles);
// Indicates whether inline bump-pointer allocation has been disabled.
bool inline_allocation_disabled() { return inline_allocation_disabled_; }
survived_since_last_expansion_ += survived;
}
- inline bool NextGCIsLikelyToBeFull() {
+ inline bool NextGCIsLikelyToBeFull(intptr_t limit) {
if (FLAG_gc_global) return true;
if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
- intptr_t adjusted_allocation_limit =
- old_generation_allocation_limit_ - new_space_.Capacity();
+ intptr_t adjusted_allocation_limit = limit - new_space_.Capacity();
if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
// generation and on every allocation in large object space.
intptr_t old_generation_allocation_limit_;
+ // The allocation limit when there is > kMinIdleTimeToStartIncrementalMarking
+ // idle time in the idle time handler.
+ intptr_t idle_old_generation_allocation_limit_;
+
// Indicates that an allocation has failed in the old generation since the
// last GC.
bool old_gen_exhausted_;
bool IncrementalMarking::ShouldActivate() {
- return WorthActivating() && heap_->NextGCIsLikelyToBeFull();
+ return WorthActivating() &&
+ heap_->NextGCIsLikelyToBeFull(
+ heap_->old_generation_allocation_limit());
}
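
Not V8 code: a toy model combining the heap.cc and incremental-marking.cc hunks above, showing how the same heap state can fail the regular-limit check (allocation path) yet pass the lower idle-limit check (idle-notification path). The ToyHeap type and all numbers are made up for illustration; the limit-minus-new-space comparison mirrors NextGCIsLikelyToBeFull.

#include <cstdint>
#include <cstdio>

struct ToyHeap {
  int64_t promoted_total_size;
  int64_t new_space_capacity;
  int64_t regular_limit;
  int64_t idle_limit;

  // Same shape as Heap::NextGCIsLikelyToBeFull(limit) in the hunk above.
  bool NextGCIsLikelyToBeFull(int64_t limit) const {
    return promoted_total_size >= limit - new_space_capacity;
  }
};

int main() {
  const int64_t kMB = 1024 * 1024;
  ToyHeap heap{180 * kMB, 16 * kMB, 400 * kMB, 166 * kMB};

  // Allocation path (IncrementalMarking::ShouldActivate): regular limit.
  std::printf("allocation path starts marking: %d\n",
              heap.NextGCIsLikelyToBeFull(heap.regular_limit));
  // Idle path with >= kMinIdleTimeToStartIncrementalMarking ms: idle limit.
  std::printf("idle path starts marking:       %d\n",
              heap.NextGCIsLikelyToBeFull(heap.idle_limit));
}
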
PropertyAccessType access_type,
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode) {
+ DCHECK(top_info()->IsStub() || checked_object->IsCompareMap() ||
+ checked_object->IsCheckMaps());
DCHECK((!IsExternalArrayElementsKind(elements_kind) &&
!IsFixedTypedArrayElementsKind(elements_kind)) ||
!is_js_array);
new_size = AddUncasted<HAdd>(length, graph()->GetConstant1());
bool is_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- BuildUncheckedMonomorphicElementAccess(array, length,
- value_to_push, is_array,
- elements_kind, STORE,
- NEVER_RETURN_HOLE,
- STORE_AND_GROW_NO_TRANSITION);
+ HValue* checked_array = Add<HCheckMaps>(array, receiver_map);
+ BuildUncheckedMonomorphicElementAccess(
+ checked_array, length, value_to_push, is_array, elements_kind,
+ STORE, NEVER_RETURN_HOLE, STORE_AND_GROW_NO_TRANSITION);
if (!ast_context()->IsEffect()) Push(new_size);
Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
int args_count_no_receiver = arguments_count - 1;
if (function->IsConstant() &&
HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
- HValue* receiver = environment()->ExpressionStackAt(args_count_no_receiver);
- Handle<Map> receiver_map;
- if (receiver->IsConstant() &&
- HConstant::cast(receiver)->handle(isolate())->IsHeapObject()) {
- receiver_map =
- handle(Handle<HeapObject>::cast(
- HConstant::cast(receiver)->handle(isolate()))->map());
- }
-
known_function =
Handle<JSFunction>::cast(HConstant::cast(function)->handle(isolate()));
- if (TryInlineBuiltinMethodCall(expr, known_function, receiver_map,
+ if (TryInlineBuiltinMethodCall(expr, known_function, Handle<Map>(),
args_count_no_receiver)) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");
}
-TEST(SealHandleScope) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- LocalContext env;
-
- v8::SealHandleScope seal(isolate);
-
- // Should fail
- v8::Local<v8::Object> obj = v8::Object::New(isolate);
-
- USE(obj);
-}
-
-
-TEST(SealHandleScopeNested) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- LocalContext env;
-
- v8::SealHandleScope seal(isolate);
-
- {
- v8::HandleScope handle_scope(isolate);
-
- // Should work
- v8::Local<v8::Object> obj = v8::Object::New(isolate);
-
- USE(obj);
- }
-}
-
-
TEST(CallCompletedCallbackOneException) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
}
free(buffer);
}
+
+
+TEST(SealHandleScope) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ LocalContext env;
+
+ v8::SealHandleScope seal(isolate);
+
+ // Should fail
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
+
+ USE(obj);
+}
+
+
+TEST(SealHandleScopeNested) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ LocalContext env;
+
+ v8::SealHandleScope seal(isolate);
+
+ {
+ v8::HandleScope handle_scope(isolate);
+
+ // Should work
+ v8::Local<v8::Object> obj = v8::Object::New(isolate);
+
+ USE(obj);
+ }
+}
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = [1.5];
+
+function p() {
+ Array.prototype.push.call(a, 1.7);
+}
+
+p();
+p();
+p();
+%OptimizeFunctionOnNextCall(p);
+p();
+a.push({});
+p();
+assertEquals(1.7, a[a.length - 1]);
}
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
+ StreamBuilder m(this, kMachInt32, kMachInt64);
+ m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+}
+
+
// -----------------------------------------------------------------------------
// Loads and stores
// TruncateInt64ToInt32.
-TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
StreamBuilder m(this, kMachInt32, kMachInt64);
- m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
+ Node* const p = m.Parameter(0);
+ Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
+ m.Return(t);
Stream s = m.Build();
- ASSERT_EQ(0U, s.size());
-}
-
-
-TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
- TRACED_FORRANGE(int32_t, k, 1, 32) {
- StreamBuilder m(this, kMachInt32, kMachInt64);
- Node* const p = m.Parameter(0);
- Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(k)));
- m.Return(t);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
- EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(1)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
- EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
- }
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
+ EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
}
-TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shl) {
- TRACED_FORRANGE(int32_t, k, 1, 31) {
- StreamBuilder m(this, kMachInt32, kMachInt64);
- Node* const p = m.Parameter(0);
- Node* const t = m.TruncateInt64ToInt32(m.Word64Shl(p, m.Int64Constant(k)));
- m.Return(t);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kX64Shl32, s[0]->arch_opcode());
- ASSERT_EQ(2U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
- EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(1)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
- EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
- }
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
+ StreamBuilder m(this, kMachInt32, kMachInt64);
+ Node* const p = m.Parameter(0);
+ Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(32)));
+ m.Return(t);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
+ EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
}
{ 'name': 'prop_idx_first',
'value': 'DescriptorArray::kFirstIndex' },
{ 'name': 'prop_type_field',
- 'value': 'DATA' },
+ 'value': 'FIELD' },
{ 'name': 'prop_type_mask',
'value': 'PropertyDetails::TypeField::kMask' },
{ 'name': 'prop_index_mask',