From: yangguo@chromium.org
Date: Tue, 24 Jun 2014 09:31:30 +0000 (+0000)
Subject: Do not eagerly update allow_osr_at_loop_nesting_level.
X-Git-Tag: upstream/4.7.83~8582
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=438f49a32228cc0ecc7decb3a0e314111059fb82;p=platform%2Fupstream%2Fv8.git

Do not eagerly update allow_osr_at_loop_nesting_level.

Having debug break points prevents OSR. That causes
allow_osr_at_loop_nesting_level and the actually patched state to go
out of sync.

R=jkummerow@chromium.org
BUG=387599
LOG=Y

Review URL: https://codereview.chromium.org/346223007

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21958 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 32241c2..088a9c9 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -328,7 +328,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   code->set_allow_osr_at_loop_nesting_level(0);
   code->set_profiler_ticks(0);
   code->set_back_edge_table_offset(table_offset);
-  code->set_back_edges_patched_for_osr(false);
   CodeGenerator::PrintCode(code, info);
   info->SetCode(code);
 #ifdef ENABLE_GDB_JIT_INTERFACE
@@ -348,7 +347,7 @@ unsigned FullCodeGenerator::EmitBackEdgeTable() {
   // The back edge table consists of a length (in number of entries)
   // field, and then a sequence of entries.  Each entry is a pair of AST id
   // and code-relative pc offset.
-  masm()->Align(kIntSize);
+  masm()->Align(kPointerSize);
   unsigned offset = masm()->pc_offset();
   unsigned length = back_edges_.length();
   __ dd(length);
@@ -1617,9 +1616,11 @@ void BackEdgeTable::Patch(Isolate* isolate, Code* unoptimized) {
   DisallowHeapAllocation no_gc;
   Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
 
-  // Iterate over the back edge table and patch every interrupt
+  // Increment loop nesting level by one and iterate over the back edge table
+  // to find the matching loops to patch the interrupt
   // call to an unconditional call to the replacement code.
-  int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+  int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level() + 1;
+  if (loop_nesting_level > Code::kMaxLoopNestingMarker) return;
 
   BackEdgeTable back_edges(unoptimized, &no_gc);
   for (uint32_t i = 0; i < back_edges.length(); i++) {
@@ -1631,8 +1632,8 @@
     }
   }
 
-  unoptimized->set_back_edges_patched_for_osr(true);
-  ASSERT(Verify(isolate, unoptimized, loop_nesting_level));
+  unoptimized->set_allow_osr_at_loop_nesting_level(loop_nesting_level);
+  ASSERT(Verify(isolate, unoptimized));
 }
 
 
@@ -1641,7 +1642,6 @@ void BackEdgeTable::Revert(Isolate* isolate, Code* unoptimized) {
   Code* patch = isolate->builtins()->builtin(Builtins::kInterruptCheck);
 
   // Iterate over the back edge table and revert the patched interrupt calls.
-  ASSERT(unoptimized->back_edges_patched_for_osr());
   int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
 
   BackEdgeTable back_edges(unoptimized, &no_gc);
@@ -1654,10 +1654,9 @@
     }
   }
 
-  unoptimized->set_back_edges_patched_for_osr(false);
   unoptimized->set_allow_osr_at_loop_nesting_level(0);
   // Assert that none of the back edges are patched anymore.
-  ASSERT(Verify(isolate, unoptimized, -1));
+  ASSERT(Verify(isolate, unoptimized));
 }
 
 
@@ -1683,10 +1682,9 @@ void BackEdgeTable::RemoveStackCheck(Handle<Code> code, uint32_t pc_offset) {
 
 
 #ifdef DEBUG
-bool BackEdgeTable::Verify(Isolate* isolate,
-                           Code* unoptimized,
-                           int loop_nesting_level) {
+bool BackEdgeTable::Verify(Isolate* isolate, Code* unoptimized) {
   DisallowHeapAllocation no_gc;
+  int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
   BackEdgeTable back_edges(unoptimized, &no_gc);
   for (uint32_t i = 0; i < back_edges.length(); i++) {
     uint32_t loop_depth = back_edges.loop_depth(i);
diff --git a/src/full-codegen.h b/src/full-codegen.h
index bd6aa13..cede9ac 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -890,10 +890,8 @@ class BackEdgeTable {
     OSR_AFTER_STACK_CHECK
   };
 
-  // Patch all interrupts with allowed loop depth in the unoptimized code to
-  // unconditionally call replacement_code.
-  static void Patch(Isolate* isolate,
-                    Code* unoptimized_code);
+  // Increase allowed loop nesting level by one and patch those matching loops.
+  static void Patch(Isolate* isolate, Code* unoptimized_code);
 
   // Patch the back edge to the target state, provided the correct callee.
   static void PatchAt(Code* unoptimized_code,
@@ -919,9 +917,7 @@
 
 #ifdef DEBUG
   // Verify that all back edges of a certain loop depth are patched.
-  static bool Verify(Isolate* isolate,
-                     Code* unoptimized_code,
-                     int loop_nesting_level);
+  static bool Verify(Isolate* isolate, Code* unoptimized_code);
 #endif  // DEBUG
 
  private:
diff --git a/src/globals.h b/src/globals.h
index 742daef..c76dc85 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -168,6 +168,8 @@ const size_t kMaximalCodeRangeSize = 0 * MB;
 #endif
 #endif
 
+STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
+
 const int kBitsPerByte = 8;
 const int kBitsPerByteLog2 = 3;
 const int kBitsPerPointer = kPointerSize * kBitsPerByte;
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 7640a76..ed18e2a 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -4651,14 +4651,17 @@ void Code::set_compiled_optimizable(bool value) {
 
 int Code::allow_osr_at_loop_nesting_level() {
   ASSERT_EQ(FUNCTION, kind());
-  return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
+  int fields = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+  return AllowOSRAtLoopNestingLevelField::decode(fields);
 }
 
 
 void Code::set_allow_osr_at_loop_nesting_level(int level) {
   ASSERT_EQ(FUNCTION, kind());
   ASSERT(level >= 0 && level <= kMaxLoopNestingMarker);
-  WRITE_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset, level);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+  int updated = AllowOSRAtLoopNestingLevelField::update(previous, level);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
 }
 
 
@@ -4711,13 +4714,14 @@ void Code::set_safepoint_table_offset(unsigned offset) {
 unsigned Code::back_edge_table_offset() {
   ASSERT_EQ(FUNCTION, kind());
   return BackEdgeTableOffsetField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
+      READ_UINT32_FIELD(this, kKindSpecificFlags2Offset)) << kPointerSizeLog2;
 }
 
 
 void Code::set_back_edge_table_offset(unsigned offset) {
   ASSERT_EQ(FUNCTION, kind());
-  ASSERT(IsAligned(offset, static_cast<Address>(kIntSize)));
+  ASSERT(IsAligned(offset, static_cast<Address>(kPointerSize)));
+  offset = offset >> kPointerSizeLog2;
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
   int updated = BackEdgeTableOffsetField::update(previous, offset);
   WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
@@ -4726,20 +4730,10 @@ void Code::set_back_edge_table_offset(unsigned offset) {
 
 bool Code::back_edges_patched_for_osr() {
   ASSERT_EQ(FUNCTION, kind());
-  return BackEdgesPatchedForOSRField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
-}
-
-
-void Code::set_back_edges_patched_for_osr(bool value) {
-  ASSERT_EQ(FUNCTION, kind());
-  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
-  int updated = BackEdgesPatchedForOSRField::update(previous, value);
-  WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
+  return allow_osr_at_loop_nesting_level() > 0;
 }
 
-
 byte Code::to_boolean_state() {
   return extra_ic_state();
 }
diff --git a/src/objects.h b/src/objects.h
index 23924ca..1cf6ec1 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -5569,7 +5569,6 @@ class Code: public HeapObject {
   inline void set_back_edge_table_offset(unsigned offset);
 
   inline bool back_edges_patched_for_osr();
-  inline void set_back_edges_patched_for_osr(bool value);
 
   // [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
   inline byte to_boolean_state();
@@ -5814,8 +5813,7 @@
   class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
   class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
 
-  static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
-  static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
+  static const int kProfilerTicksOffset = kFullCodeFlags + 1;
 
   // Flags layout.  BitField<type, shift, size>.
   class ICStateField: public BitField<InlineCacheState, 0, 3> {};
@@ -5886,9 +5884,10 @@
 
   // KindSpecificFlags2 layout (FUNCTION)
   class BackEdgeTableOffsetField: public BitField<uint32_t,
-      kIsCrankshaftedBit + 1, 29> {};  // NOLINT
-  class BackEdgesPatchedForOSRField: public BitField<bool,
-      kIsCrankshaftedBit + 1 + 29, 1> {};  // NOLINT
+      kIsCrankshaftedBit + 1, 27> {};  // NOLINT
+  class AllowOSRAtLoopNestingLevelField: public BitField<int,
+      kIsCrankshaftedBit + 1 + 27, 4> {};  // NOLINT
+  STATIC_ASSERT(AllowOSRAtLoopNestingLevelField::kMax >= kMaxLoopNestingMarker);
 
   static const int kArgumentsBits = 16;
   static const int kMaxArguments = (1 << kArgumentsBits) - 1;
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index dddcad0..cba563f 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -110,7 +110,9 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
 }
 
 
-void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
+void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
+                                                int loop_nesting_levels) {
+  SharedFunctionInfo* shared = function->shared();
   // See AlwaysFullCompiler (in compiler.cc) comment on why we need
   // Debug::has_break_points().
   if (!FLAG_use_osr ||
@@ -119,7 +121,6 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
     return;
   }
 
-  SharedFunctionInfo* shared = function->shared();
   // If the code is not optimizable, don't try OSR.
   if (!shared->code()->optimizable()) return;
 
@@ -137,7 +138,9 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
     PrintF("]\n");
   }
 
-  BackEdgeTable::Patch(isolate_, shared->code());
+  for (int i = 0; i < loop_nesting_levels; i++) {
+    BackEdgeTable::Patch(isolate_, shared->code());
+  }
 }
 
 
@@ -175,14 +178,8 @@ void RuntimeProfiler::OptimizeNow() {
     if (shared_code->kind() != Code::FUNCTION) continue;
     if (function->IsInOptimizationQueue()) continue;
 
-    if (FLAG_always_osr &&
-        shared_code->allow_osr_at_loop_nesting_level() == 0) {
-      // Testing mode: always try an OSR compile for every function.
-      for (int i = 0; i < Code::kMaxLoopNestingMarker; i++) {
-        // TODO(titzer): fix AttemptOnStackReplacement to avoid this dumb loop.
-        shared_code->set_allow_osr_at_loop_nesting_level(i);
-        AttemptOnStackReplacement(function);
-      }
+    if (FLAG_always_osr) {
+      AttemptOnStackReplacement(function, Code::kMaxLoopNestingMarker);
       // Fall through and do a normal optimized compile as well.
     } else if (!frame->is_optimized() &&
                (function->IsMarkedForOptimization() ||
@@ -196,12 +193,7 @@ void RuntimeProfiler::OptimizeNow() {
       if (shared_code->CodeSize() > allowance) {
         if (ticks < 255) shared_code->set_profiler_ticks(ticks + 1);
       } else {
-        int nesting = shared_code->allow_osr_at_loop_nesting_level();
-        if (nesting < Code::kMaxLoopNestingMarker) {
-          int new_nesting = nesting + 1;
-          shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
-          AttemptOnStackReplacement(function);
-        }
+        AttemptOnStackReplacement(function);
       }
       continue;
     }
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index fa8352d..14ced76 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -23,7 +23,7 @@ class RuntimeProfiler {
 
   void NotifyICChanged() { any_ic_changed_ = true; }
 
-  void AttemptOnStackReplacement(JSFunction* function);
+  void AttemptOnStackReplacement(JSFunction* function, int nesting_levels = 1);
 
  private:
   void Optimize(JSFunction* function, const char* reason);
diff --git a/src/runtime.cc b/src/runtime.cc
index ca533df..bd0b742 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -8530,16 +8530,11 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
   if (args.length() == 2 &&
       unoptimized->kind() == Code::FUNCTION) {
     CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
-    if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) {
+    if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr")) && FLAG_use_osr) {
       // Start patching from the currently patched loop nesting level.
-      int current_level = unoptimized->allow_osr_at_loop_nesting_level();
-      ASSERT(BackEdgeTable::Verify(isolate, unoptimized, current_level));
-      if (FLAG_use_osr) {
-        for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) {
-          unoptimized->set_allow_osr_at_loop_nesting_level(i);
-          isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
-        }
-      }
+      ASSERT(BackEdgeTable::Verify(isolate, unoptimized));
+      isolate->runtime_profiler()->AttemptOnStackReplacement(
+          *function, Code::kMaxLoopNestingMarker);
     } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("concurrent")) &&
                isolate->concurrent_recompilation_enabled()) {
       function->MarkForConcurrentOptimization();
diff --git a/test/mjsunit/regress/regress-crbug-387599.js b/test/mjsunit/regress/regress-crbug-387599.js
new file mode 100644
index 0000000..98750aa
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-387599.js
@@ -0,0 +1,19 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-debug-as debug
+
+Debug = debug.Debug;
+Debug.setListener(function() {});
+
+function f() {
+  for (var i = 0; i < 100; i++) {
+    %OptimizeFunctionOnNextCall(f, "osr");
+  }
+}
+
+Debug.setBreakPoint(f, 0, 0);
+f();
+f();
+Debug.setListener(null);
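
The standalone sketch below is an illustration, not part of the patch: it uses simplified stand-in types (a mock Code with an inline back-edge list, none of V8's real APIs, and Code::kMaxLoopNestingMarker assumed to be 6) to show the invariant the commit establishes. Patch() now advances the stored nesting level itself, one level per call, and back_edges_patched_for_osr() is derived from that level instead of being stored separately, so the level can no longer run ahead of the actually patched state when an OSR attempt is suppressed, e.g. by debug break points.

// Standalone sketch with mock types -- not V8's real Code/BackEdgeTable API.
#include <cassert>
#include <cstdio>
#include <vector>

namespace sketch {

const int kMaxLoopNestingMarker = 6;  // assumed value of Code::kMaxLoopNestingMarker

struct BackEdge {
  int loop_depth;  // nesting depth of the loop this back edge belongs to
  bool patched;    // true once the interrupt call is replaced by an OSR call
};

struct Code {
  // After the patch this field is the single source of truth; it is only
  // advanced by Patch() below, never by callers.
  int allow_osr_at_loop_nesting_level = 0;
  std::vector<BackEdge> back_edges;

  // Derived instead of stored, as in the new Code::back_edges_patched_for_osr.
  bool back_edges_patched_for_osr() const {
    return allow_osr_at_loop_nesting_level > 0;
  }
};

// Mirrors the new BackEdgeTable::Patch: bump the level by one, patch only the
// back edges of loops at exactly that depth, then record the new level.
void Patch(Code* code) {
  int level = code->allow_osr_at_loop_nesting_level + 1;
  if (level > kMaxLoopNestingMarker) return;
  for (BackEdge& edge : code->back_edges) {
    if (edge.loop_depth == level) edge.patched = true;
  }
  // Updating the level here, after patching, is what keeps it in sync with
  // the actually patched state (the bug behind BUG=387599).
  code->allow_osr_at_loop_nesting_level = level;
}

// Mirrors the new BackEdgeTable::Revert: unpatch everything, reset the level.
void Revert(Code* code) {
  for (BackEdge& edge : code->back_edges) edge.patched = false;
  code->allow_osr_at_loop_nesting_level = 0;
}

// Debug check in the spirit of the new one-argument BackEdgeTable::Verify:
// the stored level alone determines which back edges must be patched.
bool Verify(const Code& code) {
  for (const BackEdge& edge : code.back_edges) {
    bool expected = edge.loop_depth <= code.allow_osr_at_loop_nesting_level;
    if (edge.patched != expected) return false;
  }
  return true;
}

}  // namespace sketch

int main() {
  sketch::Code code;
  code.back_edges = {{1, false}, {2, false}, {2, false}, {3, false}};

  // Callers that want several levels now simply call Patch that many times,
  // as AttemptOnStackReplacement does with its loop_nesting_levels parameter.
  for (int i = 0; i < 2; i++) sketch::Patch(&code);
  assert(code.back_edges_patched_for_osr());
  assert(sketch::Verify(code));

  sketch::Revert(&code);
  assert(!code.back_edges_patched_for_osr());
  assert(sketch::Verify(code));

  std::printf("invariant holds\n");
  return 0;
}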