code->set_allow_osr_at_loop_nesting_level(0);
code->set_profiler_ticks(0);
code->set_back_edge_table_offset(table_offset);
- code->set_back_edges_patched_for_osr(false);
CodeGenerator::PrintCode(code, info);
info->SetCode(code);
#ifdef ENABLE_GDB_JIT_INTERFACE
// The back edge table consists of a length (in number of entries)
// field, and then a sequence of entries. Each entry is a triple of AST id,
// code-relative pc offset, and loop depth.
- masm()->Align(kIntSize);
+ masm()->Align(kPointerSize);
unsigned offset = masm()->pc_offset();
unsigned length = back_edges_.length();
__ dd(length);
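
Each entry is then written out as three 32-bit words. A minimal sketch of the
emission loop, assuming the back_edges_ records expose id, pc and loop_depth
fields (the same fields the Verify code below reads):

  for (unsigned i = 0; i < length; ++i) {
    __ dd(back_edges_[i].id.ToInt());  // AST id of the loop's back edge.
    __ dd(back_edges_[i].pc);          // Code-relative pc offset.
    __ dd(back_edges_[i].loop_depth);  // Static nesting depth of the loop.
  }
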
DisallowHeapAllocation no_gc;
Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
- // Iterate over the back edge table and patch every interrupt
+ // Increase the allowed loop nesting level by one, then iterate over the
+ // back edge table and patch every newly matching interrupt
  // call to an unconditional call to the replacement code.
- int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+ int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level() + 1;
+ if (loop_nesting_level > Code::kMaxLoopNestingMarker) return;
BackEdgeTable back_edges(unoptimized, &no_gc);
for (uint32_t i = 0; i < back_edges.length(); i++) {
  // Patch only the back edges belonging to the newly allowed nesting level.
  if (static_cast<int>(back_edges.loop_depth(i)) == loop_nesting_level) {
    PatchAt(unoptimized, back_edges.pc(i), ON_STACK_REPLACEMENT, patch);
  }
}
- unoptimized->set_back_edges_patched_for_osr(true);
- ASSERT(Verify(isolate, unoptimized, loop_nesting_level));
+ unoptimized->set_allow_osr_at_loop_nesting_level(loop_nesting_level);
+ ASSERT(Verify(isolate, unoptimized));
}
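
Each Patch call now arms exactly one more nesting level, so a caller that
wants several levels simply calls it repeatedly, as the runtime profiler
below does. A sketch of the intended usage:

  // Arm OSR for loops up to depth 3: every call bumps the allowed level
  // by one and patches the back edges whose loop_depth equals it.
  for (int i = 0; i < 3; i++) {
    BackEdgeTable::Patch(isolate, unoptimized_code);
  }
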
Code* patch = isolate->builtins()->builtin(Builtins::kInterruptCheck);
// Iterate over the back edge table and revert the patched interrupt calls.
- ASSERT(unoptimized->back_edges_patched_for_osr());
int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
BackEdgeTable back_edges(unoptimized, &no_gc);
for (uint32_t i = 0; i < back_edges.length(); i++) {
  if (static_cast<int>(back_edges.loop_depth(i)) <= loop_nesting_level) {
    PatchAt(unoptimized, back_edges.pc(i), INTERRUPT, patch);
  }
}
- unoptimized->set_back_edges_patched_for_osr(false);
unoptimized->set_allow_osr_at_loop_nesting_level(0);
// Assert that none of the back edges are patched anymore.
- ASSERT(Verify(isolate, unoptimized, -1));
+ ASSERT(Verify(isolate, unoptimized));
}
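
A round-trip sketch of how the two routines compose, assuming this function
is BackEdgeTable::Revert, the counterpart to Patch above:

  BackEdgeTable::Patch(isolate, code);   // arms level 1
  BackEdgeTable::Patch(isolate, code);   // arms level 2
  ASSERT(code->allow_osr_at_loop_nesting_level() == 2);
  BackEdgeTable::Revert(isolate, code);  // unpatches all armed levels
  ASSERT(!code->back_edges_patched_for_osr());
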
#ifdef DEBUG
-bool BackEdgeTable::Verify(Isolate* isolate,
- Code* unoptimized,
- int loop_nesting_level) {
+bool BackEdgeTable::Verify(Isolate* isolate, Code* unoptimized) {
DisallowHeapAllocation no_gc;
+ int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
BackEdgeTable back_edges(unoptimized, &no_gc);
for (uint32_t i = 0; i < back_edges.length(); i++) {
uint32_t loop_depth = back_edges.loop_depth(i);
OSR_AFTER_STACK_CHECK
};
- // Patch all interrupts with allowed loop depth in the unoptimized code to
- // unconditionally call replacement_code.
- static void Patch(Isolate* isolate,
- Code* unoptimized_code);
+ // Increase the allowed loop nesting level by one and patch the back
+ // edges of loops that newly match it.
+ static void Patch(Isolate* isolate, Code* unoptimized_code);
// Patch the back edge to the target state, provided the correct callee.
static void PatchAt(Code* unoptimized_code,
#ifdef DEBUG
// Verify that all back edges up to the allowed loop nesting level are
// patched, and that deeper ones are not.
- static bool Verify(Isolate* isolate,
- Code* unoptimized_code,
- int loop_nesting_level);
+ static bool Verify(Isolate* isolate, Code* unoptimized_code);
#endif // DEBUG
private:
#endif
#endif
+STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
+
const int kBitsPerByte = 8;
const int kBitsPerByteLog2 = 3;
const int kBitsPerPointer = kPointerSize * kBitsPerByte;
int Code::allow_osr_at_loop_nesting_level() {
ASSERT_EQ(FUNCTION, kind());
- return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
+ int fields = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+ return AllowOSRAtLoopNestingLevelField::decode(fields);
}
void Code::set_allow_osr_at_loop_nesting_level(int level) {
ASSERT_EQ(FUNCTION, kind());
ASSERT(level >= 0 && level <= kMaxLoopNestingMarker);
- WRITE_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset, level);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+ int updated = AllowOSRAtLoopNestingLevelField::update(previous, level);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
unsigned Code::back_edge_table_offset() {
ASSERT_EQ(FUNCTION, kind());
return BackEdgeTableOffsetField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
+ READ_UINT32_FIELD(this, kKindSpecificFlags2Offset)) << kPointerSizeLog2;
}
void Code::set_back_edge_table_offset(unsigned offset) {
ASSERT_EQ(FUNCTION, kind());
- ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
+ ASSERT(IsAligned(offset, static_cast<unsigned>(kPointerSize)));
+ offset = offset >> kPointerSizeLog2;
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = BackEdgeTableOffsetField::update(previous, offset);
WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
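
Storing the offset in pointer-size units pays for the four bits taken by the
new nesting-level field: the field shrinks from 29 to 27 bits, yet on a
64-bit target (kPointerSizeLog2 == 3) it still covers byte offsets up to
2^30, versus 2^29 before. A sketch of the round trip:

  unsigned offset = 0x12345678;   // a pointer-aligned byte offset
  unsigned stored = offset >> 3;  // what the field holds; fits in 27 bits
  unsigned loaded = stored << 3;  // == offset again
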
bool Code::back_edges_patched_for_osr() {
ASSERT_EQ(FUNCTION, kind());
- return BackEdgesPatchedForOSRField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
-}
-
-
-void Code::set_back_edges_patched_for_osr(bool value) {
- ASSERT_EQ(FUNCTION, kind());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
- int updated = BackEdgesPatchedForOSRField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
+ return allow_osr_at_loop_nesting_level() > 0;
}
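
Deriving this predicate from the nesting level removes a separate flag that
previously had to be kept in sync by hand. The invariant it now encodes:

  code->set_allow_osr_at_loop_nesting_level(0);
  ASSERT(!code->back_edges_patched_for_osr());  // level 0 means unpatched
  code->set_allow_osr_at_loop_nesting_level(1);
  ASSERT(code->back_edges_patched_for_osr());   // any armed level counts
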
-
byte Code::to_boolean_state() {
return extra_ic_state();
}
inline void set_back_edge_table_offset(unsigned offset);
inline bool back_edges_patched_for_osr();
- inline void set_back_edges_patched_for_osr(bool value);
// [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
inline byte to_boolean_state();
class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
- static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
- static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
+ static const int kProfilerTicksOffset = kFullCodeFlags + 1;
// Flags layout. BitField<type, shift, size>.
class ICStateField: public BitField<InlineCacheState, 0, 3> {};
// KindSpecificFlags2 layout (FUNCTION)
class BackEdgeTableOffsetField: public BitField<int,
- kIsCrankshaftedBit + 1, 29> {}; // NOLINT
- class BackEdgesPatchedForOSRField: public BitField<bool,
- kIsCrankshaftedBit + 1 + 29, 1> {}; // NOLINT
+ kIsCrankshaftedBit + 1, 27> {}; // NOLINT
+ class AllowOSRAtLoopNestingLevelField: public BitField<int,
+ kIsCrankshaftedBit + 1 + 27, 4> {}; // NOLINT
+ STATIC_ASSERT(AllowOSRAtLoopNestingLevelField::kMax >= kMaxLoopNestingMarker);
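
Both fields pack into the same kKindSpecificFlags2 word. A minimal sketch of
the BitField contract the accessors above rely on (update rewrites only the
named field's bits; decode extracts them):

  uint32_t word = 0;
  word = AllowOSRAtLoopNestingLevelField::update(word, 5);
  word = BackEdgeTableOffsetField::update(word, 42);
  ASSERT(AllowOSRAtLoopNestingLevelField::decode(word) == 5);  // untouched
  ASSERT(BackEdgeTableOffsetField::decode(word) == 42);
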
static const int kArgumentsBits = 16;
static const int kMaxArguments = (1 << kArgumentsBits) - 1;
}
-void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
+void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
+ int loop_nesting_levels) {
+ SharedFunctionInfo* shared = function->shared();
// See AlwaysFullCompiler (in compiler.cc) comment on why we need
// Debug::has_break_points().
if (!FLAG_use_osr ||
    isolate_->DebuggerHasBreakPoints() ||
    function->IsBuiltin()) {
  return;
}
- SharedFunctionInfo* shared = function->shared();
// If the code is not optimizable, don't try OSR.
if (!shared->code()->optimizable()) return;
PrintF("]\n");
}
- BackEdgeTable::Patch(isolate_, shared->code());
+ for (int i = 0; i < loop_nesting_levels; i++) {
+ BackEdgeTable::Patch(isolate_, shared->code());
+ }
}
if (shared_code->kind() != Code::FUNCTION) continue;
if (function->IsInOptimizationQueue()) continue;
- if (FLAG_always_osr &&
- shared_code->allow_osr_at_loop_nesting_level() == 0) {
- // Testing mode: always try an OSR compile for every function.
- for (int i = 0; i < Code::kMaxLoopNestingMarker; i++) {
- // TODO(titzer): fix AttemptOnStackReplacement to avoid this dumb loop.
- shared_code->set_allow_osr_at_loop_nesting_level(i);
- AttemptOnStackReplacement(function);
- }
+ if (FLAG_always_osr) {
+ AttemptOnStackReplacement(function, Code::kMaxLoopNestingMarker);
// Fall through and do a normal optimized compile as well.
} else if (!frame->is_optimized() &&
(function->IsMarkedForOptimization() ||
if (shared_code->CodeSize() > allowance) {
if (ticks < 255) shared_code->set_profiler_ticks(ticks + 1);
} else {
- int nesting = shared_code->allow_osr_at_loop_nesting_level();
- if (nesting < Code::kMaxLoopNestingMarker) {
- int new_nesting = nesting + 1;
- shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
- AttemptOnStackReplacement(function);
- }
+ AttemptOnStackReplacement(function);
}
continue;
}
void NotifyICChanged() { any_ic_changed_ = true; }
- void AttemptOnStackReplacement(JSFunction* function);
+ void AttemptOnStackReplacement(JSFunction* function, int nesting_levels = 1);
private:
void Optimize(JSFunction* function, const char* reason);
if (args.length() == 2 &&
unoptimized->kind() == Code::FUNCTION) {
CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
- if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) {
+ if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr")) && FLAG_use_osr) {
// Start patching from the currently patched loop nesting level.
- int current_level = unoptimized->allow_osr_at_loop_nesting_level();
- ASSERT(BackEdgeTable::Verify(isolate, unoptimized, current_level));
- if (FLAG_use_osr) {
- for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) {
- unoptimized->set_allow_osr_at_loop_nesting_level(i);
- isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
- }
- }
+ ASSERT(BackEdgeTable::Verify(isolate, unoptimized));
+ isolate->runtime_profiler()->AttemptOnStackReplacement(
+ *function, Code::kMaxLoopNestingMarker);
} else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("concurrent")) &&
isolate->concurrent_recompilation_enabled()) {
function->MarkForConcurrentOptimization();
--- /dev/null
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-debug-as debug
+
+Debug = debug.Debug;
+Debug.setListener(function() {});
+
+function f() {
+ for (var i = 0; i < 100; i++) {
+ %OptimizeFunctionOnNextCall(f, "osr");
+ }
+}
+
+Debug.setBreakPoint(f, 0, 0);
+f();
+f();
+Debug.setListener(null);