}
+void InterruptStub::Generate(MacroAssembler* masm) {
+ __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
void MathPowStub::Generate(MacroAssembler* masm) {
CpuFeatures::Scope vfp3_scope(VFP3);
const Register base = r1;
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+ Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ add(r0, r0, Operand(Smi::FromInt(1)));
__ push(r0);
- EmitStackCheck(stmt);
+ EmitStackCheck(stmt, &loop);
__ b(&loop);
// Remove the pointers stored on the stack.
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
V(StackCheck) \
+ V(Interrupt) \
V(FastNewClosure) \
V(FastNewContext) \
V(FastNewBlockContext) \
};
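+// Stub that tail-calls the %Interrupt runtime function. With
+// --count-based-interrupts, loop back edges call this stub instead of the
+// StackCheckStub once the interrupt budget is exhausted.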
+class InterruptStub : public CodeStub {
+ public:
+ InterruptStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return Interrupt; }
+ int MinorKey() { return 0; }
+};
+
+
class ToNumberStub: public CodeStub {
public:
ToNumberStub() { }
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
}
isolate->counters()->stack_interrupts()->Increment();
- if (stack_guard->IsRuntimeProfilerTick()) {
+ // If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
+ if (FLAG_count_based_interrupts ||
+ stack_guard->IsRuntimeProfilerTick()) {
isolate->counters()->runtime_profiler_ticks()->Increment();
stack_guard->Continue(RUNTIME_PROFILER_TICK);
isolate->runtime_profiler()->OptimizeNow();
return isolate->heap()->undefined_value();
}
+
} } // namespace v8::internal
DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability")
DEFINE_bool(self_optimization, false,
"primitive functions trigger their own optimization")
+DEFINE_bool(count_based_interrupts, false,
+ "trigger profiler ticks based on counting instead of timing")
+DEFINE_bool(weighted_back_edges, false,
+ "weight back edges by jump distance for interrupt triggering")
+DEFINE_int(interrupt_budget, 100,
+ "execution budget before interrupt is triggered")
DEFINE_implication(experimental_profiler, watch_ic_patching)
DEFINE_implication(experimental_profiler, self_optimization)
+DEFINE_implication(experimental_profiler, count_based_interrupts)
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
DEFINE_bool(debug_code, false,
// Check stack before looping.
PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
__ bind(&stack_check);
- EmitStackCheck(stmt);
+ EmitStackCheck(stmt, &body);
__ jmp(&body);
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
SetStatementPosition(stmt);
// Check stack before looping.
- EmitStackCheck(stmt);
+ EmitStackCheck(stmt, &body);
__ bind(&test);
VisitForControl(stmt->cond(),
SetStatementPosition(stmt);
// Check stack before looping.
- EmitStackCheck(stmt);
+ EmitStackCheck(stmt, &body);
__ bind(&test);
if (stmt->cond() != NULL) {
// Platform-specific code for checking the stack limit at the back edge of
// a loop.
- void EmitStackCheck(IterationStatement* stmt);
+  // This is meant to be called at loop back edges. |back_edge_target| is
+ // the jump target of the back edge and is used to approximate the amount
+ // of code inside the loop.
+ void EmitStackCheck(IterationStatement* stmt, Label* back_edge_target);
// Record the OSR AST id corresponding to a stack check in the code.
void RecordStackCheck(unsigned osr_ast_id);
// Emit a table of stack check ids and pcs into the code stream. Return
ZoneList<BailoutEntry> stack_checks_;
ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
Handle<FixedArray> handler_table_;
+ Handle<JSGlobalPropertyCell> profiling_counter_;
friend class NestedStatement;
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
- // If we decide not to perform on-stack replacement we perform a
- // stack guard check to enable interrupts.
+ // Insert a stack guard check so that if we decide not to perform
+ // on-stack replacement right away, the function calling this stub can
+ // still be interrupted.
__ bind(&stack_check);
Label ok;
ExternalReference stack_limit =
}
+void InterruptStub::Generate(MacroAssembler* masm) {
+ __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
}
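+// Instruction bytes expected around the patched back-edge call site: the
+// conditional jump guarding the call (jns for count-based interrupts, jae
+// for stack checks) with its offset, the call opcode, and the two-byte nop
+// that replaces the jump once the site is patched for OSR.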
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x11;
+static const byte kJaeInstruction = 0x73;
+static const byte kJaeOffset = 0x07;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
+
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
// call <on-stack replacement>
// test eax, <loop nesting depth>
// ok:
- ASSERT(*(call_target_address - 3) == 0x73 && // jae
- *(call_target_address - 2) == 0x07 && // offset
- *(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x66; // 2 byte nop part 1
- *(call_target_address - 2) = 0x90; // 2 byte nop part 2
+
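+  // The back edge is guarded by a jns (emitted by j(positive) over the
+  // profiling counter) when count-based interrupts are enabled, and by a jae
+  // from the stack limit comparison otherwise.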
+ if (FLAG_count_based_interrupts) {
+ ASSERT(*(call_target_address - 3) == kJnsInstruction);
+ ASSERT(*(call_target_address - 2) == kJnsOffset);
+ } else {
+ ASSERT(*(call_target_address - 3) == kJaeInstruction);
+ ASSERT(*(call_target_address - 2) == kJaeOffset);
+ }
+ ASSERT(*(call_target_address - 1) == kCallInstruction);
+ *(call_target_address - 3) = kNopByteOne;
+ *(call_target_address - 2) = kNopByteTwo;
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
Address call_target_address = pc_after - kIntSize;
ASSERT(replacement_code->entry() ==
Assembler::target_address_at(call_target_address));
+
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
- ASSERT(*(call_target_address - 3) == 0x66 && // 2 byte nop part 1
- *(call_target_address - 2) == 0x90 && // 2 byte nop part 2
- *(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x73; // jae
- *(call_target_address - 2) = 0x07; // offset
+ ASSERT(*(call_target_address - 3) == kNopByteOne &&
+ *(call_target_address - 2) == kNopByteTwo &&
+ *(call_target_address - 1) == kCallInstruction);
+ if (FLAG_count_based_interrupts) {
+ *(call_target_address - 3) = kJnsInstruction;
+ *(call_target_address - 2) = kJnsOffset;
+ } else {
+ *(call_target_address - 3) = kJaeInstruction;
+ *(call_target_address - 2) = kJaeOffset;
+ }
Assembler::set_target_address_at(call_target_address,
check_code->entry());
scope_ = info->scope();
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
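+  // Allocate the cell holding this function's remaining interrupt budget.
+  // Loop back edges decrement it and call the InterruptStub when it runs out,
+  // after which it is reset to FLAG_interrupt_budget (see EmitStackCheck).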
+ profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+ Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+
+ if (FLAG_count_based_interrupts) {
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
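+      // Weight the back edge by the amount of code between it and its jump
+      // target: roughly one budget unit per 100 bytes, clamped to [1, 127].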
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->pc_offset() - back_edge_target->pos();
+ weight = Min(127, Max(1, distance / 100));
+ }
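+    // Decrement the budget and call the InterruptStub once the counter drops
+    // to zero or below.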
+ __ sub(Operand::Cell(profiling_counter_), Immediate(Smi::FromInt(weight)));
+ __ j(positive, &ok, Label::kNear);
+ InterruptStub stub;
+ __ CallStub(&stub);
+ } else {
+ // Count based interrupts happen often enough when they are enabled
+ // that the additional stack checks are not necessary (they would
+ // only check for interrupts).
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ }
+
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
ASSERT(loop_depth() > 0);
__ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
+ if (FLAG_count_based_interrupts) {
+ // Reset the countdown.
+ __ mov(Operand::Cell(profiling_counter_),
+ Immediate(Smi::FromInt(FLAG_interrupt_budget)));
+ }
+
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
__ bind(loop_statement.continue_label());
__ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
- EmitStackCheck(stmt);
+ EmitStackCheck(stmt, &loop);
__ jmp(&loop);
// Remove the pointers stored on the stack.
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+ Label* back_edge_target) {
// The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
// to make sure it is constant. Branch may emit a skip-or-jump sequence
// instead of the normal Branch. It seems that the "skip" part of that
__ Addu(a0, a0, Operand(Smi::FromInt(1)));
__ push(a0);
- EmitStackCheck(stmt);
+ EmitStackCheck(stmt, &loop);
__ Branch(&loop);
// Remove the pointers stored on the stack.
// Get the stack check stub code object to match against. We aren't
// prepared to generate it, but we don't expect to have to.
- StackCheckStub check_stub;
+ bool found_code = false;
Code* stack_check_code = NULL;
- if (check_stub.FindCodeInCache(&stack_check_code)) {
+ if (FLAG_count_based_interrupts) {
+ InterruptStub interrupt_stub;
+ found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
+ } else {
+ StackCheckStub check_stub;
+ found_code = check_stub.FindCodeInCache(&stack_check_code);
+ }
+ if (found_code) {
Code* replacement_code =
isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
Code* unoptimized_code = shared->code();
} else {
function->shared()->set_profiler_ticks(ticks + 1);
}
- } else { // !FLAG_counting_profiler
+ } else { // !FLAG_watch_ic_patching
samples[sample_count++] = function;
int function_size = function->shared()->SourceSize();
if (FLAG_watch_ic_patching) {
any_ic_changed_ = false;
code_generated_ = false;
- } else { // !FLAG_counting_profiler
+ } else { // !FLAG_watch_ic_patching
// Add the collected functions as samples. It's important not to do
// this as part of collecting them because this will interfere with
// the sample lookup in case of recursive functions.
void RuntimeProfiler::NotifyTick() {
+ if (FLAG_count_based_interrupts) return;
isolate_->stack_guard()->RequestRuntimeProfilerTick();
}
void RuntimeProfiler::Reset() {
if (FLAG_watch_ic_patching) {
total_code_generated_ = 0;
- } else { // !FLAG_counting_profiler
+ } else { // !FLAG_watch_ic_patching
sampler_threshold_ = kSamplerThresholdInit;
sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
sampler_ticks_until_threshold_adjustment_ =
function->PrintName();
PrintF("]\n");
}
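+  // The back edges must be reverted to whichever stub they originally called:
+  // the InterruptStub when count-based interrupts are enabled, the
+  // StackCheckStub otherwise.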
- StackCheckStub check_stub;
- Handle<Code> check_code = check_stub.GetCode();
+ Handle<Code> check_code;
+ if (FLAG_count_based_interrupts) {
+ InterruptStub interrupt_stub;
+ check_code = interrupt_stub.GetCode();
+ } else {
+ StackCheckStub check_stub;
+ check_code = check_stub.GetCode();
+ }
Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement();
Deoptimizer::RevertStackCheckCode(*unoptimized,
*check_code,
}
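+// Runtime entry for the InterruptStub: dispatches any pending interrupt via
+// Execution::HandleStackGuardInterrupt, just as a stack guard check would.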
+RUNTIME_FUNCTION(MaybeObject*, Runtime_Interrupt) {
+ ASSERT(args.length() == 0);
+ return Execution::HandleStackGuardInterrupt();
+}
+
+
static int StackSize() {
int n = 0;
for (JavaScriptFrameIterator it; !it.done(); it.Advance()) n++;
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
F(ReThrow, 1, 1) \
F(ThrowReferenceError, 1, 1) \
F(StackGuard, 0, 1) \
+ F(Interrupt, 0, 1) \
F(PromoteScheduledException, 0, 1) \
\
/* Contexts */ \
}
+void InterruptStub::Generate(MacroAssembler* masm) {
+ __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+ Label* back_edge_target) {
Comment cmnt(masm_, "[ Stack check");
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ bind(loop_statement.continue_label());
__ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
- EmitStackCheck(stmt);
+ EmitStackCheck(stmt, &loop);
__ jmp(&loop);
// Remove the pointers stored on the stack.
--- /dev/null
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --count-based-interrupts --interrupt-budget=10 --weighted-back-edges --allow-natives-syntax
+
+// Test that OSR works properly when using count-based interrupting/profiling.
+
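+// With a small --interrupt-budget, the back edge of the loop below exhausts
+// the budget after a few iterations, triggering interrupts (and thus profiler
+// ticks) that should quickly lead to on-stack replacement of osr_this.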
+function osr_this() {
+ var a = 1;
+ // Trigger OSR.
+ while (%GetOptimizationStatus(osr_this) == 2) {}
+ return a;
+}
+assertEquals(1, osr_this());