}
+void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve the function.
+ __ push(r1);
+ // Push call kind information.
+ __ push(r5);
+
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(r1);
+ __ CallRuntime(Runtime::kInstallRecompiledCode, 1);
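+ // The runtime call returns the code object to tail-call in r0.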
+ // Calculate the entry point.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Restore call kind information.
+ __ pop(r5);
+ // Restore saved function.
+ __ pop(r1);
+
+ // Tear down internal frame.
+ }
+
+ // Do a tail-call of the compiled function.
+ __ Jump(r2);
+}
+
+
void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
Code::kNoExtraICState) \
V(InRecompileQueue, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(InstallRecompiledCode, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
CFunctionId id,
BuiltinExtraArguments extra_args);
static void Generate_InRecompileQueue(MacroAssembler* masm);
+ static void Generate_InstallRecompiledCode(MacroAssembler* masm);
static void Generate_ParallelRecompile(MacroAssembler* masm);
static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
+#include "deoptimizer.h"
#include "full-codegen.h"
#include "gdb-jit.h"
#include "hydrogen.h"
void Compiler::RecompileParallel(Handle<JSFunction> closure) {
- if (closure->IsInRecompileQueue()) return;
ASSERT(closure->IsMarkedForParallelRecompilation());
Isolate* isolate = closure->GetIsolate();
{
CompilationHandleScope handle_scope(*info);
- if (!FLAG_manual_parallel_recompilation &&
- InstallCodeFromOptimizedCodeMap(*info)) {
+ if (InstallCodeFromOptimizedCodeMap(*info)) {
return;
}
new(info->zone()) OptimizingCompiler(*info);
OptimizingCompiler::Status status = compiler->CreateGraph();
if (status == OptimizingCompiler::SUCCEEDED) {
- isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
+ closure->MarkInRecompileQueue();
shared->code()->set_profiler_ticks(0);
- closure->ReplaceCode(isolate->builtins()->builtin(
- Builtins::kInRecompileQueue));
info.Detach();
+ isolate->optimizing_compiler_thread()->QueueForOptimization(compiler);
} else if (status == OptimizingCompiler::BAILED_OUT) {
isolate->clear_pending_exception();
InstallFullCode(*info);
}
}
- if (isolate->has_pending_exception()) {
- isolate->clear_pending_exception();
+ if (shared->code()->stack_check_patched_for_osr()) {
+ // At this point we have either put the function on the recompilation
+ // queue or aborted optimization. In either case we want to continue
+ // executing the unoptimized code without triggering OSR, so if the
+ // unoptimized code has been patched for OSR, unpatch it.
+ InterruptStub interrupt_stub;
+ Handle<Code> check_code = interrupt_stub.GetCode(isolate);
+ Handle<Code> replacement_code =
+ isolate->builtins()->OnStackReplacement();
+ Deoptimizer::RevertStackCheckCode(shared->code(),
+ *check_code,
+ *replacement_code);
}
+
+ if (isolate->has_pending_exception()) isolate->clear_pending_exception();
}
void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
SmartPointer<CompilationInfo> info(optimizing_compiler->info());
- // Function may have been optimized meanwhile by OSR.
- if (FLAG_use_osr) {
- // Function may have already been optimized meanwhile by OSR.
- if (!info->code().is_null() &&
- info->code()->kind() == Code::OPTIMIZED_FUNCTION) {
- return;
- }
- // OSR may also have caused optimization to be disabled.
- if (info->shared_info()->optimization_disabled()) return;
- }
+ ASSERT(info->closure()->IsMarkedForInstallingRecompiledCode());
Isolate* isolate = info->isolate();
VMState state(isolate, PARALLEL_COMPILER);
info->closure()->context()->native_context()) == -1) {
InsertCodeIntoOptimizedCodeMap(*info);
}
+ if (FLAG_trace_parallel_recompilation) {
+ PrintF(" ** Optimized code for ");
+ info->closure()->PrintName();
+ PrintF(" installed.\n");
+ }
} else {
info->SetCode(Handle<Code>(info->shared_info()->code()));
InstallFullCode(*info);
}
+ // The optimized code is finally replacing the unoptimized code. Reset the
+ // latter's profiler ticks to prevent re-optimizing too soon after a
+ // deoptimization.
+ info->shared_info()->code()->set_profiler_ticks(0);
}
// Iterate over the stack check table and patch every stack check
// call to an unconditional call to the replacement code.
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+ ASSERT(!unoptimized_code->stack_check_patched_for_osr());
Address stack_check_cursor = unoptimized_code->instruction_start() +
unoptimized_code->stack_check_table_offset();
uint32_t table_length = Memory::uint32_at(stack_check_cursor);
replacement_code);
stack_check_cursor += 2 * kIntSize;
}
+ unoptimized_code->set_stack_check_patched_for_osr(true);
}
// Iterate over the stack check table and revert the patched
// stack check calls.
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+ ASSERT(unoptimized_code->stack_check_patched_for_osr());
Address stack_check_cursor = unoptimized_code->instruction_start() +
unoptimized_code->stack_check_table_offset();
uint32_t table_length = Memory::uint32_at(stack_check_cursor);
replacement_code);
stack_check_cursor += 2 * kIntSize;
}
+ unoptimized_code->set_stack_check_patched_for_osr(false);
}
}
-void StackGuard::RequestCodeReadyEvent() {
- ASSERT(FLAG_parallel_recompilation);
- if (ExecutionAccess::TryLock(isolate_)) {
- thread_local_.interrupt_flags_ |= CODE_READY;
- if (thread_local_.postpone_interrupts_nesting_ == 0) {
- thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
- isolate_->heap()->SetStackLimits();
- }
- ExecutionAccess::Unlock(isolate_);
- }
-}
-
-
-bool StackGuard::IsCodeReadyEvent() {
- ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & CODE_READY) != 0;
-}
-
-
bool StackGuard::IsGCRequest() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
stack_guard->Continue(GC_REQUEST);
}
- if (stack_guard->IsCodeReadyEvent()) {
- ASSERT(FLAG_parallel_recompilation);
- if (FLAG_trace_parallel_recompilation) {
- PrintF(" ** CODE_READY event received.\n");
- }
- stack_guard->Continue(CODE_READY);
- }
- if (!stack_guard->IsTerminateExecution() &&
- !FLAG_manual_parallel_recompilation) {
- isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
- }
isolate->counters()->stack_interrupts()->Increment();
isolate->counters()->runtime_profiler_ticks()->Increment();
DEBUGCOMMAND = 1 << 2,
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
- GC_REQUEST = 1 << 5,
- CODE_READY = 1 << 6
+ GC_REQUEST = 1 << 5
};
DEFINE_bool(parallel_recompilation, false,
"optimizing hot functions asynchronously on a separate thread")
DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
-DEFINE_int(parallel_recompilation_queue_length, 2,
+DEFINE_int(parallel_recompilation_queue_length, 3,
"the length of the parallel compilation queue")
DEFINE_int(parallel_recompilation_delay, 0,
"artificial compilation delay in ms")
-DEFINE_bool(manual_parallel_recompilation, false,
- "disable automatic optimization")
-DEFINE_implication(manual_parallel_recompilation, parallel_recompilation)
DEFINE_bool(omit_prototype_checks_for_leaf_maps, true,
"do not emit prototype checks if all prototypes have leaf maps, "
"deoptimize the optimized code if the layout of the maps changes.")
code->set_allow_osr_at_loop_nesting_level(0);
code->set_profiler_ticks(0);
code->set_stack_check_table_offset(table_offset);
+ code->set_stack_check_patched_for_osr(false);
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // May be an empty handle.
#ifdef ENABLE_GDB_JIT_INTERFACE
}
+void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push a copy of the function.
+ __ push(edi);
+ // Push call kind information.
+ __ push(ecx);
+
+ __ push(edi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kInstallRecompiledCode, 1);
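+ // The runtime call returns the code object to tail-call in eax.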
+
+ // Restore call kind information.
+ __ pop(ecx);
+ // Restore function.
+ __ pop(edi);
+
+ // Tear down internal frame.
+ }
+
+ // Do a tail-call of the compiled function.
+ __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+ __ jmp(eax);
+}
+
+
void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
}
+void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve the function.
+ __ push(a1);
+ // Push call kind information.
+ __ push(t1);
+
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(a1);
+ __ CallRuntime(Runtime::kInstallRecompiledCode, 1);
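+ // The runtime call returns the code object to tail-call in v0.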
+ // Calculate the entry point.
+ __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Restore call kind information.
+ __ pop(t1);
+ // Restore saved function.
+ __ pop(a1);
+
+ // Tear down internal frame.
+ }
+
+ // Do a tail-call of the compiled function.
+ __ Jump(t9);
+}
+
+
void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
}
+bool Code::stack_check_patched_for_osr() {
+ ASSERT_EQ(FUNCTION, kind());
+ return StackCheckPatchedForOSRField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
+}
+
+
+void Code::set_stack_check_patched_for_osr(bool value) {
+ ASSERT_EQ(FUNCTION, kind());
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+ int updated = StackCheckPatchedForOSRField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
+}
+
+
CheckType Code::check_type() {
ASSERT(is_call_stub() || is_keyed_call_stub());
byte type = READ_BYTE_FIELD(this, kCheckTypeOffset);
kIsExpressionBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
kIsTopLevelBit)
+
BOOL_GETTER(SharedFunctionInfo,
compiler_hints,
has_only_simple_this_property_assignments,
}
+bool JSFunction::IsMarkedForInstallingRecompiledCode() {
+ return code() == GetIsolate()->builtins()->builtin(
+ Builtins::kInstallRecompiledCode);
+}
+
+
bool JSFunction::IsMarkedForParallelRecompilation() {
- return code() ==
- GetIsolate()->builtins()->builtin(Builtins::kParallelRecompile);
+ return code() == GetIsolate()->builtins()->builtin(
+ Builtins::kParallelRecompile);
}
}
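+// Like set_code(), but skips the write barrier. Only safe for code objects
+// that never require one, e.g. builtins, which live outside new space and
+// are part of the root set.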
+void JSFunction::set_code_no_write_barrier(Code* value) {
+ ASSERT(!HEAP->InNewSpace(value));
+ Address entry = value->entry();
+ WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
+}
+
+
void JSFunction::ReplaceCode(Code* code) {
bool was_optimized = IsOptimized();
bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
ASSERT(is_compiled() && !IsOptimized());
ASSERT(shared()->allows_lazy_compilation() ||
code()->optimizable());
- Builtins* builtins = GetIsolate()->builtins();
- ReplaceCode(builtins->builtin(Builtins::kLazyRecompile));
+ set_code_no_write_barrier(
+ GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile));
+ // No write barrier required, since the builtin is part of the root set.
}
+
void JSFunction::MarkForParallelRecompilation() {
ASSERT(is_compiled() && !IsOptimized());
ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
- Builtins* builtins = GetIsolate()->builtins();
- ReplaceCode(builtins->builtin(Builtins::kParallelRecompile));
+ ASSERT(FLAG_parallel_recompilation);
+ if (FLAG_trace_parallel_recompilation) {
+ PrintF(" ** Marking ");
+ PrintName();
+ PrintF(" for parallel recompilation.\n");
+ }
+ set_code_no_write_barrier(
+ GetIsolate()->builtins()->builtin(Builtins::kParallelRecompile));
+ // No write barrier required, since the builtin is part of the root set.
+}
+
- // Unlike MarkForLazyRecompilation, after queuing a function for
- // recompilation on the compiler thread, we actually tail-call into
- // the full code. We reset the profiler ticks here so that the
- // function doesn't bother the runtime profiler too much.
- shared()->code()->set_profiler_ticks(0);
+void JSFunction::MarkForInstallingRecompiledCode() {
+ ASSERT(is_compiled() && !IsOptimized());
+ ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
+ ASSERT(FLAG_parallel_recompilation);
+ set_code_no_write_barrier(
+ GetIsolate()->builtins()->builtin(Builtins::kInstallRecompiledCode));
+ // No write barrier required, since the builtin is part of the root set.
}
+
+void JSFunction::MarkInRecompileQueue() {
+ ASSERT(is_compiled() && !IsOptimized());
+ ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
+ ASSERT(FLAG_parallel_recompilation);
+ if (FLAG_trace_parallel_recompilation) {
+ PrintF(" ** Queueing ");
+ PrintName();
+ PrintF(" for parallel recompilation.\n");
+ }
+ set_code_no_write_barrier(
+ GetIsolate()->builtins()->builtin(Builtins::kInRecompileQueue));
+ // No write barrier required, since the builtin is part of the root set.
+}
+
+
static bool CompileLazyHelper(CompilationInfo* info,
ClearExceptionFlag flag) {
// Compile the source information to a code object.
inline unsigned stack_check_table_offset();
inline void set_stack_check_table_offset(unsigned offset);
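+ // [stack_check_patched_for_osr]: For FUNCTION kind, tells whether the stack
+ // check calls in this code object have been patched to trigger on-stack
+ // replacement.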
+ inline bool stack_check_patched_for_osr();
+ inline void set_stack_check_patched_for_osr(bool value);
+
// [check type]: For kind CALL_IC, tells how to check if the
// receiver is valid for the given call.
inline CheckType check_type();
// KindSpecificFlags2 layout (FUNCTION)
class StackCheckTableOffsetField: public BitField<int, 0, 31> {};
+ class StackCheckPatchedForOSRField: public BitField<bool, 31, 1> {};
// Signed field cannot be encoded using the BitField class.
static const int kArgumentsCountShift = 17;
// Bit positions in start_position_and_type.
// The source code start position is in the 30 most significant bits of
// the start_position_and_type field.
- static const int kIsExpressionBit = 0;
- static const int kIsTopLevelBit = 1;
+ static const int kIsExpressionBit = 0;
+ static const int kIsTopLevelBit = 1;
static const int kStartPositionShift = 2;
- static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
+ static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
// Bit positions in compiler_hints.
static const int kCodeAgeSize = 3;
// 8.6.2, page 27.
inline Code* code();
inline void set_code(Code* code);
+ inline void set_code_no_write_barrier(Code* code);
inline void ReplaceCode(Code* code);
inline Code* unchecked_code();
// recompiled the next time it is executed.
void MarkForLazyRecompilation();
void MarkForParallelRecompilation();
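+ // Progress markers for parallel recompilation: the function is first put in
+ // the recompile queue, then marked for installing the recompiled code once
+ // the optimizing compiler thread has finished with it.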
+ void MarkForInstallingRecompiledCode();
+ void MarkInRecompileQueue();
// Helpers to compile this function. Returns true on success, false on
// failure (e.g., stack overflow during compilation).
// recompilation.
inline bool IsMarkedForLazyRecompilation();
inline bool IsMarkedForParallelRecompilation();
+ inline bool IsMarkedForInstallingRecompiledCode();
// Tells whether or not the function is on the parallel
// recompilation queue.
CompileNext();
- if (!FLAG_manual_parallel_recompilation) {
- isolate_->stack_guard()->RequestCodeReadyEvent();
- } else {
- // In manual mode, do not trigger a code ready event.
- // Instead, wait for the optimized functions to be installed manually.
- output_queue_semaphore_->Signal();
- }
-
if (FLAG_trace_parallel_recompilation) {
time_spent_compiling_ += OS::Ticks() - compiling_start;
}
input_queue_.Dequeue(&optimizing_compiler);
Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
- // Function may have been optimized meanwhile by OSR.
- if (FLAG_use_osr &&
- optimizing_compiler->info()->closure()->IsOptimized()) {
- return;
- }
+ ASSERT(optimizing_compiler->info()->closure()->IsInRecompileQueue());
OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
ASSERT(status != OptimizingCompiler::FAILED);
USE(status);
output_queue_.Enqueue(optimizing_compiler);
+
+ // The execution thread can call InstallOptimizedFunctions() at any time,
+ // including right now, after the function has been enqueued for install
+ // but before it has been marked for install. To avoid this race, functions
+ // that are enqueued but not yet marked for install are not processed by
+ // InstallOptimizedFunctions().
+
+ ASSERT(optimizing_compiler->info()->closure()->IsInRecompileQueue());
+ // Mark the function so that the execution thread will generate and install
+ // the optimized code. We assume this write to be atomic.
+ optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
}
void OptimizingCompilerThread::Stop() {
+ ASSERT(!IsOptimizerThread());
Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
input_queue_semaphore_->Signal();
stop_semaphore_->Wait();
void OptimizingCompilerThread::InstallOptimizedFunctions() {
+ ASSERT(!IsOptimizerThread());
HandleScope handle_scope(isolate_);
int functions_installed = 0;
while (!output_queue_.IsEmpty()) {
- if (FLAG_manual_parallel_recompilation) {
- output_queue_semaphore_->Wait();
+ OptimizingCompiler* compiler = *output_queue_.Peek();
+
+ if (compiler->info()->closure()->IsInRecompileQueue()) {
+ // A function may be enqueued for install, but not yet marked as such.
+ // Stop processing the output queue here and try again on the next call
+ // to avoid a race condition.
+ break;
}
- OptimizingCompiler* compiler = NULL;
output_queue_.Dequeue(&compiler);
+
+#ifdef DEBUG
+ // Create a new closure handle, since the deferred handle is about to die.
+ Handle<JSFunction> closure(*compiler->info()->closure());
+#endif // DEBUG
+
Compiler::InstallOptimizedCode(compiler);
+ // Assert that the marker builtin has been replaced by actual code.
+ ASSERT(!closure->IsInRecompileQueue());
functions_installed++;
}
- if (FLAG_trace_parallel_recompilation && functions_installed != 0) {
- PrintF(" ** Installed %d function(s).\n", functions_installed);
- }
-}
-
-
-Handle<SharedFunctionInfo>
- OptimizingCompilerThread::InstallNextOptimizedFunction() {
- ASSERT(FLAG_manual_parallel_recompilation ||
- FLAG_parallel_recompilation_delay != 0);
- output_queue_semaphore_->Wait();
- OptimizingCompiler* compiler = NULL;
- output_queue_.Dequeue(&compiler);
- // Copy a handle from deferred handle scope to the normal handle scope.
- Handle<SharedFunctionInfo> shared(*compiler->info()->shared_info());
- Compiler::InstallOptimizedCode(compiler);
- return shared;
}
void OptimizingCompilerThread::QueueForOptimization(
OptimizingCompiler* optimizing_compiler) {
ASSERT(IsQueueAvailable());
+ ASSERT(!IsOptimizerThread());
Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
input_queue_.Enqueue(optimizing_compiler);
input_queue_semaphore_->Signal();
}
+
#ifdef DEBUG
bool OptimizingCompilerThread::IsOptimizerThread() {
if (!FLAG_parallel_recompilation) return false;
isolate_(isolate),
stop_semaphore_(OS::CreateSemaphore(0)),
input_queue_semaphore_(OS::CreateSemaphore(0)),
- output_queue_semaphore_(OS::CreateSemaphore(0)),
time_spent_compiling_(0),
time_spent_total_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
void InstallOptimizedFunctions();
- // Wait for the next optimized function and install it.
- Handle<SharedFunctionInfo> InstallNextOptimizedFunction();
-
inline bool IsQueueAvailable() {
// We don't need a barrier since we have a data dependency right
// after.
#endif
~OptimizingCompilerThread() {
- delete output_queue_semaphore_; // Only used for manual mode.
delete input_queue_semaphore_;
delete stop_semaphore_;
}
void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
ASSERT(function->IsOptimizable());
- // If we are in manual mode, don't auto-optimize anything.
- if (FLAG_manual_parallel_recompilation) return;
if (FLAG_trace_opt) {
PrintF("[marking ");
}
if (FLAG_parallel_recompilation) {
+ ASSERT(!function->IsMarkedForInstallingRecompiledCode());
+ ASSERT(!function->IsInRecompileQueue());
function->MarkForParallelRecompilation();
} else {
// The next call to the function will trigger optimization.
// See AlwaysFullCompiler (in compiler.cc) comment on why we need
// Debug::has_break_points().
ASSERT(function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForParallelRecompilation());
+ function->IsMarkedForParallelRecompilation() ||
+ function->IsOptimized());
if (!FLAG_use_osr ||
isolate_->DebuggerHasBreakPoints() ||
function->IsBuiltin()) {
void RuntimeProfiler::OptimizeNow() {
HandleScope scope(isolate_);
+ if (FLAG_parallel_recompilation) {
+ // Take this opportunity to process the optimizing compiler thread's
+ // output queue so that it does not unnecessarily keep objects alive.
+ isolate_->optimizing_compiler_thread()->InstallOptimizedFunctions();
+ }
+
// Run through the JavaScript frames and collect them. If we already
// have a sample of the function, we mark it for optimizations
// (eagerly or lazily).
Code* shared_code = shared->code();
if (shared_code->kind() != Code::FUNCTION) continue;
-
- if (function->IsMarkedForLazyRecompilation() ||
- function->IsMarkedForParallelRecompilation()) {
+ if (function->IsInRecompileQueue()) continue;
+
+ // Attempt OSR if we are still running unoptimized code even though the
+ // function has long been marked for optimization or has even already been
+ // optimized.
+ if (!frame->is_optimized() &&
+ (function->IsMarkedForLazyRecompilation() ||
+ function->IsMarkedForParallelRecompilation() ||
+ function->IsOptimized())) {
int nesting = shared_code->allow_osr_at_loop_nesting_level();
if (nesting == 0) AttemptOnStackReplacement(function);
int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) {
HandleScope handle_scope(isolate);
- Handle<JSFunction> function = args.at<JSFunction>(0);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
if (!AllowOptimization(isolate, function)) {
function->ReplaceCode(function->shared()->code());
return function->code();
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ForceParallelRecompile) {
- HandleScope handle_scope(isolate);
- if (!V8::UseCrankshaft()) return isolate->heap()->undefined_value();
- ASSERT(FLAG_parallel_recompilation && FLAG_manual_parallel_recompilation);
- if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
- return isolate->Throw(*isolate->factory()->InternalizeOneByteString(
- STATIC_ASCII_VECTOR("Recompile queue is full.")));
- }
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- fun->ReplaceCode(isolate->builtins()->builtin(Builtins::kParallelRecompile));
- Compiler::RecompileParallel(fun);
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_InstallRecompiledCode) {
HandleScope handle_scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
if (!V8::UseCrankshaft()) return isolate->heap()->undefined_value();
- ASSERT(FLAG_parallel_recompilation && FLAG_manual_parallel_recompilation);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ ASSERT(FLAG_parallel_recompilation);
OptimizingCompilerThread* opt_thread = isolate->optimizing_compiler_thread();
- Handle<SharedFunctionInfo> shared(fun->shared());
- while (*opt_thread->InstallNextOptimizedFunction() != *shared) { }
- return isolate->heap()->undefined_value();
+ opt_thread->InstallOptimizedFunctions();
+ return function->code();
}
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WaitUntilOptimized) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ if (FLAG_parallel_recompilation) {
+ if (V8::UseCrankshaft() && function->IsOptimizable()) {
+ while (!function->IsOptimized()) OS::Sleep(50);
+ }
+ }
+ return isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
F(LazyCompile, 1, 1) \
F(LazyRecompile, 1, 1) \
F(ParallelRecompile, 1, 1) \
- F(ForceParallelRecompile, 1, 1) \
F(InstallRecompiledCode, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
F(NotifyStubFailure, 0, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
F(RunningInSimulator, 0, 1) \
F(OptimizeFunctionOnNextCall, -1, 1) \
+ F(WaitUntilOptimized, 1, 1) \
F(GetOptimizationStatus, 1, 1) \
F(GetOptimizationCount, 1, 1) \
F(CompileForOnStackReplacement, 1, 1) \
}
+void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Push a copy of the function onto the stack.
+ __ push(rdi);
+ // Push call kind information.
+ __ push(rcx);
+
+ __ push(rdi); // Function is also the parameter to the runtime call.
+ __ CallRuntime(Runtime::kInstallRecompiledCode, 1);
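+ // The runtime call returns the code object to tail-call in rax.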
+
+ // Restore call kind information.
+ __ pop(rcx);
+ // Restore function.
+ __ pop(rdi);
+
+ // Tear down internal frame.
+ }
+
+ // Do a tail-call of the compiled function.
+ __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+ __ jmp(rax);
+}
+
+
void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
-// Flags: --parallel-recompilation --manual-parallel-recompilation
+// Flags: --allow-natives-syntax --parallel-recompilation
function f(foo) { return foo.bar(); }
assertEquals(1, f(o));
assertEquals(1, f(o));
-%ForceParallelRecompile(f);
+%OptimizeFunctionOnNextCall(f, "parallel");
+assertEquals(1, f(o));
// Change the prototype chain during optimization.
o.__proto__.__proto__ = { bar: function() { return 2; } };
-%InstallRecompiledCode(f);
+
+%WaitUntilOptimized(f);
assertEquals(2, f(o));
"LazyCompile": true,
"LazyRecompile": true,
"ParallelRecompile": true,
+ "InstallRecompiledCode": true,
"NotifyDeoptimized": true,
"NotifyStubFailure": true,
"NotifyOSR": true,
"_GetCachedArrayIndex": true,
"_OneByteSeqStringSetChar": true,
"_TwoByteSeqStringSetChar": true,
-
- // Only for debugging parallel recompilation.
- "InstallRecompiledCode": true,
- "ForceParallelRecompile": true
};
var currentlyUncallable = {
"LazyCompile": true,
"LazyRecompile": true,
"ParallelRecompile": true,
+ "InstallRecompiledCode": true,
"NotifyDeoptimized": true,
"NotifyOSR": true,
"CreateObjectLiteralBoilerplate": true,
"_GetCachedArrayIndex": true,
"_OneByteSeqStringSetChar": true,
"_TwoByteSeqStringSetChar": true,
-
- // Only for debugging parallel recompilation.
- "InstallRecompiledCode": true,
- "ForceParallelRecompile": true
};
var currentlyUncallable = {
"LazyCompile": true,
"LazyRecompile": true,
"ParallelRecompile": true,
+ "InstallRecompiledCode": true,
"NotifyDeoptimized": true,
"NotifyOSR": true,
"CreateObjectLiteralBoilerplate": true,
"_GetCachedArrayIndex": true,
"_OneByteSeqStringSetChar": true,
"_TwoByteSeqStringSetChar": true,
-
- // Only for debugging parallel recompilation.
- "InstallRecompiledCode": true,
- "ForceParallelRecompile": true
};
var currentlyUncallable = {
"LazyCompile": true,
"LazyRecompile": true,
"ParallelRecompile": true,
+ "InstallRecompiledCode": true,
"NotifyDeoptimized": true,
"NotifyOSR": true,
"CreateObjectLiteralBoilerplate": true,
"_GetCachedArrayIndex": true,
"_OneByteSeqStringSetChar": true,
"_TwoByteSeqStringSetChar": true,
-
- // Only for debugging parallel recompilation.
- "InstallRecompiledCode": true,
- "ForceParallelRecompile": true
};
var currentlyUncallable = {
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --expose-gc
-// Flags: --parallel-recompilation --manual-parallel-recompilation
-
-function assertOptimized(fun) {
- // This assertion takes --always-opt and --nocrankshaft flags into account.
- assertTrue(%GetOptimizationStatus(fun) != 2);
-}
+// Flags: --allow-natives-syntax --expose-gc --parallel-recompilation
function assertUnoptimized(fun) {
assertTrue(%GetOptimizationStatus(fun) != 1);
}
f(g(1));
-f(g(2));
assertUnoptimized(f);
assertUnoptimized(g);
-%ForceParallelRecompile(f);
-%ForceParallelRecompile(g);
-assertUnoptimized(f);
-assertUnoptimized(g);
-
-var sum = 0;
-for (var i = 0; i < 10000; i++) sum += f(i) + g(i);
-gc();
+%OptimizeFunctionOnNextCall(f, "parallel");
+%OptimizeFunctionOnNextCall(g, "parallel");
+f(g(2));
-assertEquals(95274, sum);
assertUnoptimized(f);
assertUnoptimized(g);
-%InstallRecompiledCode(f);
-assertOptimized(f);
-assertUnoptimized(g);
-
-%InstallRecompiledCode(g);
-assertOptimized(g);
+%WaitUntilOptimized(f);
+%WaitUntilOptimized(g);
f();
f();
-%OptimizeFunctionOnNextCall(g, "parallel");
%OptimizeFunctionOnNextCall(f);
+%OptimizeFunctionOnNextCall(g, "parallel");
f(0); // g() is disabled for optimization on inlining attempt.
-g(); // Attempt to optimize g() should not run into any assertion.
-
+// Attempting to optimize g() should not run into any assertion.
+%WaitUntilOptimized(g);