From 32ceb917356df530cf27423e66cee973fb708569 Mon Sep 17 00:00:00 2001
From: "yangguo@chromium.org"
Date: Thu, 19 Sep 2013 09:08:08 +0000
Subject: [PATCH] Refactor back edge table related code into a new class.

This is mostly moving and renaming, except for the BackEdgeTableIterator.
Motivation is that the back edges in unoptimized code have nothing to do
with the deoptimizer.

R=titzer@chromium.org
BUG=

Review URL: https://codereview.chromium.org/23526069

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16815 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/arm/deoptimizer-arm.cc    |  94 ----------------------
 src/arm/full-codegen-arm.cc   |  95 ++++++++++++++++++++++
 src/deoptimizer.cc            |  79 -------------------
 src/deoptimizer.h             |  38 ---------
 src/full-codegen.cc           |  73 +++++++++++++++++
 src/full-codegen.h            | 144 ++++++++++++++++++++--------------
 src/ia32/deoptimizer-ia32.cc  |  81 -------------------
 src/ia32/full-codegen-ia32.cc |  82 +++++++++++++++++++
 src/mips/deoptimizer-mips.cc  |  82 -------------------
 src/mips/full-codegen-mips.cc |  83 ++++++++++++++++++++
 src/objects.cc                |  19 +++--
 src/runtime-profiler.cc       |   2 +-
 src/runtime.cc                |   7 +-
 src/x64/deoptimizer-x64.cc    |  81 -------------------
 src/x64/full-codegen-x64.cc   |  82 +++++++++++++++++++
 15 files changed, 513 insertions(+), 529 deletions(-)

diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 3c57b6439..9f8da50f9 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -81,100 +81,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
 }
 
 
-static const int32_t kBranchBeforeInterrupt = 0x5a000004;
-
-// The back edge bookkeeping code matches the pattern:
-//
-//  <decrement profiling counter>
-//  2a 00 00 01       bpl ok
-//  e5 9f c? ??       ldr ip, [pc, <interrupt stub address offset>]
-//  e1 2f ff 3c       blx ip
-//  ok-label
-//
-// We patch the code to the following form:
-//
-//  <decrement profiling counter>
-//  e1 a0 00 00       mov r0, r0 (NOP)
-//  e5 9f c? ??       ldr ip, [pc, <on-stack replacement address offset>]
-//  e1 2f ff 3c       blx ip
-//  ok-label
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
-                                       Address pc_after,
-                                       Code* replacement_code) {
-  static const int kInstrSize = Assembler::kInstrSize;
-  // Turn the jump into nops.
-  CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
-  patcher.masm()->nop();
-  // Replace the call address.
-  uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
-      2 * kInstrSize) & 0xfff;
-  Address interrupt_address_pointer = pc_after + interrupt_address_offset;
-  Memory::uint32_at(interrupt_address_pointer) =
-      reinterpret_cast<uint32_t>(replacement_code->entry());
-
-  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
-                                        Address pc_after,
-                                        Code* interrupt_code) {
-  static const int kInstrSize = Assembler::kInstrSize;
-  // Restore the original jump.
-  CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
-  patcher.masm()->b(4 * kInstrSize, pl);  // ok-label is 4 instructions later.
-  ASSERT_EQ(kBranchBeforeInterrupt,
-            Memory::int32_at(pc_after - 3 * kInstrSize));
-  // Restore the original call address.
-  uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
-      2 * kInstrSize) & 0xfff;
-  Address interrupt_address_pointer = pc_after + interrupt_address_offset;
-  Memory::uint32_at(interrupt_address_pointer) =
-      reinterpret_cast<uint32_t>(interrupt_code->entry());
-
-  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
-    Isolate* isolate,
-    Code* unoptimized_code,
-    Address pc_after) {
-  static const int kInstrSize = Assembler::kInstrSize;
-  ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
-
-  uint32_t interrupt_address_offset =
-      Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
-  Address interrupt_address_pointer = pc_after + interrupt_address_offset;
-
-  if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
-    ASSERT(Assembler::IsLdrPcImmediateOffset(
-        Assembler::instr_at(pc_after - 2 * kInstrSize)));
-    Code* osr_builtin =
-        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-    ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
-           Memory::uint32_at(interrupt_address_pointer));
-    return PATCHED_FOR_OSR;
-  } else {
-    // Get the interrupt stub code object to match against from cache.
-    Code* interrupt_builtin =
-        isolate->builtins()->builtin(Builtins::kInterruptCheck);
-    ASSERT(Assembler::IsLdrPcImmediateOffset(
-        Assembler::instr_at(pc_after - 2 * kInstrSize)));
-    ASSERT_EQ(kBranchBeforeInterrupt,
-              Memory::int32_at(pc_after - 3 * kInstrSize));
-    ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
-           Memory::uint32_at(interrupt_address_pointer));
-    return NOT_PATCHED;
-  }
-}
-#endif  // DEBUG
-
-
 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
   // Set the register values. The values are not important as there are no
   // callee saved registers in JavaScript frames, so all registers are
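An aside on the magic number: kBranchBeforeInterrupt, which moves verbatim into full-codegen-arm.cc below, is simply the A32 encoding of the "bpl ok" branch that RevertAt re-emits and that GetBackEdgeState matches against. A minimal, self-contained decode of 0x5a000004 using the standard ARM branch field layout (illustrative only, not V8 code):

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kBranchBeforeInterrupt = 0x5a000004;

  // A32 branch layout: cond[31:28] | 101 | L[24] | imm24[23:0].
  uint32_t cond  = kBranchBeforeInterrupt >> 28;          // 0b0101 == pl
  uint32_t op    = (kBranchBeforeInterrupt >> 25) & 0x7;  // 0b101  == B/BL
  uint32_t link  = (kBranchBeforeInterrupt >> 24) & 0x1;  // 0 == B, no link
  int32_t  imm24 = kBranchBeforeInterrupt & 0x00ffffff;   // 4

  assert(cond == 0x5 && op == 0x5 && link == 0);

  // Hardware target is PC + 8 (pipeline) + (imm24 << 2): a short forward
  // branch over the interrupt call sequence to the ok-label above.
  int32_t byte_offset = (imm24 << 2) + 8;
  printf("bpl forward by %d bytes (%d instructions)\n",
         byte_offset, byte_offset / 4);
  return 0;
}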
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 751854591..195fc8c5b 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -4892,6 +4892,101 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
 #undef __
 
 
+
+static const int32_t kBranchBeforeInterrupt = 0x5a000004;
+
+// The back edge bookkeeping code matches the pattern:
+//
+//  <decrement profiling counter>
+//  2a 00 00 01       bpl ok
+//  e5 9f c? ??       ldr ip, [pc, <interrupt stub address offset>]
+//  e1 2f ff 3c       blx ip
+//  ok-label
+//
+// We patch the code to the following form:
+//
+//  <decrement profiling counter>
+//  e1 a0 00 00       mov r0, r0 (NOP)
+//  e5 9f c? ??       ldr ip, [pc, <on-stack replacement address offset>]
+//  e1 2f ff 3c       blx ip
+//  ok-label
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+                            Address pc_after,
+                            Code* replacement_code) {
+  static const int kInstrSize = Assembler::kInstrSize;
+  // Turn the jump into nops.
+  CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
+  patcher.masm()->nop();
+  // Replace the call address.
+  uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
+      2 * kInstrSize) & 0xfff;
+  Address interrupt_address_pointer = pc_after + interrupt_address_offset;
+  Memory::uint32_at(interrupt_address_pointer) =
+      reinterpret_cast<uint32_t>(replacement_code->entry());
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
+}
+
+
+void BackEdgeTable::RevertAt(Code* unoptimized_code,
+                             Address pc_after,
+                             Code* interrupt_code) {
+  static const int kInstrSize = Assembler::kInstrSize;
+  // Restore the original jump.
+  CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
+  patcher.masm()->b(4 * kInstrSize, pl);  // ok-label is 4 instructions later.
+  ASSERT_EQ(kBranchBeforeInterrupt,
+            Memory::int32_at(pc_after - 3 * kInstrSize));
+  // Restore the original call address.
+  uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
+      2 * kInstrSize) & 0xfff;
+  Address interrupt_address_pointer = pc_after + interrupt_address_offset;
+  Memory::uint32_at(interrupt_address_pointer) =
+      reinterpret_cast<uint32_t>(interrupt_code->entry());
+
+  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code);
+}
+
+
+#ifdef DEBUG
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+    Isolate* isolate,
+    Code* unoptimized_code,
+    Address pc_after) {
+  static const int kInstrSize = Assembler::kInstrSize;
+  ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
+
+  uint32_t interrupt_address_offset =
+      Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
+  Address interrupt_address_pointer = pc_after + interrupt_address_offset;
+
+  if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
+    ASSERT(Assembler::IsLdrPcImmediateOffset(
+        Assembler::instr_at(pc_after - 2 * kInstrSize)));
+    Code* osr_builtin =
+        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+    ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
+           Memory::uint32_at(interrupt_address_pointer));
+    return ON_STACK_REPLACEMENT;
+  } else {
+    // Get the interrupt stub code object to match against from cache.
+    Code* interrupt_builtin =
+        isolate->builtins()->builtin(Builtins::kInterruptCheck);
+    ASSERT(Assembler::IsLdrPcImmediateOffset(
+        Assembler::instr_at(pc_after - 2 * kInstrSize)));
+    ASSERT_EQ(kBranchBeforeInterrupt,
+              Memory::int32_at(pc_after - 3 * kInstrSize));
+    ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
+           Memory::uint32_at(interrupt_address_pointer));
+    return INTERRUPT;
+  }
+}
+#endif  // DEBUG
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 0c9760e0f..9a1bb9d8e 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -2338,85 +2338,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
 }
 
 
-void Deoptimizer::PatchInterruptCode(Isolate* isolate,
-                                     Code* unoptimized) {
-  DisallowHeapAllocation no_gc;
-  Code* replacement_code =
-      isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-
-  // Iterate over the back edge table and patch every interrupt
-  // call to an unconditional call to the replacement code.
-  int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
-
-  for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
-       !back_edges.Done();
-       back_edges.Next()) {
-    if (static_cast<int>(back_edges.loop_depth()) == loop_nesting_level) {
-      ASSERT_EQ(NOT_PATCHED, GetInterruptPatchState(isolate,
-                                                    unoptimized,
-                                                    back_edges.pc()));
-      PatchInterruptCodeAt(unoptimized,
-                           back_edges.pc(),
-                           replacement_code);
-    }
-  }
-
-  unoptimized->set_back_edges_patched_for_osr(true);
-  ASSERT(Deoptimizer::VerifyInterruptCode(
-      isolate, unoptimized, loop_nesting_level));
-}
-
-
-void Deoptimizer::RevertInterruptCode(Isolate* isolate,
-                                      Code* unoptimized) {
-  DisallowHeapAllocation no_gc;
-  Code* interrupt_code =
-      isolate->builtins()->builtin(Builtins::kInterruptCheck);
-
-  // Iterate over the back edge table and revert the patched interrupt calls.
-  ASSERT(unoptimized->back_edges_patched_for_osr());
-  int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
-
-  for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
-       !back_edges.Done();
-       back_edges.Next()) {
-    if (static_cast<int>(back_edges.loop_depth()) <= loop_nesting_level) {
-      ASSERT_EQ(PATCHED_FOR_OSR, GetInterruptPatchState(isolate,
-                                                        unoptimized,
-                                                        back_edges.pc()));
-      RevertInterruptCodeAt(unoptimized, back_edges.pc(), interrupt_code);
-    }
-  }
-
-  unoptimized->set_back_edges_patched_for_osr(false);
-  unoptimized->set_allow_osr_at_loop_nesting_level(0);
-  // Assert that none of the back edges are patched anymore.
-  ASSERT(Deoptimizer::VerifyInterruptCode(isolate, unoptimized, -1));
-}
-
-
-#ifdef DEBUG
-bool Deoptimizer::VerifyInterruptCode(Isolate* isolate,
-                                      Code* unoptimized,
-                                      int loop_nesting_level) {
-  DisallowHeapAllocation no_gc;
-  for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
-       !back_edges.Done();
-       back_edges.Next()) {
-    uint32_t loop_depth = back_edges.loop_depth();
-    CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
-    // Assert that all back edges for shallower loops (and only those)
-    // have already been patched.
-    CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
-             GetInterruptPatchState(isolate,
-                                    unoptimized,
-                                    back_edges.pc()) != NOT_PATCHED);
-  }
-  return true;
-}
-#endif  // DEBUG
-
-
 unsigned Deoptimizer::ComputeInputFrameSize() const {
   unsigned fixed_size = ComputeFixedSize(function_);
   // The fp-to-sp delta already takes the context and the function
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 7ee5908f7..8c1699384 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -131,11 +131,6 @@ class Deoptimizer : public Malloced {
     DEBUGGER
   };
 
-  enum InterruptPatchState {
-    NOT_PATCHED,
-    PATCHED_FOR_OSR
-  };
-
   static const int kBailoutTypesWithCodeEntry = SOFT + 1;
 
   struct JumpTableEntry {
@@ -213,39 +208,6 @@ class Deoptimizer : public Malloced {
   // The size in bytes of the code required at a lazy deopt patch site.
   static int patch_size();
 
-  // Patch all interrupts with allowed loop depth in the unoptimized code to
-  // unconditionally call replacement_code.
-  static void PatchInterruptCode(Isolate* isolate,
-                                 Code* unoptimized_code);
-
-  // Patch the interrupt at the instruction before pc_after in
-  // the unoptimized code to unconditionally call replacement_code.
-  static void PatchInterruptCodeAt(Code* unoptimized_code,
-                                   Address pc_after,
-                                   Code* replacement_code);
-
-  // Change all patched interrupt calls in the unoptimized code
-  // back to normal interrupts.
-  static void RevertInterruptCode(Isolate* isolate,
-                                  Code* unoptimized_code);
-
-  // Change a patched interrupt call in the unoptimized code
-  // back to a normal interrupt.
-  static void RevertInterruptCodeAt(Code* unoptimized_code,
-                                    Address pc_after,
-                                    Code* interrupt_code);
-
-#ifdef DEBUG
-  static InterruptPatchState GetInterruptPatchState(Isolate* isolate,
-                                                    Code* unoptimized_code,
-                                                    Address pc_after);
-
-  // Verify that all back edges of a certain loop depth are patched.
-  static bool VerifyInterruptCode(Isolate* isolate,
-                                  Code* unoptimized_code,
-                                  int loop_nesting_level);
-#endif  // DEBUG
-
   ~Deoptimizer();
 
   void MaterializeHeapObjects(JavaScriptFrameIterator* it);
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 91a51731a..f1877fbf5 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -1615,6 +1615,79 @@ bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
 }
 
 
+void BackEdgeTable::Patch(Isolate* isolate,
+                          Code* unoptimized) {
+  DisallowHeapAllocation no_gc;
+  Code* replacement_code =
+      isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+
+  // Iterate over the back edge table and patch every interrupt
+  // call to an unconditional call to the replacement code.
+  int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+
+  BackEdgeTable back_edges(unoptimized, &no_gc);
+  for (uint32_t i = 0; i < back_edges.length(); i++) {
+    if (static_cast<int>(back_edges.loop_depth(i)) == loop_nesting_level) {
+      ASSERT_EQ(INTERRUPT, GetBackEdgeState(isolate,
+                                            unoptimized,
+                                            back_edges.pc(i)));
+      PatchAt(unoptimized, back_edges.pc(i), replacement_code);
+    }
+  }
+
+  unoptimized->set_back_edges_patched_for_osr(true);
+  ASSERT(Verify(isolate, unoptimized, loop_nesting_level));
+}
+
+
+void BackEdgeTable::Revert(Isolate* isolate,
+                           Code* unoptimized) {
+  DisallowHeapAllocation no_gc;
+  Code* interrupt_code =
+      isolate->builtins()->builtin(Builtins::kInterruptCheck);
+
+  // Iterate over the back edge table and revert the patched interrupt calls.
+  ASSERT(unoptimized->back_edges_patched_for_osr());
+  int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+
+  BackEdgeTable back_edges(unoptimized, &no_gc);
+  for (uint32_t i = 0; i < back_edges.length(); i++) {
+    if (static_cast<int>(back_edges.loop_depth(i)) <= loop_nesting_level) {
+      ASSERT_EQ(ON_STACK_REPLACEMENT, GetBackEdgeState(isolate,
+                                                       unoptimized,
+                                                       back_edges.pc(i)));
+      RevertAt(unoptimized, back_edges.pc(i), interrupt_code);
+    }
+  }
+
+  unoptimized->set_back_edges_patched_for_osr(false);
+  unoptimized->set_allow_osr_at_loop_nesting_level(0);
+  // Assert that none of the back edges are patched anymore.
+  ASSERT(Verify(isolate, unoptimized, -1));
+}
+
+
+#ifdef DEBUG
+bool BackEdgeTable::Verify(Isolate* isolate,
+                           Code* unoptimized,
+                           int loop_nesting_level) {
+  DisallowHeapAllocation no_gc;
+  BackEdgeTable back_edges(unoptimized, &no_gc);
+  for (uint32_t i = 0; i < back_edges.length(); i++) {
+    uint32_t loop_depth = back_edges.loop_depth(i);
+    CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
+    // Assert that all back edges for shallower loops (and only those)
+    // have already been patched.
+    CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
+             GetBackEdgeState(isolate,
+                              unoptimized,
+                              back_edges.pc(i)) != INTERRUPT);
+  }
+  return true;
+}
+#endif  // DEBUG
+
+
 #undef __
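Note the asymmetry made explicit above: Patch rewrites only back edges at exactly the current allow_osr_at_loop_nesting_level, while Revert and Verify treat every back edge at or below that level as patched. This is consistent because the runtime profiler raises the level one step at a time, patching at each step (see the Runtime_OptimizeFunctionOnNextCall hunk further down). A self-contained toy model of that protocol — just the state machine over a made-up table, not V8 code:

#include <cassert>
#include <vector>

enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT };

struct Entry { int loop_depth; BackEdgeState state; };

// Patch back edges at exactly `level`, as BackEdgeTable::Patch does.
void Patch(std::vector<Entry>& table, int level) {
  for (Entry& e : table)
    if (e.loop_depth == level) {
      assert(e.state == INTERRUPT);
      e.state = ON_STACK_REPLACEMENT;
    }
}

// Revert everything at or below `level`, as BackEdgeTable::Revert does.
void Revert(std::vector<Entry>& table, int level) {
  for (Entry& e : table)
    if (e.loop_depth <= level) {
      assert(e.state == ON_STACK_REPLACEMENT);
      e.state = INTERRUPT;
    }
}

// The invariant checked by BackEdgeTable::Verify: patched iff shallow enough.
void Verify(const std::vector<Entry>& table, int level) {
  for (const Entry& e : table)
    assert((e.loop_depth <= level) == (e.state != INTERRUPT));
}

int main() {
  std::vector<Entry> table = {{1, INTERRUPT}, {2, INTERRUPT}, {3, INTERRUPT}};
  // The profiler escalates the nesting level one step at a time, patching
  // at each step, so every depth <= level ends up patched.
  for (int level = 1; level <= 3; level++) {
    Patch(table, level);
    Verify(table, level);
  }
  Revert(table, 3);   // After OSR (successful or not), everything reverts.
  Verify(table, -1);  // Nothing may remain patched.
  return 0;
}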
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 5580cb3e8..adfa1c147 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -139,65 +139,6 @@ class FullCodeGenerator: public AstVisitor {
 #error Unsupported target architecture.
 #endif
 
-  class BackEdgeTableIterator {
-   public:
-    explicit BackEdgeTableIterator(Code* unoptimized,
-                                   DisallowHeapAllocation* required) {
-      ASSERT(unoptimized->kind() == Code::FUNCTION);
-      instruction_start_ = unoptimized->instruction_start();
-      cursor_ = instruction_start_ + unoptimized->back_edge_table_offset();
-      ASSERT(cursor_ < instruction_start_ + unoptimized->instruction_size());
-      table_length_ = Memory::uint32_at(cursor_);
-      cursor_ += kTableLengthSize;
-      end_ = cursor_ + table_length_ * kEntrySize;
-    }
-
-    bool Done() { return cursor_ >= end_; }
-
-    void Next() {
-      ASSERT(!Done());
-      cursor_ += kEntrySize;
-    }
-
-    BailoutId ast_id() {
-      ASSERT(!Done());
-      return BailoutId(static_cast<int>(
-          Memory::uint32_at(cursor_ + kAstIdOffset)));
-    }
-
-    uint32_t loop_depth() {
-      ASSERT(!Done());
-      return Memory::uint32_at(cursor_ + kLoopDepthOffset);
-    }
-
-    uint32_t pc_offset() {
-      ASSERT(!Done());
-      return Memory::uint32_at(cursor_ + kPcOffsetOffset);
-    }
-
-    Address pc() {
-      ASSERT(!Done());
-      return instruction_start_ + pc_offset();
-    }
-
-    uint32_t table_length() { return table_length_; }
-
-   private:
-    static const int kTableLengthSize = kIntSize;
-    static const int kAstIdOffset = 0 * kIntSize;
-    static const int kPcOffsetOffset = 1 * kIntSize;
-    static const int kLoopDepthOffset = 2 * kIntSize;
-    static const int kEntrySize = 3 * kIntSize;
-
-    Address cursor_;
-    Address end_;
-    Address instruction_start_;
-    uint32_t table_length_;
-
-    DISALLOW_COPY_AND_ASSIGN(BackEdgeTableIterator);
-  };
-
-
  private:
   class Breakable;
   class Iteration;
@@ -940,6 +881,91 @@ class AccessorTable: public TemplateHashMap<Literal,
 };
 
 
+class BackEdgeTable {
+ public:
+  BackEdgeTable(Code* code, DisallowHeapAllocation* required) {
+    ASSERT(code->kind() == Code::FUNCTION);
+    instruction_start_ = code->instruction_start();
+    Address table_address = instruction_start_ + code->back_edge_table_offset();
+    length_ = Memory::uint32_at(table_address);
+    start_ = table_address + kTableLengthSize;
+  }
+
+  uint32_t length() { return length_; }
+
+  BailoutId ast_id(uint32_t index) {
+    return BailoutId(static_cast<int>(
+        Memory::uint32_at(entry_at(index) + kAstIdOffset)));
+  }
+
+  uint32_t loop_depth(uint32_t index) {
+    return Memory::uint32_at(entry_at(index) + kLoopDepthOffset);
+  }
+
+  uint32_t pc_offset(uint32_t index) {
+    return Memory::uint32_at(entry_at(index) + kPcOffsetOffset);
+  }
+
+  Address pc(uint32_t index) {
+    return instruction_start_ + pc_offset(index);
+  }
+
+  enum BackEdgeState {
+    INTERRUPT,
+    ON_STACK_REPLACEMENT
+  };
+
+  // Patch all interrupts with allowed loop depth in the unoptimized code to
+  // unconditionally call replacement_code.
+  static void Patch(Isolate* isolate,
+                    Code* unoptimized_code);
+
+  // Patch the interrupt at the instruction before pc_after in
+  // the unoptimized code to unconditionally call replacement_code.
+  static void PatchAt(Code* unoptimized_code,
+                      Address pc_after,
+                      Code* replacement_code);
+
+  // Change all patched interrupt calls in the unoptimized code
+  // back to normal interrupts.
+  static void Revert(Isolate* isolate,
+                     Code* unoptimized_code);
+
+  // Change a patched interrupt call in the unoptimized code
+  // back to a normal interrupt.
+  static void RevertAt(Code* unoptimized_code,
+                       Address pc_after,
+                       Code* interrupt_code);
+
+#ifdef DEBUG
+  static BackEdgeState GetBackEdgeState(Isolate* isolate,
+                                        Code* unoptimized_code,
+                                        Address pc_after);
+
+  // Verify that all back edges of a certain loop depth are patched.
+  static bool Verify(Isolate* isolate,
+                     Code* unoptimized_code,
+                     int loop_nesting_level);
+#endif  // DEBUG
+
+ private:
+  Address entry_at(uint32_t index) {
+    ASSERT(index < length_);
+    return start_ + index * kEntrySize;
+  }
+
+  static const int kTableLengthSize = kIntSize;
+  static const int kAstIdOffset = 0 * kIntSize;
+  static const int kPcOffsetOffset = 1 * kIntSize;
+  static const int kLoopDepthOffset = 2 * kIntSize;
+  static const int kEntrySize = 3 * kIntSize;
+
+  Address start_;
+  Address instruction_start_;
+  uint32_t length_;
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_FULL_CODEGEN_H_
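The class above is a view over raw bytes that full codegen emits after the instruction stream: a uint32 entry count followed by packed {ast_id, pc_offset, loop_depth} uint32 triples, which is exactly what the kAstIdOffset/kPcOffsetOffset/kLoopDepthOffset constants encode. A self-contained sketch of that layout and of the entry_at() arithmetic, over a synthetic buffer instead of a real Code object:

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

static const int kIntSize = 4;
static const int kTableLengthSize = kIntSize;
static const int kAstIdOffset = 0 * kIntSize;
static const int kPcOffsetOffset = 1 * kIntSize;
static const int kLoopDepthOffset = 2 * kIntSize;
static const int kEntrySize = 3 * kIntSize;

int main() {
  // Synthetic table with two back edges, as full codegen would emit it.
  const uint32_t raw[] = {2,           // length
                          1, 64, 1,    // {ast_id, pc_offset, loop_depth}
                          7, 128, 2};
  const uint8_t* table_address = reinterpret_cast<const uint8_t*>(raw);

  uint32_t length;
  memcpy(&length, table_address, kTableLengthSize);
  const uint8_t* start = table_address + kTableLengthSize;

  for (uint32_t i = 0; i < length; i++) {
    const uint8_t* entry = start + i * kEntrySize;  // entry_at(i)
    uint32_t ast_id, pc_offset, loop_depth;
    memcpy(&ast_id, entry + kAstIdOffset, kIntSize);
    memcpy(&pc_offset, entry + kPcOffsetOffset, kIntSize);
    memcpy(&loop_depth, entry + kLoopDepthOffset, kIntSize);
    printf("%6u  %9u  %10u\n", ast_id, pc_offset, loop_depth);
  }
  assert(length == 2);
  return 0;
}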
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 13a70afe5..649bf9cff 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -177,87 +177,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
 }
 
 
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x11;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-// The back edge bookkeeping code matches the pattern:
-//
-//     sub <profiling_counter>, <delta>
-//     jns ok
-//     call <interrupt stub>
-//   ok:
-//
-// The patched back edge looks like this:
-//
-//     sub <profiling_counter>, <delta>  ;; Not changed
-//     nop
-//     nop
-//     call <on-stack replacement>
-//   ok:
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
-                                       Address pc_after,
-                                       Code* replacement_code) {
-  // Turn the jump into nops.
-  Address call_target_address = pc_after - kIntSize;
-  *(call_target_address - 3) = kNopByteOne;
-  *(call_target_address - 2) = kNopByteTwo;
-  // Replace the call address.
-  Assembler::set_target_address_at(call_target_address,
-                                   replacement_code->entry());
-
-  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
-                                        Address pc_after,
-                                        Code* interrupt_code) {
-  // Restore the original jump.
-  Address call_target_address = pc_after - kIntSize;
-  *(call_target_address - 3) = kJnsInstruction;
-  *(call_target_address - 2) = kJnsOffset;
-  // Restore the original call address.
-  Assembler::set_target_address_at(call_target_address,
-                                   interrupt_code->entry());
-
-  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, call_target_address, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
-    Isolate* isolate,
-    Code* unoptimized_code,
-    Address pc_after) {
-  Address call_target_address = pc_after - kIntSize;
-  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
-  if (*(call_target_address - 3) == kNopByteOne) {
-    ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
-    Code* osr_builtin =
-        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-    ASSERT_EQ(osr_builtin->entry(),
-              Assembler::target_address_at(call_target_address));
-    return PATCHED_FOR_OSR;
-  } else {
-    // Get the interrupt stub code object to match against from cache.
-    Code* interrupt_builtin =
-        isolate->builtins()->builtin(Builtins::kInterruptCheck);
-    ASSERT_EQ(interrupt_builtin->entry(),
-              Assembler::target_address_at(call_target_address));
-    ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
-    ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
-    return NOT_PATCHED;
-  }
-}
-#endif  // DEBUG
-
-
 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
   // Set the register values. The values are not important as there are no
   // callee saved registers in JavaScript frames, so all registers are
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index c9f7f32b6..9a2c3ce72 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -4891,6 +4891,88 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
 #undef __
 
 
+
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x11;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
+// The back edge bookkeeping code matches the pattern:
+//
+//     sub <profiling_counter>, <delta>
+//     jns ok
+//     call <interrupt stub>
+//   ok:
+//
+// The patched back edge looks like this:
+//
+//     sub <profiling_counter>, <delta>  ;; Not changed
+//     nop
+//     nop
+//     call <on-stack replacement>
+//   ok:
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+                            Address pc,
+                            Code* replacement_code) {
+  // Turn the jump into nops.
+  Address call_target_address = pc - kIntSize;
+  *(call_target_address - 3) = kNopByteOne;
+  *(call_target_address - 2) = kNopByteTwo;
+  // Replace the call address.
+  Assembler::set_target_address_at(call_target_address,
+                                   replacement_code->entry());
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, call_target_address, replacement_code);
+}
+
+
+void BackEdgeTable::RevertAt(Code* unoptimized_code,
+                             Address pc,
+                             Code* interrupt_code) {
+  // Restore the original jump.
+  Address call_target_address = pc - kIntSize;
+  *(call_target_address - 3) = kJnsInstruction;
+  *(call_target_address - 2) = kJnsOffset;
+  // Restore the original call address.
+  Assembler::set_target_address_at(call_target_address,
+                                   interrupt_code->entry());
+
+  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, call_target_address, interrupt_code);
+}
+
+
+#ifdef DEBUG
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+    Isolate* isolate,
+    Code* unoptimized_code,
+    Address pc) {
+  Address call_target_address = pc - kIntSize;
+  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+  if (*(call_target_address - 3) == kNopByteOne) {
+    ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+    Code* osr_builtin =
+        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+    ASSERT_EQ(osr_builtin->entry(),
+              Assembler::target_address_at(call_target_address));
+    return ON_STACK_REPLACEMENT;
+  } else {
+    // Get the interrupt stub code object to match against from cache.
+    Code* interrupt_builtin =
+        isolate->builtins()->builtin(Builtins::kInterruptCheck);
+    ASSERT_EQ(interrupt_builtin->entry(),
+              Assembler::target_address_at(call_target_address));
+    ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
+    ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+    return INTERRUPT;
+  }
+}
+#endif  // DEBUG
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
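On ia32 (and x64, below) the patch site is plain byte surgery: the two-byte jns is overwritten with the two-byte nop 66 90, and the 32-bit displacement following the e8 call opcode is retargeted. A self-contained toy of the same byte manipulation on a scratch buffer — illustrative only; the real code goes through Assembler::set_target_address_at, which writes a relative displacement:

#include <cassert>
#include <cstdint>

int main() {
  typedef uint8_t byte;
  const byte kJnsInstruction = 0x79, kJnsOffset = 0x11;
  const byte kCallInstruction = 0xe8;
  const byte kNopByteOne = 0x66, kNopByteTwo = 0x90;
  const int kIntSize = 4;

  // jns ok; call <stub>  -- pc points just past the call instruction.
  byte code[] = {kJnsInstruction, kJnsOffset, kCallInstruction, 0, 0, 0, 0};
  byte* pc = code + sizeof(code);
  byte* call_target_address = pc - kIntSize;  // the call's 32-bit operand

  assert(call_target_address[-1] == kCallInstruction);
  assert(call_target_address[-3] == kJnsInstruction);  // INTERRUPT state

  // PatchAt: turn the jump into nops, so the (retargeted) call always runs.
  call_target_address[-3] = kNopByteOne;
  call_target_address[-2] = kNopByteTwo;
  assert(call_target_address[-3] == kNopByteOne);  // ON_STACK_REPLACEMENT

  // RevertAt: restore the original jns and thus the interrupt check.
  call_target_address[-3] = kJnsInstruction;
  call_target_address[-2] = kJnsOffset;
  assert(call_target_address[-3] == kJnsInstruction);  // INTERRUPT again
  return 0;
}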
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 16f75b863..4426d90b7 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -78,88 +78,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
 }
 
 
-// This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
-// The back edge bookkeeping code matches the pattern:
-//
-// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
-// beq at, zero_reg, ok
-// lui t9, <interrupt stub address> upper
-// ori t9, <interrupt stub address> lower
-// jalr t9
-// nop
-// ok-label ----- pc_after points here
-//
-// We patch the code to the following form:
-//
-// addiu at, zero_reg, 1
-// beq at, zero_reg, ok  ;; Not changed
-// lui t9, <on-stack replacement address> upper
-// ori t9, <on-stack replacement address> lower
-// jalr t9  ;; Not changed
-// nop  ;; Not changed
-// ok-label ----- pc_after points here
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
-                                       Address pc_after,
-                                       Code* replacement_code) {
-  static const int kInstrSize = Assembler::kInstrSize;
-  // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
-  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
-  patcher.masm()->addiu(at, zero_reg, 1);
-  // Replace the stack check address in the load-immediate (lui/ori pair)
-  // with the entry address of the replacement code.
-  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
-                                   replacement_code->entry());
-
-  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
-                                        Address pc_after,
-                                        Code* interrupt_code) {
-  static const int kInstrSize = Assembler::kInstrSize;
-  // Restore the sltu instruction so beq can be taken again.
-  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
-  patcher.masm()->slt(at, a3, zero_reg);
-  // Restore the original call address.
-  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
-                                   interrupt_code->entry());
-
-  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, pc_after - 4 * kInstrSize, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
-    Isolate* isolate,
-    Code* unoptimized_code,
-    Address pc_after) {
-  static const int kInstrSize = Assembler::kInstrSize;
-  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
-  if (Assembler::IsAddImmediate(
-      Assembler::instr_at(pc_after - 6 * kInstrSize))) {
-    Code* osr_builtin =
-        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-    ASSERT(reinterpret_cast<uint32_t>(
-        Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
-        reinterpret_cast<uint32_t>(osr_builtin->entry()));
-    return PATCHED_FOR_OSR;
-  } else {
-    // Get the interrupt stub code object to match against from cache.
-    Code* interrupt_builtin =
-        isolate->builtins()->builtin(Builtins::kInterruptCheck);
-    ASSERT(reinterpret_cast<uint32_t>(
-        Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
-        reinterpret_cast<uint32_t>(interrupt_builtin->entry()));
-    return NOT_PATCHED;
-  }
-}
-#endif  // DEBUG
-
-
 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
   // Set the register values. The values are not important as there are no
   // callee saved registers in JavaScript frames, so all registers are
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index c2dbabd65..853ee0896 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -4924,6 +4924,89 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
 #undef __
 
 
+
+// This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
+// The back edge bookkeeping code matches the pattern:
+//
+// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
+// beq at, zero_reg, ok
+// lui t9, <interrupt stub address> upper
+// ori t9, <interrupt stub address> lower
+// jalr t9
+// nop
+// ok-label ----- pc_after points here
+//
+// We patch the code to the following form:
+//
+// addiu at, zero_reg, 1
+// beq at, zero_reg, ok  ;; Not changed
+// lui t9, <on-stack replacement address> upper
+// ori t9, <on-stack replacement address> lower
+// jalr t9  ;; Not changed
+// nop  ;; Not changed
+// ok-label ----- pc_after points here
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+                            Address pc_after,
+                            Code* replacement_code) {
+  static const int kInstrSize = Assembler::kInstrSize;
+  // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
+  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
+  patcher.masm()->addiu(at, zero_reg, 1);
+  // Replace the stack check address in the load-immediate (lui/ori pair)
+  // with the entry address of the replacement code.
+  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+                                   replacement_code->entry());
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
+}
+
+
+void BackEdgeTable::RevertAt(Code* unoptimized_code,
+                             Address pc_after,
+                             Code* interrupt_code) {
+  static const int kInstrSize = Assembler::kInstrSize;
+  // Restore the sltu instruction so beq can be taken again.
+  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
+  patcher.masm()->slt(at, a3, zero_reg);
+  // Restore the original call address.
+  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+                                   interrupt_code->entry());
+
+  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, pc_after - 4 * kInstrSize, interrupt_code);
+}
+
+
+#ifdef DEBUG
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+    Isolate* isolate,
+    Code* unoptimized_code,
+    Address pc_after) {
+  static const int kInstrSize = Assembler::kInstrSize;
+  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
+  if (Assembler::IsAddImmediate(
+      Assembler::instr_at(pc_after - 6 * kInstrSize))) {
+    Code* osr_builtin =
+        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+    ASSERT(reinterpret_cast<uint32_t>(
+        Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+        reinterpret_cast<uint32_t>(osr_builtin->entry()));
+    return ON_STACK_REPLACEMENT;
+  } else {
+    // Get the interrupt stub code object to match against from cache.
+    Code* interrupt_builtin =
+        isolate->builtins()->builtin(Builtins::kInterruptCheck);
+    ASSERT(reinterpret_cast<uint32_t>(
+        Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+        reinterpret_cast<uint32_t>(interrupt_builtin->entry()));
+    return INTERRUPT;
+  }
+}
+#endif  // DEBUG
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS
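Assembler::set_target_address_at can retarget the MIPS call in place because the 32-bit address is materialized in two immediates: lui loads the upper 16 bits into t9 and ori merges the lower 16. A self-contained sketch of that split and reassembly (I-type field layout per the MIPS32 manual; the sample address is made up):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t target = 0x1234abcd;  // hypothetical stub entry address
  uint32_t upper = target >> 16;       // lui t9, upper
  uint32_t lower = target & 0xffff;    // ori t9, t9, lower

  // MIPS32 I-type: opcode[31:26] rs[25:21] rt[20:16] imm[15:0]; t9 is $25.
  const uint32_t kT9 = 25;
  uint32_t lui = (0x0f << 26) | (kT9 << 16) | upper;
  uint32_t ori = (0x0d << 26) | (kT9 << 21) | (kT9 << 16) | lower;

  // Rewriting the two 16-bit immediates retargets the jalr t9 call; this is
  // the in-place patch that set_target_address_at performs on the lui/ori
  // pair.
  uint32_t reassembled = ((lui & 0xffff) << 16) | (ori & 0xffff);
  assert(reassembled == target);
  return 0;
}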
diff --git a/src/objects.cc b/src/objects.cc
index 7165abe11..a5fe097fd 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -10367,10 +10367,9 @@ void Code::ClearTypeFeedbackCells(Heap* heap) {
 BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
   DisallowHeapAllocation no_gc;
   ASSERT(kind() == FUNCTION);
-  for (FullCodeGenerator::BackEdgeTableIterator it(this, &no_gc);
-       !it.Done();
-       it.Next()) {
-    if (it.pc_offset() == pc_offset) return it.ast_id();
+  BackEdgeTable back_edges(this, &no_gc);
+  for (uint32_t i = 0; i < back_edges.length(); i++) {
+    if (back_edges.pc_offset(i) == pc_offset) return back_edges.ast_id(i);
   }
   return BailoutId::None();
 }
@@ -10838,15 +10837,15 @@ void Code::Disassemble(const char* name, FILE* out) {
   // (due to alignment) the end of the instruction stream.
   if (static_cast<int>(offset) < instruction_size()) {
     DisallowHeapAllocation no_gc;
-    FullCodeGenerator::BackEdgeTableIterator back_edges(this, &no_gc);
+    BackEdgeTable back_edges(this, &no_gc);
 
-    PrintF(out, "Back edges (size = %u)\n", back_edges.table_length());
+    PrintF(out, "Back edges (size = %u)\n", back_edges.length());
     PrintF(out, "ast_id  pc_offset  loop_depth\n");
-    for ( ; !back_edges.Done(); back_edges.Next()) {
-      PrintF(out, "%6d  %9u  %10u\n", back_edges.ast_id().ToInt(),
-                                      back_edges.pc_offset(),
-                                      back_edges.loop_depth());
+    for (uint32_t i = 0; i < back_edges.length(); i++) {
+      PrintF(out, "%6d  %9u  %10u\n", back_edges.ast_id(i).ToInt(),
+                                      back_edges.pc_offset(i),
+                                      back_edges.loop_depth(i));
     }
     PrintF(out, "\n");
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 95dcc4f98..32a85cc89 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -185,7 +185,7 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
     PrintF("]\n");
   }
 
-  Deoptimizer::PatchInterruptCode(isolate_, shared->code());
+  BackEdgeTable::Patch(isolate_, shared->code());
 }
diff --git a/src/runtime.cc b/src/runtime.cc
index e7c4cc906..0e18586db 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -8497,8 +8497,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
   if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) {
     // Start patching from the currently patched loop nesting level.
     int current_level = unoptimized->allow_osr_at_loop_nesting_level();
-    ASSERT(Deoptimizer::VerifyInterruptCode(
-        isolate, unoptimized, current_level));
+    ASSERT(BackEdgeTable::Verify(isolate, unoptimized, current_level));
     for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) {
       unoptimized->set_allow_osr_at_loop_nesting_level(i);
       isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
@@ -8651,8 +8650,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
     result = JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION);
   }
 
-  // Revert the patched interrupt now, regardless of whether OSR succeeds.
-  Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
+  // Revert the patched back edge table, regardless of whether OSR succeeds.
+  BackEdgeTable::Revert(isolate, *unoptimized);
 
   // Check whether we ended up with usable optimized code.
   if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 303b756ca..a5e4583aa 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -82,87 +82,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
 }
 
 
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x1d;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-// The back edge bookkeeping code matches the pattern:
-//
-//     add <profiling_counter>, <-delta>
-//     jns ok
-//     call <interrupt stub>
-//   ok:
-//
-// We will patch away the branch so the code is:
-//
-//     add <profiling_counter>, <-delta>  ;; Not changed
-//     nop
-//     nop
-//     call <on-stack replacement>
-//   ok:
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
-                                       Address pc_after,
-                                       Code* replacement_code) {
-  // Turn the jump into nops.
-  Address call_target_address = pc_after - kIntSize;
-  *(call_target_address - 3) = kNopByteOne;
-  *(call_target_address - 2) = kNopByteTwo;
-  // Replace the call address.
-  Assembler::set_target_address_at(call_target_address,
-                                   replacement_code->entry());
-
-  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
-                                        Address pc_after,
-                                        Code* interrupt_code) {
-  // Restore the original jump.
-  Address call_target_address = pc_after - kIntSize;
-  *(call_target_address - 3) = kJnsInstruction;
-  *(call_target_address - 2) = kJnsOffset;
-  // Restore the original call address.
-  Assembler::set_target_address_at(call_target_address,
-                                   interrupt_code->entry());
-
-  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, call_target_address, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
-    Isolate* isolate,
-    Code* unoptimized_code,
-    Address pc_after) {
-  Address call_target_address = pc_after - kIntSize;
-  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
-  if (*(call_target_address - 3) == kNopByteOne) {
-    ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
-    Code* osr_builtin =
-        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-    ASSERT_EQ(osr_builtin->entry(),
-              Assembler::target_address_at(call_target_address));
-    return PATCHED_FOR_OSR;
-  } else {
-    // Get the interrupt stub code object to match against from cache.
-    Code* interrupt_builtin =
-        isolate->builtins()->builtin(Builtins::kInterruptCheck);
-    ASSERT_EQ(interrupt_builtin->entry(),
-              Assembler::target_address_at(call_target_address));
-    ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
-    ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
-    return NOT_PATCHED;
-  }
-}
-#endif  // DEBUG
-
-
 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
   // Set the register values. The values are not important as there are no
   // callee saved registers in JavaScript frames, so all registers are
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 931a227ed..f9d1ffab0 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -4877,6 +4877,88 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
 #undef __
 
 
+
+static const byte kJnsInstruction = 0x79;
+static const byte kJnsOffset = 0x1d;
+static const byte kCallInstruction = 0xe8;
+static const byte kNopByteOne = 0x66;
+static const byte kNopByteTwo = 0x90;
+
+// The back edge bookkeeping code matches the pattern:
+//
+//     add <profiling_counter>, <-delta>
+//     jns ok
+//     call <interrupt stub>
+//   ok:
+//
+// We will patch away the branch so the code is:
+//
+//     add <profiling_counter>, <-delta>  ;; Not changed
+//     nop
+//     nop
+//     call <on-stack replacement>
+//   ok:
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+                            Address pc_after,
+                            Code* replacement_code) {
+  // Turn the jump into nops.
+  Address call_target_address = pc_after - kIntSize;
+  *(call_target_address - 3) = kNopByteOne;
+  *(call_target_address - 2) = kNopByteTwo;
+  // Replace the call address.
+  Assembler::set_target_address_at(call_target_address,
+                                   replacement_code->entry());
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, call_target_address, replacement_code);
+}
+
+
+void BackEdgeTable::RevertAt(Code* unoptimized_code,
+                             Address pc_after,
+                             Code* interrupt_code) {
+  // Restore the original jump.
+  Address call_target_address = pc_after - kIntSize;
+  *(call_target_address - 3) = kJnsInstruction;
+  *(call_target_address - 2) = kJnsOffset;
+  // Restore the original call address.
+  Assembler::set_target_address_at(call_target_address,
+                                   interrupt_code->entry());
+
+  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, call_target_address, interrupt_code);
+}
+
+
+#ifdef DEBUG
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+    Isolate* isolate,
+    Code* unoptimized_code,
+    Address pc_after) {
+  Address call_target_address = pc_after - kIntSize;
+  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+  if (*(call_target_address - 3) == kNopByteOne) {
+    ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+    Code* osr_builtin =
+        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+    ASSERT_EQ(osr_builtin->entry(),
+              Assembler::target_address_at(call_target_address));
+    return ON_STACK_REPLACEMENT;
+  } else {
+    // Get the interrupt stub code object to match against from cache.
+    Code* interrupt_builtin =
+        isolate->builtins()->builtin(Builtins::kInterruptCheck);
+    ASSERT_EQ(interrupt_builtin->entry(),
+              Assembler::target_address_at(call_target_address));
+    ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
+    ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+    return INTERRUPT;
+  }
+}
+#endif  // DEBUG
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
-- 
2.34.1