MIPS: Less aggressive polling when concurrently compiling for OSR.
author    plind44@gmail.com <plind44@gmail.com@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Wed, 25 Sep 2013 16:27:52 +0000 (16:27 +0000)
committer plind44@gmail.com <plind44@gmail.com@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Wed, 25 Sep 2013 16:27:52 +0000 (16:27 +0000)
Port r16934 (b12e96b)

Original commit message:
Changes include:
- completed concurrent OSR tasks trigger a stack check interrupt.
- polling for completion is now guarded by a stack check.
- circular buffer for completed OSR tasks instead of list.

BUG=
R=plind44@gmail.com

Review URL: https://codereview.chromium.org/24590002

Patch from Balazs Kilvady <kilvadyb@homejinni.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16947 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

src/mips/builtins-mips.cc
src/mips/full-codegen-mips.cc
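
In rough terms, the scheme described in the commit message works like this: the concurrent compiler drops finished OSR code into a small circular buffer and requests a stack-check interrupt, and the generated code only polls that buffer once the stack check actually fires. The following is a minimal, self-contained C++ model of that idea; every name in it (OsrBuffer, Isolate, BackEdge) is invented for illustration and none of it is V8's actual implementation.

// Illustrative model only -- not V8 code.
#include <array>
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <optional>

struct CompiledOsrCode { int function_id; };

// Fixed-size circular buffer for completed OSR tasks (the commit replaces a
// list with a buffer like this); single producer, single consumer.
class OsrBuffer {
 public:
  bool TryEnqueue(CompiledOsrCode code) {
    std::size_t tail = tail_.load(std::memory_order_relaxed);
    std::size_t next = (tail + 1) % kCapacity;
    if (next == head_.load(std::memory_order_acquire)) return false;  // full
    slots_[tail] = code;
    tail_.store(next, std::memory_order_release);
    return true;
  }
  std::optional<CompiledOsrCode> TryDequeue() {
    std::size_t head = head_.load(std::memory_order_relaxed);
    if (head == tail_.load(std::memory_order_acquire)) return std::nullopt;
    CompiledOsrCode code = slots_[head];
    head_.store((head + 1) % kCapacity, std::memory_order_release);
    return code;
  }
 private:
  static constexpr std::size_t kCapacity = 8;
  std::array<CompiledOsrCode, kCapacity> slots_{};
  std::atomic<std::size_t> head_{0}, tail_{0};
};

struct Isolate {
  OsrBuffer osr_buffer;
  // Stands in for lowering the stack limit so the next stack check fires.
  std::atomic<bool> stack_check_requested{false};
};

// What the patched back edge effectively does: it polls the buffer only when
// the stack check has actually fired, instead of on every back edge.
void BackEdge(Isolate* isolate) {
  if (!isolate->stack_check_requested.exchange(false)) return;  // fast path
  while (auto code = isolate->osr_buffer.TryDequeue()) {
    std::printf("install OSR code for function %d\n", code->function_id);
  }
}

int main() {
  Isolate isolate;
  isolate.osr_buffer.TryEnqueue({42});        // compiler thread finishes a job
  isolate.stack_check_requested.store(true);  // ...and requests a stack check
  BackEdge(&isolate);                         // main thread hits a back edge
}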

diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 1b18140..ed7764b 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -966,6 +966,23 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
 }
 
 
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+  // We check the stack limit as an indicator that recompilation might be done.
+  Label ok;
+  __ LoadRoot(at, Heap::kStackLimitRootIndex);
+  __ Branch(&ok, hs, sp, Operand(at));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kStackGuard, 0);
+  }
+  __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+          RelocInfo::CODE_TARGET);
+
+  __ bind(&ok);
+  __ Ret();
+}
+
+
 void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // 1. Make sure we have at least one argument.
   // a0: actual number of arguments
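
For readers less familiar with MIPS assembly, the Generate_OsrAfterStackCheck builtin above boils down to the following control flow. This is a hedged, illustrative rendering; StackGuard and OnStackReplacement below are plain stand-in functions, not the real runtime entry or builtin.

// Illustrative rendering of the builtin's control flow -- not V8 code.
#include <cstdio>

void StackGuard()         { std::puts("Runtime::kStackGuard"); }
void OnStackReplacement() { std::puts("jump to OnStackReplacement builtin"); }

void OsrAfterStackCheck(const char* sp, const char* stack_limit) {
  // Branch(&ok, hs, sp, Operand(at)): sp unsigned-higher-or-same than the
  // stack limit means no interrupt is pending, so just return.
  if (sp >= stack_limit) return;
  // Otherwise run the stack guard (inside an internal frame in the real
  // builtin), which lets the completed concurrent OSR task be processed...
  StackGuard();
  // ...and then continue into the regular on-stack replacement path.
  OnStackReplacement();
}

int main() {
  char stack[16];
  OsrAfterStackCheck(stack + 8, stack);  // sp above the limit: plain return
  OsrAfterStackCheck(stack, stack + 8);  // sp below the limit: OSR path
}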
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index f889470..b9e282f 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -4926,86 +4926,80 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
 #undef __
 
 
-// This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
-// The back edge bookkeeping code matches the pattern:
-//
-// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
-// beq at, zero_reg, ok
-// lui t9, <interrupt stub address> upper
-// ori t9, <interrupt stub address> lower
-// jalr t9
-// nop
-// ok-label ----- pc_after points here
-//
-// We patch the code to the following form:
-//
-// addiu at, zero_reg, 1
-// beq at, zero_reg, ok  ;; Not changed
-// lui t9, <on-stack replacement address> upper
-// ori t9, <on-stack replacement address> lower
-// jalr t9  ;; Not changed
-// nop  ;; Not changed
-// ok-label ----- pc_after points here
-
 void BackEdgeTable::PatchAt(Code* unoptimized_code,
-                            Address pc_after,
+                            Address pc,
+                            BackEdgeState target_state,
                             Code* replacement_code) {
   static const int kInstrSize = Assembler::kInstrSize;
-  // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
-  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
-  patcher.masm()->addiu(at, zero_reg, 1);
+  Address branch_address = pc - 6 * kInstrSize;
+  CodePatcher patcher(branch_address, 1);
+
+  switch (target_state) {
+    case INTERRUPT:
+      // slt at, a3, zero_reg (in case of count based interrupts)
+      // beq at, zero_reg, ok
+      // lui t9, <interrupt stub address> upper
+      // ori t9, <interrupt stub address> lower
+      // jalr t9
+      // nop
+      // ok-label ----- pc_after points here
+      patcher.masm()->slt(at, a3, zero_reg);
+      break;
+    case ON_STACK_REPLACEMENT:
+    case OSR_AFTER_STACK_CHECK:
+      // addiu at, zero_reg, 1
+      // beq at, zero_reg, ok  ;; Not changed
+      // lui t9, <on-stack replacement address> upper
+      // ori t9, <on-stack replacement address> lower
+      // jalr t9  ;; Not changed
+      // nop  ;; Not changed
+      // ok-label ----- pc_after points here
+      patcher.masm()->addiu(at, zero_reg, 1);
+      break;
+  }
+  Address pc_immediate_load_address = pc - 4 * kInstrSize;
   // Replace the stack check address in the load-immediate (lui/ori pair)
   // with the entry address of the replacement code.
-  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
+  Assembler::set_target_address_at(pc_immediate_load_address,
                                    replacement_code->entry());
 
   unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
-}
-
-
-void BackEdgeTable::RevertAt(Code* unoptimized_code,
-                             Address pc_after,
-                             Code* interrupt_code) {
-  static const int kInstrSize = Assembler::kInstrSize;
-  // Restore the sltu instruction so beq can be taken again.
-  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
-  patcher.masm()->slt(at, a3, zero_reg);
-  // Restore the original call address.
-  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
-                                   interrupt_code->entry());
-
-  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, pc_after - 4 * kInstrSize, interrupt_code);
+      unoptimized_code, pc_immediate_load_address, replacement_code);
 }
 
 
-#ifdef DEBUG
 BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
     Isolate* isolate,
     Code* unoptimized_code,
-    Address pc_after) {
+    Address pc) {
   static const int kInstrSize = Assembler::kInstrSize;
-  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
-  if (Assembler::IsAddImmediate(
-      Assembler::instr_at(pc_after - 6 * kInstrSize))) {
-    Code* osr_builtin =
-        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-    ASSERT(reinterpret_cast<uint32_t>(
-        Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
-        reinterpret_cast<uint32_t>(osr_builtin->entry()));
-    return ON_STACK_REPLACEMENT;
-  } else {
-    // Get the interrupt stub code object to match against from cache.
-    Code* interrupt_builtin =
-        isolate->builtins()->builtin(Builtins::kInterruptCheck);
+  Address branch_address = pc - 6 * kInstrSize;
+  Address pc_immediate_load_address = pc - 4 * kInstrSize;
+
+  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc - 5 * kInstrSize)));
+  if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) {
     ASSERT(reinterpret_cast<uint32_t>(
-        Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
-        reinterpret_cast<uint32_t>(interrupt_builtin->entry()));
+        Assembler::target_address_at(pc_immediate_load_address)) ==
+           reinterpret_cast<uint32_t>(
+               isolate->builtins()->InterruptCheck()->entry()));
     return INTERRUPT;
   }
+
+  ASSERT(Assembler::IsAddImmediate(Assembler::instr_at(branch_address)));
+
+  if (reinterpret_cast<uint32_t>(
+      Assembler::target_address_at(pc_immediate_load_address)) ==
+          reinterpret_cast<uint32_t>(
+              isolate->builtins()->OnStackReplacement()->entry())) {
+    return ON_STACK_REPLACEMENT;
+  }
+
+  ASSERT(reinterpret_cast<uint32_t>(
+      Assembler::target_address_at(pc_immediate_load_address)) ==
+         reinterpret_cast<uint32_t>(
+             isolate->builtins()->OsrAfterStackCheck()->entry()));
+  return OSR_AFTER_STACK_CHECK;
 }
-#endif  // DEBUG
 
 
 } }  // namespace v8::internal
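
The three back-edge states distinguished by the new GetBackEdgeState can be condensed to the sketch below. This is illustrative only; the assembler and builtins lookups are replaced by plain parameters.

// Sketch of just the classification logic -- not V8 code.
enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT, OSR_AFTER_STACK_CHECK };

BackEdgeState Classify(bool branch_patched_to_addiu,  // addiu at, zero_reg, 1 present?
                       const void* call_target,       // target of the lui/ori pair
                       const void* osr_entry,
                       const void* osr_after_stack_check_entry) {
  if (!branch_patched_to_addiu) return INTERRUPT;      // original slt still in place
  if (call_target == osr_entry) return ON_STACK_REPLACEMENT;
  // The only remaining legal target is the OsrAfterStackCheck builtin.
  (void)osr_after_stack_check_entry;
  return OSR_AFTER_STACK_CHECK;
}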