From 74ef753067e14f60e576d410fda3a1b60fc14dd4 Mon Sep 17 00:00:00 2001
From: "vegorov@chromium.org" <vegorov@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Date: Wed, 11 Jan 2012 09:39:37 +0000
Subject: [PATCH] Change inlined cache of instanceof stub to use indirection
 through cell.

The stub was directly patching the caller's code without issuing a write
barrier, which violated incremental marking invariants.

R=mstarzinger@chromium.org
BUG=http://crbug.com/109448
TEST=cctest/test-heap/InstanceOfStubWriteBarrier

Review URL: http://codereview.chromium.org/9158015

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10380 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/arm/code-stubs-arm.cc        |  5 ++--
 src/arm/lithium-codegen-arm.cc   |  5 +++-
 src/ia32/code-stubs-ia32.cc      | 11 ++++----
 src/ia32/lithium-codegen-ia32.cc |  4 ++-
 src/incremental-marking.h        |  5 +---
 src/x64/code-stubs-x64.cc        |  4 ++-
 src/x64/lithium-codegen-x64.cc   |  7 ++---
 test/cctest/test-heap.cc         | 56 ++++++++++++++++++++++++++++++++++++++++
 8 files changed, 80 insertions(+), 17 deletions(-)

diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index ee7fbf0..2e1291e 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -4081,7 +4081,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   const Register inline_site = r9;
   const Register scratch = r2;
 
-  const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize;
+  const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
 
   Label slow, loop, is_instance, is_not_instance, not_js_object;
 
@@ -4132,7 +4132,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     __ sub(inline_site, lr, scratch);
     // Get the map location in scratch and patch it.
     __ GetRelocatedValueLocation(inline_site, scratch);
-    __ str(map, MemOperand(scratch));
+    __ ldr(scratch, MemOperand(scratch));
+    __ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
   }
 
   // Register mapping: r3 is object map and r4 is function prototype.
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 1cd5fb5..7ddfb27 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -2143,7 +2143,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   // We use Factory::the_hole_value() on purpose instead of loading from the
   // root array to force relocation to be able to later patch with
   // the cached map.
-  __ mov(ip, Operand(factory()->the_hole_value()));
+  Handle<JSGlobalPropertyCell> cell =
+      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+  __ mov(ip, Operand(Handle<Object>(cell)));
+  __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
   __ cmp(map, Operand(ip));
   __ b(ne, &cache_miss);
   // We use Factory::the_hole_value() on purpose instead of loading from the
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 58ccff5..c51e2fe 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -5081,8 +5081,8 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   static const int kDeltaToCmpImmediate = 2;
   static const int kDeltaToMov = 8;
   static const int kDeltaToMovImmediate = 9;
-  static const int8_t kCmpEdiImmediateByte1 = BitCast<int8_t, uint8_t>(0x81);
-  static const int8_t kCmpEdiImmediateByte2 = BitCast<int8_t, uint8_t>(0xff);
+  static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
+  static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
   static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
 
   ExternalReference roots_array_start =
@@ -5147,12 +5147,13 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     __ mov(scratch, Operand(esp, 0 * kPointerSize));
     __ sub(scratch, Operand(esp, 1 * kPointerSize));
     if (FLAG_debug_code) {
-      __ cmpb(Operand(scratch, 0), kCmpEdiImmediateByte1);
+      __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
       __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
-      __ cmpb(Operand(scratch, 1), kCmpEdiImmediateByte2);
+      __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
       __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
     }
-    __ mov(Operand(scratch, kDeltaToCmpImmediate), map);
+    __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
+    __ mov(Operand(scratch, 0), map);
   }
 
   // Loop through the prototype chain of the object looking for the function
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index aee5cf0..9940f8a 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1975,7 +1975,9 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   Register map = ToRegister(instr->TempAt(0));
   __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
   __ bind(deferred->map_check());  // Label for calculating code patching.
-  __ cmp(map, factory()->the_hole_value());  // Patched to cached map.
+  Handle<JSGlobalPropertyCell> cache_cell =
+      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+  __ cmp(map, Operand::Cell(cache_cell));  // Patched to cached map.
   __ j(not_equal, &cache_miss, Label::kNear);
   __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
   __ jmp(&done);
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
index 25def87..4f8fa6b 100644
--- a/src/incremental-marking.h
+++ b/src/incremental-marking.h
@@ -56,6 +56,7 @@ class IncrementalMarking {
   }
 
   bool should_hurry() { return should_hurry_; }
+  void set_should_hurry(bool val) { should_hurry_ = val; }
 
   inline bool IsStopped() { return state() == STOPPED; }
 
@@ -219,10 +220,6 @@ class IncrementalMarking {
   void UncommitMarkingDeque();
 
  private:
-  void set_should_hurry(bool val) {
-    should_hurry_ = val;
-  }
-
   int64_t SpaceLeftInOldSpace();
 
   void ResetStepCounters();
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 4ddd80e..3cd6740 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -4156,12 +4156,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
     // Get return address and delta to inlined map check.
     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
-    __ movq(Operand(kScratchRegister, kOffsetToMapCheckValue), rax);
     if (FLAG_debug_code) {
       __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
       __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
       __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
     }
+    __ movq(kScratchRegister,
+            Operand(kScratchRegister, kOffsetToMapCheckValue));
+    __ movq(Operand(kScratchRegister, 0), rax);
   }
 
   __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 113e98a..de6c5a5 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -1901,9 +1901,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   Register map = ToRegister(instr->TempAt(0));
   __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
   __ bind(deferred->map_check());  // Label for calculating code patching.
-  __ movq(kScratchRegister, factory()->the_hole_value(),
-          RelocInfo::EMBEDDED_OBJECT);
-  __ cmpq(map, kScratchRegister);  // Patched to cached map.
+  Handle<JSGlobalPropertyCell> cache_cell =
+      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
+  __ movq(kScratchRegister, cache_cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+  __ cmpq(map, Operand(kScratchRegister, 0));
   __ j(not_equal, &cache_miss, Label::kNear);
   // Patched to load either true or false.
   __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 42b5789..b643e7d 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -1476,3 +1476,59 @@ TEST(LeakGlobalContextViaMapProto) {
   HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(0, NumberOfGlobalObjects());
 }
+
+
+TEST(InstanceOfStubWriteBarrier) {
+  if (!i::FLAG_crankshaft) return;
+  i::FLAG_allow_natives_syntax = true;
+  i::FLAG_verify_heap = true;
+  InitializeVM();
+  v8::HandleScope outer_scope;
+
+  {
+    v8::HandleScope scope;
+    CompileRun(
+        "function foo () { }"
+        "function mkbar () { return new (new Function(\"\")) (); }"
+        "function f (x) { return (x instanceof foo); }"
+        "function g () { f(mkbar()); }"
+        "f(new foo()); f(new foo());"
+        "%OptimizeFunctionOnNextCall(f);"
+        "f(new foo()); g();");
+  }
+
+  IncrementalMarking* marking = HEAP->incremental_marking();
+  marking->Abort();
+  marking->Start();
+
+  Handle<JSFunction> f =
+      v8::Utils::OpenHandle(
+          *v8::Handle<v8::Function>::Cast(
+              v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+
+  CHECK(f->IsOptimized());
+
+  while (!Marking::IsBlack(Marking::MarkBitFrom(f->code())) &&
+         !marking->IsStopped()) {
+    marking->Step(MB);
+  }
+
+  CHECK(marking->IsMarking());
+
+  // Discard any pending GC requests otherwise we will get GC when we enter
+  // code below.
+  if (ISOLATE->stack_guard()->IsGCRequest()) {
+    ISOLATE->stack_guard()->Continue(GC_REQUEST);
+  }
+
+  {
+    v8::HandleScope scope;
+    v8::Handle<v8::Object> global = v8::Context::GetCurrent()->Global();
+    v8::Handle<v8::Function> g =
+        v8::Handle<v8::Function>::Cast(global->Get(v8_str("g")));
+    g->Call(global, 0, NULL);
+  }
+
+  HEAP->incremental_marking()->set_should_hurry(true);
+  HEAP->CollectGarbage(OLD_POINTER_SPACE);
+}
-- 
2.7.4
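
For readers of the patch, the shape of the fix is one extra level of indirection.
Previously the optimized call site embedded the cached map directly as an
immediate in the caller's machine code, and the stub rewrote that immediate in
place: a store of a new heap pointer into a code object the incremental marker
may already have scanned ("blackened"), with no write barrier to report it.
Now the call site embeds a pointer to a JSGlobalPropertyCell, and the stub
stores the map into the cell's value slot, so the caller's code itself is never
mutated. A minimal C++ sketch of the two patterns, using hypothetical stand-in
types and function names rather than V8's real classes:

    // Illustrative stand-ins only; not V8 API.
    struct HeapObject { bool black = false; };  // black = scanned by the marker
    struct Cell : HeapObject { HeapObject* value = nullptr; };

    // Before the patch: the stub rewrote the map immediate inside the caller's
    // code object. If that code object was already black, the newly stored map
    // pointer was invisible to the incremental marker.
    void PatchCodeDirectly(HeapObject** slot_in_code_object, HeapObject* map) {
      *slot_in_code_object = map;  // unrecorded store into a scanned object
    }

    // After the patch: the caller's code embeds a cell pointer once, at
    // code-generation time; the stub only updates the cell's value slot, an
    // ordinary heap slot the collector tracks through its normal machinery.
    void PatchThroughCell(Cell* cell, HeapObject* map) {
      cell->value = map;
    }

This is also why the per-port constants change: on ia32, for instance, the
inlined sequence becomes a compare against a memory operand (opcode bytes
0x3b 0x3d, cmp edi with a disp32 operand) instead of a compare against an
embedded immediate (0x81 0xff, cmp edi with imm32), and on ARM the delta to
the boolean load grows by one instruction for the added ldr through the cell.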