From c810016e77a0440b846c4fc86cf73d22878ef7eb Mon Sep 17 00:00:00 2001
From: "mstarzinger@chromium.org"
Date: Thu, 3 May 2012 10:54:17 +0000
Subject: [PATCH] Implement clearing of CompareICs.

This allows CompareICs to be cleared during garbage collection to avoid
cross-context garbage retention through maps stored in CompareIC stubs
for the KNOWN_OBJECTS state.

R=vegorov@chromium.org
BUG=v8:2102

Review URL: https://chromiumcodereview.appspot.com/10263008

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@11491 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/arm/ic-arm.cc                | 47 +++++++++++++++++++---------------------
 src/arm/macro-assembler-arm.cc   |  2 +-
 src/code-stubs.h                 |  1 +
 src/ia32/assembler-ia32.h        |  3 +++
 src/ia32/ic-ia32.cc              | 22 +++++++++++--------
 src/ia32/macro-assembler-ia32.cc |  2 +-
 src/ic.cc                        | 32 +++++++++++++++++++++++----
 src/ic.h                         | 12 +++++++++-
 src/objects-inl.h                | 12 ++++++++++
 src/objects.cc                   |  4 ++++
 src/objects.h                    |  7 ++++++
 src/x64/assembler-x64.h          |  3 ++-
 src/x64/ic-x64.cc                | 22 +++++++++++--------
 src/x64/macro-assembler-x64.cc   |  2 +-
 14 files changed, 119 insertions(+), 52 deletions(-)

diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index e843657..8973e6d 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1690,12 +1690,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   Address cmp_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
 
@@ -1729,34 +1729,31 @@ void PatchInlinedSmiCode(Address address) {
   Instr instr_at_patch = Assembler::instr_at(patch_address);
   Instr branch_instr =
       Assembler::instr_at(patch_address + Instruction::kInstrSize);
-  ASSERT(Assembler::IsCmpRegister(instr_at_patch));
-  ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
-            Assembler::GetRm(instr_at_patch).code());
+  // This is patching a conditional "jump if not smi/jump if smi" site.
+  // Enabling by changing from
+  //   cmp rx, rx
+  //   b eq/ne, <target>
+  // to
+  //   tst rx, #kSmiTagMask
+  //   b ne/eq, <target>
+  // and vice-versa to be disabled again.
+  CodePatcher patcher(patch_address, 2);
+  Register reg = Assembler::GetRn(instr_at_patch);
+  if (check == ENABLE_INLINED_SMI_CHECK) {
+    ASSERT(Assembler::IsCmpRegister(instr_at_patch));
+    ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
+              Assembler::GetRm(instr_at_patch).code());
+    patcher.masm()->tst(reg, Operand(kSmiTagMask));
+  } else {
+    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
+    ASSERT(Assembler::IsTstImmediate(instr_at_patch));
+    patcher.masm()->cmp(reg, reg);
+  }
   ASSERT(Assembler::IsBranch(branch_instr));
   if (Assembler::GetCondition(branch_instr) == eq) {
-    // This is patching a "jump if not smi" site to be active.
-    // Changing
-    //   cmp rx, rx
-    //   b eq, <target>
-    // to
-    //   tst rx, #kSmiTagMask
-    //   b ne, <target>
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Assembler::GetRn(instr_at_patch);
-    patcher.masm()->tst(reg, Operand(kSmiTagMask));
     patcher.EmitCondition(ne);
   } else {
     ASSERT(Assembler::GetCondition(branch_instr) == ne);
-    // This is patching a "jump if smi" site to be active.
-    // Changing
-    //   cmp rx, rx
-    //   b ne, <target>
-    // to
-    //   tst rx, #kSmiTagMask
-    //   b eq, <target>
-    CodePatcher patcher(patch_address, 2);
-    Register reg = Assembler::GetRn(instr_at_patch);
-    patcher.masm()->tst(reg, Operand(kSmiTagMask));
     patcher.EmitCondition(eq);
   }
 }
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 42c9961..4da2fec 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -3738,7 +3738,7 @@ CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
-      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
+      masm_(NULL, address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
diff --git a/src/code-stubs.h b/src/code-stubs.h
index b67e961..5c87178 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -498,6 +498,7 @@ class ICCompareStub: public CodeStub {
 
   virtual void FinishCode(Handle<Code> code) {
     code->set_compare_state(state_);
+    code->set_compare_operation(op_);
   }
 
   virtual CodeStub::Major MajorKey() { return CompareIC; }
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 929b485..4ead80b 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -640,6 +640,9 @@ class Assembler : public AssemblerBase {
   static const byte kJccShortPrefix = 0x70;
   static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
   static const byte kJcShortOpcode = kJccShortPrefix | carry;
+  static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
+  static const byte kJzShortOpcode = kJccShortPrefix | zero;
+
 
   // ---------------------------------------------------------------------------
   // Code generation
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index d4f85cc..fc1699d 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1727,12 +1727,12 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1753,14 +1753,18 @@ void PatchInlinedSmiCode(Address address) {
         address, test_instruction_address, delta);
   }
 
-  // Patch with a short conditional jump. There must be a
-  // short jump-if-carry/not-carry at this position.
+  // Patch with a short conditional jump. Enabling means switching from a short
+  // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
+  // reverse operation of that.
   Address jmp_address = test_instruction_address - delta;
-  ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
-         *jmp_address == Assembler::kJcShortOpcode);
-  Condition cc = *jmp_address == Assembler::kJncShortOpcode
-      ? not_zero
-      : zero;
+  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
+         ? (*jmp_address == Assembler::kJncShortOpcode ||
+            *jmp_address == Assembler::kJcShortOpcode)
+         : (*jmp_address == Assembler::kJnzShortOpcode ||
+            *jmp_address == Assembler::kJzShortOpcode));
+  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
+      ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+      : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
   *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
 }
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 60e38a6..c31b0c2 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -2566,7 +2566,7 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
-      masm_(Isolate::Current(), address, size + Assembler::kGap) {
+      masm_(NULL, address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
diff --git a/src/ic.cc b/src/ic.cc
index 643fa88..9772b94 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -352,9 +352,9 @@ void IC::Clear(Address address) {
       return KeyedStoreIC::Clear(address, target);
     case Code::CALL_IC: return CallIC::Clear(address, target);
     case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
+    case Code::COMPARE_IC: return CompareIC::Clear(address, target);
     case Code::UNARY_OP_IC:
     case Code::BINARY_OP_IC:
-    case Code::COMPARE_IC:
     case Code::TO_BOOLEAN_IC:
       // Clearing these is tricky and does not
       // make any performance difference.
@@ -365,9 +365,8 @@ void IC::Clear(Address address) {
 
 
 void CallICBase::Clear(Address address, Code* target) {
+  if (target->ic_state() == UNINITIALIZED) return;
   bool contextual = CallICBase::Contextual::decode(target->extra_ic_state());
-  State state = target->ic_state();
-  if (state == UNINITIALIZED) return;
   Code* code =
       Isolate::Current()->stub_cache()->FindCallInitialize(
           target->arguments_count(),
@@ -410,6 +409,17 @@ void KeyedStoreIC::Clear(Address address, Code* target) {
 }
 
 
+void CompareIC::Clear(Address address, Code* target) {
+  // Only clear ICCompareStubs; we currently cannot clear generic CompareStubs.
+  if (target->major_key() != CodeStub::CompareIC) return;
+  // Only clear CompareICs that can retain objects.
+  if (target->compare_state() != KNOWN_OBJECTS) return;
+  Token::Value op = CompareIC::ComputeOperation(target);
+  SetTargetAtAddress(address, GetRawUninitialized(op));
+  PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
+}
+
+
 static bool HasInterceptorGetter(JSObject* object) {
   return !object->GetNamedInterceptor()->getter()->IsUndefined();
 }
@@ -2396,7 +2406,7 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
 
     // Activate inlined smi code.
     if (previous_type == BinaryOpIC::UNINITIALIZED) {
-      PatchInlinedSmiCode(ic.address());
+      PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
     }
   }
 
@@ -2457,6 +2467,14 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
 }
 
 
+Code* CompareIC::GetRawUninitialized(Token::Value op) {
+  ICCompareStub stub(op, UNINITIALIZED);
+  Code* code = NULL;
+  CHECK(stub.FindCodeInCache(&code));
+  return code;
+}
+
+
 Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
   ICCompareStub stub(op, UNINITIALIZED);
   return stub.GetCode();
@@ -2471,6 +2489,12 @@ CompareIC::State CompareIC::ComputeState(Code* target) {
 }
 
 
+Token::Value CompareIC::ComputeOperation(Code* target) {
+  ASSERT(target->major_key() == CodeStub::CompareIC);
+  return static_cast<Token::Value>(target->compare_operation());
+}
+
+
 const char* CompareIC::GetStateName(State state) {
   switch (state) {
     case UNINITIALIZED: return "UNINITIALIZED";
diff --git a/src/ic.h b/src/ic.h
index 5662552..3b44abf 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -794,6 +794,9 @@ class CompareIC: public IC {
   // Helper function for determining the state of a compare IC.
   static State ComputeState(Code* target);
 
+  // Helper function for determining the operation a compare IC is for.
+  static Token::Value ComputeOperation(Code* target);
+
   static const char* GetStateName(State state);
 
  private:
@@ -804,7 +807,13 @@ class CompareIC: public IC {
   Condition GetCondition() const { return ComputeCondition(op_); }
   State GetState() { return ComputeState(target()); }
 
+  static Code* GetRawUninitialized(Token::Value op);
+
+  static void Clear(Address address, Code* target);
+
   Token::Value op_;
+
+  friend class IC;
 };
 
 
@@ -817,7 +826,8 @@ class ToBooleanIC: public IC {
 
 
 // Helper for BinaryOpIC and CompareIC.
-void PatchInlinedSmiCode(Address address);
+enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
 
 } }  // namespace v8::internal
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 459420c..793e017 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -3193,6 +3193,18 @@ void Code::set_compare_state(byte value) {
 }
 
 
+byte Code::compare_operation() {
+  ASSERT(is_compare_ic_stub());
+  return READ_BYTE_FIELD(this, kCompareOperationOffset);
+}
+
+
+void Code::set_compare_operation(byte value) {
+  ASSERT(is_compare_ic_stub());
+  WRITE_BYTE_FIELD(this, kCompareOperationOffset, value);
+}
+
+
 byte Code::to_boolean_state() {
   ASSERT(is_to_boolean_ic_stub());
   return READ_BYTE_FIELD(this, kToBooleanTypeOffset);
diff --git a/src/objects.cc b/src/objects.cc
index 76a8266..3022d14 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -8378,6 +8378,10 @@ void Code::Disassemble(const char* name, FILE* out) {
       CompareIC::State state = CompareIC::ComputeState(this);
       PrintF(out, "compare_state = %s\n", CompareIC::GetStateName(state));
     }
+    if (is_compare_ic_stub() && major_key() == CodeStub::CompareIC) {
+      Token::Value op = CompareIC::ComputeOperation(this);
+      PrintF(out, "compare_operation = %s\n", Token::Name(op));
+    }
   }
   if ((name != NULL) && (name[0] != '\0')) {
     PrintF(out, "name = %s\n", name);
diff --git a/src/objects.h b/src/objects.h
index 80d1fd4..6217b94 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -4291,6 +4291,11 @@ class Code: public HeapObject {
   inline byte compare_state();
   inline void set_compare_state(byte value);
 
+  // [compare_operation]: For kind COMPARE_IC tells what compare operation the
+  // stub was generated for.
+  inline byte compare_operation();
+  inline void set_compare_operation(byte value);
+
   // [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
   inline byte to_boolean_state();
   inline void set_to_boolean_state(byte value);
@@ -4474,6 +4479,8 @@ class Code: public HeapObject {
 
   static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
 
+  static const int kCompareOperationOffset = kCompareStateOffset + 1;
+
   static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
   static const int kProfilerTicksOffset =
       kAllowOSRAtLoopNestingLevelOffset + 1;
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 60b29e6..9f5f850 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -629,7 +629,8 @@ class Assembler : public AssemblerBase {
   static const byte kJccShortPrefix = 0x70;
   static const byte kJncShortOpcode = kJccShortPrefix | not_carry;
   static const byte kJcShortOpcode = kJccShortPrefix | carry;
-
+  static const byte kJnzShortOpcode = kJccShortPrefix | not_zero;
+  static const byte kJzShortOpcode = kJccShortPrefix | zero;
 
   // ---------------------------------------------------------------------------
 
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 0632ce4..6ba5fb6 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -1741,11 +1741,11 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
 
   // Activate inlined smi code.
   if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address());
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
   }
 }
 
-void PatchInlinedSmiCode(Address address) {
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
@@ -1766,14 +1766,18 @@ void PatchInlinedSmiCode(Address address) {
         address, test_instruction_address, delta);
   }
 
-  // Patch with a short conditional jump. There must be a
-  // short jump-if-carry/not-carry at this position.
+  // Patch with a short conditional jump. Enabling means switching from a short
+  // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
+  // reverse operation of that.
   Address jmp_address = test_instruction_address - delta;
-  ASSERT(*jmp_address == Assembler::kJncShortOpcode ||
-         *jmp_address == Assembler::kJcShortOpcode);
-  Condition cc = *jmp_address == Assembler::kJncShortOpcode
-      ? not_zero
-      : zero;
+  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
+         ? (*jmp_address == Assembler::kJncShortOpcode ||
+            *jmp_address == Assembler::kJcShortOpcode)
+         : (*jmp_address == Assembler::kJnzShortOpcode ||
+            *jmp_address == Assembler::kJzShortOpcode));
+  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
+      ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+      : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
   *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
 }
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 53becf6..3d380a2 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -4188,7 +4188,7 @@ bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
-      masm_(Isolate::Current(), address, size + Assembler::kGap) {
+      masm_(NULL, address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
-- 
2.7.4
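
A note for readers tracing the ia32/x64 hunks: the patch enables or disables
the inlined smi check by rewriting a single opcode byte of a short conditional
jump (0x70 | cc) found at a fixed delta from the IC call site. Below is a
minimal standalone C++ sketch of that byte flip. The constants mirror
Assembler::kJccShortPrefix and the x86 condition codes used in the patch; the
helper name PatchShortJcc and the demo buffer are illustrative assumptions,
not V8 API.

// Sketch (not V8 source): the short-jcc opcode flip behind
// PatchInlinedSmiCode on ia32/x64.
#include <cassert>
#include <cstdint>
#include <cstdio>

const uint8_t kJccShortPrefix = 0x70;  // short conditional jump: 0x70 | cc
const uint8_t kCarry = 0x2;            // jc  = 0x72
const uint8_t kNotCarry = 0x3;         // jnc = 0x73
const uint8_t kZero = 0x4;             // jz  = 0x74
const uint8_t kNotZero = 0x5;          // jnz = 0x75

enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };

// Rewrites the one-byte opcode of a short conditional jump in place.
// Enabling maps jnc->jnz and jc->jz (a live smi-tag test); disabling maps
// jnz->jnc and jz->jc, restoring the dormant form that an uninitialized
// stub expects, exactly as in the ia32/x64 hunks above.
void PatchShortJcc(uint8_t* jmp_address, InlinedSmiCheck check) {
  uint8_t cc;
  if (check == ENABLE_INLINED_SMI_CHECK) {
    assert(*jmp_address == (kJccShortPrefix | kNotCarry) ||
           *jmp_address == (kJccShortPrefix | kCarry));
    cc = (*jmp_address == (kJccShortPrefix | kNotCarry)) ? kNotZero : kZero;
  } else {
    assert(*jmp_address == (kJccShortPrefix | kNotZero) ||
           *jmp_address == (kJccShortPrefix | kZero));
    cc = (*jmp_address == (kJccShortPrefix | kNotZero)) ? kNotCarry : kCarry;
  }
  *jmp_address = static_cast<uint8_t>(kJccShortPrefix | cc);
}

int main() {
  uint8_t code[] = {0x73, 0x10};  // jnc short +0x10 (smi check disabled)
  PatchShortJcc(code, ENABLE_INLINED_SMI_CHECK);
  assert(code[0] == 0x75);        // now jnz short +0x10 (smi check enabled)
  PatchShortJcc(code, DISABLE_INLINED_SMI_CHECK);
  assert(code[0] == 0x73);        // back to jnc: the round trip is lossless
  printf("round-trip ok: 0x%02x\n", code[0]);
  return 0;
}

The lossless round trip is what lets CompareIC::Clear invoke
PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK) during garbage
collection: after clearing, the call site is byte-for-byte in the state the
UNINITIALIZED stub expects, so a later UpdateCaches can re-enable the check
through the normal path.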