From: dusan.milosavljevic
Date: Sat, 1 Aug 2015 17:04:28 +0000 (-0700)
Subject: MIPS64: Fix hidden bug in relocations for j and jal.
X-Git-Tag: upstream/4.7.83~1078
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=38501327599ae6ac7c255205c61c00aeed09da13;p=platform%2Fupstream%2Fv8.git

MIPS64: Fix hidden bug in relocations for j and jal.

Introduce a new mechanism for relocating j/jal.
Resolves flaky failures of mozilla regress tests.

Additionally:
- Internal encoded references are no longer relocated during the code
  generation phase.
- Remove asserts from j and jal, which are not valid because addresses are
  not yet final during the code generation phase.

TEST=mozilla/js1_5/Regress/regress-280769-2, regress-367561-01,
     mozilla/ecma_3/Statements/regress-444979
BUG=

R=paul.lind@imgtec.com

Review URL: https://codereview.chromium.org/1216823003 .

Patch from dusan.milosavljevic .

Cr-Commit-Position: refs/heads/master@{#29962}
---

diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index 8fb1609..8a5ae77 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -665,7 +665,7 @@ int Assembler::target_at(int pos, bool is_internal) {
   // Check we have a branch or jump instruction.
   DCHECK(IsBranch(instr) || IsLui(instr));
   // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
-  // the compiler uses arithmectic shifts for signed integers.
+  // the compiler uses arithmetic shifts for signed integers.
   if (IsBranch(instr)) {
     int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
diff --git a/src/mips64/assembler-mips64.cc b/src/mips64/assembler-mips64.cc
index 61b7296..c4e2b01 100644
--- a/src/mips64/assembler-mips64.cc
+++ b/src/mips64/assembler-mips64.cc
@@ -681,11 +681,9 @@ int Assembler::target_at(int pos, bool is_internal) {
       // EndOfChain sentinel is returned directly, not relative to pc or pos.
       return kEndOfChain;
     } else {
-      uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_ + pos);
-      instr_address &= kImm28Mask;
-      int delta = static_cast<int>(instr_address - imm28);
-      DCHECK(pos > delta);
-      return pos - delta;
+      // Sign extend 28-bit offset.
+      int32_t delta = static_cast<int32_t>((imm28 << 4) >> 4);
+      return pos + delta;
     }
   }
 }
@@ -706,7 +704,6 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
     return;
   }

-  DCHECK(IsBranch(instr) || IsJ(instr) || IsJal(instr) || IsLui(instr));
   if (IsBranch(instr)) {
     int32_t imm18 = target_pos - (pos + kBranchPCOffset);
     DCHECK((imm18 & 3) == 0);
@@ -736,16 +733,25 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
                  instr_ori | ((imm >> 16) & kImm16Mask));
     instr_at_put(pos + 3 * Assembler::kInstrSize,
                  instr_ori2 | (imm & kImm16Mask));
-  } else {
-    DCHECK(IsJ(instr) || IsJal(instr));
-    uint64_t imm28 = reinterpret_cast<uint64_t>(buffer_) + target_pos;
-    imm28 &= kImm28Mask;
+  } else if (IsJ(instr) || IsJal(instr)) {
+    int32_t imm28 = target_pos - pos;
     DCHECK((imm28 & 3) == 0);
-    instr &= ~kImm26Mask;
     uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
     DCHECK(is_uint26(imm26));
+    // Place 26-bit signed offset with markings.
+    // When code is committed it will be resolved to j/jal.
+    int32_t mark = IsJ(instr) ? kJRawMark : kJalRawMark;
+    instr_at_put(pos, mark | (imm26 & kImm26Mask));
+  } else {
+    int32_t imm28 = target_pos - pos;
+    DCHECK((imm28 & 3) == 0);
+    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
+    DCHECK(is_uint26(imm26));
+    // Place raw 26-bit signed offset.
+    // When code is committed it will be resolved to j/jal.
+    instr &= ~kImm26Mask;
     instr_at_put(pos, instr | (imm26 & kImm26Mask));
   }
 }


@@ -1027,6 +1033,26 @@ uint64_t Assembler::jump_address(Label* L) {
 }


+uint64_t Assembler::jump_offset(Label* L) {
+  int64_t target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link.
+      L->link_to(pc_offset());
+    } else {
+      L->link_to(pc_offset());
+      return kEndOfJumpChain;
+    }
+  }
+  int64_t imm = target_pos - pc_offset();
+  DCHECK((imm & 3) == 0);
+
+  return static_cast<uint64_t>(imm);
+}
+
+
 int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
   int32_t target_pos;
   if (L->is_bound()) {
@@ -1405,19 +1431,32 @@ void Assembler::bnezc(Register rs, int32_t offset) {


 void Assembler::j(int64_t target) {
-#if DEBUG
-  // Get pc of delay slot.
-  if (target != kEndOfJumpChain) {
-    uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
-    bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
-                     (kImm26Bits + kImmFieldShift)) == 0;
-    DCHECK(in_range && ((target & 3) == 0));
-  }
-#endif
   GenInstrJump(J, static_cast<uint32_t>(target >> 2) & kImm26Mask);
 }


+void Assembler::j(Label* target) {
+  uint64_t imm = jump_offset(target);
+  if (target->is_bound()) {
+    GenInstrJump(static_cast<Opcode>(kJRawMark),
+                 static_cast<uint32_t>(imm >> 2) & kImm26Mask);
+  } else {
+    j(imm);
+  }
+}
+
+
+void Assembler::jal(Label* target) {
+  uint64_t imm = jump_offset(target);
+  if (target->is_bound()) {
+    GenInstrJump(static_cast<Opcode>(kJalRawMark),
+                 static_cast<uint32_t>(imm >> 2) & kImm26Mask);
+  } else {
+    jal(imm);
+  }
+}
+
+
 void Assembler::jr(Register rs) {
   if (kArchVariant != kMips64r6) {
     BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -1433,15 +1472,6 @@ void Assembler::jr(Register rs) {


 void Assembler::jal(int64_t target) {
-#ifdef DEBUG
-  // Get pc of delay slot.
-  if (target != kEndOfJumpChain) {
-    uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
-    bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
-                     (kImm26Bits + kImmFieldShift)) == 0;
-    DCHECK(in_range && ((target & 3) == 0));
-  }
-#endif
   positions_recorder()->WriteRecordedPositions();
   GenInstrJump(JAL, static_cast<uint32_t>(target >> 2) & kImm26Mask);
 }
@@ -2920,7 +2950,6 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
   }
   Instr instr = instr_at(pc);
   DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
-  DCHECK(IsJ(instr) || IsLui(instr) || IsJal(instr));
   if (IsLui(instr)) {
     Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
     Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
@@ -2951,22 +2980,30 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
     instr_at_put(pc + 3 * Assembler::kInstrSize,
                  instr_ori2 | (imm & kImm16Mask));
     return 4;  // Number of instructions patched.
-  } else {
+  } else if (IsJ(instr) || IsJal(instr)) {
+    // Regular j/jal relocation.
     uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
-    if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
-      return 0;  // Number of instructions patched.
-    }
-
     imm28 += pc_delta;
     imm28 &= kImm28Mask;
-    DCHECK((imm28 & 3) == 0);
-
     instr &= ~kImm26Mask;
-    uint32_t imm26 = imm28 >> 2;
-    DCHECK(is_uint26(imm26));
-
+    DCHECK((imm28 & 3) == 0);
+    uint32_t imm26 = static_cast<uint32_t>(imm28 >> 2);
     instr_at_put(pc, instr | (imm26 & kImm26Mask));
     return 1;  // Number of instructions patched.
+  } else {
+    // Unbox raw offset and emit j/jal.
+    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+    // Sign extend 28-bit offset to 32-bit.
+    imm28 = (imm28 << 4) >> 4;
+    uint64_t target =
+        static_cast<int64_t>(imm28) + reinterpret_cast<uint64_t>(pc);
+    target &= kImm28Mask;
+    DCHECK((imm28 & 3) == 0);
+    uint32_t imm26 = static_cast<uint32_t>(target >> 2);
+    // Check markings whether to emit j or jal.
+    uint32_t unbox = (instr & kJRawMark) ? J : JAL;
+    instr_at_put(pc, unbox | (imm26 & kImm26Mask));
+    return 1;  // Number of instructions patched.
   }
 }
@@ -3009,8 +3046,7 @@ void Assembler::GrowBuffer() {
   // Relocate runtime entries.
   for (RelocIterator it(desc); !it.done(); it.next()) {
     RelocInfo::Mode rmode = it.rinfo()->rmode();
-    if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
-        rmode == RelocInfo::INTERNAL_REFERENCE) {
+    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
       byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
       RelocateInternalReference(rmode, p, pc_delta);
     }
@@ -3130,14 +3166,12 @@ void Assembler::CheckTrampolinePool() {

     int pool_start = pc_offset();
     for (int i = 0; i < unbound_labels_count_; i++) {
-      uint64_t imm64;
-      imm64 = jump_address(&after_pool);
       { BlockGrowBufferScope block_buf_growth(this);
         // Buffer growth (and relocation) must be blocked for internal
         // references until associated instructions are emitted and available
         // to be patched.
         RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-        j(imm64);
+        j(&after_pool);
       }
       nop();
     }
diff --git a/src/mips64/assembler-mips64.h b/src/mips64/assembler-mips64.h
index 923b203..d15dcc7 100644
--- a/src/mips64/assembler-mips64.h
+++ b/src/mips64/assembler-mips64.h
@@ -484,6 +484,7 @@ class Assembler : public AssemblerBase {
     return o >> 2;
   }
   uint64_t jump_address(Label* L);
+  uint64_t jump_offset(Label* L);

   // Puts a labels target address at the given position.
   // The high 8 bits are set to zero.
@@ -736,6 +737,8 @@ class Assembler : public AssemblerBase {
   // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits.
   void j(int64_t target);
   void jal(int64_t target);
+  void j(Label* target);
+  void jal(Label* target);
   void jalr(Register rs, Register rd = ra);
   void jr(Register target);
   void jic(Register rt, int16_t offset);
diff --git a/src/mips64/constants-mips64.h b/src/mips64/constants-mips64.h
index 0284478..4c663e2 100644
--- a/src/mips64/constants-mips64.h
+++ b/src/mips64/constants-mips64.h
@@ -282,6 +282,8 @@ const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
 const int64_t kHi16MaskOf64 = (int64_t)0xffff << 48;
 const int64_t kSe16MaskOf64 = (int64_t)0xffff << 32;
 const int64_t kTh16MaskOf64 = (int64_t)0xffff << 16;
+const int32_t kJalRawMark = 0x00000000;
+const int32_t kJRawMark = 0xf0000000;

 // ----- MIPS Opcodes and Function Fields.
 // We use this presentation to stay close to the table representation in
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
index d0236a0..5664b05 100644
--- a/src/mips64/macro-assembler-mips64.cc
+++ b/src/mips64/macro-assembler-mips64.cc
@@ -2757,7 +2757,7 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
       Label skip;
       Condition neg_cond = NegateCondition(cond);
       BranchShort(&skip, neg_cond, rs, rt);
-      J(L, bdslot);
+      Jal(L, bdslot);
       bind(&skip);
     }
   } else {
@@ -3195,15 +3195,12 @@ void MacroAssembler::Ret(Condition cond,

 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
-
-  uint64_t imm28;
-  imm28 = jump_address(L);
   { BlockGrowBufferScope block_buf_growth(this);
     // Buffer growth (and relocation) must be blocked for internal references
     // until associated instructions are emitted and available to be patched.
     RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-    j(imm28);
+    j(L);
   }
   // Emit a nop in the branch delay slot if required.
   if (bdslot == PROTECT) nop();
 }
@@ -3212,15 +3209,12 @@ void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {

 void MacroAssembler::Jal(Label* L, BranchDelaySlot bdslot) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
-
-  uint64_t imm28;
-  imm28 = jump_address(L);
   { BlockGrowBufferScope block_buf_growth(this);
     // Buffer growth (and relocation) must be blocked for internal references
     // until associated instructions are emitted and available to be patched.
     RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-    jal(imm28);
+    jal(L);
   }
   // Emit a nop in the branch delay slot if required.
   if (bdslot == PROTECT) nop();
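
For reference, a minimal standalone C++ sketch of the scheme the patch introduces, not part of the patch itself: the helper names BoxJumpOffset/UnboxToJump and the sample main() are invented for illustration, the mask and mark values mirror the constants added to constants-mips64.h (declared here as uint32_t so the sketch stays well-defined on its own), and the J/JAL opcode values are the standard MIPS encodings (opcode field << 26). It shows what target_at_put()/j(Label*) now store for a bound label, a marked, word-aligned, PC-relative offset packed into the 26-bit jump field, and how RelocateInternalReference() later unboxes it into a real j/jal inside the 256 MB region.

#include <cassert>
#include <cstdint>

namespace {

// Values mirroring constants-mips64.h and the architectural MIPS opcodes.
constexpr uint32_t kImm26Mask  = (1u << 26) - 1;  // 26-bit jump field
constexpr uint32_t kImm28Mask  = (1u << 28) - 1;  // 256 MB region
constexpr uint32_t kJRawMark   = 0xf0000000u;     // marks a boxed j
constexpr uint32_t kJalRawMark = 0x00000000u;     // marks a boxed jal
constexpr uint32_t kOpcodeJ    = 2u << 26;        // MIPS j
constexpr uint32_t kOpcodeJal  = 3u << 26;        // MIPS jal

// Boxing step: pack the signed, instruction-aligned PC-relative byte offset
// into the 26-bit field and tag it, so a later pass can tell it apart from an
// already-resolved j/jal.
uint32_t BoxJumpOffset(int32_t byte_offset, bool is_jal) {
  assert((byte_offset & 3) == 0);  // offsets are word-aligned
  uint32_t imm26 = static_cast<uint32_t>(byte_offset >> 2) & kImm26Mask;
  return (is_jal ? kJalRawMark : kJRawMark) | imm26;
}

// Unboxing step at commit time: recover the signed 28-bit offset, form the
// absolute target, keep the low 28 bits (the jump stays inside its 256 MB
// region), and emit the real opcode chosen by the mark.
uint32_t UnboxToJump(uint32_t boxed, uint64_t instr_address) {
  int32_t imm28 = static_cast<int32_t>((boxed & kImm26Mask) << 2);
  // The patch sign-extends with the arithmetic-shift idiom (imm28 << 4) >> 4;
  // this is the equivalent, fully defined form.
  if (imm28 & (1 << 27)) imm28 -= (1 << 28);
  uint64_t target = instr_address + static_cast<int64_t>(imm28);
  target &= kImm28Mask;
  uint32_t opcode = (boxed & kJRawMark) ? kOpcodeJ : kOpcodeJal;
  return opcode | (static_cast<uint32_t>(target >> 2) & kImm26Mask);
}

}  // namespace

int main() {
  // A j at address 0x12340100 that should land 0x40 bytes earlier.
  const uint64_t pc = 0x12340100;
  const int32_t offset = -0x40;
  uint32_t boxed = BoxJumpOffset(offset, /*is_jal=*/false);
  uint32_t resolved = UnboxToJump(boxed, pc);
  // The resolved instruction is J with the in-region word address of pc-0x40.
  assert(resolved == (kOpcodeJ | (((pc + offset) & kImm28Mask) >> 2)));
  (void)resolved;
  return 0;
}

Because the boxed form is PC-relative, growing or moving the buffer during code generation no longer invalidates it, which is why GrowBuffer() stops relocating INTERNAL_REFERENCE_ENCODED entries and the resolution is deferred to RelocateInternalReference().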