}
+void Assembler::Unreachable() {
+#ifdef USE_SIMULATOR
+ debug("UNREACHABLE", __LINE__, BREAK);
+#else
+ // Crash by branching to 0. lr now points near the fault.
+ Emit(BLR | Rn(xzr));
+#endif
+}
+
+
Address Assembler::target_pointer_address_at(Address pc) {
Instruction* instr = reinterpret_cast<Instruction*>(pc);
ASSERT(instr->IsLdrLiteralX());
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
+ unresolved_branches_(),
positions_recorder_(this) {
const_pool_blocked_nesting_ = 0;
Reset();
#ifdef DEBUG
if (label->is_linked()) {
int linkoffset = label->pos();
- bool start_of_chain = false;
- while (!start_of_chain) {
+ bool end_of_chain = false;
+ while (!end_of_chain) {
Instruction * link = InstructionAt(linkoffset);
int linkpcoffset = link->ImmPCOffset();
int prevlinkoffset = linkoffset + linkpcoffset;
- start_of_chain = (linkoffset == prevlinkoffset);
+ end_of_chain = (linkoffset == prevlinkoffset);
linkoffset = linkoffset + linkpcoffset;
}
}
}
+void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
+ Label* label,
+ Instruction* label_veneer) {
+ ASSERT(label->is_linked());
+
+ CheckLabelLinkChain(label);
+
+ Instruction* link = InstructionAt(label->pos());
+ Instruction* prev_link = link;
+ Instruction* next_link;
+ bool end_of_chain = false;
+
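+ // Walk the chain starting from the most recently linked branch (at
+ // label->pos()). Each link targets the previously linked branch, and a link
+ // that targets itself marks the end of the chain.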
+ while (link != branch && !end_of_chain) {
+ next_link = link->ImmPCOffsetTarget();
+ end_of_chain = (link == next_link);
+ prev_link = link;
+ link = next_link;
+ }
+
+ ASSERT(branch == link);
+ next_link = branch->ImmPCOffsetTarget();
+
+ if (branch == prev_link) {
+ // The branch is the first instruction in the chain.
+ if (branch == next_link) {
+ // It is also the last instruction in the chain, so it is the only branch
+ // currently referring to this label.
+ label->Unuse();
+ } else {
+ label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
+ }
+
+ } else if (branch == next_link) {
+ // The branch is the last (but not also the first) instruction in the chain.
+ prev_link->SetImmPCOffsetTarget(prev_link);
+
+ } else {
+ // The branch is in the middle of the chain.
+ if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
+ prev_link->SetImmPCOffsetTarget(next_link);
+ } else if (label_veneer != NULL) {
+ // Use the veneer for all previous links in the chain.
+ prev_link->SetImmPCOffsetTarget(prev_link);
+
+ end_of_chain = false;
+ link = next_link;
+ while (!end_of_chain) {
+ next_link = link->ImmPCOffsetTarget();
+ end_of_chain = (link == next_link);
+ link->SetImmPCOffsetTarget(label_veneer);
+ link = next_link;
+ }
+ } else {
+ // The CHECK below will fail.
+ // Some other work could be attempted to fix up the chain, but it would be
+ // rather complicated. If we crash here, we may want to consider using
+ // another mechanism than a chain of branches.
+ //
+ // Note that this situation should not currently happen, as we always call
+ // this function with a veneer to the target label.
+ // However, this could happen with a MacroAssembler in the following state:
+ // [previous code]
+ // B(label);
+ // [20KB code]
+ // Tbz(label); // First tbz. Pointing to unconditional branch.
+ // [20KB code]
+ // Tbz(label); // Second tbz. Pointing to the first tbz.
+ // [more code]
+ // and this function is called to remove the first tbz from the label link
+ // chain. Since tbz has a range of +-32KB, the second tbz cannot point to
+ // the unconditional branch.
+ CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
+ UNREACHABLE();
+ }
+ }
+
+ CheckLabelLinkChain(label);
+}
+
+
void Assembler::bind(Label* label) {
// Bind label to the address at pc_. All instructions (most likely branches)
// that are linked to this label will be updated to point to the newly-bound
ASSERT(label->is_bound());
ASSERT(!label->is_linked());
+
+ DeleteUnresolvedBranchInfoForLabel(label);
}
}
+void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
+ // Branches to this label will be resolved when the label is bound, so the
+ // far-branch information recorded for them is no longer needed.
+ std::multimap<int, FarBranchInfo>::iterator it_tmp, it;
+ it = unresolved_branches_.begin();
+ while (it != unresolved_branches_.end()) {
+ it_tmp = it++;
+ if (it_tmp->second.label_ == label) {
+ CHECK(it_tmp->first >= pc_offset());
+ unresolved_branches_.erase(it_tmp);
+ }
+ }
+}
+
+
void Assembler::StartBlockConstPool() {
if (const_pool_blocked_nesting_++ == 0) {
// Prevent constant pool checks happening by setting the next check to
instr->preceding()->Rt() == xzr.code());
#endif
- // Crash by branching to 0. lr now points near the fault.
- // TODO(all): update the simulator to trap this pattern.
+ // We must generate only one instruction.
Emit(BLR | Rn(xzr));
}
#define V8_A64_ASSEMBLER_A64_H_
#include <list>
+#include <map>
#include "globals.h"
#include "utils.h"
// of m. m must be a power of 2 (>= 4).
void Align(int m);
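+ // Mark a point in the code that should never be reached: emits a debug
+ // breakpoint under the simulator, otherwise a branch to 0 (BLR xzr) so that
+ // lr points near the fault.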
+ inline void Unreachable();
+
// Label --------------------------------------------------------------------
// Bind a label to the current pc. Note that labels can only be bound once,
// and if labels are linked to other instructions, they _must_ be bound
static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
const CPURegister& rt, const CPURegister& rt2);
+ // Remove the specified branch from the unbound label link chain.
+ // If available, a veneer for this label can be used for other branches in the
+ // chain if the link chain cannot be fixed up without this branch.
+ void RemoveBranchFromLabelLinkChain(Instruction* branch,
+ Label* label,
+ Instruction* label_veneer = NULL);
private:
// Instruction helpers.
// stream.
static const int kGap = 128;
+ public:
+ class FarBranchInfo {
+ public:
+ FarBranchInfo(int offset, Label* label)
+ : pc_offset_(offset), label_(label) {}
+ // Offset of the branch in the code generation buffer.
+ int pc_offset_;
+ // The label branched to.
+ Label* label_;
+ };
+
+ protected:
+ // Information about unresolved (forward) branches.
+ // The Assembler is only allowed to delete out-of-date information from here
+ // after a label is bound. The MacroAssembler uses this information to
+ // generate veneers.
+ //
+ // The first member of each pair is the maximum offset that the branch can
+ // reach in the buffer; the second member describes the unresolved branch
+ // itself. The map is sorted by this reachable offset, making it easy to check
+ // when veneers need to be emitted.
+ // Note that the maximum reachable offset (the first member of each pair)
+ // should always be positive, but it has the same type as the return value of
+ // pc_offset() for convenience.
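+ // For example (illustrative values only), an unresolved tbz emitted at
+ // pc offset 0x1000 would be recorded as the pair
+ // { 0x1000 + Instruction::ImmBranchRange(TestBranchType),
+ //   FarBranchInfo(0x1000, label) }.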
+ std::multimap<int, FarBranchInfo> unresolved_branches_;
+
+ private:
+ // Remove the stale information for a label from unresolved_branches_ when the
+ // label is bound. Note that if a veneer is emitted for a branch instruction,
+ // that instruction must also be removed from the associated label's link
+ // chain (see RemoveBranchFromLabelLinkChain) so that the assembler does not
+ // later attempt (likely unsuccessfully) to patch it to branch directly to the
+ // label.
+ void DeleteUnresolvedBranchInfoForLabel(Label* label);
+
private:
// TODO(jbramley): VIXL uses next_literal_pool_check_ and
// literal_pool_monitor_ to determine when to consider emitting a literal
}
+bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
+ int32_t offset) {
+ return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
+}
+
+
+bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
+ int offset = target - this;
+ return IsValidImmPCOffset(BranchType(), offset);
+}
+
+
void Instruction::SetImmPCOffsetTarget(Instruction* target) {
if (IsPCRelAddressing()) {
SetPCRelImmTarget(target);
}
}
+ static int ImmBranchRangeBitwidth(ImmBranchType branch_type) {
+ switch (branch_type) {
+ case UncondBranchType:
+ return ImmUncondBranch_width;
+ case CondBranchType:
+ return ImmCondBranch_width;
+ case CompareBranchType:
+ return ImmCmpBranch_width;
+ case TestBranchType:
+ return ImmTestBranch_width;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+ }
+
+ // The range of the branch instruction, expressed as 'instr +- range'.
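+ // A worked example (assuming ImmTestBranch_width is 14 and
+ // kInstructionSizeLog2 is 2): for TestBranchType this evaluates to
+ // (1 << (14 + 2)) / 2 - 4 = 32764 bytes, i.e. roughly the +-32KB tbz range
+ // mentioned in RemoveBranchFromLabelLinkChain.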
+ static int32_t ImmBranchRange(ImmBranchType branch_type) {
+ return
+ (1 << (ImmBranchRangeBitwidth(branch_type) + kInstructionSizeLog2)) / 2 -
+ kInstructionSize;
+ }
+
int ImmBranch() const {
switch (BranchType()) {
case CondBranchType: return ImmCondBranch();
// PC-relative addressing instruction.
Instruction* ImmPCOffsetTarget();
+ static bool IsValidImmPCOffset(ImmBranchType branch_type, int32_t offset);
+ bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
void SetImmPCOffsetTarget(Instruction* target);
void MacroAssembler::B(Label* label) {
b(label);
+ CheckVeneers(false);
}
}
-void MacroAssembler::B(Label* label, Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT((cond != al) && (cond != nv));
- b(label, cond);
-}
-
-
void MacroAssembler::Bfi(const Register& rd,
const Register& rn,
unsigned lsb,
}
-void MacroAssembler::Cbnz(const Register& rt, Label* label) {
- ASSERT(allow_macro_instructions_);
- cbnz(rt, label);
-}
-
-
-void MacroAssembler::Cbz(const Register& rt, Label* label) {
- ASSERT(allow_macro_instructions_);
- cbz(rt, label);
-}
-
-
void MacroAssembler::Cinc(const Register& rd,
const Register& rn,
Condition cond) {
ASSERT(allow_macro_instructions_);
ASSERT(!xn.IsZero());
ret(xn);
+ CheckVeneers(false);
}
}
-void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
- ASSERT(allow_macro_instructions_);
- tbnz(rt, bit_pos, label);
-}
-
-
-void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
- ASSERT(allow_macro_instructions_);
- tbz(rt, bit_pos, label);
-}
-
-
void MacroAssembler::Ubfiz(const Register& rd,
const Register& rn,
unsigned lsb,
}
-void MacroAssembler::Unreachable() {
- ASSERT(allow_macro_instructions_);
- hlt(kImmExceptionIsUnreachable);
-}
-
-
void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
ASSERT(allow_macro_instructions_);
ASSERT(!rd.IsZero());
}
+bool MacroAssembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
+ // Account for the branch around the veneers and the guard.
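+ // Also conservatively reserve kMaxVeneerCodeSize bytes for each unresolved
+ // branch, on top of the requested margin. As an illustration (the branch
+ // count is only an example): with margin = 2KB and three unresolved branches,
+ // a veneer is emitted once pc_offset() comes within 2KB + 8 + 3 * 4 bytes of
+ // max_reachable_pc.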
+ int protection_offset = 2 * kInstructionSize;
+ return pc_offset() > max_reachable_pc - margin - protection_offset -
+ static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
+}
+
+
+void MacroAssembler::EmitVeneers(bool need_protection) {
+ RecordComment("[ Veneers");
+
+ Label end;
+ if (need_protection) {
+ B(&end);
+ }
+
+ EmitVeneersGuard();
+
+ {
+ InstructionAccurateScope scope(this);
+ Label size_check;
+
+ std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;
+
+ it = unresolved_branches_.begin();
+ while (it != unresolved_branches_.end()) {
+ if (ShouldEmitVeneer(it->first)) {
+ Instruction* branch = InstructionAt(it->second.pc_offset_);
+ Label* label = it->second.label_;
+
+#ifdef DEBUG
+ __ bind(&size_check);
+#endif
+ // Patch the branch to point to the current position, and emit a branch
+ // to the label.
+ Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
+ RemoveBranchFromLabelLinkChain(branch, label, veneer);
+ branch->SetImmPCOffsetTarget(veneer);
+ b(label);
+#ifdef DEBUG
+ ASSERT(SizeOfCodeGeneratedSince(&size_check) <= kMaxVeneerCodeSize);
+ size_check.Unuse();
+#endif
+
+ it_to_delete = it++;
+ unresolved_branches_.erase(it_to_delete);
+ } else {
+ ++it;
+ }
+ }
+ }
+
+ Bind(&end);
+
+ RecordComment("]");
+}
+
+
+void MacroAssembler::EmitVeneersGuard() {
+ if (emit_debug_code()) {
+ Unreachable();
+ }
+}
+
+
+void MacroAssembler::CheckVeneers(bool need_protection) {
+ if (unresolved_branches_.empty()) {
+ return;
+ }
+
+ CHECK(pc_offset() < unresolved_branches_first_limit());
+ int margin = kVeneerDistanceMargin;
+ if (!need_protection) {
+ // Prefer emitting veneers protected by an existing instruction.
+ // The 4 divisor is a finger in the air guess. With a default margin of 2KB,
+ // that leaves 512B = 128 instructions of extra margin to avoid requiring a
+ // protective branch.
+ margin += margin / 4;
+ }
+ if (ShouldEmitVeneer(unresolved_branches_first_limit(), margin)) {
+ EmitVeneers(need_protection);
+ }
+}
+
+
+bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
+ Label *label, ImmBranchType b_type) {
+ bool need_longer_range = false;
+ // There are two situations in which we care about the offset being out of
+ // range:
+ // - The label is bound but too far away.
+ // - The label is not bound but linked, and the previous branch
+ // instruction in the chain is too far away.
+ if (label->is_bound() || label->is_linked()) {
+ need_longer_range =
+ !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
+ }
+ if (!need_longer_range && !label->is_bound()) {
+ int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
+ unresolved_branches_.insert(
+ std::pair<int, FarBranchInfo>(max_reachable_pc,
+ FarBranchInfo(pc_offset(), label)));
+ }
+ return need_longer_range;
+}
+
+
+void MacroAssembler::B(Label* label, Condition cond) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT((cond != al) && (cond != nv));
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
+
+ if (need_extra_instructions) {
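+ // The label (or the previous link in its chain) cannot be reached by a
+ // conditional branch (about +-1MB of range), so branch over an unconditional
+ // branch (+-128MB of range) to the label instead.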
+ b(&done, InvertCondition(cond));
+ b(label);
+ } else {
+ b(label, cond);
+ }
+ CheckVeneers(!need_extra_instructions);
+ bind(&done);
+}
+
+
+void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
+
+ if (need_extra_instructions) {
+ tbz(rt, bit_pos, &done);
+ b(label);
+ } else {
+ tbnz(rt, bit_pos, label);
+ }
+ CheckVeneers(!need_extra_instructions);
+ bind(&done);
+}
+
+
+void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
+
+ if (need_extra_instructions) {
+ tbnz(rt, bit_pos, &done);
+ b(label);
+ } else {
+ tbz(rt, bit_pos, label);
+ }
+ CheckVeneers(!need_extra_instructions);
+ bind(&done);
+}
+
+
+void MacroAssembler::Cbnz(const Register& rt, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
+
+ if (need_extra_instructions) {
+ cbz(rt, &done);
+ b(label);
+ } else {
+ cbnz(rt, label);
+ }
+ CheckVeneers(!need_extra_instructions);
+ bind(&done);
+}
+
+
+void MacroAssembler::Cbz(const Register& rt, Label* label) {
+ ASSERT(allow_macro_instructions_);
+
+ Label done;
+ bool need_extra_instructions =
+ NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
+
+ if (need_extra_instructions) {
+ cbnz(rt, &done);
+ b(label);
+ } else {
+ cbz(rt, label);
+ }
+ CheckVeneers(!need_extra_instructions);
+ bind(&done);
+}
+
+
// Pseudo-instructions.
inline void Asr(const Register& rd, const Register& rn, const Register& rm);
inline void B(Label* label);
inline void B(Condition cond, Label* label);
- inline void B(Label* label, Condition cond);
+ void B(Label* label, Condition cond);
inline void Bfi(const Register& rd,
const Register& rn,
unsigned lsb,
inline void Blr(const Register& xn);
inline void Br(const Register& xn);
inline void Brk(int code);
- inline void Cbnz(const Register& rt, Label* label);
- inline void Cbz(const Register& rt, Label* label);
+ void Cbnz(const Register& rt, Label* label);
+ void Cbz(const Register& rt, Label* label);
inline void Cinc(const Register& rd, const Register& rn, Condition cond);
inline void Cinv(const Register& rd, const Register& rn, Condition cond);
inline void Cls(const Register& rd, const Register& rn);
inline void Sxtb(const Register& rd, const Register& rn);
inline void Sxth(const Register& rd, const Register& rn);
inline void Sxtw(const Register& rd, const Register& rn);
- inline void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
- inline void Tbz(const Register& rt, unsigned bit_pos, Label* label);
+ void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
+ void Tbz(const Register& rt, unsigned bit_pos, Label* label);
inline void Ubfiz(const Register& rd,
const Register& rn,
unsigned lsb,
const Register& rn,
const Register& rm,
const Register& ra);
- inline void Unreachable();
inline void Uxtb(const Register& rd, const Register& rn);
inline void Uxth(const Register& rd, const Register& rn);
inline void Uxtw(const Register& rd, const Register& rn);
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2);
+
+ public:
+ // Far branches resolving.
+ //
+ // The various classes of branch instructions with immediate offsets have
+ // different ranges. While the Assembler will fail to assemble a branch
+ // exceeding its range, the MacroAssembler offers a mechanism to resolve
+ // branches to too distant targets, either by tweaking the generated code to
+ // use branch instructions with wider ranges or generating veneers.
+ //
+ // Currently branches to distant targets are resolved using unconditional
+ // branch instructions with a range of +-128MB. If that becomes too little
+ // (!), the mechanism can be extended to generate special veneers for really
+ // far targets.
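+ //
+ // For example (a sketch of the first strategy), a too-distant
+ //   Tbz(rt, bit_pos, label);
+ // is emitted by the MacroAssembler as:
+ //   tbnz(rt, bit_pos, &done);  // Inverted test that skips the branch below.
+ //   b(label);                  // Unconditional branch, +-128MB range.
+ //   bind(&done);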
+
+ // Returns true if we should emit a veneer as soon as possible for a branch
+ // which can at most reach the specified pc.
+ bool ShouldEmitVeneer(int max_reachable_pc,
+ int margin = kVeneerDistanceMargin);
+
+ // The maximum code size generated for a veneer. Currently one branch
+ // instruction. This is for code size checking purposes, and can be extended
+ // in the future for example if we decide to add nops between the veneers.
+ static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
+
+ // Emits veneers for branches that are approaching their maximum range.
+ // If need_protection is true, the veneers are protected by a branch jumping
+ // over the code.
+ void EmitVeneers(bool need_protection);
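+ // Emits an Unreachable() marker in front of the veneers when
+ // emit_debug_code() is set, so that accidentally falling through into the
+ // veneer pool is caught; otherwise emits nothing.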
+ void EmitVeneersGuard();
+ // Checks whether veneers need to be emitted at this point.
+ void CheckVeneers(bool need_protection);
+
+ // Helps resolve branching to labels potentially out of range.
+ // If the label is not bound, it registers the information necessary to later
+ // be able to emit a veneer for this branch if necessary.
+ // If the label is bound or linked, it returns true if the label (or the
+ // previous link in the label chain) is out of range, in which case the caller
+ // is responsible for generating appropriate code.
+ // Otherwise it returns false.
+ // The caller is then expected to check whether veneers need to be emitted
+ // (see CheckVeneers).
+ bool NeedExtraInstructionsOrRegisterBranch(Label *label,
+ ImmBranchType branch_type);
+
+ private:
+ // We generate a veneer for a branch if we come within this distance of the
+ // limit of its range.
+ static const int kVeneerDistanceMargin = 2 * KB;
+ int unresolved_branches_first_limit() const {
+ ASSERT(!unresolved_branches_.empty());
+ return unresolved_branches_.begin()->first;
+ }
};
// emitted is what you specified when creating the scope.
class InstructionAccurateScope BASE_EMBEDDED {
public:
- explicit InstructionAccurateScope(MacroAssembler* masm)
- : masm_(masm), size_(0) {
- masm_->StartBlockConstPool();
-#ifdef DEBUG
- previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
- masm_->set_allow_macro_instructions(false);
-#endif
- }
-
- InstructionAccurateScope(MacroAssembler* masm, size_t count)
+ InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
: masm_(masm), size_(count * kInstructionSize) {
masm_->StartBlockConstPool();
#ifdef DEBUG
- masm_->bind(&start_);
+ if (count != 0) {
+ masm_->bind(&start_);
+ }
previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
masm_->set_allow_macro_instructions(false);
#endif
}
+TEST(far_branch_backward) {
+ INIT_V8();
+
+ // Test that the MacroAssembler correctly resolves backward branches to labels
+ // that are outside the immediate range of branch instructions.
+ int max_range =
+ std::max(Instruction::ImmBranchRange(TestBranchType),
+ std::max(Instruction::ImmBranchRange(CompareBranchType),
+ Instruction::ImmBranchRange(CondBranchType)));
+
+ SETUP_SIZE(max_range + 1000 * kInstructionSize);
+
+ START();
+
+ Label done, fail;
+ Label test_tbz, test_cbz, test_bcond;
+ Label success_tbz, success_cbz, success_bcond;
+
+ __ Mov(x0, 0);
+ __ Mov(x1, 1);
+ __ Mov(x10, 0);
+
+ __ B(&test_tbz);
+ __ Bind(&success_tbz);
+ __ Orr(x0, x0, 1 << 0);
+ __ B(&test_cbz);
+ __ Bind(&success_cbz);
+ __ Orr(x0, x0, 1 << 1);
+ __ B(&test_bcond);
+ __ Bind(&success_bcond);
+ __ Orr(x0, x0, 1 << 2);
+
+ __ B(&done);
+
+ // Generate enough code to overflow the immediate range of the three types of
+ // branches below.
+ for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+ if (i % 100 == 0) {
+ // If we do land in this code, we do not want to execute so many nops
+ // before reaching the end of the test (especially if tracing is activated).
+ __ B(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+ __ B(&fail);
+
+ __ Bind(&test_tbz);
+ __ Tbz(x10, 7, &success_tbz);
+ __ Bind(&test_cbz);
+ __ Cbz(x10, &success_cbz);
+ __ Bind(&test_bcond);
+ __ Cmp(x10, 0);
+ __ B(eq, &success_bcond);
+
+ // For each out-of-range branch instruction, at least two instructions should
+ // have been generated.
+ CHECK_GE(7 * kInstructionSize, __ SizeOfCodeGeneratedSince(&test_tbz));
+
+ __ Bind(&fail);
+ __ Mov(x1, 0);
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x7, x0);
+ ASSERT_EQUAL_64(0x1, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(far_branch_simple_veneer) {
+ INIT_V8();
+
+ // Test that the MacroAssembler correctly emits veneers for forward branches
+ // to labels that are outside the immediate range of branch instructions.
+ int max_range =
+ std::max(Instruction::ImmBranchRange(TestBranchType),
+ std::max(Instruction::ImmBranchRange(CompareBranchType),
+ Instruction::ImmBranchRange(CondBranchType)));
+
+ SETUP_SIZE(max_range + 1000 * kInstructionSize);
+
+ START();
+
+ Label done, fail;
+ Label test_tbz, test_cbz, test_bcond;
+ Label success_tbz, success_cbz, success_bcond;
+
+ __ Mov(x0, 0);
+ __ Mov(x1, 1);
+ __ Mov(x10, 0);
+
+ __ Bind(&test_tbz);
+ __ Tbz(x10, 7, &success_tbz);
+ __ Bind(&test_cbz);
+ __ Cbz(x10, &success_cbz);
+ __ Bind(&test_bcond);
+ __ Cmp(x10, 0);
+ __ B(eq, &success_bcond);
+
+ // Generate enough code to overflow the immediate range of the three types of
+ // branches above.
+ for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+ if (i % 100 == 0) {
+ // If we do land in this code, we do not want to execute so many nops
+ // before reaching the end of the test (especially if tracing is activated).
+ // Also, the branches give the MacroAssembler the opportunity to emit the
+ // veneers.
+ __ B(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+ __ B(&fail);
+
+ __ Bind(&success_tbz);
+ __ Orr(x0, x0, 1 << 0);
+ __ B(&test_cbz);
+ __ Bind(&success_cbz);
+ __ Orr(x0, x0, 1 << 1);
+ __ B(&test_bcond);
+ __ Bind(&success_bcond);
+ __ Orr(x0, x0, 1 << 2);
+
+ __ B(&done);
+ __ Bind(&fail);
+ __ Mov(x1, 0);
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x7, x0);
+ ASSERT_EQUAL_64(0x1, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(far_branch_veneer_link_chain) {
+ INIT_V8();
+
+ // Test that the MacroAssembler correctly emits veneers for forward branches
+ // that target out-of-range labels and are part of a chain of multiple
+ // branches to the same label.
+ //
+ // We test the three situations with the different types of instruction:
+ // (1) When the branch is at the start of the chain, with tbz.
+ // (2) When the branch is in the middle of the chain, with cbz.
+ // (3) When the branch is at the end of the chain, with bcond.
+ int max_range =
+ std::max(Instruction::ImmBranchRange(TestBranchType),
+ std::max(Instruction::ImmBranchRange(CompareBranchType),
+ Instruction::ImmBranchRange(CondBranchType)));
+
+ SETUP_SIZE(max_range + 1000 * kInstructionSize);
+
+ START();
+
+ Label skip, fail, done;
+ Label test_tbz, test_cbz, test_bcond;
+ Label success_tbz, success_cbz, success_bcond;
+
+ __ Mov(x0, 0);
+ __ Mov(x1, 1);
+ __ Mov(x10, 0);
+
+ __ B(&skip);
+ // Branches at the start of the chain for situations (2) and (3).
+ __ B(&success_cbz);
+ __ B(&success_bcond);
+ __ Nop();
+ __ B(&success_bcond);
+ __ B(&success_cbz);
+ __ Bind(&skip);
+
+ __ Bind(&test_tbz);
+ __ Tbz(x10, 7, &success_tbz);
+ __ Bind(&test_cbz);
+ __ Cbz(x10, &success_cbz);
+ __ Bind(&test_bcond);
+ __ Cmp(x10, 0);
+ __ B(eq, &success_bcond);
+
+ skip.Unuse();
+ __ B(&skip);
+ // Branches at the end of the chain for situations (1) and (2).
+ __ B(&success_cbz);
+ __ B(&success_tbz);
+ __ Nop();
+ __ B(&success_tbz);
+ __ B(&success_cbz);
+ __ Bind(&skip);
+
+ // Generate enough code to overflow the immediate range of the three types of
+ // branches above.
+ for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
+ if (i % 100 == 0) {
+ // If we do land in this code, we do not want to execute so many nops
+ // before reaching the end of the test (especially if tracing is activated).
+ // Also, the branches give the MacroAssembler the opportunity to emit the
+ // veneers.
+ __ B(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+ __ B(&fail);
+
+ __ Bind(&success_tbz);
+ __ Orr(x0, x0, 1 << 0);
+ __ B(&test_cbz);
+ __ Bind(&success_cbz);
+ __ Orr(x0, x0, 1 << 1);
+ __ B(&test_bcond);
+ __ Bind(&success_bcond);
+ __ Orr(x0, x0, 1 << 2);
+
+ __ B(&done);
+ __ Bind(&fail);
+ __ Mov(x1, 0);
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x7, x0);
+ ASSERT_EQUAL_64(0x1, x1);
+
+ TEARDOWN();
+}
+
+
+TEST(far_branch_veneer_broken_link_chain) {
+ INIT_V8();
+
+ // Check that the MacroAssembler correctly handles the case where a branch is
+ // removed from the link chain of a label but the two links on either side of
+ // the removed branch cannot reach each other (they are out of range).
+ //
+ // We test with tbz because it has a small range.
+ int max_range = Instruction::ImmBranchRange(TestBranchType);
+ int inter_range = max_range / 2 + max_range / 10;
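+ // inter_range is roughly 0.6 * max_range: each branch in the chain can reach
+ // its neighbour, but skipping over a removed middle branch (about
+ // 1.2 * max_range) is out of range.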
+
+ SETUP_SIZE(3 * inter_range + 1000 * kInstructionSize);
+
+ START();
+
+ Label skip, fail, done;
+ Label test_1, test_2, test_3;
+ Label far_target;
+
+ __ Mov(x0, 0); // Indicates the origin of the branch.
+ __ Mov(x1, 1);
+ __ Mov(x10, 0);
+
+ // First instruction in the label chain.
+ __ Bind(&test_1);
+ __ Mov(x0, 1);
+ __ B(&far_target);
+
+ for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
+ if (i % 100 == 0) {
+ // Do not allow generating veneers. They should not be needed.
+ __ b(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+
+ // Will need a veneer to reach the target.
+ __ Bind(&test_2);
+ __ Mov(x0, 2);
+ __ Tbz(x10, 7, &far_target);
+
+ for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
+ if (i % 100 == 0) {
+ // Do not allow generating veneers. They should not be needed.
+ __ b(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+
+ // Does not need a veneer to reach the target, but the initial branch
+ // instruction in the chain (the B at test_1) is out of range of this tbz.
+ __ Bind(&test_3);
+ __ Mov(x0, 3);
+ __ Tbz(x10, 7, &far_target);
+
+ for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
+ if (i % 100 == 0) {
+ // Allow generating veneers.
+ __ B(&fail);
+ } else {
+ __ Nop();
+ }
+ }
+
+ __ B(&fail);
+
+ __ Bind(&far_target);
+ __ Cmp(x0, 1);
+ __ B(eq, &test_2);
+ __ Cmp(x0, 2);
+ __ B(eq, &test_3);
+
+ __ B(&done);
+ __ Bind(&fail);
+ __ Mov(x1, 0);
+ __ Bind(&done);
+
+ END();
+
+ RUN();
+
+ ASSERT_EQUAL_64(0x3, x0);
+ ASSERT_EQUAL_64(0x1, x1);
+
+ TEARDOWN();
+}
+
+
TEST(ldr_str_offset) {
INIT_V8();
SETUP();
# BUG(v8:3147). It works on other architectures by accident.
'regress/regress-conditional-position': [FAIL],
- # BUG(v8:3148): Invalid branch instruction emitted.
- 'debug-references': [PASS, ['mode == debug', SKIP]],
- 'mirror-array': [PASS, ['mode == debug', SKIP]],
-
# BUG(v8:3156): Fails on gc stress bots.
'compiler/concurrent-invalidate-transition-map': [PASS, ['gc_stress == True', FAIL]],
}], # 'arch == a64'
['arch == a64', {
- # BUG(v8:3148): Invalid branch instruction emitted.
- 'ecma/Date/15.9.5.26-1': [SKIP],
- 'js1_5/extensions/regress-396326': [SKIP],
- 'js1_5/Regress/regress-80981': [SKIP],
- 'ecma/Date/15.9.5.28-1': [PASS, ['mode == debug', SKIP]],
-
# BUG(v8:3152): Runs out of stack in debug mode.
'js1_5/extensions/regress-355497': [FAIL_OK, ['mode == debug', SKIP]],
}], # 'arch == a64'