// RelocInfo.
void RelocInfo::apply(intptr_t delta) {
- // On MIPS we do not use pc relative addressing, so we don't need to patch the
- // code here.
+ if (IsInternalReference(rmode_)) {
+ // Absolute code pointer inside code object moves with the code object.
+ byte* p = reinterpret_cast<byte*>(pc_);
+ int count = Assembler::RelocateInternalReference(p, delta);
+ CPU::FlushICache(p, count * sizeof(uint32_t));
+ }
}
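For illustration, a standalone sketch (not the V8 API; the mask and shift are local stand-ins mirroring the constants-mips.h values used in this patch) of what applying a move delta to an absolute target held in a lui/ori pair amounts to — rebuild the 32-bit immediate, shift it, and resplit it so halfword carry is handled:

  #include <cassert>
  #include <cstdint>

  const uint32_t kImm16Mask = 0xffff;
  const int kLuiShift = 16;

  // Rebuild the absolute 32-bit target held by a lui/ori pair, move it by
  // the code-object delta, and split it back into the two halfwords.
  void RebaseLuiOri(uint32_t* instr_lui, uint32_t* instr_ori, intptr_t delta) {
    uint32_t imm = ((*instr_lui & kImm16Mask) << kLuiShift) |
                   (*instr_ori & kImm16Mask);
    imm += static_cast<uint32_t>(delta);
    *instr_lui = (*instr_lui & ~kImm16Mask) | ((imm >> kLuiShift) & kImm16Mask);
    *instr_ori = (*instr_ori & ~kImm16Mask) | (imm & kImm16Mask);
  }

  int main() {
    uint32_t lui = 0x3c011234u;  // lui at, 0x1234
    uint32_t ori = 0x34215678u;  // ori at, at, 0x5678
    RebaseLuiOri(&lui, &ori, 0x1000);
    assert((lui & kImm16Mask) == 0x1234u && (ori & kImm16Mask) == 0x6678u);
    return 0;
  }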
void Assembler::emit(Instr x) {
- CheckBuffer();
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
CheckTrampolinePoolQuick();
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.
-const int RelocInfo::kApplyMask = 0;
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
bool RelocInfo::IsCodedSpecially() {
last_trampoline_pool_end_ = 0;
no_trampoline_pool_before_ = 0;
trampoline_pool_blocked_nesting_ = 0;
- next_buffer_check_ = kMaxBranchOffset - kTrampolineSize;
+ // We leave space (16 * kTrampolineSlotsSize) for the
+ // BlockTrampolinePoolScope buffer.
+ next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
internal_trampoline_exception_ = false;
last_bound_pos_ = 0;
+ trampoline_emitted_ = false;
+ unbound_labels_count_ = 0;
+ block_buffer_growth_ = false;
+
ast_id_for_reloc_info_ = kNoASTId;
}
}
+uint32_t Assembler::GetFunction(Instr instr) {
+ return (instr & kFunctionFieldMask) >> kFunctionShift;
+}
+
+
+uint32_t Assembler::GetFunctionField(Instr instr) {
+ return instr & kFunctionFieldMask;
+}
+
+
uint32_t Assembler::GetImmediate16(Instr instr) {
return instr & kImm16Mask;
}
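As a standalone illustration of these accessors (constants redefined locally; the encodings are standard MIPS32, so the accessor simply masks out the field):

  #include <cassert>
  #include <cstdint>

  const uint32_t kFunctionFieldMask = 0x3f;  // Low 6 bits of an R-type instr.
  const int kFunctionShift = 0;
  const uint32_t kImm16Mask = 0xffff;        // Low halfword of an I-type instr.

  uint32_t GetFunctionField(uint32_t instr) { return instr & kFunctionFieldMask; }
  uint32_t GetFunction(uint32_t instr) {
    return (instr & kFunctionFieldMask) >> kFunctionShift;
  }
  uint32_t GetImmediate16(uint32_t instr) { return instr & kImm16Mask; }

  int main() {
    assert(GetFunction(0x03e00008u) == 0x08u);       // jr ra: function == JR.
    assert(GetFunctionField(0x03e00008u) == 0x08u);  // Same field, unshifted.
    assert(GetImmediate16(0x24840004u) == 4u);       // addiu a0, a0, 4.
    return 0;
  }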
// code is converted to an 18-bit value addressing bytes, hence the -4 value.
const int kEndOfChain = -4;
+// Determines the end of the Jump chain (a subset of the label link chain).
+const int kEndOfJumpChain = 0;
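The two sentinels differ because branch links are pc-relative (-4 is an offset no real branch can decode to) while jump links hold absolute addresses (0 is the impossible value there). A minimal sketch of walking a backward-threaded label chain, with a local array standing in for target_at():

  #include <cstdio>
  #include <vector>

  const int kEndOfChain = -4;

  int main() {
    // links[pos] plays the role of target_at(pos) for three branch sites
    // emitted at byte offsets 0, 8, and 20, linked in emission order.
    std::vector<int> links(24, 0);
    links[0] = kEndOfChain;  // First branch to the label ends the chain.
    links[8] = 0;            // Second branch links back to the first.
    links[20] = 8;           // Third branch links back to the second.

    for (int pos = 20; pos != kEndOfChain; pos = links[pos])
      printf("fixup site at offset %d\n", pos);
    return 0;
  }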
bool Assembler::IsBranch(Instr instr) {
}
+bool Assembler::IsJump(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rt_field = GetRtField(instr);
+ uint32_t rd_field = GetRdField(instr);
+ uint32_t function_field = GetFunctionField(instr);
+ // Checks if the instruction is a jump.
+ return opcode == J || opcode == JAL ||
+ (opcode == SPECIAL && rt_field == 0 &&
+ ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
+}
+
+
+bool Assembler::IsJ(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is a plain j (jump) instruction.
+ return opcode == J;
+}
+
+
+bool Assembler::IsLui(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is a load upper immediate.
+ return opcode == LUI;
+}
+
+
+bool Assembler::IsOri(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ // Checks if the instruction is an or immediate (ori).
+ return opcode == ORI;
+}
+
+
bool Assembler::IsNop(Instr instr, unsigned int type) {
// See Assembler::nop(type).
ASSERT(type < 32);
return (imm18 + pos);
}
}
- // Check we have a branch instruction.
- ASSERT(IsBranch(instr));
+ // Check we have a branch or jump instruction.
+ ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
// Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
// the compiler uses arithmetic shifts for signed integers.
- int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ if (IsBranch(instr)) {
+ int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
- if (imm18 == kEndOfChain) {
- // EndOfChain sentinel is returned directly, not relative to pc or pos.
- return kEndOfChain;
+ if (imm18 == kEndOfChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ return pos + kBranchPCOffset + imm18;
+ }
+ } else if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+ ASSERT(IsOri(instr_ori));
+ int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+
+ if (imm == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ uint32_t instr_address = reinterpret_cast<uint32_t>(buffer_ + pos);
+ int32_t delta = instr_address - imm;
+ ASSERT(pos > delta);
+ return pos - delta;
+ }
} else {
- return pos + kBranchPCOffset + imm18;
+ int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+ if (imm28 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ uint32_t instr_address = reinterpret_cast<uint32_t>(buffer_ + pos);
+ instr_address &= kImm28Mask;
+ int32_t delta = instr_address - imm28;
+ ASSERT(pos > delta);
+ return pos - delta;
+ }
}
}
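A worked example of the `<< 16 >> 14` decode used in the branch case above: shifting the 16-bit offset into the top halfword and arithmetic-shifting right by 14 both sign-extends it and converts words to bytes (x4) in one step. The sketch relies on two's-complement arithmetic shifts exactly as the code above does:

  #include <cassert>
  #include <cstdint>

  int main() {
    const int32_t kImm16Mask = 0xffff;
    int32_t instr = 0x1000fffd;  // beq zero, zero, -3 (offset in words).
    int32_t imm18 = ((instr & kImm16Mask) << 16) >> 14;
    assert(imm18 == -12);        // -3 words == -12 bytes.
    return 0;
  }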
return;
}
- ASSERT(IsBranch(instr));
- int32_t imm18 = target_pos - (pos + kBranchPCOffset);
- ASSERT((imm18 & 3) == 0);
+ ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
+ if (IsBranch(instr)) {
+ int32_t imm18 = target_pos - (pos + kBranchPCOffset);
+ ASSERT((imm18 & 3) == 0);
+
+ instr &= ~kImm16Mask;
+ int32_t imm16 = imm18 >> 2;
+ ASSERT(is_int16(imm16));
+
+ instr_at_put(pos, instr | (imm16 & kImm16Mask));
+ } else if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+ ASSERT(IsOri(instr_ori));
+ uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
+ ASSERT((imm & 3) == 0);
+
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
+
+ instr_at_put(pos + 0 * Assembler::kInstrSize,
+ instr_lui | ((imm & kHiMask) >> kLuiShift));
+ instr_at_put(pos + 1 * Assembler::kInstrSize,
+ instr_ori | (imm & kImm16Mask));
+ } else {
+ uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
+ imm28 &= kImm28Mask;
+ ASSERT((imm28 & 3) == 0);
- instr &= ~kImm16Mask;
- int32_t imm16 = imm18 >> 2;
- ASSERT(is_int16(imm16));
+ instr &= ~kImm26Mask;
+ uint32_t imm26 = imm28 >> 2;
+ ASSERT(is_uint26(imm26));
- instr_at_put(pos, instr | (imm16 & kImm16Mask));
+ instr_at_put(pos, instr | (imm26 & kImm26Mask));
+ }
}
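The encode counterpart to the earlier decode sketch: a byte distance is checked for word alignment, scaled down to words, and masked into the instruction's low halfword. Constants are local stand-ins for the patch's values:

  #include <cassert>
  #include <cstdint>

  int main() {
    const int32_t kImm16Mask = 0xffff;
    int32_t instr = 0x10000000;  // beq zero, zero, <offset patched below>.
    int32_t imm18 = -12;         // Branch back 12 bytes.
    assert((imm18 & 3) == 0);    // Must be word aligned.
    int32_t imm16 = imm18 >> 2;
    instr = (instr & ~kImm16Mask) | (imm16 & kImm16Mask);
    assert(static_cast<uint32_t>(instr) == 0x1000fffdu);
    return 0;
  }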
void Assembler::bind_to(Label* L, int pos) {
ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
+ int32_t trampoline_pos = kInvalidSlotPos;
+ if (L->is_linked() && !trampoline_emitted_) {
+ unbound_labels_count_--;
+ next_buffer_check_ += kTrampolineSlotsSize;
+ }
+
while (L->is_linked()) {
int32_t fixup_pos = L->pos();
int32_t dist = pos - fixup_pos;
next(L); // Call next before overwriting link with target at fixup_pos.
- if (dist > kMaxBranchOffset) {
- do {
- int32_t trampoline_pos = get_trampoline_entry(fixup_pos);
- if (kInvalidSlotPos == trampoline_pos) {
- // Internal error.
- return;
+ Instr instr = instr_at(fixup_pos);
+ if (IsBranch(instr)) {
+ if (dist > kMaxBranchOffset) {
+ if (trampoline_pos == kInvalidSlotPos) {
+ trampoline_pos = get_trampoline_entry(fixup_pos);
+ CHECK(trampoline_pos != kInvalidSlotPos);
}
ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
target_at_put(fixup_pos, trampoline_pos);
fixup_pos = trampoline_pos;
dist = pos - fixup_pos;
- } while (dist > kMaxBranchOffset);
- } else if (dist < -kMaxBranchOffset) {
- do {
- int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false);
- if (kInvalidSlotPos == trampoline_pos) {
- // Internal error.
- return;
- }
- ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset);
- target_at_put(fixup_pos, trampoline_pos);
- fixup_pos = trampoline_pos;
- dist = pos - fixup_pos;
- } while (dist < -kMaxBranchOffset);
- };
- target_at_put(fixup_pos, pos);
+ }
+ target_at_put(fixup_pos, pos);
+ } else {
+ ASSERT(IsJ(instr) || IsLui(instr));
+ target_at_put(fixup_pos, pos);
+ }
}
L->bind_to(pos);
}
-void Assembler::link_to(Label* L, Label* appendix) {
- if (appendix->is_linked()) {
- if (L->is_linked()) {
- // Append appendix to L's list.
- int fixup_pos;
- int link = L->pos();
- do {
- fixup_pos = link;
- link = target_at(fixup_pos);
- } while (link > 0);
- ASSERT(link == kEndOfChain);
- target_at_put(fixup_pos, appendix->pos());
- } else {
- // L is empty, simply use appendix.
- *L = *appendix;
- }
- }
- appendix->Unuse(); // Appendix should not be used anymore.
-}
-
-
void Assembler::bind(Label* L) {
ASSERT(!L->is_bound()); // Label can only be bound once.
bind_to(L, pc_offset());
}
}
+bool Assembler::is_near(Label* L) {
+ if (L->is_bound()) {
+ return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
+ }
+ return false;
+}
// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
}
-// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrJump(Opcode opcode,
uint32_t address) {
BlockTrampolinePoolScope block_trampoline_pool(this);
}
-// Returns the next free label entry from the next trampoline pool.
-int32_t Assembler::get_label_entry(int32_t pos, bool next_pool) {
- int trampoline_count = trampolines_.length();
- int32_t label_entry = 0;
- ASSERT(trampoline_count > 0);
+// Returns the next free trampoline entry.
+int32_t Assembler::get_trampoline_entry(int32_t pos) {
+ int32_t trampoline_entry = kInvalidSlotPos;
- if (next_pool) {
- for (int i = 0; i < trampoline_count; i++) {
- if (trampolines_[i].start() > pos) {
- label_entry = trampolines_[i].take_label();
- break;
- }
+ if (!internal_trampoline_exception_) {
+ if (trampoline_.start() > pos) {
+ trampoline_entry = trampoline_.take_slot();
}
- } else { // Caller needs a label entry from the previous pool.
- for (int i = trampoline_count-1; i >= 0; i--) {
- if (trampolines_[i].end() < pos) {
- label_entry = trampolines_[i].take_label();
- break;
- }
+
+ if (kInvalidSlotPos == trampoline_entry) {
+ internal_trampoline_exception_ = true;
}
}
- return label_entry;
+ return trampoline_entry;
}
-// Returns the next free trampoline entry from the next trampoline pool.
-int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) {
- int trampoline_count = trampolines_.length();
- int32_t trampoline_entry = kInvalidSlotPos;
- ASSERT(trampoline_count > 0);
+uint32_t Assembler::jump_address(Label* L) {
+ int32_t target_pos;
- if (!internal_trampoline_exception_) {
- if (next_pool) {
- for (int i = 0; i < trampoline_count; i++) {
- if (trampolines_[i].start() > pos) {
- trampoline_entry = trampolines_[i].take_slot();
- break;
- }
- }
- } else { // Caller needs a trampoline entry from the previous pool.
- for (int i = trampoline_count-1; i >= 0; i--) {
- if (trampolines_[i].end() < pos) {
- trampoline_entry = trampolines_[i].take_slot();
- break;
- }
- }
- }
- if (kInvalidSlotPos == trampoline_entry) {
- internal_trampoline_exception_ = true;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ return kEndOfJumpChain;
}
}
- return trampoline_entry;
+
+ uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
+ ASSERT((imm & 3) == 0);
+
+ return imm;
}
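The absolute address returned here is consumed by the lui/ori emitters (Jr, Jalr, and the trampoline pool below), which split it into the two halfwords. A standalone check that the split is lossless, with local stand-ins for kHiMask/kImm16Mask/kLuiShift and a made-up address:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t kImm16Mask = 0xffff;
    const uint32_t kHiMask = 0xffff0000u;
    const int kLuiShift = 16;

    uint32_t imm32 = 0x12345678u;                  // Hypothetical code address.
    uint32_t hi = (imm32 & kHiMask) >> kLuiShift;  // Goes into lui.
    uint32_t lo = imm32 & kImm16Mask;              // Goes into ori.
    assert(((hi << kLuiShift) | lo) == imm32);     // Reassembles losslessly.
    return 0;
  }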
int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
int32_t target_pos;
- int32_t pc_offset_v = pc_offset();
if (L->is_bound()) {
target_pos = L->pos();
- int32_t dist = pc_offset_v - target_pos;
- if (dist > kMaxBranchOffset) {
- do {
- int32_t trampoline_pos = get_trampoline_entry(target_pos);
- if (kInvalidSlotPos == trampoline_pos) {
- // Internal error.
- return 0;
- }
- ASSERT((trampoline_pos - target_pos) > 0);
- ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset);
- target_at_put(trampoline_pos, target_pos);
- target_pos = trampoline_pos;
- dist = pc_offset_v - target_pos;
- } while (dist > kMaxBranchOffset);
- } else if (dist < -kMaxBranchOffset) {
- do {
- int32_t trampoline_pos = get_trampoline_entry(target_pos, false);
- if (kInvalidSlotPos == trampoline_pos) {
- // Internal error.
- return 0;
- }
- ASSERT((target_pos - trampoline_pos) > 0);
- ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset);
- target_at_put(trampoline_pos, target_pos);
- target_pos = trampoline_pos;
- dist = pc_offset_v - target_pos;
- } while (dist < -kMaxBranchOffset);
- }
} else {
if (L->is_linked()) {
- target_pos = L->pos(); // L's link.
- int32_t dist = pc_offset_v - target_pos;
- if (dist > kMaxBranchOffset) {
- do {
- int32_t label_pos = get_label_entry(target_pos);
- ASSERT((label_pos - target_pos) < kMaxBranchOffset);
- label_at_put(L, label_pos);
- target_pos = label_pos;
- dist = pc_offset_v - target_pos;
- } while (dist > kMaxBranchOffset);
- } else if (dist < -kMaxBranchOffset) {
- do {
- int32_t label_pos = get_label_entry(target_pos, false);
- ASSERT((label_pos - target_pos) > -kMaxBranchOffset);
- label_at_put(L, label_pos);
- target_pos = label_pos;
- dist = pc_offset_v - target_pos;
- } while (dist < -kMaxBranchOffset);
- }
+ target_pos = L->pos();
L->link_to(pc_offset());
} else {
L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
return kEndOfChain;
}
}
} else {
target_pos = kEndOfChain;
instr_at_put(at_offset, 0);
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
}
L->link_to(at_offset);
}
}
+int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
+ Instr instr = instr_at(pc);
+ ASSERT(IsJ(instr) || IsLui(instr));
+ if (IsLui(instr)) {
+ Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
+ Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
+ ASSERT(IsOri(instr_ori));
+ int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ if (imm == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ imm += pc_delta;
+ ASSERT((imm & 3) == 0);
+
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
+
+ instr_at_put(pc + 0 * Assembler::kInstrSize,
+ instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+ instr_at_put(pc + 1 * Assembler::kInstrSize,
+ instr_ori | (imm & kImm16Mask));
+ return 2; // Number of instructions patched.
+ } else {
+ uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+ if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ imm28 += pc_delta;
+ imm28 &= kImm28Mask;
+ ASSERT((imm28 & 3) == 0);
+
+ instr &= ~kImm26Mask;
+ uint32_t imm26 = imm28 >> 2;
+ ASSERT(is_uint26(imm26));
+
+ instr_at_put(pc, instr | (imm26 & kImm26Mask));
+ return 1; // Number of instructions patched.
+ }
+}
+
+
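A sketch of the j-instruction patching arithmetic in the else branch above: the 26-bit field addresses words, so decoded targets are 28-bit byte offsets within the current 256 MB region, and kImm28Mask keeps the moved target inside that region. Masks are redefined locally:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t kImm26Mask = (1u << 26) - 1;
    const uint32_t kImm28Mask = (1u << 28) - 1;

    uint32_t instr = 0x08000000u | 0x100u;         // j with word index 0x100.
    uint32_t imm28 = (instr & kImm26Mask) << 2;    // Byte offset: 0x400.
    imm28 = (imm28 + 0x40) & kImm28Mask;           // Move code by 0x40 bytes.

    instr = (instr & ~kImm26Mask) | (imm28 >> 2);  // Re-encode the field.
    assert((instr & kImm26Mask) == 0x110u);
    return 0;
  }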
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
- // On ia32 and ARM pc relative addressing is used, and we thus need to apply a
- // shift by pc_delta. But on MIPS the target address it directly loaded, so
- // we do not need to relocate here.
+ // Relocate internal references.
+ for (RelocIterator it(desc); !it.done(); it.next()) {
+ RelocInfo::Mode rmode = it.rinfo()->rmode();
+ if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
+ RelocateInternalReference(p, pc_delta);
+ }
+ }
ASSERT(!overflow());
}
}
-void Assembler::CheckTrampolinePool(bool force_emit) {
- // Calculate the offset of the next check.
- next_buffer_check_ = pc_offset() + kCheckConstInterval;
-
- int dist = pc_offset() - last_trampoline_pool_end_;
-
- if (dist <= kMaxDistBetweenPools && !force_emit) {
- return;
- }
-
+void Assembler::CheckTrampolinePool() {
// Some small sequences of instructions must not be broken up by the
// insertion of a trampoline pool; such sequences are protected by setting
// either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
return;
}
- // First we emit jump (2 instructions), then we emit trampoline pool.
- { BlockTrampolinePoolScope block_trampoline_pool(this);
- Label after_pool;
- b(&after_pool);
- nop();
-
- int pool_start = pc_offset();
- for (int i = 0; i < kSlotsPerTrampoline; i++) {
+ ASSERT(!trampoline_emitted_);
+ ASSERT(unbound_labels_count_ >= 0);
+ if (unbound_labels_count_ > 0) {
+ // First we emit a jump (2 instructions), then we emit the trampoline pool.
+ { BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label after_pool;
b(&after_pool);
nop();
+
+ int pool_start = pc_offset();
+ for (int i = 0; i < unbound_labels_count_; i++) {
+ uint32_t imm32 = jump_address(&after_pool);
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal
+ // references until associated instructions are emitted and available
+ // to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jr(at);
+ nop();
+ }
+ bind(&after_pool);
+ trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+
+ trampoline_emitted_ = true;
+ // As we are only going to emit the trampoline once, we need to prevent
+ // any further emission.
+ next_buffer_check_ = kMaxInt;
}
- for (int i = 0; i < kLabelsPerTrampoline; i++) {
- emit(0);
- }
- last_trampoline_pool_end_ = pc_offset() - kInstrSize;
- bind(&after_pool);
- trampolines_.Add(Trampoline(pool_start,
- kSlotsPerTrampoline,
- kLabelsPerTrampoline));
-
- // Since a trampoline pool was just emitted,
- // move the check offset forward by the standard interval.
- next_buffer_check_ = last_trampoline_pool_end_ + kMaxDistBetweenPools;
+ } else {
+ // The number of branches to unbound labels is zero at this point, so we
+ // can move the next buffer check to the maximum.
+ next_buffer_check_ = pc_offset() +
+ kMaxBranchOffset - kTrampolineSlotsSize * 16;
}
return;
}
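The check-point bookkeeping can be summarized in a standalone sketch. Constants mirror the patch (a slot is the 4-instruction lui/ori/jr/nop sequence; the 16-slot headroom for BlockTrampolinePoolScope is the patch's own choice): each link to an unbound label pulls the check point one slot closer, and binding a label before the pool is emitted gives the slack back.

  #include <cassert>

  int main() {
    const int kInstrSize = 4;
    const int kTrampolineSlotsSize = 4 * kInstrSize;  // lui/ori/jr/nop.
    const int kMaxBranchOffset = (1 << 17) - 1;       // 18-bit signed reach.

    int next_buffer_check = kMaxBranchOffset - kTrampolineSlotsSize * 16;
    int unbound_labels = 0;

    ++unbound_labels;                                 // Link to unbound label.
    next_buffer_check -= kTrampolineSlotsSize;
    --unbound_labels;                                 // Label bound early.
    next_buffer_check += kTrampolineSlotsSize;

    assert(unbound_labels == 0);
    assert(next_buffer_check == kMaxBranchOffset - kTrampolineSlotsSize * 16);
    return 0;
  }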
// Note: The same Label can be used for forward and backward branches
// but it may be bound only once.
void bind(Label* L); // Binds an unbound label L to current code position.
+ // Determines if Label is bound and near enough so that branch instruction
+ // can be used to reach it, instead of jump instruction.
+ bool is_near(Label* L);
// Returns the branch offset to the given label from the current code
// position. Links the label to the current position if it is still unbound.
ASSERT((o & 3) == 0); // Assert the offset is aligned.
return o >> 2;
}
+ uint32_t jump_address(Label* L);
// Puts a labels target address at the given position.
// The high 8 bits are set to zero.
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
};
+ // Class for postponing the assembly buffer growth. Typically used for
+ // sequences of instructions that must be emitted as a unit, before
+ // buffer growth (and relocation) can occur.
+ // This blocking scope is not nestable.
+ class BlockGrowBufferScope {
+ public:
+ explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockGrowBuffer();
+ }
+ ~BlockGrowBufferScope() {
+ assem_->EndBlockGrowBuffer();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
+ };
+
// Debugging.
// Mark address of the ExitJSFrame code.
// Use --code-comments to enable.
void RecordComment(const char* msg);
+ static int RelocateInternalReference(byte* pc, intptr_t pc_delta);
+
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
static bool IsBeq(Instr instr);
static bool IsBne(Instr instr);
+ static bool IsJump(Instr instr);
+ static bool IsJ(Instr instr);
+ static bool IsLui(Instr instr);
+ static bool IsOri(Instr instr);
+
static bool IsNop(Instr instr, unsigned int type);
static bool IsPop(Instr instr);
static bool IsPush(Instr instr);
static uint32_t GetSa(Instr instr);
static uint32_t GetSaField(Instr instr);
static uint32_t GetOpcodeField(Instr instr);
+ static uint32_t GetFunction(Instr instr);
+ static uint32_t GetFunctionField(Instr instr);
static uint32_t GetImmediate16(Instr instr);
static uint32_t GetLabelConst(Instr instr);
static bool IsAndImmediate(Instr instr);
- void CheckTrampolinePool(bool force_emit = false);
+ void CheckTrampolinePool();
protected:
// Relocation for a type-recording IC has the AST id added to it. This
void StartBlockTrampolinePool() {
trampoline_pool_blocked_nesting_++;
}
+
void EndBlockTrampolinePool() {
trampoline_pool_blocked_nesting_--;
}
return internal_trampoline_exception_;
}
+ bool is_trampoline_emitted() const {
+ return trampoline_emitted_;
+ }
+
+ // Temporarily block automatic assembly buffer growth.
+ void StartBlockGrowBuffer() {
+ ASSERT(!block_buffer_growth_);
+ block_buffer_growth_ = true;
+ }
+
+ void EndBlockGrowBuffer() {
+ ASSERT(block_buffer_growth_);
+ block_buffer_growth_ = false;
+ }
+
+ bool is_buffer_growth_blocked() const {
+ return block_buffer_growth_;
+ }
+
private:
// Code buffer:
// The buffer into which code and relocation info are generated.
// Keep track of the last emitted pool to guarantee a maximal distance.
int last_trampoline_pool_end_; // pc offset of the end of the last pool.
+ // Automatic growth of the assembly buffer may be blocked for some sequences.
+ bool block_buffer_growth_; // Block growth when true.
+
// Relocation information generation.
// Each relocation is encoded as a variable size value.
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
// Labels.
void print(Label* L);
void bind_to(Label* L, int pos);
- void link_to(Label* L, Label* appendix);
void next(Label* L);
// One trampoline consists of:
-// label_count * kInstrSize.
+// slot_count * kTrampolineSlotsSize bytes (lui/ori/jr/nop per slot).
class Trampoline {
public:
- Trampoline(int start, int slot_count, int label_count) {
+ Trampoline() {
+ start_ = 0;
+ next_slot_ = 0;
+ free_slot_count_ = 0;
+ end_ = 0;
+ }
+ Trampoline(int start, int slot_count) {
start_ = start;
next_slot_ = start;
free_slot_count_ = slot_count;
- next_label_ = start + slot_count * 2 * kInstrSize;
- free_label_count_ = label_count;
- end_ = next_label_ + (label_count - 1) * kInstrSize;
+ end_ = start + slot_count * kTrampolineSlotsSize;
}
int start() {
return start_;
} else {
trampoline_slot = next_slot_;
free_slot_count_--;
- next_slot_ += 2*kInstrSize;
+ next_slot_ += kTrampolineSlotsSize;
}
return trampoline_slot;
}
- int take_label() {
- int label_pos = next_label_;
- ASSERT(free_label_count_ > 0);
- free_label_count_--;
- next_label_ += kInstrSize;
- return label_pos;
- }
-
private:
int start_;
int end_;
int next_slot_;
int free_slot_count_;
- int next_label_;
- int free_label_count_;
};
- int32_t get_label_entry(int32_t pos, bool next_pool = true);
- int32_t get_trampoline_entry(int32_t pos, bool next_pool = true);
-
- static const int kSlotsPerTrampoline = 2304;
- static const int kLabelsPerTrampoline = 8;
- static const int kTrampolineInst =
- 2 * kSlotsPerTrampoline + kLabelsPerTrampoline;
- static const int kTrampolineSize = kTrampolineInst * kInstrSize;
+ int32_t get_trampoline_entry(int32_t pos);
+ int unbound_labels_count_;
+ // If the trampoline is emitted, the generated code is becoming large. As
+ // this is already a slow case which can possibly break our code generation
+ // for the extreme case, we use this information to trigger a different mode
+ // of branch instruction generation, where we use jump instructions rather
+ // than regular branch instructions.
+ bool trampoline_emitted_;
+ static const int kTrampolineSlotsSize = 4 * kInstrSize;
static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
- static const int kMaxDistBetweenPools =
- kMaxBranchOffset - 2 * kTrampolineSize;
static const int kInvalidSlotPos = -1;
- List<Trampoline> trampolines_;
+ Trampoline trampoline_;
bool internal_trampoline_exception_;
friend class RegExpMacroAssemblerMIPS;
// Handle construction of an empty array of a certain size. Bail out if size
// is too large to actually allocate an elements array.
ASSERT(kSmiTag == 0);
- __ Branch(call_generic_code, ge, a2,
+ __ Branch(call_generic_code, Ugreater_equal, a2,
Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));
// a0: argc
static const int kImm16Bits = 16;
static const int kImm26Shift = 0;
static const int kImm26Bits = 26;
+static const int kImm28Shift = 0;
+static const int kImm28Bits = 28;
static const int kFsShift = 11;
static const int kFsBits = 5;
static const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
static const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
static const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
+static const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
static const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;
static const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift;
static const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
+bool MacroAssembler::UseAbsoluteCodePointers() {
+ return is_trampoline_emitted();
+}
+
+
void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
+ BranchShort(offset, bdslot);
+}
+
+
+void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BranchShort(offset, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Jr(L, bdslot);
+ } else {
+ BranchShort(L, bdslot);
+ }
+}
+
+
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jr(L, bdslot);
+ bind(&skip);
+ } else {
+ BranchShort(L, cond, rs, rt, bdslot);
+ }
+}
+
+
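The conditional long form above replaces one far conditional branch with a short branch on the negated condition that skips an absolute jump. A standalone sketch of the selection policy that drives it (the reach margin mirrors is_near(); treat the exact margin as an assumption of this sketch):

  #include <cstdint>

  // True when the far-branch sequence (negated short branch over a
  // lui/ori/jr) must be used instead of a single short branch.
  bool NeedsLongBranch(bool trampoline_emitted, bool label_bound,
                       int32_t pc_offset, int32_t label_pos) {
    const int kInstrSize = 4;
    const int kMaxBranchOffset = (1 << 17) - 1;  // 18-bit signed reach.
    // Mirrors is_near(): only a bound, close-enough label counts as near.
    bool is_near = label_bound &&
        (pc_offset - label_pos) < kMaxBranchOffset - 4 * kInstrSize;
    // Mirrors UseAbsoluteCodePointers(): only large (trampoline-emitting)
    // functions pay for the long form.
    return trampoline_emitted && !is_near;
  }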
+void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
b(offset);
// Emit a nop in the branch delay slot if required.
}
-void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
ASSERT(!rs.is(zero_reg));
Register r2 = no_reg;
break;
case Uless:
if (r2.is(zero_reg)) {
- b(offset);
+ // No code needs to be emitted.
+ return;
} else {
sltu(scratch, rs, r2);
bne(scratch, zero_reg, offset);
} else {
r2 = scratch;
li(r2, rt);
- sltu(scratch, rs, r2);
+ slt(scratch, rs, r2);
beq(scratch, zero_reg, offset);
}
break;
break;
case Uless:
if (rt.imm32_ == 0) {
- b(offset);
+ // No code needs to be emitted.
+ return;
} else if (is_int16(rt.imm32_)) {
sltiu(scratch, rs, rt.imm32_);
bne(scratch, zero_reg, offset);
}
-void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
// We use branch_offset as an argument for the branch instructions to be sure
// it is called just before generating the branch instruction, as needed.
}
-void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
int32_t offset;
break;
case Uless:
if (r2.is(zero_reg)) {
- offset = shifted_branch_offset(L, false);
- b(offset);
+ // No code needs to be emitted.
+ return;
} else {
sltu(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
} else {
r2 = scratch;
li(r2, rt);
- sltu(scratch, rs, r2);
+ slt(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
}
break;
case Uless:
if (rt.imm32_ == 0) {
- offset = shifted_branch_offset(L, false);
- b(offset);
+ // No code needs to be emitted.
+ return;
} else if (is_int16(rt.imm32_)) {
sltiu(scratch, rs, rt.imm32_);
offset = shifted_branch_offset(L, false);
}
+void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
+ BranchAndLinkShort(offset, bdslot);
+}
+
+
+void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BranchAndLinkShort(offset, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Jalr(L, bdslot);
+ } else {
+ BranchAndLinkShort(L, bdslot);
+ }
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ bool is_label_near = is_near(L);
+ if (UseAbsoluteCodePointers() && !is_label_near) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rs, rt);
+ Jalr(L, bdslot);
+ bind(&skip);
+ } else {
+ BranchAndLinkShort(L, cond, rs, rt, bdslot);
+ }
+}
+
+
// We need to use a bgezal or bltzal, but they can't be used directly with the
// slt instructions. We could use sub or add instead but we would miss overflow
// cases, so we keep slt and add an intermediate third instruction.
-void MacroAssembler::BranchAndLink(int16_t offset,
- BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShort(int16_t offset,
+ BranchDelaySlot bdslot) {
bal(offset);
// Emit a nop in the branch delay slot if required.
}
-void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
+ Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
Register r2 = no_reg;
Register scratch = at;
}
-void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
bal(shifted_branch_offset(L, false));
// Emit a nop in the branch delay slot if required.
}
-void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
- const Operand& rt,
- BranchDelaySlot bdslot) {
+void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
int32_t offset;
}
+void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint32_t imm28 = jump_address(L) & kImm28Mask;
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ j(imm28);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint32_t imm32 = jump_address(L);
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jr(at);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+
+ uint32_t imm32 = jump_address(L);
+ { BlockGrowBufferScope block_buf_growth(this);
+ // Buffer growth (and relocation) must be blocked for internal references
+ // until associated instructions are emitted and available to be patched.
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ lui(at, (imm32 & kHiMask) >> kLuiShift);
+ ori(at, at, (imm32 & kImm16Mask));
+ }
+ jalr(at);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (target.is_reg()) {
Register scratch,
int num_arguments);
+ void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
+ void BranchShort(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+ void BranchShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+ void BranchAndLinkShort(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot = PROTECT);
+ void J(Label* L, BranchDelaySlot bdslot);
+ void Jr(Label* L, BranchDelaySlot bdslot);
+ void Jalr(Label* L, BranchDelaySlot bdslot);
+
void Jump(intptr_t target, RelocInfo::Mode rmode,
BranchDelaySlot bd = PROTECT);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+ bool UseAbsoluteCodePointers();
+
bool generating_stub_;
bool allow_stub_calls_;
// This handle will be patched with the code object on installation.