static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
+ // Buffer size and constant pool distance are checked together at regular
+ // intervals of kBufferCheckInterval emitted bytes.
+ static const int kBufferCheckInterval = 1*KB/2;
+ // Constants in pools are accessed via pc-relative addressing, which can
+ // reach +/-4KB, thereby defining a maximum distance between the instruction
+ // and the accessed constant. We satisfy this constraint by limiting the
+ // distance between pools.
+ static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
+ static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
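+ // For illustration, with the values above: kMaxDistBetweenPools is
+ // 4096 - 2*512 = 3072 bytes, and since kInstrSize is 4 on ARM,
+ // kMaxNumPRInfo is 3072/4 = 768 pending relocation entries.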
+
// Check if it is time to emit a constant pool for pending reloc info entries.
void CheckConstPool(bool force_emit, bool require_jump);
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
- // Buffer size and constant pool distance are checked together at regular
- // intervals of kBufferCheckInterval emitted bytes
- static const int kBufferCheckInterval = 1*KB/2;
int next_buffer_check_; // pc offset of next buffer check
// Code generation
// Pools are emitted at (more or less) regular intervals of kDistBetweenPools
// bytes.
static const int kDistBetweenPools = 1*KB;
- // Constants in pools are accessed via pc relative addressing, which can
- // reach +/-4KB thereby defining a maximum distance between the instruction
- // and the accessed constant. We satisfy this constraint by limiting the
- // distance between pools.
- static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
-
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
// Pending relocation info records are stored in a separate buffer until a
// constant pool is emitted.
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
- static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
RelocInfo prinfo_[kMaxNumPRInfo]; // the buffer of pending relocation info
int num_prinfo_; // number of pending reloc info entries in the buffer
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
+ GenerateDeoptJumpTable() &&
GenerateSafepointTable();
}
__ jmp(code->exit());
}
- // Force constant pool emission at the end of deferred code to make
- // sure that no constant pools are emitted after the official end of
- // the instruction sequence.
+ // Force constant pool emission at the end of the deferred code to make
+ // sure that no constant pools are emitted after it.
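+ // Per the declaration of CheckConstPool earlier in this patch: force_emit
+ // == true emits the pool immediately regardless of the distance check, and
+ // require_jump == false allows emission without a preceding jump over the
+ // pool.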
masm()->CheckConstPool(true, false);
- // Deferred code is the last part of the instruction sequence. Mark
- // the generated code as done unless we bailed out.
+ return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeoptJumpTable() {
+ // Check that the jump table is accessible from everywhere in the function
+ // code, i.e. that offsets to the table can be encoded in the 24-bit signed
+ // immediate of a branch instruction.
+ // To simplify, we consider the code size from the first instruction to the
+ // end of the jump table. We also don't consider the pc load delta.
+ // Each entry in the jump table generates one instruction and inlines one
+ // 32-bit data word after it.
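+ // (Branch offsets on ARM are encoded in instructions, not bytes: the
+ // 24-bit immediate is a word offset, hence the division by kInstrSize.)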
+ if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
+ deopt_jump_table_.length() * 2)) {
+ Abort("Generated code is too large");
+ }
+
+ // Block the constant pool emission during the jump table emission.
+ __ BlockConstPoolFor(deopt_jump_table_.length());
+ __ RecordComment("[ Deoptimisation jump table");
+ Label table_start;
+ __ bind(&table_start);
+ for (int i = 0; i < deopt_jump_table_.length(); i++) {
+ __ bind(&deopt_jump_table_[i].label);
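+ // Reading pc yields the address of the current instruction plus
+ // kPcLoadDelta (8 on ARM), so the offset kInstrSize - kPcLoadDelta = -4
+ // makes the ldr below load the word emitted by dd() right after it;
+ // loading that word into pc jumps to the deoptimization entry.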
+ __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
+ __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
+ }
+ ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
+ deopt_jump_table_.length() * 2);
+ __ RecordComment("]");
+
+ // The deoptimization jump table is the last part of the instruction
+ // sequence. Mark the generated code as done unless we bailed out.
if (!is_aborted()) status_ = DONE;
return !is_aborted();
}
return;
}
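+ // stop() takes a condition on ARM, so the trap fires only when the
+ // deoptimization would actually be taken; this subsumes the old
+ // branch-around sequence removed below.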
+ if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
+
if (cc == al) {
- if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
} else {
- if (FLAG_trap_on_deopt) {
- Label done;
- __ b(&done, NegateCondition(cc));
- __ stop("trap_on_deopt");
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&done);
- } else {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc);
+ // We often have several deopts to the same entry; reuse the last
+ // jump entry if this is the case.
+ if (deopt_jump_table_.is_empty() ||
+ (deopt_jump_table_.last().address != entry)) {
+ deopt_jump_table_.Add(JumpTableEntry(entry));
}
+ __ b(cc, &deopt_jump_table_.last().label);
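+ // A single conditional branch per deopt site now suffices; the range
+ // check in GenerateDeoptJumpTable() guarantees the table is reachable.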
}
}
current_instruction_(-1),
instructions_(chunk->instructions()),
deoptimizations_(4),
+ deopt_jump_table_(4),
deoptimization_literals_(8),
inlined_function_count_(0),
scope_(info->scope()),
bool GeneratePrologue();
bool GenerateBody();
bool GenerateDeferredCode();
+ bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
enum SafepointMode {
Handle<Map> type,
Handle<String> name);
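+ // An entry of the deoptimization jump table: the label is bound at the
+ // table slot emitted by GenerateDeoptJumpTable(), and address is the
+ // deoptimization entry that the slot jumps to.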
+ struct JumpTableEntry {
+ explicit inline JumpTableEntry(Address entry)
+ : label(),
+ address(entry) { }
+ Label label;
+ Address address;
+ };
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;