}
-// Assembler
+// Constant Pool.
+void ConstPool::RecordEntry(intptr_t data,
+ RelocInfo::Mode mode) {
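+ // The modes checked below never require a constant pool entry.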
+ ASSERT(mode != RelocInfo::COMMENT &&
+ mode != RelocInfo::POSITION &&
+ mode != RelocInfo::STATEMENT_POSITION &&
+ mode != RelocInfo::CONST_POOL &&
+ mode != RelocInfo::VENEER_POOL &&
+ mode != RelocInfo::CODE_AGE_SEQUENCE);
+
+ uint64_t raw_data = static_cast<uint64_t>(data);
+ int offset = assm_->pc_offset();
+ if (IsEmpty()) {
+ first_use_ = offset;
+ }
+
+ std::pair<uint64_t, int> entry = std::make_pair(raw_data, offset);
+ if (CanBeShared(mode)) {
+ shared_entries_.insert(entry);
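+ // If the value was not already in the multimap this is a new distinct
+ // literal.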
+ if (shared_entries_.count(entry.first) == 1) {
+ shared_entries_count++;
+ }
+ } else {
+ unique_entries_.push_back(entry);
+ }
+
+ if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
+ // Request constant pool emission after the next instruction.
+ assm_->SetNextConstPoolCheckIn(1);
+ }
+}
+
+
+int ConstPool::DistanceToFirstUse() {
+ ASSERT(first_use_ >= 0);
+ return assm_->pc_offset() - first_use_;
+}
+
+
+int ConstPool::MaxPcOffset() {
+ // If there are no pending entries in the pool we can never be out of
+ // range.
+ if (IsEmpty()) return kMaxInt;
+
+ // Entries are not necessarily emitted in the order they are added so in the
+ // worst case the first constant pool use will be accessing the last entry.
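+ // The pool must therefore start early enough for the first user to reach
+ // an entry placed at the very end of the pool.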
+ return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
+}
+
+
+int ConstPool::WorstCaseSize() {
+ if (IsEmpty()) return 0;
+
+ // Max size prologue:
+ // b over
+ // ldr xzr, #pool_size
+ // blr xzr
+ // nop
+ // All entries are 64-bit for now.
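+ // For example, 8 pending entries give 4 * 4 + 8 * 8 = 80 bytes
+ // (kInstructionSize is 4 and kPointerSize is 8 on arm64).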
+ return 4 * kInstructionSize + EntryCount() * kPointerSize;
+}
+
+
+int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
+ if (IsEmpty()) return 0;
+
+ // Prologue is:
+ // b over ;; if require_jump
+ // ldr xzr, #pool_size
+ // blr xzr
+ // nop ;; if not 64-bit aligned
+ int prologue_size = require_jump ? kInstructionSize : 0;
+ prologue_size += 2 * kInstructionSize;
+ prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ?
+ 0 : kInstructionSize;
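+ // For example, with a jump and an unaligned pc the prologue is
+ // 4 + 8 + 4 = 16 bytes; aligned and without a jump it is only 8 bytes.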
+
+ // All entries are 64-bit for now.
+ return prologue_size + EntryCount() * kPointerSize;
+}
+
+
+void ConstPool::Emit(bool require_jump) {
+ ASSERT(!assm_->is_const_pool_blocked());
+ // Prevent recursive pool emission and protect from veneer pools.
+ Assembler::BlockPoolsScope block_pools(assm_);
+
+ int size = SizeIfEmittedAtCurrentPc(require_jump);
+ Label size_check;
+ assm_->bind(&size_check);
+
+ assm_->RecordConstPool(size);
+ // Emit the constant pool. It is preceded by an optional branch if
+ // require_jump and a header which will:
+ // 1) Encode the size of the constant pool, for use by the disassembler.
+ // 2) Terminate the program, to try to prevent execution from accidentally
+ // flowing into the constant pool.
+ //  3) Align the pool entries to 64 bits.
+ // The header is therefore made of up to three arm64 instructions:
+ // ldr xzr, #<size of the constant pool in 32-bit words>
+ // blr xzr
+ // nop
+ //
+ // If executed, the header will likely segfault and lr will point to the
+ // instruction following the offending blr.
+ // TODO(all): Make the alignment part less fragile. Currently code is
+ // allocated as a byte array so there are no guarantees the alignment will
+ // be preserved on compaction. Currently it works as allocation seems to be
+ // 64-bit aligned.
+
+ // Emit branch if required
+ Label after_pool;
+ if (require_jump) {
+ assm_->b(&after_pool);
+ }
+
+ // Emit the header.
+ assm_->RecordComment("[ Constant Pool");
+ EmitMarker();
+ EmitGuard();
+ assm_->Align(8);
+
+ // Emit constant pool entries.
+ // TODO(all): currently each relocated constant is 64 bits, consider adding
+ // support for 32-bit entries.
+ EmitEntries();
+ assm_->RecordComment("]");
+
+ if (after_pool.is_linked()) {
+ assm_->bind(&after_pool);
+ }
+
+ ASSERT(assm_->SizeOfCodeGeneratedSince(&size_check) ==
+ static_cast<unsigned>(size));
+}
+
+
+void ConstPool::Clear() {
+ shared_entries_.clear();
+ shared_entries_count = 0;
+ unique_entries_.clear();
+ first_use_ = -1;
+}
+
+
+bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
+ // Constant pool currently does not support 32-bit entries.
+ ASSERT(mode != RelocInfo::NONE32);
+
+ return RelocInfo::IsNone(mode) ||
+ (!assm_->serializer_enabled() && (mode >= RelocInfo::CELL));
+}
+
+
+void ConstPool::EmitMarker() {
+ // A constant pool size is expressed in number of 32-bit words.
+ // Currently all entries are 64-bit.
+ // + 1 is for the crash guard.
+ // + 0/1 for alignment.
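+ // For example, 3 entries at an unaligned pc give 3 * 2 + 1 + 1 = 8 words.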
+ int word_count = EntryCount() * 2 + 1 +
+     (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
+ assm_->Emit(LDR_x_lit |
+ Assembler::ImmLLiteral(word_count) |
+ Assembler::Rt(xzr));
+}
+
+
+void ConstPool::EmitGuard() {
+#ifdef DEBUG
+ Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
+ ASSERT(instr->preceding()->IsLdrLiteralX() &&
+ instr->preceding()->Rt() == xzr.code());
+#endif
+ assm_->EmitPoolGuard();
+}
+
+
+void ConstPool::EmitEntries() {
+ ASSERT(IsAligned(assm_->pc_offset(), 8));
+
+ typedef std::multimap<uint64_t, int>::const_iterator SharedEntriesIterator;
+ SharedEntriesIterator value_it;
+ // Iterate through the keys (constant pool values).
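+ // upper_bound() skips to the next distinct key, so each shared value is
+ // visited exactly once however many offsets are recorded for it.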
+ for (value_it = shared_entries_.begin();
+ value_it != shared_entries_.end();
+ value_it = shared_entries_.upper_bound(value_it->first)) {
+ std::pair<SharedEntriesIterator, SharedEntriesIterator> range;
+ uint64_t data = value_it->first;
+ range = shared_entries_.equal_range(data);
+ SharedEntriesIterator offset_it;
+ // Iterate through the offsets of a given key.
+ for (offset_it = range.first; offset_it != range.second; offset_it++) {
+ Instruction* instr = assm_->InstructionAt(offset_it->second);
+
+ // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+ ASSERT(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
+ instr->SetImmPCOffsetTarget(assm_->pc());
+ }
+ assm_->dc64(data);
+ }
+ shared_entries_.clear();
+ shared_entries_count = 0;
+
+ // Emit unique entries.
+ std::vector<std::pair<uint64_t, int> >::const_iterator unique_it;
+ for (unique_it = unique_entries_.begin();
+ unique_it != unique_entries_.end();
+ unique_it++) {
+ Instruction* instr = assm_->InstructionAt(unique_it->second);
+
+ // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+ ASSERT(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
+ instr->SetImmPCOffsetTarget(assm_->pc());
+ assm_->dc64(unique_it->first);
+ }
+ unique_entries_.clear();
+ first_use_ = -1;
+}
+
+
+// Assembler
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
+ constpool_(this),
recorded_ast_id_(TypeFeedbackId::None()),
unresolved_branches_(),
positions_recorder_(this) {
Assembler::~Assembler() {
- ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(constpool_.IsEmpty());
ASSERT(const_pool_blocked_nesting_ == 0);
ASSERT(veneer_pool_blocked_nesting_ == 0);
}
pc_ = buffer_;
reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
reinterpret_cast<byte*>(pc_));
- num_pending_reloc_info_ = 0;
+ constpool_.Clear();
next_constant_pool_check_ = 0;
next_veneer_pool_check_ = kMaxInt;
no_const_pool_before_ = 0;
- first_const_pool_use_ = -1;
ClearRecordedAstId();
}
void Assembler::GetCode(CodeDesc* desc) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
- ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(constpool_.IsEmpty());
// Set up code descriptor.
if (desc) {
void Assembler::EndBlockConstPool() {
if (--const_pool_blocked_nesting_ == 0) {
// Check the constant pool hasn't been blocked for too long.
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool)));
+ ASSERT(pc_offset() < constpool_.MaxPcOffset());
// Two cases:
// * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
// still blocked
}
-void Assembler::ConstantPoolMarker(uint32_t size) {
- ASSERT(is_const_pool_blocked());
- // + 1 is for the crash guard.
- Emit(LDR_x_lit | ImmLLiteral(size + 1) | Rt(xzr));
-}
-
-
void Assembler::EmitPoolGuard() {
// We must generate only one instruction as this is used in scopes that
// control the size of the code generated.
}
-void Assembler::ConstantPoolGuard() {
-#ifdef DEBUG
- // Currently this is only used after a constant pool marker.
- ASSERT(is_const_pool_blocked());
- Instruction* instr = reinterpret_cast<Instruction*>(pc_);
- ASSERT(instr->preceding()->IsLdrLiteralX() &&
- instr->preceding()->Rt() == xzr.code());
-#endif
- EmitPoolGuard();
-}
-
-
void Assembler::StartBlockVeneerPool() {
++veneer_pool_blocked_nesting_;
}
// buffer nor pc absolute pointing inside the code buffer, so there is no need
// to relocate any emitted relocation entries.
- // Relocate pending relocation entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION);
- if (rinfo.rmode() != RelocInfo::JS_RETURN) {
- rinfo.set_pc(rinfo.pc() + pc_delta);
- }
- }
+ // Pending constant pool entries record pc offsets rather than absolute
+ // addresses, so they do not need to be relocated either.
}
|| RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.
} else {
- ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
- if (num_pending_reloc_info_ == 0) {
- first_const_pool_use_ = pc_offset();
- }
- pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
+ constpool_.RecordEntry(data, rmode);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstructionSize;
if (no_const_pool_before_ < pc_limit) {
- // If there are some pending entries, the constant pool cannot be blocked
- // further than first_const_pool_use_ + kMaxDistToConstPool
- ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool)));
no_const_pool_before_ = pc_limit;
+ // Make sure the pool won't be blocked for too long.
+ ASSERT(pc_limit < constpool_.MaxPcOffset());
}
if (next_constant_pool_check_ < no_const_pool_before_) {
}
// There is nothing to do if there are no pending constant pool entries.
- if (num_pending_reloc_info_ == 0) {
+ if (constpool_.IsEmpty()) {
// Calculate the offset of the next check.
- next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
+ SetNextConstPoolCheckIn(kCheckConstPoolInterval);
return;
}
// We emit a constant pool when:
// * requested to do so by parameter force_emit (e.g. after each function).
// * the distance to the first instruction accessing the constant pool is
- // kAvgDistToConstPool or more.
- // * no jump is required and the distance to the first instruction accessing
- // the constant pool is at least kMaxDistToPConstool / 2.
- ASSERT(first_const_pool_use_ >= 0);
- int dist = pc_offset() - first_const_pool_use_;
- if (!force_emit && dist < kAvgDistToConstPool &&
- (require_jump || (dist < (kMaxDistToConstPool / 2)))) {
+ // kApproxMaxDistToConstPool or more.
+ // * the number of entries in the pool is kApproxMaxPoolEntryCount or more.
+ int dist = constpool_.DistanceToFirstUse();
+ int count = constpool_.EntryCount();
+ if (!force_emit &&
+ (dist < kApproxMaxDistToConstPool) &&
+ (count < kApproxMaxPoolEntryCount)) {
return;
}
- int jump_instr = require_jump ? kInstructionSize : 0;
- int size_pool_marker = kInstructionSize;
- int size_pool_guard = kInstructionSize;
- int pool_size = jump_instr + size_pool_marker + size_pool_guard +
- num_pending_reloc_info_ * kPointerSize;
- int needed_space = pool_size + kGap;
// Emit veneers for branches that would go out of range during emission of the
// constant pool.
- CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + pool_size);
-
- Label size_check;
- bind(&size_check);
+ int worst_case_size = constpool_.WorstCaseSize();
+ CheckVeneerPool(false, require_jump,
+ kVeneerDistanceMargin + worst_case_size);
// Check that the code buffer is large enough before emitting the constant
- // pool (include the jump over the pool, the constant pool marker, the
- // constant pool guard, and the gap to the relocation information).
+ // pool (this includes the gap to the relocation information).
+ int needed_space = worst_case_size + kGap + 1 * kInstructionSize;
while (buffer_space() <= needed_space) {
GrowBuffer();
}
- {
- // Block recursive calls to CheckConstPool and protect from veneer pools.
- BlockPoolsScope block_pools(this);
- RecordConstPool(pool_size);
-
- // Emit jump over constant pool if necessary.
- Label after_pool;
- if (require_jump) {
- b(&after_pool);
- }
-
- // Emit a constant pool header. The header has two goals:
- // 1) Encode the size of the constant pool, for use by the disassembler.
- // 2) Terminate the program, to try to prevent execution from accidentally
- // flowing into the constant pool.
- // The header is therefore made of two arm64 instructions:
- // ldr xzr, #<size of the constant pool in 32-bit words>
- // blr xzr
- // If executed the code will likely segfault and lr will point to the
- // beginning of the constant pool.
- // TODO(all): currently each relocated constant is 64 bits, consider adding
- // support for 32-bit entries.
- RecordComment("[ Constant Pool");
- ConstantPoolMarker(2 * num_pending_reloc_info_);
- ConstantPoolGuard();
-
- // Emit constant pool entries.
- for (int i = 0; i < num_pending_reloc_info_; i++) {
- RelocInfo& rinfo = pending_reloc_info_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION &&
- rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
- rinfo.rmode() != RelocInfo::CONST_POOL &&
- rinfo.rmode() != RelocInfo::VENEER_POOL);
-
- Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc());
- // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
- ASSERT(instr->IsLdrLiteral() &&
- instr->ImmLLiteral() == 0);
-
- instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
- dc64(rinfo.data());
- }
-
- num_pending_reloc_info_ = 0;
- first_const_pool_use_ = -1;
-
- RecordComment("]");
-
- if (after_pool.is_linked()) {
- bind(&after_pool);
- }
- }
+ Label size_check;
+ bind(&size_check);
+ constpool_.Emit(require_jump);
+ ASSERT(SizeOfCodeGeneratedSince(&size_check) <=
+ static_cast<unsigned>(worst_case_size));
// Since a constant pool was just emitted, move the check offset forward by
// the standard interval.
- next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
-
- ASSERT(SizeOfCodeGeneratedSince(&size_check) ==
- static_cast<unsigned>(pool_size));
+ SetNextConstPoolCheckIn(kCheckConstPoolInterval);
}
#include <list>
#include <map>
+#include <vector>
#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
};
+class ConstPool {
+ public:
+ explicit ConstPool(Assembler* assm)
+ : assm_(assm),
+ first_use_(-1),
+ shared_entries_count(0) {}
+ void RecordEntry(intptr_t data, RelocInfo::Mode mode);
+ int EntryCount() const {
+ return shared_entries_count + static_cast<int>(unique_entries_.size());
+ }
+ bool IsEmpty() const {
+ return shared_entries_.empty() && unique_entries_.empty();
+ }
+ // Distance in bytes between the current pc and the first instruction
+ // using the pool. The pool must not be empty when this is called.
+ int DistanceToFirstUse();
+ // Offset after which instructions using the pool will be out of range.
+ int MaxPcOffset();
+ // Maximum size the constant pool can reach with the current entries. It
+ // always includes alignment padding and the branch over the pool.
+ int WorstCaseSize();
+ // Size in bytes of the literal pool *if* it is emitted at the current
+ // pc. The size will include the branch over the pool if it was requested.
+ int SizeIfEmittedAtCurrentPc(bool require_jump);
+ // Emit the literal pool at the current pc with a branch over the pool if
+ // requested.
+ void Emit(bool require_jump);
+ // Discard any pending pool entries.
+ void Clear();
+
+ private:
+ bool CanBeShared(RelocInfo::Mode mode);
+ void EmitMarker();
+ void EmitGuard();
+ void EmitEntries();
+
+ Assembler* assm_;
+ // Keep track of the first instruction requiring a constant pool entry
+ // since the previous constant pool was emitted.
+ int first_use_;
+ // Map of value to pc offset(s) for entries which can be shared.
+ std::multimap<uint64_t, int> shared_entries_;
+ // Number of distinct literals in shared entries.
+ int shared_entries_count;
+ // List of (value, pc offset) pairs for entries which cannot be shared.
+ std::vector<std::pair<uint64_t, int> > unique_entries_;
+};
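+// Typical flow: the assembler records an entry for each pc-relative literal
+// load via RecordEntry(), and CheckConstPool() decides when to call Emit()
+// based on the distance to the first use and the number of pending entries.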
+
+
// -----------------------------------------------------------------------------
// Assembler.
virtual ~Assembler();
virtual void AbortedCodeGeneration() {
- num_pending_reloc_info_ = 0;
+ constpool_.Clear();
}
// System functions ---------------------------------------------------------
static bool IsConstantPoolAt(Instruction* instr);
static int ConstantPoolSizeAt(Instruction* instr);
// See Assembler::CheckConstPool for more info.
- void ConstantPoolMarker(uint32_t size);
void EmitPoolGuard();
- void ConstantPoolGuard();
// Prevent veneer pool emission until EndBlockVeneerPool is called.
// Call to this function can be nested but must be followed by an equal
// Code generation helpers --------------------------------------------------
- unsigned num_pending_reloc_info() const { return num_pending_reloc_info_; }
+ bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }
+
+ Instruction* pc() const { return Instruction::Cast(pc_); }
Instruction* InstructionAt(int offset) const {
return reinterpret_cast<Instruction*>(buffer_ + offset);
// instructions.
void BlockConstPoolFor(int instructions);
+ // Set how far from current pc the next constant pool check will be.
+ void SetNextConstPoolCheckIn(int instructions) {
+ next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
+ }
+
// Emit the instruction at pc_.
void Emit(Instr instruction) {
STATIC_ASSERT(sizeof(*pc_) == 1);
int next_constant_pool_check_;
// Constant pool generation
- // Pools are emitted in the instruction stream, preferably after unconditional
- // jumps or after returns from functions (in dead code locations).
- // If a long code sequence does not contain unconditional jumps, it is
- // necessary to emit the constant pool before the pool gets too far from the
- // location it is accessed from. In this case, we emit a jump over the emitted
- // constant pool.
+ // Pools are emitted in the instruction stream. They are emitted when:
+ // * the distance to the first use is above a pre-defined distance or
+ //  * the number of entries in the pool is above a pre-defined size or
+ // * code generation is finished
+ // If a pool needs to be emitted before code generation is finished a branch
+ // over the emitted pool will be inserted.
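+ // An emitted pool looks like:
+ //   b over            ;; if a jump is required
+ //   ldr xzr, #size    ;; pool size in 32-bit words
+ //   blr xzr           ;; crash guard
+ //   nop               ;; if alignment padding is needed
+ //   <64-bit entries>
+ //  over: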
+
// Constants in the pool may be addresses of functions that gets relocated;
// if so, a relocation info entry is associated to the constant pool entry.
// expensive. By default we only check again once a number of instructions
// has been generated. That also means that the sizing of the buffers is not
// an exact science, and that we rely on some slop to not overrun buffers.
- static const int kCheckConstPoolIntervalInst = 128;
- static const int kCheckConstPoolInterval =
- kCheckConstPoolIntervalInst * kInstructionSize;
-
- // Constants in pools are accessed via pc relative addressing, which can
- // reach +/-4KB thereby defining a maximum distance between the instruction
- // and the accessed constant.
- static const int kMaxDistToConstPool = 4 * KB;
- static const int kMaxNumPendingRelocInfo =
- kMaxDistToConstPool / kInstructionSize;
-
-
- // Average distance beetween a constant pool and the first instruction
- // accessing the constant pool. Longer distance should result in less I-cache
- // pollution.
- // In practice the distance will be smaller since constant pool emission is
- // forced after function return and sometimes after unconditional branches.
- static const int kAvgDistToConstPool =
- kMaxDistToConstPool - kCheckConstPoolInterval;
+ static const int kCheckConstPoolInterval = 128;
+
+ // Distance to first use after which a pool will be emitted. Pool entries
+ // are accessed with a pc-relative load, therefore this cannot be more than
+ // 1 * MB. Since constant pool emission checks are interval based this value
+ // is an approximation.
+ static const int kApproxMaxDistToConstPool = 64 * KB;
+
+ // Number of pool entries after which a pool will be emitted. Since constant
+ // pool emission checks are interval based this value is an approximation.
+ static const int kApproxMaxPoolEntryCount = 512;
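+ // 512 64-bit entries amount to 4KB of literals, well within the 1 * MB
+ // pc-relative load range.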
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
- // Keep track of the first instruction requiring a constant pool entry
- // since the previous constant pool was emitted.
- int first_const_pool_use_;
-
// Emission of the veneer pools may be blocked in some code sequences.
int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
- // the buffer of pending relocation info
- RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
- // number of pending reloc info entries in the buffer
- int num_pending_reloc_info_;
+ // The pending constant pool.
+ ConstPool constpool_;
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
friend class EnsureSpace;
+ friend class ConstPool;
};
class PatchingAssembler : public Assembler {
// Verify we have generated the number of instruction we expected.
ASSERT((pc_offset() + kGap) == buffer_size_);
// Verify no relocation information has been emitted.
- ASSERT(num_pending_reloc_info() == 0);
+ ASSERT(IsConstPoolEmpty());
// Flush the Instruction cache.
size_t length = buffer_size_ - kGap;
CPU::FlushICache(buffer_, length);