// movt ip, #... @ call address high 16
// blx ip
// @ return address
- // Or pre-V7 or cases that need frequent patching:
- // ldr ip, [pc, #...] @ call address
+ // Or, for pre-V7 or cases that need frequent patching, the address is in
+ // the constant pool. It could be a small constant pool load:
+ // ldr ip, [pc/pp, #...] @ call address
+ // blx ip
+ // @ return address
+ // Or an extended constant pool load:
+ // movw ip, #...
+ // movt ip, #...
+ // ldr ip, [pc, ip] @ call address
// blx ip
// @ return address
Address candidate = pc - 2 * Assembler::kInstrSize;
if (IsLdrPcImmediateOffset(candidate_instr) ||
IsLdrPpImmediateOffset(candidate_instr)) {
return candidate;
+ } else if (IsLdrPpRegOffset(candidate_instr)) {
+ candidate = pc - 4 * Assembler::kInstrSize;
+ ASSERT(IsMovW(Memory::int32_at(candidate)) &&
+ IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize)));
+ return candidate;
+ } else {
+ candidate = pc - 3 * Assembler::kInstrSize;
+ ASSERT(IsMovW(Memory::int32_at(candidate)) &&
+ IsMovT(Memory::int32_at(candidate + kInstrSize)));
+ return candidate;
}
- candidate = pc - 3 * Assembler::kInstrSize;
- ASSERT(IsMovW(Memory::int32_at(candidate)) &&
- IsMovT(Memory::int32_at(candidate + kInstrSize)));
- return candidate;
}
Address Assembler::return_address_from_call_start(Address pc) {
if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) ||
IsLdrPpImmediateOffset(Memory::int32_at(pc))) {
+ // Load from constant pool, small section.
return pc + kInstrSize * 2;
} else {
ASSERT(IsMovW(Memory::int32_at(pc)));
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
- return pc + kInstrSize * 3;
+ if (IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize))) {
+ // Load from constant pool, extended section.
+ return pc + kInstrSize * 4;
+ } else {
+ // A movw / movt immediate load.
+ return pc + kInstrSize * 3;
+ }
}
}
}
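For reference, the three call-start sequences handled above give these return-address deltas:

// Call-start sequence at pc                      return address
// ldr ip, [pc/pp, #imm]; blx ip                  pc + 2 * kInstrSize
// movw; movt; ldr ip, [pp, ip]; blx ip           pc + 4 * kInstrSize
// movw; movt; blx ip                             pc + 3 * kInstrSize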
-static Instr EncodeMovwImmediate(uint32_t immediate) {
- ASSERT(immediate < 0x10000);
- return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
-}
-
-
-static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate) {
- instruction &= ~EncodeMovwImmediate(0xffff);
- return instruction | EncodeMovwImmediate(immediate);
-}
-
-
-static bool IsConstantPoolLoad(Address pc) {
- return !Assembler::IsMovW(Memory::int32_at(pc));
+bool Assembler::is_constant_pool_load(Address pc) {
+ return !Assembler::IsMovW(Memory::int32_at(pc)) ||
+ (FLAG_enable_ool_constant_pool &&
+ Assembler::IsLdrPpRegOffset(
+ Memory::int32_at(pc + 2 * Assembler::kInstrSize)));
}
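Summarizing the predicate against the same three sequences (the extended form only occurs with the out-of-line constant pool):

// Sequence starting at pc                           is_constant_pool_load(pc)
// ldr ip, [pc/pp, #imm]                             true  (small section)
// movw ip, #...; movt ip, #...; ldr ip, [pp, ip]    true  (extended section)
// movw ip, #...; movt ip, #...                      false (immediate load)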
Address pc, ConstantPoolArray* constant_pool) {
if (FLAG_enable_ool_constant_pool) {
ASSERT(constant_pool != NULL);
- ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc)));
- return reinterpret_cast<Address>(constant_pool) +
- GetLdrRegisterImmediateOffset(Memory::int32_at(pc));
+ int cp_offset;
+ if (IsMovW(Memory::int32_at(pc))) {
+ ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)) &&
+ IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize)));
+ // This is an extended constant pool lookup.
+ Instruction* movw_instr = Instruction::At(pc);
+ Instruction* movt_instr = Instruction::At(pc + kInstrSize);
+ cp_offset = (movt_instr->ImmedMovwMovtValue() << 16) |
+ movw_instr->ImmedMovwMovtValue();
+ } else {
+ // This is a small constant pool lookup.
+ ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ cp_offset = GetLdrRegisterImmediateOffset(Memory::int32_at(pc));
+ }
+ return reinterpret_cast<Address>(constant_pool) + cp_offset;
} else {
ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Instr instr = Memory::int32_at(pc);
Address Assembler::target_address_at(Address pc,
ConstantPoolArray* constant_pool) {
- if (IsConstantPoolLoad(pc)) {
+ if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Return the value in the constant pool.
return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
} else {
ConstantPoolArray* constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
- if (IsConstantPoolLoad(pc)) {
+ if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Update the entry in the constant pool.
Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
// Intuitively, we would think it is necessary to always flush the
bool RelocInfo::IsInConstantPool() {
- if (FLAG_enable_ool_constant_pool) {
- return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_));
- } else {
- return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
- }
+ return Assembler::is_constant_pool_load(pc_);
}
// ldr rd, [pp, #offset]
const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPpImmedPattern = 5 * B24 | L | kRegister_r8_Code * B16;
+// ldr rd, [pp, rn]
+const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPpRegPattern = 7 * B24 | L | kRegister_r8_Code * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
+const Instr kMovwPattern = 0x30 * B20;
+const Instr kMovtPattern = 0x34 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
first_const_pool_64_use_ = -1;
last_bound_pos_ = 0;
constant_pool_available_ = !FLAG_enable_ool_constant_pool;
- constant_pool_full_ = false;
ClearRecordedAstId();
}
}
+Instr Assembler::GetConsantPoolLoadMask() {
+ if (FLAG_enable_ool_constant_pool) {
+ return kLdrPpImmedMask;
+ } else {
+ return kLdrPCImmedMask;
+ }
+}
+
+
bool Assembler::IsPush(Instr instr) {
return ((instr & ~kRdMask) == kPushRegPattern);
}
}
+bool Assembler::IsLdrPpRegOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // ldr<cond> <Rd>, [pp, +/- <Rm>].
+ return (instr & kLdrPpRegMask) == kLdrPpRegPattern;
+}
+
+
+Instr Assembler::GetLdrPpRegOffsetPattern() { return kLdrPpRegPattern; }
+
+
bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// vldr<cond> <Dd>, [pc +/- offset_10].
if (CpuFeatures::IsSupported(ARMv7)) {
if (imm32 < 0x10000) {
*instr ^= kMovwLeaveCCFlip;
- *instr |= EncodeMovwImmediate(imm32);
+ *instr |= Assembler::EncodeMovwImmediate(imm32);
*rotate_imm = *immed_8 = 0; // Not used for movw.
return true;
}
static bool use_mov_immediate_load(const Operand& x,
const Assembler* assembler) {
- if (assembler != NULL && !assembler->can_use_constant_pool()) {
+ if (assembler != NULL && !assembler->is_constant_pool_available()) {
// If there is no constant pool available, we must use an mov immediate.
// TODO(rmcilroy): enable ARMv6 support.
ASSERT(CpuFeatures::IsSupported(ARMv7));
}
-bool Operand::is_single_instruction(const Assembler* assembler,
- Instr instr) const {
- if (rm_.is_valid()) return true;
+int Operand::instructions_required(const Assembler* assembler,
+ Instr instr) const {
+ if (rm_.is_valid()) return 1;
uint32_t dummy1, dummy2;
if (must_output_reloc_info(assembler) ||
!fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, or use of
- // constant pool is required. For a mov instruction not setting the
- // condition code additional instruction conventions can be used.
- if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- return !use_mov_immediate_load(*this, assembler);
+ // the constant pool is required. First account for the instructions
+ // required for the constant pool or immediate load.
+ int instructions;
+ if (use_mov_immediate_load(*this, assembler)) {
+ instructions = 2; // A movw, movt immediate load.
+ } else if (assembler != NULL && assembler->use_extended_constant_pool()) {
+ instructions = 3; // An extended constant pool load.
} else {
- // If this is not a mov or mvn instruction there will always an additional
- // instructions - either mov or ldr. The mov might actually be two
- // instructions mov or movw followed by movt so including the actual
- // instruction two or three instructions will be generated.
- return false;
+ instructions = 1; // A small constant pool load.
}
+
+ if ((instr & ~kCondMask) != 13 * B21) { // mov, S not set
+ // Unless this is a mov which doesn't set the condition code (where the
+ // constant pool or immediate load alone suffices), also account for the
+ // actual instruction being requested.
+ instructions += 1;
+ }
+ return instructions;
} else {
// No use of constant pool and the immediate operand can be encoded as a
// shifter operand.
- return true;
+ return 1;
}
}
mov(rd, target, LeaveCC, cond);
}
} else {
- ASSERT(can_use_constant_pool());
- ConstantPoolAddEntry(rinfo);
- ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+ ASSERT(is_constant_pool_available());
+ ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
+ if (section == ConstantPoolArray::EXTENDED_SECTION) {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ Register target = rd.code() == pc.code() ? ip : rd;
+ // Emit instructions to load constant pool offset.
+ movw(target, 0, cond);
+ movt(target, 0, cond);
+ // Load from constant pool at offset.
+ ldr(rd, MemOperand(pp, target), cond);
+ } else {
+ ASSERT(section == ConstantPoolArray::SMALL_SECTION);
+ ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+ }
}
}
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) {
+ } else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// generated data which also happens to be executable, a Very Bad
// that's tricky because vldr has a limited reach. Furthermore
// it breaks load locality.
RelocInfo rinfo(pc_, imm);
- ConstantPoolAddEntry(rinfo);
- vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
+ ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
+ if (section == ConstantPoolArray::EXTENDED_SECTION) {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ // Emit instructions to load constant pool offset.
+ movw(ip, 0);
+ movt(ip, 0);
+ // Load from constant pool at offset.
+ vldr(dst, MemOperand(pp, ip));
+ } else {
+ ASSERT(section == ConstantPoolArray::SMALL_SECTION);
+ vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
+ }
} else {
// Synthesise the double from ARM immediates.
uint32_t lo, hi;
instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
((kNumRegisters-1)*B12) | // mask out register
EncodeMovwImmediate(0xFFFF)); // mask out immediate value
- return instr == 0x34*B20;
+ return instr == kMovtPattern;
}
instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
((kNumRegisters-1)*B12) | // mask out destination
EncodeMovwImmediate(0xFFFF)); // mask out immediate value
- return instr == 0x30*B20;
+ return instr == kMovwPattern;
+}
+
+
+Instr Assembler::GetMovTPattern() { return kMovtPattern; }
+
+
+Instr Assembler::GetMovWPattern() { return kMovwPattern; }
+
+
+Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
+ ASSERT(immediate < 0x10000);
+ return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+}
+
+
+Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
+ instruction &= ~EncodeMovwImmediate(0xffff);
+ return instruction | EncodeMovwImmediate(immediate);
}
}
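As a sanity check of the two helpers above, here is a minimal standalone sketch (plain C++, not patch code) of the imm4:imm12 split; the base pattern 0xE3000000 is 'movw r0, #0', matching kMovwPattern:

#include <cassert>
#include <cstdint>

// Mirrors Assembler::EncodeMovwImmediate: a 16-bit value splits into imm4
// (instruction bits 19:16) and imm12 (bits 11:0).
static uint32_t Encode(uint32_t imm) {
  assert(imm < 0x10000);
  return ((imm & 0xf000) << 4) | (imm & 0xfff);
}

int main() {
  // 0xABCD -> imm4 = 0xA at bits 19:16, imm12 = 0xBCD at bits 11:0.
  assert(Encode(0xABCD) == 0x000A0BCDu);
  // Patching 'movw r0, #0' (0xE3000000), as PatchMovwImmediate does,
  // yields 'movw r0, #0xABCD'.
  uint32_t instr = 0xE3000000u;
  instr = (instr & ~Encode(0xffff)) | Encode(0xABCD);
  assert(instr == 0xE30A0BCDu);
  return 0;
}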
-void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) {
+ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry(
+ const RelocInfo& rinfo) {
if (FLAG_enable_ool_constant_pool) {
- constant_pool_builder_.AddEntry(this, rinfo);
+ return constant_pool_builder_.AddEntry(this, rinfo);
} else {
if (rinfo.rmode() == RelocInfo::NONE64) {
ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
+ return ConstantPoolArray::SMALL_SECTION;
}
}
ConstantPoolBuilder::ConstantPoolBuilder()
- : entries_(),
- merged_indexes_(),
- count_of_64bit_(0),
- count_of_code_ptr_(0),
- count_of_heap_ptr_(0),
- count_of_32bit_(0) { }
+ : entries_(), current_section_(ConstantPoolArray::SMALL_SECTION) {}
bool ConstantPoolBuilder::IsEmpty() {
}
-bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) {
- return rmode == RelocInfo::NONE64;
-}
-
-
-bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) {
- return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64;
-}
-
-
-bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) {
- return RelocInfo::IsCodeTarget(rmode);
-}
-
-
-bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) {
- return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode);
+ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
+ RelocInfo::Mode rmode) {
+ if (rmode == RelocInfo::NONE64) {
+ return ConstantPoolArray::INT64;
+ } else if (!RelocInfo::IsGCRelocMode(rmode)) {
+ return ConstantPoolArray::INT32;
+ } else if (RelocInfo::IsCodeTarget(rmode)) {
+ return ConstantPoolArray::CODE_PTR;
+ } else {
+ ASSERT(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
+ return ConstantPoolArray::HEAP_PTR;
+ }
}
-void ConstantPoolBuilder::AddEntry(Assembler* assm,
- const RelocInfo& rinfo) {
+ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
+ Assembler* assm, const RelocInfo& rinfo) {
RelocInfo::Mode rmode = rinfo.rmode();
ASSERT(rmode != RelocInfo::COMMENT &&
rmode != RelocInfo::POSITION &&
// Try to merge entries which won't be patched.
int merged_index = -1;
+ ConstantPoolArray::LayoutSection entry_section = current_section_;
if (RelocInfo::IsNone(rmode) ||
(!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
size_t i;
- std::vector<RelocInfo>::const_iterator it;
+ std::vector<ConstantPoolEntry>::const_iterator it;
for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
- if (RelocInfo::IsEqual(rinfo, *it)) {
+ if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
+ // Merge with found entry.
merged_index = i;
+ entry_section = entries_[i].section_;
break;
}
}
}
-
- entries_.push_back(rinfo);
- merged_indexes_.push_back(merged_index);
+ ASSERT(entry_section <= current_section_);
+ entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
if (merged_index == -1) {
// Not merged, so update the appropriate count.
- if (Is64BitEntry(rmode)) {
- count_of_64bit_++;
- } else if (Is32BitEntry(rmode)) {
- count_of_32bit_++;
- } else if (IsCodePtrEntry(rmode)) {
- count_of_code_ptr_++;
- } else {
- ASSERT(IsHeapPtrEntry(rmode));
- count_of_heap_ptr_++;
- }
+ number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
}
- // Check if we still have room for another entry given Arm's ldr and vldr
- // immediate offset range.
- // TODO(rmcilroy): Avoid creating a new object here when we support
- // extended constant pools.
- ConstantPoolArray::NumberOfEntries total(count_of_64bit_,
- count_of_code_ptr_,
- count_of_heap_ptr_,
- count_of_32bit_);
- ConstantPoolArray::NumberOfEntries int64_counts(count_of_64bit_, 0, 0, 0);
- if (!(is_uint12(ConstantPoolArray::SizeFor(total)) &&
- is_uint10(ConstantPoolArray::SizeFor(int64_counts)))) {
- assm->set_constant_pool_full();
+ // Check if we still have room for another entry in the small section
+ // given Arm's ldr and vldr immediate offset range.
+ if (current_section_ == ConstantPoolArray::SMALL_SECTION &&
+ !(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) &&
+ is_uint10(ConstantPoolArray::MaxInt64Offset(
+ small_entries()->count_of(ConstantPoolArray::INT64))))) {
+ current_section_ = ConstantPoolArray::EXTENDED_SECTION;
}
+ return entry_section;
}
void ConstantPoolBuilder::Relocate(int pc_delta) {
- for (std::vector<RelocInfo>::iterator rinfo = entries_.begin();
- rinfo != entries_.end(); rinfo++) {
- ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN);
- rinfo->set_pc(rinfo->pc() + pc_delta);
+ for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
+ entry != entries_.end(); entry++) {
+ ASSERT(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
+ entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
}
}
Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
if (IsEmpty()) {
return isolate->factory()->empty_constant_pool_array();
+ } else if (extended_entries()->is_empty()) {
+ return isolate->factory()->NewConstantPoolArray(*small_entries());
} else {
- ConstantPoolArray::NumberOfEntries small(count_of_64bit_,
- count_of_code_ptr_,
- count_of_heap_ptr_,
- count_of_32bit_);
- return isolate->factory()->NewConstantPoolArray(small);
+ ASSERT(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
+ return isolate->factory()->NewExtendedConstantPoolArray(
+ *small_entries(), *extended_entries());
}
}
void ConstantPoolBuilder::Populate(Assembler* assm,
ConstantPoolArray* constant_pool) {
- ASSERT(count_of_64bit_ == constant_pool->number_of_entries(
- ConstantPoolArray::INT64, ConstantPoolArray::SMALL_SECTION));
- ASSERT(count_of_code_ptr_ == constant_pool->number_of_entries(
- ConstantPoolArray::CODE_PTR, ConstantPoolArray::SMALL_SECTION));
- ASSERT(count_of_heap_ptr_ == constant_pool->number_of_entries(
- ConstantPoolArray::HEAP_PTR, ConstantPoolArray::SMALL_SECTION));
- ASSERT(count_of_32bit_ == constant_pool->number_of_entries(
- ConstantPoolArray::INT32, ConstantPoolArray::SMALL_SECTION));
- ASSERT(entries_.size() == merged_indexes_.size());
-
- int index_64bit = 0;
- int index_code_ptr = count_of_64bit_;
- int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_;
- int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_;
-
- size_t i;
- std::vector<RelocInfo>::const_iterator rinfo;
- for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end(); rinfo++, i++) {
- RelocInfo::Mode rmode = rinfo->rmode();
+ ASSERT_EQ(extended_entries()->is_empty(),
+ !constant_pool->is_extended_layout());
+ ASSERT(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
+ constant_pool, ConstantPoolArray::SMALL_SECTION)));
+ if (constant_pool->is_extended_layout()) {
+ ASSERT(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
+ constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
+ }
+
+ ConstantPoolArray::NumberOfEntries small_idx;
+ ConstantPoolArray::NumberOfEntries extended_idx;
+ for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
+ entry != entries_.end(); entry++) {
+ RelocInfo rinfo = entry->rinfo_;
+ RelocInfo::Mode rmode = entry->rinfo_.rmode();
+ ConstantPoolArray::Type type = GetConstantPoolType(rmode);
// Update constant pool if necessary and get the entry's offset.
int offset;
- if (merged_indexes_[i] == -1) {
- if (Is64BitEntry(rmode)) {
- offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag;
- constant_pool->set(index_64bit++, rinfo->data64());
- } else if (Is32BitEntry(rmode)) {
- offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag;
- constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo->data()));
- } else if (IsCodePtrEntry(rmode)) {
- offset = constant_pool->OffsetOfElementAt(index_code_ptr) -
- kHeapObjectTag;
- constant_pool->set(index_code_ptr++,
- reinterpret_cast<Address>(rinfo->data()));
+ if (entry->merged_index_ == -1) {
+ int index;
+ if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
+ index = small_entries()->total_count() +
+ extended_entries()->base_of(type) + extended_idx.count_of(type);
+ extended_idx.increment(type);
+ } else {
+ ASSERT(entry->section_ == ConstantPoolArray::SMALL_SECTION);
+ index = small_entries()->base_of(type) + small_idx.count_of(type);
+ small_idx.increment(type);
+ }
+ if (type == ConstantPoolArray::INT64) {
+ constant_pool->set(index, rinfo.data64());
+ } else if (type == ConstantPoolArray::INT32) {
+ constant_pool->set(index, static_cast<int32_t>(rinfo.data()));
+ } else if (type == ConstantPoolArray::CODE_PTR) {
+ constant_pool->set(index, reinterpret_cast<Address>(rinfo.data()));
} else {
- ASSERT(IsHeapPtrEntry(rmode));
- offset = constant_pool->OffsetOfElementAt(index_heap_ptr) -
- kHeapObjectTag;
- constant_pool->set(index_heap_ptr++,
- reinterpret_cast<Object *>(rinfo->data()));
+ ASSERT(type == ConstantPoolArray::HEAP_PTR);
+ constant_pool->set(index, reinterpret_cast<Object*>(rinfo.data()));
}
- merged_indexes_[i] = offset; // Stash offset for merged entries.
+ offset = constant_pool->OffsetOfElementAt(index) - kHeapObjectTag;
+ entry->merged_index_ = offset; // Stash offset for merged entries.
} else {
- size_t merged_index = static_cast<size_t>(merged_indexes_[i]);
- ASSERT(merged_index < merged_indexes_.size() && merged_index < i);
- offset = merged_indexes_[merged_index];
+ ASSERT(entry->merged_index_ < (entry - entries_.begin()));
+ offset = entries_[entry->merged_index_].merged_index_;
}
// Patch vldr/ldr instruction with correct offset.
- Instr instr = assm->instr_at(rinfo->pc());
- if (Is64BitEntry(rmode)) {
+ Instr instr = assm->instr_at(rinfo.pc());
+ if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
+ // Instructions to patch must be 'movw rd, #0' and 'movt rd, #0'.
+ Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
+ ASSERT((Assembler::IsMovW(instr) &&
+ Instruction::ImmedMovwMovtValue(instr) == 0));
+ ASSERT((Assembler::IsMovT(next_instr) &&
+ Instruction::ImmedMovwMovtValue(next_instr) == 0));
+ assm->instr_at_put(rinfo.pc(),
+ Assembler::PatchMovwImmediate(instr, offset & 0xffff));
+ assm->instr_at_put(
+ rinfo.pc() + Assembler::kInstrSize,
+ Assembler::PatchMovwImmediate(next_instr, offset >> 16));
+ } else if (type == ConstantPoolArray::INT64) {
// Instruction to patch must be 'vldr rd, [pp, #0]'.
ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) &&
Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
ASSERT(is_uint10(offset));
- assm->instr_at_put(rinfo->pc(),
- Assembler::SetVldrDRegisterImmediateOffset(instr, offset));
+ assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset(
+ instr, offset));
} else {
// Instruction to patch must be 'ldr rd, [pp, #0]'.
ASSERT((Assembler::IsLdrPpImmediateOffset(instr) &&
Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
ASSERT(is_uint12(offset));
- assm->instr_at_put(rinfo->pc(),
- Assembler::SetLdrRegisterImmediateOffset(instr, offset));
+ assm->instr_at_put(
+ rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset));
}
}
- ASSERT((index_64bit == count_of_64bit_) &&
- (index_code_ptr == (index_64bit + count_of_code_ptr_)) &&
- (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) &&
- (index_32bit == (index_heap_ptr + count_of_32bit_)));
+ ASSERT(small_idx.equals(*small_entries()));
+ ASSERT(extended_idx.equals(*extended_entries()));
}
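A worked example of the extended-section patch above: for offset 0x12345, the movw is patched with 0x2345 (offset & 0xffff) and the movt with 0x0001 (offset >> 16), so the pair materializes the full 32-bit offset before the ldr from pp.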
// Return true if this is a register operand.
INLINE(bool is_reg() const);
- // Return true if this operand fits in one instruction so that no
- // 2-instruction solution with a load into the ip register is necessary. If
+ // Return the number of actual instructions required to implement the given
+ // instruction for this particular operand. This can be a single instruction,
+ // if no load into the ip register is necessary, or anything between 2 and 4
+ // instructions when we need to load from the constant pool (depending upon
+ // whether the constant pool entry is in the small or extended section). If
// the instruction this operand is used for is a MOV or MVN instruction the
// actual instruction to use is required for this calculation. For other
// instructions instr is ignored.
- bool is_single_instruction(const Assembler* assembler,
- Instr instr = 0) const;
+ //
+ // The value returned is only valid as long as no entries are added to the
+ // constant pool between this call and the actual instruction being emitted.
+ int instructions_required(const Assembler* assembler, Instr instr = 0) const;
bool must_output_reloc_info(const Assembler* assembler) const;
inline int32_t immediate() const {
class ConstantPoolBuilder BASE_EMBEDDED {
public:
explicit ConstantPoolBuilder();
- void AddEntry(Assembler* assm, const RelocInfo& rinfo);
+ ConstantPoolArray::LayoutSection AddEntry(Assembler* assm,
+ const RelocInfo& rinfo);
void Relocate(int pc_delta);
bool IsEmpty();
Handle<ConstantPoolArray> New(Isolate* isolate);
void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
- inline int count_of_64bit() const { return count_of_64bit_; }
- inline int count_of_code_ptr() const { return count_of_code_ptr_; }
- inline int count_of_heap_ptr() const { return count_of_heap_ptr_; }
- inline int count_of_32bit() const { return count_of_32bit_; }
+ inline ConstantPoolArray::LayoutSection current_section() const {
+ return current_section_;
+ }
+
+ inline ConstantPoolArray::NumberOfEntries* number_of_entries(
+ ConstantPoolArray::LayoutSection section) {
+ return &number_of_entries_[section];
+ }
+
+ inline ConstantPoolArray::NumberOfEntries* small_entries() {
+ return number_of_entries(ConstantPoolArray::SMALL_SECTION);
+ }
+
+ inline ConstantPoolArray::NumberOfEntries* extended_entries() {
+ return number_of_entries(ConstantPoolArray::EXTENDED_SECTION);
+ }
private:
- bool Is64BitEntry(RelocInfo::Mode rmode);
- bool Is32BitEntry(RelocInfo::Mode rmode);
- bool IsCodePtrEntry(RelocInfo::Mode rmode);
- bool IsHeapPtrEntry(RelocInfo::Mode rmode);
-
- // TODO(rmcilroy): This should ideally be a ZoneList, however that would mean
- // RelocInfo would need to subclass ZoneObject which it currently doesn't.
- std::vector<RelocInfo> entries_;
- std::vector<int> merged_indexes_;
- int count_of_64bit_;
- int count_of_code_ptr_;
- int count_of_heap_ptr_;
- int count_of_32bit_;
+ struct ConstantPoolEntry {
+ ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section,
+ int merged_index)
+ : rinfo_(rinfo), section_(section), merged_index_(merged_index) {}
+
+ RelocInfo rinfo_;
+ ConstantPoolArray::LayoutSection section_;
+ int merged_index_;
+ };
+
+ ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode);
+
+ std::vector<ConstantPoolEntry> entries_;
+ ConstantPoolArray::LayoutSection current_section_;
+ ConstantPoolArray::NumberOfEntries number_of_entries_[2];
};
struct VmovIndex {
// Manages the jump elimination optimization if the second parameter is true.
int branch_offset(Label* L, bool jump_elimination_allowed);
+ // Returns true if the given pc address is the start of a constant pool load
+ // instruction sequence.
+ INLINE(static bool is_constant_pool_load(Address pc));
+
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address constant_pool_entry_address(
static bool IsLdrRegisterImmediate(Instr instr);
static bool IsVldrDRegisterImmediate(Instr instr);
static Instr GetConsantPoolLoadPattern();
+ static Instr GetConsantPoolLoadMask();
+ static bool IsLdrPpRegOffset(Instr instr);
+ static Instr GetLdrPpRegOffsetPattern();
static bool IsLdrPpImmediateOffset(Instr instr);
static bool IsVldrDPpImmediateOffset(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
static bool IsMovT(Instr instr);
+ static Instr GetMovTPattern();
static bool IsMovW(Instr instr);
+ static Instr GetMovWPattern();
+ static Instr EncodeMovwImmediate(uint32_t immediate);
+ static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate);
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
// Generate the constant pool for the generated code.
void PopulateConstantPool(ConstantPoolArray* constant_pool);
- bool can_use_constant_pool() const {
- return is_constant_pool_available() && !constant_pool_full_;
- }
+ bool is_constant_pool_available() const { return constant_pool_available_; }
- void set_constant_pool_full() {
- constant_pool_full_ = true;
+ bool use_extended_constant_pool() const {
+ return constant_pool_builder_.current_section() ==
+ ConstantPoolArray::EXTENDED_SECTION;
}
+
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
(pc_offset() < no_const_pool_before_);
}
- bool is_constant_pool_available() const {
- return constant_pool_available_;
- }
-
void set_constant_pool_available(bool available) {
constant_pool_available_ = available;
}
// Indicates whether the constant pool can be accessed, which is only possible
// if the pp register points to the current code object's constant pool.
bool constant_pool_available_;
- // Indicates whether the constant pool is too full to accept new entries due
- // to the ldr instruction's limitted immediate offset range.
- bool constant_pool_full_;
// Code emission
inline void CheckBuffer();
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
void RecordRelocInfo(const RelocInfo& rinfo);
- void ConstantPoolAddEntry(const RelocInfo& rinfo);
+ ConstantPoolArray::LayoutSection ConstantPoolAddEntry(const RelocInfo& rinfo);
friend class RelocInfo;
friend class CodePatcher;
// * function: r1 or at sp.
//
// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed in r5.
+// In this case the offsets to the inline sites to patch are passed in r5
+// and r6.
// (See LCodeGen::DoInstanceOfKnownGlobal)
void InstanceofStub::Generate(MacroAssembler* masm) {
// Call site inlining and patching implies arguments in registers.
Register map = r3; // Map of the object.
const Register function = r1; // Function (rhs).
const Register prototype = r4; // Prototype of the function.
- const Register inline_site = r9;
const Register scratch = r2;
- const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
-
Label slow, loop, is_instance, is_not_instance, not_js_object;
if (!HasArgsInRegisters()) {
ASSERT(HasArgsInRegisters());
// Patch the (relocated) inlined map check.
- // The offset was stored in r5
+ // The map_load_offset was stored in r5
// (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
- const Register offset = r5;
- __ sub(inline_site, lr, offset);
+ const Register map_load_offset = r5;
+ __ sub(r9, lr, map_load_offset);
// Get the map location in r5 and patch it.
- __ GetRelocatedValueLocation(inline_site, offset);
- __ ldr(offset, MemOperand(offset));
- __ str(map, FieldMemOperand(offset, Cell::kValueOffset));
+ __ GetRelocatedValueLocation(r9, map_load_offset, scratch);
+ __ ldr(map_load_offset, MemOperand(map_load_offset));
+ __ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset));
}
// Register mapping: r3 is object map and r4 is function prototype.
} else {
// Patch the call site to return true.
__ LoadRoot(r0, Heap::kTrueValueRootIndex);
- __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // The bool_load_offset was stored in r6
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+ const Register bool_load_offset = r6;
+ __ sub(r9, lr, bool_load_offset);
// Get the boolean result location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
+ __ GetRelocatedValueLocation(r9, scratch, scratch2);
__ str(r0, MemOperand(scratch));
if (!ReturnTrueFalseObject()) {
} else {
// Patch the call site to return false.
__ LoadRoot(r0, Heap::kFalseValueRootIndex);
- __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+ // The bool_load_offset was stored in r6
+ // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+ const Register bool_load_offset = r6;
+ __ sub(r9, lr, bool_load_offset);
// Get the boolean result location in scratch and patch it.
- __ GetRelocatedValueLocation(inline_site, scratch);
+ __ GetRelocatedValueLocation(r9, scratch, scratch2);
__ str(r0, MemOperand(scratch));
if (!ReturnTrueFalseObject()) {
inline int Immed4Value() const { return Bits(19, 16); }
inline int ImmedMovwMovtValue() const {
return Immed4Value() << 12 | Offset12Value(); }
+ DECLARE_STATIC_ACCESSOR(ImmedMovwMovtValue);
// Fields used in Load/Store instructions
inline int PUValue() const { return Bits(24, 23); }
}
+static const int kProfileCounterResetSequenceLength = 5 * Assembler::kInstrSize;
+
+
void FullCodeGenerator::EmitProfilingCounterReset() {
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
+ PredictableCodeSizeScope predictable_code_size_scope(
+ masm_, kProfileCounterResetSequenceLength);
+ Label start;
+ __ bind(&start);
int reset_value = FLAG_interrupt_budget;
if (info_->is_debug()) {
// Detect debug break requests as soon as possible.
reset_value = FLAG_interrupt_budget >> 4;
}
__ mov(r2, Operand(profiling_counter_));
+ // The mov instruction above can take 1, 2 or 3 instructions depending on
+ // whether it uses an immediate load or a small or extended constant pool
+ // load - insert nops to pad the sequence out to 3 instructions.
+ ASSERT(masm_->InstructionsGeneratedSince(&start) <= 3);
+ while (masm_->InstructionsGeneratedSince(&start) != 3) {
+ __ nop();
+ }
__ mov(r3, Operand(Smi::FromInt(reset_value)));
__ str(r3, FieldMemOperand(r2, Cell::kValueOffset));
}
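A worked length check for the sequence above, assuming the Smi reset value fits a single ARM immediate:

// mov r2 (padded with nops)  3 * kInstrSize
// mov r3                     1 * kInstrSize
// str r3                     1 * kInstrSize
// total                      5 * kInstrSize == kProfileCounterResetSequenceLength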
Address load_address = pc - 2 * Assembler::kInstrSize;
if (!FLAG_enable_ool_constant_pool) {
ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
+ } else if (Assembler::IsLdrPpRegOffset(Memory::int32_at(load_address))) {
+ // This is an extended constant pool lookup.
+ load_address -= 2 * Assembler::kInstrSize;
+ ASSERT(Assembler::IsMovW(Memory::int32_at(load_address)));
+ ASSERT(Assembler::IsMovT(
+ Memory::int32_at(load_address + Assembler::kInstrSize)));
} else if (Assembler::IsMovT(Memory::int32_at(load_address))) {
+ // This is a movw / movt immediate load.
load_address -= Assembler::kInstrSize;
ASSERT(Assembler::IsMovW(Memory::int32_at(load_address)));
} else {
+ // This is a small constant pool lookup.
ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
}
return load_address;
// <decrement profiling counter>
// bpl ok
// ; load interrupt stub address into ip - either of:
- // ldr ip, [pc/pp, <constant pool offset>] | movw ip, <immed low>
- // | movt ip, <immed high>
+ //  ; <small cp load>      |  <extended cp load>  |  <immediate load>
+ //  ldr ip, [pc/pp, #imm]  |  movw ip, #imm       |  movw ip, #imm
+ //                         |  movt ip, #imm       |  movt ip, #imm
+ //                         |  ldr ip, [pp, ip]    |
// blx ip
+ // <reset profiling counter>
// ok-label
- // Calculate branch offet to the ok-label - this is the difference between
- // the branch address and |pc| (which points at <blx ip>) plus one instr.
- int branch_offset = pc + Assembler::kInstrSize - branch_address;
+ // Calculate the branch offset to the ok-label - this is the difference
+ // between the branch address and |pc| (which points at <blx ip>), adjusted
+ // for the PC read offset, plus the profiling counter reset sequence length.
+ int branch_offset = pc - Instruction::kPCReadOffset - branch_address +
+ kProfileCounterResetSequenceLength;
patcher.masm()->b(branch_offset, pl);
break;
}
// <decrement profiling counter>
// mov r0, r0 (NOP)
// ; load on-stack replacement address into ip - either of:
- // ldr ip, [pc/pp, <constant pool offset>] | movw ip, <immed low>
- // | movt ip, <immed high>
+ //  ; <small cp load>      |  <extended cp load>  |  <immediate load>
+ //  ldr ip, [pc/pp, #imm]  |  movw ip, #imm       |  movw ip, #imm
+ //                         |  movt ip, #imm       |  movt ip, #imm
+ //                         |  ldr ip, [pp, ip]    |
// blx ip
+ // <reset profiling counter>
// ok-label
patcher.masm()->nop();
break;
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() V8_OVERRIDE {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
+ &load_bool_);
}
virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
Label* map_check() { return &map_check_; }
+ Label* load_bool() { return &load_bool_; }
+
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
+ Label load_bool_;
};
DeferredInstanceOfKnownGlobal* deferred;
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch with
// the cached map.
- PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
__ mov(ip, Operand(Handle<Object>(cell)));
__ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
__ cmp(map, Operand(ip));
__ b(ne, &cache_miss);
+ __ bind(deferred->load_bool()); // Label for calculating code patching.
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
// with true or false.
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
+ Label* map_check,
+ Label* bool_load) {
InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
flags = static_cast<InstanceofStub::Flags>(
flags | InstanceofStub::kArgsInRegisters);
LoadContextFromDeferred(instr->context());
__ Move(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 4;
+
+ int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET);
+ int additional_delta = (call_size / Assembler::kInstrSize) + 4;
// Make sure that code size is predictable, since we use specific constant
// offsets in the code to find embedded values.
- PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
- int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
- Label before_push_delta;
- __ bind(&before_push_delta);
- __ BlockConstPoolFor(kAdditionalDelta);
- // r5 is used to communicate the offset to the location of the map check.
- __ mov(r5, Operand(delta * kPointerSize));
- // The mov above can generate one or two instructions. The delta was computed
- // for two instructions, so we need to pad here in case of one instruction.
- if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
- ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
- __ nop();
+ PredictableCodeSizeScope predictable(
+ masm_, (additional_delta + 1) * Assembler::kInstrSize);
+ // Make sure we don't emit any additional entries in the constant pool
+ // before the call, to ensure that CallCodeSize() computed the correct
+ // number of instructions for the constant pool load.
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ int map_check_delta =
+ masm_->InstructionsGeneratedSince(map_check) + additional_delta;
+ int bool_load_delta =
+ masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
+ Label before_push_delta;
+ __ bind(&before_push_delta);
+ __ BlockConstPoolFor(additional_delta);
+ // r5 is used to communicate the offset to the location of the map check.
+ __ mov(r5, Operand(map_check_delta * kPointerSize));
+ // r6 is used to communicate the offset to the location of the bool load.
+ __ mov(r6, Operand(bool_load_delta * kPointerSize));
+ // Each mov above can generate one or two instructions. The deltas were
+ // computed assuming two instructions each, so pad with nops until four
+ // instructions have been generated.
+ while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
+ __ nop();
+ }
}
CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
ASSERT(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
- __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Make sure we don't emit any additional entries in the constant pool
+ // before the call, to ensure that CallCodeSize() computed the correct
+ // number of instructions for the constant pool load.
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+ }
__ Call(target);
}
generator.AfterCall();
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
+ Label* map_check, Label* bool_load);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register result,
int MacroAssembler::CallSize(
Address target, RelocInfo::Mode rmode, Condition cond) {
- int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
- intptr_t immediate = reinterpret_cast<intptr_t>(target);
- if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
- size += kInstrSize;
- }
- return size;
+ Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
+ return kInstrSize +
+ mov_operand.instructions_required(this, mov_instr) * kInstrSize;
}
Address target,
RelocInfo::Mode rmode,
Condition cond) {
- int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
- intptr_t immediate = reinterpret_cast<intptr_t>(target);
- if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
- size += kInstrSize;
- }
- return size;
+ Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
+ return kInstrSize +
+ mov_operand.instructions_required(NULL, mov_instr) * kInstrSize;
}
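A worked example of the new calculation: a target requiring an extended constant pool load has instructions_required() == 3 (movw, movt, ldr), so CallSize is kInstrSize + 3 * kInstrSize = 4 * kInstrSize - the three-instruction load plus the blx. A small-section load gives 2 * kInstrSize and a movw/movt immediate load 3 * kInstrSize, matching the sizes the deleted fixed-size code produced.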
!src2.must_output_reloc_info(this) &&
src2.immediate() == 0) {
mov(dst, Operand::Zero(), LeaveCC, cond);
- } else if (!src2.is_single_instruction(this) &&
+ } else if (src2.instructions_required(this) != 1 &&
!src2.must_output_reloc_info(this) &&
CpuFeatures::IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) {
object_size -= bits;
shift += 8;
Operand bits_operand(bits);
- ASSERT(bits_operand.is_single_instruction(this));
+ ASSERT(bits_operand.instructions_required(this) == 1);
add(scratch2, source, bits_operand, SetCC, cond);
source = scratch2;
cond = cc;
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
- Register result) {
- const uint32_t kLdrOffsetMask = (1 << 12) - 1;
+ Register result,
+ Register scratch) {
+ Label small_constant_pool_load, load_result;
ldr(result, MemOperand(ldr_location));
+
+ if (FLAG_enable_ool_constant_pool) {
+ // If the instruction matches the small constant pool load pattern, take
+ // the small path; otherwise this is an extended constant pool load.
+ and_(scratch, result, Operand(GetConsantPoolLoadMask()));
+ teq(scratch, Operand(GetConsantPoolLoadPattern()));
+ b(eq, &small_constant_pool_load);
+ if (emit_debug_code()) {
+ // Check that the instruction sequence is:
+ // movw reg, #offset_low
+ // movt reg, #offset_high
+ // ldr reg, [pp, reg]
+ Instr patterns[] = {GetMovWPattern(), GetMovTPattern(),
+ GetLdrPpRegOffsetPattern()};
+ for (int i = 0; i < 3; i++) {
+ ldr(result, MemOperand(ldr_location, i * kInstrSize));
+ and_(result, result, Operand(patterns[i]));
+ cmp(result, Operand(patterns[i]));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
+ }
+ // Result was clobbered. Restore it.
+ ldr(result, MemOperand(ldr_location));
+ }
+
+ // Get the offset into the constant pool. First extract the movw immediate
+ // (imm4 at instruction bits 19:16, imm12 at bits 11:0) into result.
+ and_(scratch, result, Operand(0xfff));
+ mov(ip, Operand(result, LSR, 4));
+ and_(ip, ip, Operand(0xf000));
+ orr(result, scratch, Operand(ip));
+ // Then extract the movt immediate and OR it in as the high half of result.
+ ldr(scratch, MemOperand(ldr_location, kInstrSize));
+ and_(ip, scratch, Operand(0xf0000));
+ orr(result, result, Operand(ip, LSL, 12));
+ and_(scratch, scratch, Operand(0xfff));
+ orr(result, result, Operand(scratch, LSL, 16));
+
+ b(&load_result);
+ }
+
+ bind(&small_constant_pool_load);
if (emit_debug_code()) {
// Check that the instruction is a ldr reg, [<pc or pp> + offset] .
and_(result, result, Operand(GetConsantPoolLoadPattern()));
// Result was clobbered. Restore it.
ldr(result, MemOperand(ldr_location));
}
- // Get the address of the constant.
+
+ // Get the offset into the constant pool.
+ const uint32_t kLdrOffsetMask = (1 << 12) - 1;
and_(result, result, Operand(kLdrOffsetMask));
+
+ bind(&load_result);
+ // Get the address of the constant.
if (FLAG_enable_ool_constant_pool) {
add(result, pp, Operand(result));
} else {
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
- // Jump, Call, and Ret pseudo instructions implementing inter-working.
- void Jump(Register target, Condition cond = al);
- void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
- void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+
+ // Returns the size of a call in instructions. Note, the value returned is
+ // only valid as long as no entries are added to the constant pool between
+ // checking the call size and emitting the actual call.
static int CallSize(Register target, Condition cond = al);
- void Call(Register target, Condition cond = al);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
int CallStubSize(CodeStub* stub,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
Address target,
RelocInfo::Mode rmode,
Condition cond = al);
+
+ // Jump, Call, and Ret pseudo instructions implementing inter-working.
+ void Jump(Register target, Condition cond = al);
+ void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+ void Call(Register target, Condition cond = al);
void Call(Address target, RelocInfo::Mode rmode,
Condition cond = al,
TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
// Get the location of a relocated constant (its address in the constant pool)
// from its load site.
- void GetRelocatedValueLocation(Register ldr_location,
- Register result);
+ void GetRelocatedValueLocation(Register ldr_location, Register result,
+ Register scratch);
void ClampUint8(Register output_reg, Register input_reg);
// Continue just after the slot.
after_break_target_ = addr + Assembler::kDebugBreakSlotLength;
- } else if (IsDebugBreak(Assembler::target_address_at(addr, *code))) {
- // We now know that there is still a debug break call at the target address,
- // so the break point is still there and the original code will hold the
- // address to jump to in order to complete the call which is replaced by a
- // call to DebugBreakXXX.
-
- // Find the corresponding address in the original code.
- addr += original_code->instruction_start() - code->instruction_start();
-
- // Install jump to the call address in the original code. This will be the
- // call which was overwritten by the call to DebugBreakXXX.
- after_break_target_ = Assembler::target_address_at(addr, *original_code);
} else {
- // There is no longer a break point present. Don't try to look in the
- // original code as the running code will have the right address. This takes
- // care of the case where the last break point is removed from the function
- // and therefore no "original code" is available.
- after_break_target_ = Assembler::target_address_at(addr, *code);
+ addr = Assembler::target_address_from_return_address(frame->pc());
+ if (IsDebugBreak(Assembler::target_address_at(addr, *code))) {
+ // We now know that there is still a debug break call at the target
+ // address, so the break point is still there and the original code will
+ // hold the address to jump to in order to complete the call which is
+ // replaced by a call to DebugBreakXXX.
+
+ // Find the corresponding address in the original code.
+ addr += original_code->instruction_start() - code->instruction_start();
+
+ // Install jump to the call address in the original code. This will be the
+ // call which was overwritten by the call to DebugBreakXXX.
+ after_break_target_ = Assembler::target_address_at(addr, *original_code);
+ } else {
+ // There is no longer a break point present. Don't try to look in the
+ // original code as the running code will have the right address. This
+ // takes care of the case where the last break point is removed from the
+ // function and therefore no "original code" is available.
+ after_break_target_ = Assembler::target_address_at(addr, *code);
+ }
}
}
}
+void ConstantPoolArray::NumberOfEntries::increment(Type type) {
+ ASSERT(type < NUMBER_OF_TYPES);
+ element_counts_[type]++;
+}
+
+
+bool ConstantPoolArray::NumberOfEntries::equals(
+ const ConstantPoolArray::NumberOfEntries& other) const {
+ for (int i = 0; i < NUMBER_OF_TYPES; i++) {
+ if (element_counts_[i] != other.element_counts_[i]) return false;
+ }
+ return true;
+}
+
+
+bool ConstantPoolArray::NumberOfEntries::is_empty() const {
+ return total_count() == 0;
+}
+
+
+int ConstantPoolArray::NumberOfEntries::count_of(Type type) const {
+ ASSERT(type < NUMBER_OF_TYPES);
+ return element_counts_[type];
+}
+
+
+int ConstantPoolArray::NumberOfEntries::base_of(Type type) const {
+ int base = 0;
+ ASSERT(type < NUMBER_OF_TYPES);
+ for (int i = 0; i < type; i++) {
+ base += element_counts_[i];
+ }
+ return base;
+}
+
+
+int ConstantPoolArray::NumberOfEntries::total_count() const {
+ int count = 0;
+ for (int i = 0; i < NUMBER_OF_TYPES; i++) {
+ count += element_counts_[i];
+ }
+ return count;
+}
+
+
+bool ConstantPoolArray::NumberOfEntries::are_in_range(int min, int max) const {
+ for (int i = FIRST_TYPE; i < NUMBER_OF_TYPES; i++) {
+ if (element_counts_[i] < min || element_counts_[i] > max) {
+ return false;
+ }
+ }
+ return true;
+}
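A worked example of the layout arithmetic, using the type order INT64, CODE_PTR, HEAP_PTR, INT32 assumed throughout this patch: with counts {2, 1, 3, 4}, base_of(HEAP_PTR) == 2 + 1 == 3, base_of(INT32) == 2 + 1 + 3 == 6, and total_count() == 10.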
+
+
+int ConstantPoolArray::Iterator::next_index() {
+ ASSERT(!is_finished());
+ int ret = next_index_++;
+ update_section();
+ return ret;
+}
+
+
+bool ConstantPoolArray::Iterator::is_finished() {
+ return next_index_ > array_->last_index(type_, final_section_);
+}
+
+
+void ConstantPoolArray::Iterator::update_section() {
+ if (next_index_ > array_->last_index(type_, current_section_) &&
+ current_section_ != final_section_) {
+ ASSERT(final_section_ == EXTENDED_SECTION);
+ current_section_ = EXTENDED_SECTION;
+ next_index_ = array_->first_index(type_, EXTENDED_SECTION);
+ }
+}
+
+
bool ConstantPoolArray::is_extended_layout() {
uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
return IsExtendedField::decode(small_layout_1);
}
-int ConstantPoolArray::Iterator::next_index() {
- ASSERT(!is_finished());
- int ret = next_index_++;
- update_section();
- return ret;
-}
-
-
-bool ConstantPoolArray::Iterator::is_finished() {
- return next_index_ > array_->last_index(type_, final_section_);
-}
-
-
-void ConstantPoolArray::Iterator::update_section() {
- if (next_index_ > array_->last_index(type_, current_section_) &&
- current_section_ != final_section_) {
- ASSERT(final_section_ == EXTENDED_SECTION);
- current_section_ = EXTENDED_SECTION;
- next_index_ = array_->first_index(type_, EXTENDED_SECTION);
- }
-}
-
-
WriteBarrierMode HeapObject::GetWriteBarrierMode(
const DisallowHeapAllocation& promise) {
Heap* heap = GetHeap();
void ConstantPoolArray::ConstantPoolIterateBody(ObjectVisitor* v) {
- ConstantPoolArray::Iterator code_iter(this, ConstantPoolArray::CODE_PTR);
- while (!code_iter.is_finished()) {
- v->VisitCodeEntry(reinterpret_cast<Address>(
- RawFieldOfElementAt(code_iter.next_index())));
- }
-
- ConstantPoolArray::Iterator heap_iter(this, ConstantPoolArray::HEAP_PTR);
- while (!heap_iter.is_finished()) {
- v->VisitPointer(RawFieldOfElementAt(heap_iter.next_index()));
+ // Unfortunately the serializer relies on pointers within an object being
+ // visited in-order, so we have to iterate both the code and heap pointers in
+ // the small section before doing so in the extended section.
+ for (int s = 0; s <= final_section(); ++s) {
+ LayoutSection section = static_cast<LayoutSection>(s);
+ ConstantPoolArray::Iterator code_iter(this, ConstantPoolArray::CODE_PTR,
+ section);
+ while (!code_iter.is_finished()) {
+ v->VisitCodeEntry(reinterpret_cast<Address>(
+ RawFieldOfElementAt(code_iter.next_index())));
+ }
+
+ ConstantPoolArray::Iterator heap_iter(this, ConstantPoolArray::HEAP_PTR,
+ section);
+ while (!heap_iter.is_finished()) {
+ v->VisitPointer(RawFieldOfElementAt(heap_iter.next_index()));
+ }
}
}
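A worked example of the visitation order above: with one CODE_PTR and one HEAP_PTR entry in each section, the visitor sees small CODE_PTR, small HEAP_PTR, extended CODE_PTR, extended HEAP_PTR - the same order in which the entries appear in the object, which is what the serializer requires.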
class NumberOfEntries BASE_EMBEDDED {
public:
+ inline NumberOfEntries() {
+ for (int i = 0; i < NUMBER_OF_TYPES; i++) {
+ element_counts_[i] = 0;
+ }
+ }
+
inline NumberOfEntries(int int64_count, int code_ptr_count,
int heap_ptr_count, int int32_count) {
element_counts_[INT64] = int64_count;
element_counts_[INT32] = array->number_of_entries(INT32, section);
}
- inline int count_of(Type type) const {
- ASSERT(type < NUMBER_OF_TYPES);
- return element_counts_[type];
- }
-
- inline int total_count() const {
- int count = 0;
- for (int i = 0; i < NUMBER_OF_TYPES; i++) {
- count += element_counts_[i];
- }
- return count;
- }
-
- inline int are_in_range(int min, int max) const {
- for (int i = FIRST_TYPE; i < NUMBER_OF_TYPES; i++) {
- if (element_counts_[i] < min || element_counts_[i] > max) {
- return false;
- }
- }
- return true;
- }
+ inline void increment(Type type);
+ inline bool equals(const NumberOfEntries& other) const;
+ inline bool is_empty() const;
+ inline int count_of(Type type) const;
+ inline int base_of(Type type) const;
+ inline int total_count() const;
+ inline bool are_in_range(int min, int max) const;
private:
int element_counts_[NUMBER_OF_TYPES];
class Iterator BASE_EMBEDDED {
public:
inline Iterator(ConstantPoolArray* array, Type type)
- : array_(array), type_(type), final_section_(array->final_section()) {
- current_section_ = SMALL_SECTION;
- next_index_ = array->first_index(type, SMALL_SECTION);
+ : array_(array),
+ type_(type),
+ final_section_(array->final_section()),
+ current_section_(SMALL_SECTION),
+ next_index_(array->first_index(type, SMALL_SECTION)) {
+ update_section();
+ }
+
+ inline Iterator(ConstantPoolArray* array, Type type, LayoutSection section)
+ : array_(array),
+ type_(type),
+ final_section_(section),
+ current_section_(section),
+ next_index_(array->first_index(type, section)) {
update_section();
}
inline int next_index();
inline bool is_finished();
+
private:
inline void update_section();
ConstantPoolArray* array_;
// Garbage collection support.
inline int size();
+
+ inline static int MaxInt64Offset(int number_of_int64) {
+ return kFirstEntryOffset + (number_of_int64 * kInt64Size);
+ }
+
inline static int SizeFor(const NumberOfEntries& small) {
int size = kFirstEntryOffset +
(small.count_of(INT64) * kInt64Size) +
int size = 0;
switch (identity()) {
case OLD_POINTER_SPACE:
- size = 96 * kPointerSize * KB;
+ size = 112 * kPointerSize * KB;
break;
case OLD_DATA_SPACE:
size = 192 * KB;