#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
#define V8_ARM_ASSEMBLER_ARM_INL_H_
-#include "arm/assembler-arm.h"
+#include "src/arm/assembler-arm.h"
-#include "cpu.h"
-#include "debug.h"
+#include "src/assembler.h"
+#include "src/debug.h"
namespace v8 {
namespace internal {
+bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); }
+
+
int Register::NumAllocatableRegisters() {
return kMaxNumAllocatableRegisters;
}
}
+int DwVfpRegister::NumReservedRegisters() {
+ return kNumReservedRegisters;
+}
+
+
int DwVfpRegister::NumAllocatableRegisters() {
return NumRegisters() - kNumReservedRegisters;
}
+// static
+int DwVfpRegister::NumAllocatableAliasedRegisters() {
+ return LowDwVfpRegister::kMaxNumLowRegisters - kNumReservedRegisters;
+}
+
+
int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
- ASSERT(!reg.is(kDoubleRegZero));
- ASSERT(!reg.is(kScratchDoubleReg));
+ DCHECK(!reg.is(kDoubleRegZero));
+ DCHECK(!reg.is(kScratchDoubleReg));
if (reg.code() > kDoubleRegZero.code()) {
return reg.code() - kNumReservedRegisters;
}
DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
- ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+ DCHECK(index >= 0 && index < NumAllocatableRegisters());
+ DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
kNumReservedRegisters - 1);
if (index >= kDoubleRegZero.code()) {
return from_code(index + kNumReservedRegisters);
}
-void RelocInfo::apply(intptr_t delta) {
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
if (RelocInfo::IsInternalReference(rmode_)) {
  // Absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
Address RelocInfo::target_address() {
- ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address>(Assembler::target_pointer_address_at(pc_));
+ if (FLAG_enable_ool_constant_pool ||
+ Assembler::IsMovW(Memory::int32_at(pc_))) {
+  // We return the PC for ool constant pool since this function is used by the
+  // serializer and expects the address to reside within the code object.
+ return reinterpret_cast<Address>(pc_);
+ } else {
+ DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+ return constant_pool_entry_address();
+ }
+}
+
+
+Address RelocInfo::constant_pool_entry_address() {
+ DCHECK(IsInConstantPool());
+ return Assembler::constant_pool_entry_address(pc_, host_->constant_pool());
}
}
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, target);
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
+void RelocInfo::set_target_address(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+ Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
+ host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target_code));
Object* RelocInfo::target_object() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}
Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<Object>(reinterpret_cast<Object**>(
- Assembler::target_pointer_at(pc_)));
+ Assembler::target_address_at(pc_, host_)));
}
-Object** RelocInfo::target_object_address() {
- // Provide a "natural pointer" to the embedded object,
- // which can be de-referenced during heap iteration.
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- reconstructed_obj_ptr_ =
- reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
- return &reconstructed_obj_ptr_;
-}
-
-
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- ASSERT(!target->IsConsString());
- Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>(target));
- if (mode == UPDATE_WRITE_BARRIER &&
+void RelocInfo::set_target_object(Object* target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ Assembler::set_target_address_at(pc_, host_,
+ reinterpret_cast<Address>(target),
+ icache_flush_mode);
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->incremental_marking()->RecordWrite(
}
-Address* RelocInfo::target_reference_address() {
- ASSERT(rmode_ == EXTERNAL_REFERENCE);
- reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
- return &reconstructed_adr_ptr_;
+Address RelocInfo::target_reference() {
+ DCHECK(rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
- ASSERT(IsRuntimeEntry(rmode_));
+ DCHECK(IsRuntimeEntry(rmode_));
return target_address();
}
void RelocInfo::set_target_runtime_entry(Address target,
- WriteBarrierMode mode) {
- ASSERT(IsRuntimeEntry(rmode_));
- if (target_address() != target) set_target_address(target, mode);
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ if (target_address() != target)
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
}
Handle<Cell> RelocInfo::target_cell_handle() {
- ASSERT(rmode_ == RelocInfo::CELL);
+ DCHECK(rmode_ == RelocInfo::CELL);
Address address = Memory::Address_at(pc_);
return Handle<Cell>(reinterpret_cast<Cell**>(address));
}
Cell* RelocInfo::target_cell() {
- ASSERT(rmode_ == RelocInfo::CELL);
+ DCHECK(rmode_ == RelocInfo::CELL);
return Cell::FromValueAddress(Memory::Address_at(pc_));
}
-void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
- ASSERT(rmode_ == RelocInfo::CELL);
+void RelocInfo::set_target_cell(Cell* cell,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::CELL);
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
- if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
  // TODO(1550): We are passing NULL as a slot because a cell can never be on
  // an evacuation candidate.
host()->GetHeap()->incremental_marking()->RecordWrite(
}
-static const int kNoCodeAgeSequenceLength = 3;
+static const int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;
Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
Code* RelocInfo::code_age_stub() {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
return Code::GetCodeFromTargetAddress(
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)));
+ Memory::Address_at(pc_ +
+ (kNoCodeAgeSequenceLength - Assembler::kInstrSize)));
}
-void RelocInfo::set_code_age_stub(Code* stub) {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)) =
+void RelocInfo::set_code_age_stub(Code* stub,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Memory::Address_at(pc_ +
+ (kNoCodeAgeSequenceLength - Assembler::kInstrSize)) =
stub->instruction_start();
}
Address RelocInfo::call_address() {
// The 2 instructions offset assumes patched debug break slot or return
// sequence.
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
}
void RelocInfo::set_call_address(Address target) {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
if (host() != NULL) {
Object** RelocInfo::call_object_address() {
- ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}
+void RelocInfo::WipeOut() {
+ DCHECK(IsEmbeddedObject(rmode_) ||
+ IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) ||
+ IsExternalReference(rmode_));
+ Assembler::set_target_address_at(pc_, host_, NULL);
+}
+
+
bool RelocInfo::IsPatchedReturnSequence() {
Instr current_instr = Assembler::instr_at(pc_);
Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
// A patched return sequence is:
// ldr ip, [pc, #0]
// blx ip
- return ((current_instr & kLdrPCMask) == kLdrPCPattern)
- && ((next_instr & kBlxRegMask) == kBlxRegPattern);
+ return Assembler::IsLdrPcImmediateOffset(current_instr) &&
+ Assembler::IsBlxReg(next_instr);
}
visitor->VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence())) &&
isolate->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
-#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(this);
}
StaticVisitor::VisitExternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
-#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(heap, this);
-#endif
} else if (RelocInfo::IsRuntimeEntry(mode)) {
StaticVisitor::VisitRuntimeEntry(this);
}
}
-Address Assembler::target_pointer_address_at(Address pc) {
- Address target_pc = pc;
- Instr instr = Memory::int32_at(target_pc);
- // If we have a bx instruction, the instruction before the bx is
- // what we need to patch.
- static const int32_t kBxInstMask = 0x0ffffff0;
- static const int32_t kBxInstPattern = 0x012fff10;
- if ((instr & kBxInstMask) == kBxInstPattern) {
- target_pc -= kInstrSize;
- instr = Memory::int32_at(target_pc);
- }
-
- // With a blx instruction, the instruction before is what needs to be patched.
- if ((instr & kBlxRegMask) == kBlxRegPattern) {
- target_pc -= kInstrSize;
- instr = Memory::int32_at(target_pc);
- }
-
- ASSERT(IsLdrPcImmediateOffset(instr));
- int offset = instr & 0xfff; // offset_12 is unsigned
- if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
- // Verify that the constant pool comes after the instruction referencing it.
- ASSERT(offset >= -4);
- return target_pc + offset + 8;
-}
-
-
-Address Assembler::target_pointer_at(Address pc) {
- if (IsMovW(Memory::int32_at(pc))) {
- ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
- Instruction* instr = Instruction::At(pc);
- Instruction* next_instr = Instruction::At(pc + kInstrSize);
- return reinterpret_cast<Address>(
- (next_instr->ImmedMovwMovtValue() << 16) |
- instr->ImmedMovwMovtValue());
- }
- return Memory::Address_at(target_pointer_address_at(pc));
-}
-
-
Address Assembler::target_address_from_return_address(Address pc) {
// Returns the address of the call target from the return address that will
// be returned to after a call.
- // Call sequence on V7 or later is :
+ // Call sequence on V7 or later is:
// movw ip, #... @ call address low 16
// movt ip, #... @ call address high 16
// blx ip
// @ return address
- // Or pre-V7 or cases that need frequent patching:
- // ldr ip, [pc, #...] @ call address
+ // For V6 when the constant pool is unavailable, it is:
+ // mov ip, #... @ call address low 8
+ // orr ip, ip, #... @ call address 2nd 8
+ // orr ip, ip, #... @ call address 3rd 8
+ // orr ip, ip, #... @ call address high 8
+ // blx ip
+ // @ return address
+ // In cases that need frequent patching, the address is in the
+ // constant pool. It could be a small constant pool load:
+ // ldr ip, [pc / pp, #...] @ call address
+ // blx ip
+ // @ return address
+ // Or an extended constant pool load (ARMv7):
+ // movw ip, #...
+ // movt ip, #...
+ // ldr ip, [pc, ip] @ call address
+ // blx ip
+ // @ return address
+ // Or an extended constant pool load (ARMv6):
+ // mov ip, #...
+ // orr ip, ip, #...
+ // orr ip, ip, #...
+ // orr ip, ip, #...
+ // ldr ip, [pc, ip] @ call address
// blx ip
// @ return address
Address candidate = pc - 2 * Assembler::kInstrSize;
Instr candidate_instr(Memory::int32_at(candidate));
- if (IsLdrPcImmediateOffset(candidate_instr)) {
+ if (IsLdrPcImmediateOffset(candidate_instr) |
+ IsLdrPpImmediateOffset(candidate_instr)) {
+ return candidate;
+ } else {
+ if (IsLdrPpRegOffset(candidate_instr)) {
+ candidate -= Assembler::kInstrSize;
+ }
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ candidate -= 1 * Assembler::kInstrSize;
+ DCHECK(IsMovW(Memory::int32_at(candidate)) &&
+ IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize)));
+ } else {
+ candidate -= 3 * Assembler::kInstrSize;
+ DCHECK(
+ IsMovImmed(Memory::int32_at(candidate)) &&
+ IsOrrImmed(Memory::int32_at(candidate + Assembler::kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(candidate + 2 * Assembler::kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(candidate + 3 * Assembler::kInstrSize)));
+ }
return candidate;
}
- candidate = pc - 3 * Assembler::kInstrSize;
- ASSERT(IsMovW(Memory::int32_at(candidate)) &&
- IsMovT(Memory::int32_at(candidate + kInstrSize)));
- return candidate;
+}
+
+
+Address Assembler::break_address_from_return_address(Address pc) {
+ return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
}
Address Assembler::return_address_from_call_start(Address pc) {
- if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
+ if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) |
+ IsLdrPpImmediateOffset(Memory::int32_at(pc))) {
+ // Load from constant pool, small section.
return pc + kInstrSize * 2;
} else {
- ASSERT(IsMovW(Memory::int32_at(pc)));
- ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
- return pc + kInstrSize * 3;
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ DCHECK(IsMovW(Memory::int32_at(pc)));
+ DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ if (IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize))) {
+ // Load from constant pool, extended section.
+ return pc + kInstrSize * 4;
+ } else {
+ // A movw / movt load immediate.
+ return pc + kInstrSize * 3;
+ }
+ } else {
+ DCHECK(IsMovImmed(Memory::int32_at(pc)));
+ DCHECK(IsOrrImmed(Memory::int32_at(pc + kInstrSize)));
+ DCHECK(IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)));
+ DCHECK(IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ if (IsLdrPpRegOffset(Memory::int32_at(pc + 4 * kInstrSize))) {
+ // Load from constant pool, extended section.
+ return pc + kInstrSize * 6;
+ } else {
+ // A mov / orr load immediate.
+ return pc + kInstrSize * 5;
+ }
+ }
}
}
void Assembler::deserialization_set_special_target_at(
- Address constant_pool_entry, Address target) {
- Memory::Address_at(constant_pool_entry) = target;
+ Address constant_pool_entry, Code* code, Address target) {
+ if (FLAG_enable_ool_constant_pool) {
+ set_target_address_at(constant_pool_entry, code, target);
+ } else {
+ Memory::Address_at(constant_pool_entry) = target;
+ }
}
-void Assembler::set_external_target_at(Address constant_pool_entry,
- Address target) {
- Memory::Address_at(constant_pool_entry) = target;
+bool Assembler::is_constant_pool_load(Address pc) {
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ return !Assembler::IsMovW(Memory::int32_at(pc)) ||
+ (FLAG_enable_ool_constant_pool &&
+ Assembler::IsLdrPpRegOffset(
+ Memory::int32_at(pc + 2 * Assembler::kInstrSize)));
+ } else {
+ return !Assembler::IsMovImmed(Memory::int32_at(pc)) ||
+ (FLAG_enable_ool_constant_pool &&
+ Assembler::IsLdrPpRegOffset(
+ Memory::int32_at(pc + 4 * Assembler::kInstrSize)));
+ }
}
-static Instr EncodeMovwImmediate(uint32_t immediate) {
- ASSERT(immediate < 0x10000);
- return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+Address Assembler::constant_pool_entry_address(
+ Address pc, ConstantPoolArray* constant_pool) {
+ if (FLAG_enable_ool_constant_pool) {
+ DCHECK(constant_pool != NULL);
+ int cp_offset;
+ if (!CpuFeatures::IsSupported(ARMv7) && IsMovImmed(Memory::int32_at(pc))) {
+ DCHECK(IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)) &&
+ IsLdrPpRegOffset(Memory::int32_at(pc + 4 * kInstrSize)));
+ // This is an extended constant pool lookup (ARMv6).
+ Instr mov_instr = instr_at(pc);
+ Instr orr_instr_1 = instr_at(pc + kInstrSize);
+ Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
+ Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize);
+ cp_offset = DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
+ DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3);
+ } else if (IsMovW(Memory::int32_at(pc))) {
+ DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)) &&
+ IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize)));
+ // This is an extended constant pool lookup (ARMv7).
+ Instruction* movw_instr = Instruction::At(pc);
+ Instruction* movt_instr = Instruction::At(pc + kInstrSize);
+ cp_offset = (movt_instr->ImmedMovwMovtValue() << 16) |
+ movw_instr->ImmedMovwMovtValue();
+ } else {
+ // This is a small constant pool lookup.
+ DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ cp_offset = GetLdrRegisterImmediateOffset(Memory::int32_at(pc));
+ }
+ return reinterpret_cast<Address>(constant_pool) + cp_offset;
+ } else {
+ DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
+ Instr instr = Memory::int32_at(pc);
+ return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
+ }
}
-void Assembler::set_target_pointer_at(Address pc, Address target) {
- if (IsMovW(Memory::int32_at(pc))) {
- ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
- uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
- uint32_t immediate = reinterpret_cast<uint32_t>(target);
- uint32_t intermediate = instr_ptr[0];
- intermediate &= ~EncodeMovwImmediate(0xFFFF);
- intermediate |= EncodeMovwImmediate(immediate & 0xFFFF);
- instr_ptr[0] = intermediate;
- intermediate = instr_ptr[1];
- intermediate &= ~EncodeMovwImmediate(0xFFFF);
- intermediate |= EncodeMovwImmediate(immediate >> 16);
- instr_ptr[1] = intermediate;
- ASSERT(IsMovW(Memory::int32_at(pc)));
- ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
- CPU::FlushICache(pc, 2 * kInstrSize);
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
+ if (is_constant_pool_load(pc)) {
+ // This is a constant pool lookup. Return the value in the constant pool.
+ return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
+ } else if (CpuFeatures::IsSupported(ARMv7)) {
+  // This is a movw / movt immediate load. Return the immediate.
+ DCHECK(IsMovW(Memory::int32_at(pc)) &&
+ IsMovT(Memory::int32_at(pc + kInstrSize)));
+ Instruction* movw_instr = Instruction::At(pc);
+ Instruction* movt_instr = Instruction::At(pc + kInstrSize);
+ return reinterpret_cast<Address>(
+ (movt_instr->ImmedMovwMovtValue() << 16) |
+ movw_instr->ImmedMovwMovtValue());
} else {
- ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
- Memory::Address_at(target_pointer_address_at(pc)) = target;
+  // This is a mov / orr immediate load. Return the immediate.
+ DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
+ IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ Instr mov_instr = instr_at(pc);
+ Instr orr_instr_1 = instr_at(pc + kInstrSize);
+ Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
+ Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize);
+ Address ret = reinterpret_cast<Address>(
+ DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
+ DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3));
+ return ret;
+ }
+}
+
+
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target,
+ ICacheFlushMode icache_flush_mode) {
+ if (is_constant_pool_load(pc)) {
+ // This is a constant pool lookup. Update the entry in the constant pool.
+ Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // CPU::FlushICache(pc, sizeof(target));
+ // CpuFeatures::FlushICache(pc, sizeof(target));
// However, on ARM, no instruction is actually patched in the case
// of embedded constants of the form:
- // ldr ip, [pc, #...]
+ // ldr ip, [pp, #...]
// since the instruction accessing this address in the constant pool remains
// unchanged.
+ } else if (CpuFeatures::IsSupported(ARMv7)) {
+  // This is a movw / movt immediate load. Patch the immediate embedded in
+  // the instructions.
+ DCHECK(IsMovW(Memory::int32_at(pc)));
+ DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
+ uint32_t immediate = reinterpret_cast<uint32_t>(target);
+ instr_ptr[0] = PatchMovwImmediate(instr_ptr[0], immediate & 0xFFFF);
+ instr_ptr[1] = PatchMovwImmediate(instr_ptr[1], immediate >> 16);
+ DCHECK(IsMovW(Memory::int32_at(pc)));
+ DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CpuFeatures::FlushICache(pc, 2 * kInstrSize);
+ }
+ } else {
+  // This is a mov / orr immediate load. Patch the immediate embedded in
+  // the instructions.
+ DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
+ IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
+ uint32_t immediate = reinterpret_cast<uint32_t>(target);
+ instr_ptr[0] = PatchShiftImm(instr_ptr[0], immediate & kImm8Mask);
+ instr_ptr[1] = PatchShiftImm(instr_ptr[1], immediate & (kImm8Mask << 8));
+ instr_ptr[2] = PatchShiftImm(instr_ptr[2], immediate & (kImm8Mask << 16));
+ instr_ptr[3] = PatchShiftImm(instr_ptr[3], immediate & (kImm8Mask << 24));
+ DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
+ IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+ IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ CpuFeatures::FlushICache(pc, 4 * kInstrSize);
+ }
}
}
-Address Assembler::target_address_at(Address pc) {
- return target_pointer_at(pc);
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
- set_target_pointer_at(pc, target);
-}
-
-
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_ARM_INL_H_