}
-// Operand.
-template<typename T>
-Operand::Operand(Handle<T> value) : reg_(NoReg) {
- initialize_handle(value);
-}
-
-
+// Immediate.
// Default initializer is for int types
-template<typename int_t>
-struct OperandInitializer {
+template<typename T>
+struct ImmediateInitializer {
static const bool kIsIntType = true;
- static inline RelocInfo::Mode rmode_for(int_t) {
- return sizeof(int_t) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
+ static inline RelocInfo::Mode rmode_for(T) {
+ return sizeof(T) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
}
- static inline int64_t immediate_for(int_t t) {
- STATIC_ASSERT(sizeof(int_t) <= 8);
+ static inline int64_t immediate_for(T t) {
+ STATIC_ASSERT(sizeof(T) <= 8);
return t;
}
};
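
// Illustrative sketch (not part of the original patch): the default
// initializer above keys the relocation mode off the integer width, e.g.
//
//   ImmediateInitializer<int32_t>::rmode_for(0);  // -> RelocInfo::NONE32
//   ImmediateInitializer<int64_t>::rmode_for(0);  // -> RelocInfo::NONE64
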
template<>
-struct OperandInitializer<Smi*> {
+struct ImmediateInitializer<Smi*> {
static const bool kIsIntType = false;
static inline RelocInfo::Mode rmode_for(Smi* t) {
return RelocInfo::NONE64;
template<>
-struct OperandInitializer<ExternalReference> {
+struct ImmediateInitializer<ExternalReference> {
static const bool kIsIntType = false;
static inline RelocInfo::Mode rmode_for(ExternalReference t) {
return RelocInfo::EXTERNAL_REFERENCE;
template<typename T>
-Operand::Operand(T t)
- : immediate_(OperandInitializer<T>::immediate_for(t)),
- reg_(NoReg),
- rmode_(OperandInitializer<T>::rmode_for(t)) {}
+Immediate::Immediate(Handle<T> value) {
+ InitializeHandle(value);
+}
template<typename T>
-Operand::Operand(T t, RelocInfo::Mode rmode)
- : immediate_(OperandInitializer<T>::immediate_for(t)),
- reg_(NoReg),
+Immediate::Immediate(T t)
+ : value_(ImmediateInitializer<T>::immediate_for(t)),
+ rmode_(ImmediateInitializer<T>::rmode_for(t)) {}
+
+
+template<typename T>
+Immediate::Immediate(T t, RelocInfo::Mode rmode)
+ : value_(ImmediateInitializer<T>::immediate_for(t)),
rmode_(rmode) {
- STATIC_ASSERT(OperandInitializer<T>::kIsIntType);
+ STATIC_ASSERT(ImmediateInitializer<T>::kIsIntType);
}
+// Operand.
+template<typename T>
+Operand::Operand(Handle<T> value) : immediate_(value), reg_(NoReg) {}
+
+
+template<typename T>
+Operand::Operand(T t) : immediate_(t), reg_(NoReg) {}
+
+
+template<typename T>
+Operand::Operand(T t, RelocInfo::Mode rmode)
+ : immediate_(t, rmode),
+ reg_(NoReg) {}
+
+
Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
- : reg_(reg),
+ : immediate_(0),
+ reg_(reg),
shift_(shift),
extend_(NO_EXTEND),
- shift_amount_(shift_amount),
- rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
+ shift_amount_(shift_amount) {
ASSERT(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
ASSERT(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
ASSERT(!reg.IsSP());
Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
- : reg_(reg),
+ : immediate_(0),
+ reg_(reg),
shift_(NO_SHIFT),
extend_(extend),
- shift_amount_(shift_amount),
- rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
+ shift_amount_(shift_amount) {
ASSERT(reg.IsValid());
ASSERT(shift_amount <= 4);
ASSERT(!reg.IsSP());
bool Operand::IsZero() const {
if (IsImmediate()) {
- return immediate() == 0;
+ return ImmediateValue() == 0;
} else {
return reg().IsZero();
}
}
-int64_t Operand::immediate() const {
+Immediate Operand::immediate() const {
ASSERT(IsImmediate());
return immediate_;
}
+int64_t Operand::ImmediateValue() const {
+ ASSERT(IsImmediate());
+ return immediate_.value();
+}
+
+
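// Note: Operand::immediate() now returns the full Immediate (value plus
// rmode), while Operand::ImmediateValue() is the raw int64_t shortcut used
// by the macro assembler call sites updated below.
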
Register Operand::reg() const {
ASSERT(IsShiftedRegister() || IsExtendedRegister());
return reg_;
ASSERT(base.Is64Bits() && !base.IsZero());
if (offset.IsImmediate()) {
- offset_ = offset.immediate();
+ offset_ = offset.ImmediateValue();
regoffset_ = NoReg;
} else if (offset.IsShiftedRegister()) {
}
+LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
+ } else {
+ ASSERT(rt.IsFPRegister());
+ return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
+ }
+}
+
+
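// Illustrative examples (not in the original patch) of the opcode selection
// performed by LoadLiteralOpFor above:
//
//   LoadLiteralOpFor(x0);  // LDR_x_lit: 64-bit integer register
//   LoadLiteralOpFor(w0);  // LDR_w_lit: 32-bit integer register
//   LoadLiteralOpFor(d0);  // LDR_d_lit: 64-bit FP register
//   LoadLiteralOpFor(s0);  // LDR_s_lit: 32-bit FP register
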
int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
ASSERT(kStartOfLabelLinkChain == 0);
int offset = LinkAndGetByteOffsetTo(label);
}
-void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
- LoadRelocatedValue(rt, operand, LDR_x_lit);
-}
-
-
inline void Assembler::CheckBufferSpace() {
ASSERT(pc_ < (buffer_ + buffer_size_));
if (buffer_space() < kGap) {
}
-void Operand::initialize_handle(Handle<Object> handle) {
+void Immediate::InitializeHandle(Handle<Object> handle) {
AllowDeferredHandleDereference using_raw_address;
// Verify all Objects referred to by code are NOT in new space.
Object* obj = *handle;
if (obj->IsHeapObject()) {
ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
- immediate_ = reinterpret_cast<intptr_t>(handle.location());
+ value_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
- immediate_ = reinterpret_cast<intptr_t>(obj);
+ value_ = reinterpret_cast<intptr_t>(obj);
rmode_ = RelocInfo::NONE64;
}
}
bool Operand::NeedsRelocation(const Assembler* assembler) const {
- if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
+ RelocInfo::Mode rmode = immediate_.rmode();
+
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
return assembler->serializer_enabled();
}
- return !RelocInfo::IsNone(rmode_);
+ return !RelocInfo::IsNone(rmode);
}
}
-void Assembler::ldr(const Register& rt, uint64_t imm) {
- // TODO(all): Constant pool may be garbage collected. Hence we cannot store
- // arbitrary values in them. Manually move it for now. Fix
- // MacroAssembler::Fmov when this is implemented.
- UNIMPLEMENTED();
+void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) {
+ // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
+ // constant pool. It should not be emitted.
+ ASSERT(!rt.IsZero());
+ Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
}
-void Assembler::ldr(const FPRegister& ft, double imm) {
- // TODO(all): Constant pool may be garbage collected. Hence we cannot store
- // arbitrary values in them. Manually move it for now. Fix
- // MacroAssembler::Fmov when this is implemented.
- UNIMPLEMENTED();
-}
-
+void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
+ // Currently we only support 64-bit literals.
+ ASSERT(rt.Is64Bits());
-void Assembler::ldr(const FPRegister& ft, float imm) {
- // TODO(all): Constant pool may be garbage collected. Hence we cannot store
- // arbitrary values in them. Manually move it for now. Fix
- // MacroAssembler::Fmov when this is implemented.
- UNIMPLEMENTED();
+ RecordRelocInfo(imm.rmode(), imm.value());
+ BlockConstPoolFor(1);
+ // The load will be patched when the constant pool is emitted; the
+ // patching code expects a load literal with offset 0.
+ ldr_pcrel(rt, 0);
}
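
// Usage sketch (illustrative, not in the original patch; mirrors the call
// sites later in this patch): the load is emitted with offset 0 and rewritten
// to point at its pool entry when the constant pool is flushed, e.g.
//
//   __ ldr(scratch, Immediate(Handle<Object>(cell)));  // patched at emission
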
ASSERT(rd.SizeInBits() == rn.SizeInBits());
ASSERT(!operand.NeedsRelocation(this));
if (operand.IsImmediate()) {
- int64_t immediate = operand.immediate();
+ int64_t immediate = operand.ImmediateValue();
ASSERT(IsImmAddSub(immediate));
Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
ASSERT(rd.SizeInBits() == rn.SizeInBits());
ASSERT(!operand.NeedsRelocation(this));
if (operand.IsImmediate()) {
- int64_t immediate = operand.immediate();
+ int64_t immediate = operand.ImmediateValue();
unsigned reg_size = rd.SizeInBits();
ASSERT(immediate != 0);
Instr ccmpop;
ASSERT(!operand.NeedsRelocation(this));
if (operand.IsImmediate()) {
- int64_t immediate = operand.immediate();
+ int64_t immediate = operand.ImmediateValue();
ASSERT(IsImmConditionalCompare(immediate));
ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
} else {
}
-void Assembler::LoadLiteral(const CPURegister& rt, int offset_from_pc) {
- ASSERT((offset_from_pc & ((1 << kLiteralEntrySizeLog2) - 1)) == 0);
- // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
- // constant pool. It should not be emitted.
- ASSERT(!rt.Is(xzr));
- Emit(LDR_x_lit |
- ImmLLiteral(offset_from_pc >> kLiteralEntrySizeLog2) |
- Rt(rt));
-}
-
-
-void Assembler::LoadRelocatedValue(const CPURegister& rt,
- const Operand& operand,
- LoadLiteralOp op) {
- int64_t imm = operand.immediate();
- ASSERT(is_int32(imm) || is_uint32(imm) || (rt.Is64Bits()));
- RecordRelocInfo(operand.rmode(), imm);
- BlockConstPoolFor(1);
- Emit(op | ImmLLiteral(0) | Rt(rt));
-}
-
-
// Test if a given value can be encoded in the immediate field of a logical
// instruction.
// If it can be encoded, the function returns true, and values pointed to by n,
#define kCallerSaved CPURegList::GetCallerSaved()
#define kCallerSavedFP CPURegList::GetCallerSavedFP()
+// -----------------------------------------------------------------------------
+// Immediates.
+class Immediate {
+ public:
+ template<typename T>
+ inline explicit Immediate(Handle<T> handle);
+
+ // This is allowed to be an implicit constructor because Immediate is
+ // a wrapper class that doesn't normally perform any type conversion.
+ template<typename T>
+ inline Immediate(T value); // NOLINT(runtime/explicit)
+
+ template<typename T>
+ inline Immediate(T value, RelocInfo::Mode rmode);
+
+ int64_t value() const { return value_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+
+ private:
+ void InitializeHandle(Handle<Object> value);
+
+ int64_t value_;
+ RelocInfo::Mode rmode_;
+};
+
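// A minimal usage sketch (illustrative, not part of this patch); 'obj' is a
// hypothetical Handle<Object> local:
//
//   Immediate a(0x1234);                          // rmode derived from width
//   Immediate b(Smi::FromInt(7));                 // Smi* -> RelocInfo::NONE64
//   Immediate c(INT64_C(-1), RelocInfo::NONE64);  // explicit rmode, int only
//   Immediate d(obj);                             // heap object -> EMBEDDED_OBJECT
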
// -----------------------------------------------------------------------------
// Operands.
inline Operand(T t); // NOLINT(runtime/explicit)
// Implicit constructor for int types.
- template<typename int_t>
- inline Operand(int_t t, RelocInfo::Mode rmode);
+ template<typename T>
+ inline Operand(T t, RelocInfo::Mode rmode);
inline bool IsImmediate() const;
inline bool IsShiftedRegister() const;
// which helps in the encoding of instructions that use the stack pointer.
inline Operand ToExtendedRegister() const;
- inline int64_t immediate() const;
+ inline Immediate immediate() const;
+ inline int64_t ImmediateValue() const;
inline Register reg() const;
inline Shift shift() const;
inline Extend extend() const;
inline unsigned shift_amount() const;
// Relocation information.
- RelocInfo::Mode rmode() const { return rmode_; }
- void set_rmode(RelocInfo::Mode rmode) { rmode_ = rmode; }
bool NeedsRelocation(const Assembler* assembler) const;
// Helpers
inline static Operand UntagSmiAndScale(Register smi, int scale);
private:
- void initialize_handle(Handle<Object> value);
- int64_t immediate_;
+ Immediate immediate_;
Register reg_;
Shift shift_;
Extend extend_;
unsigned shift_amount_;
- RelocInfo::Mode rmode_;
};
// Memory instructions.
- // Load literal from pc + offset_from_pc.
- void LoadLiteral(const CPURegister& rt, int offset_from_pc);
-
// Load integer or FP register.
void ldr(const CPURegister& rt, const MemOperand& src);
void stnp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& dst);
- // Load literal to register.
- void ldr(const Register& rt, uint64_t imm);
+ // Load literal to register from a pc-relative address.
+ void ldr_pcrel(const CPURegister& rt, int imm19);
- // Load literal to FP register.
- void ldr(const FPRegister& ft, double imm);
- void ldr(const FPRegister& ft, float imm);
+ // Load literal to register.
+ void ldr(const CPURegister& rt, const Immediate& imm);
// Move instructions. The default shift of -1 indicates that the move
// instruction will calculate an appropriate 16-bit immediate and left shift
void CheckVeneerPool(bool force_emit, bool require_jump,
int margin = kVeneerDistanceMargin);
-
class BlockPoolsScope {
public:
explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
};
- // Available for constrained code generation scopes. Prefer
- // MacroAssembler::Mov() when possible.
- inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
-
protected:
inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
const CPURegister& rt, const CPURegister& rt2);
static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
const CPURegister& rt, const CPURegister& rt2);
+ static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
// Remove the specified branch from the unbound label link chain.
// If available, a veneer for this label can be used for other branches in the
const CPURegister& rt2,
const MemOperand& addr,
LoadStorePairNonTemporalOp op);
- // Register the relocation information for the operand and load its value
- // into rt.
- void LoadRelocatedValue(const CPURegister& rt,
- const Operand& operand,
- LoadLiteralOp op);
void ConditionalSelect(const Register& rd,
const Register& rn,
const Register& rm,
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
-const unsigned kLiteralEntrySize = 4;
-const unsigned kLiteralEntrySizeLog2 = 2;
+const unsigned kLoadLiteralScaleLog2 = 2;
const unsigned kMaxLoadLiteralRange = 1 * MB;
const unsigned kNumberOfRegisters = 32;
// The first instruction of a patched return sequence must be a load literal
// loading the address of the debug break return code.
- patcher.LoadLiteral(ip0, 3 * kInstructionSize);
+ patcher.ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
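// Note: ldr_pcrel takes its offset in instructions (a scaled imm19 field),
// not bytes: 3 * kInstructionSize == 12, and 12 >> kLoadLiteralScaleLog2 == 3.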
// TODO(all): check the following is correct.
// The debug break return code will push a frame and call statically compiled
// code. By using blr, even though control will not return after the branch,
// The first instruction of a patched debug break slot must be a load literal
// loading the address of the debug break slot code.
- patcher.LoadLiteral(ip0, 2 * kInstructionSize);
+ patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
// TODO(all): check the following is correct.
// The debug break slot code will push a frame and call statically compiled
// code. By using blr, even though control will not return after the branch,
Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
- patcher.LoadLiteral(ip0, 2 * kInstructionSize);
+ patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
patcher.blr(ip0);
patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
switch (format[2]) {
case 'L': { // ILLiteral - Immediate Load Literal.
AppendToOutput("pc%+" PRId64,
- instr->ImmLLiteral() << kLiteralEntrySizeLog2);
+ instr->ImmLLiteral() << kLoadLiteralScaleLog2);
return 9;
}
case 'S': { // ILS - Immediate Load/Store.
// TODO(all): This implementation is overkill as it supports 2**31+1
// arguments; consider how to improve it without creating a security
// hole.
- __ LoadLiteral(ip0, 3 * kInstructionSize);
+ __ ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
__ add(current_sp, current_sp, ip0);
__ ret();
__ dc64(kXRegSize * (info_->scope()->num_parameters() + 1));
void Instruction::SetImmLLiteral(Instruction* source) {
ASSERT(IsAligned(DistanceTo(source), kInstructionSize));
- ptrdiff_t offset = DistanceTo(source) >> kLiteralEntrySizeLog2;
+ ptrdiff_t offset = DistanceTo(source) >> kLoadLiteralScaleLog2;
Instr imm = Assembler::ImmLLiteral(offset);
Instr mask = ImmLLiteral_mask;
void SetImmLLiteral(Instruction* source);
uint8_t* LiteralAddress() {
- int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
+ int offset = ImmLLiteral() << kLoadLiteralScaleLog2;
return reinterpret_cast<uint8_t*>(this) + offset;
}
__ bind(&map_check);
// Will be patched with the cached map.
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
- __ LoadRelocated(scratch, Operand(Handle<Object>(cell)));
+ __ ldr(scratch, Immediate(Handle<Object>(cell)));
__ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ cmp(map, scratch);
__ b(&cache_miss, ne);
// above, so check the size of the code generated.
ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
// Will be patched with the cached result.
- __ LoadRelocated(result, Operand(factory()->the_hole_value()));
+ __ ldr(result, Immediate(factory()->the_hole_value()));
}
__ B(&done);
StatusFlags nzcv,
Condition cond) {
ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMN);
} else {
ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
}
StatusFlags nzcv,
Condition cond) {
ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMP);
} else {
ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
}
const Register& rn,
const Operand& operand) {
ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, SUB);
} else {
AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
}
const Register& rn,
const Operand& operand) {
ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, SUB);
} else {
AddSubMacro(rd, rn, operand, SetFlags, ADD);
}
const Register& rn,
const Operand& operand) {
ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, ADD);
} else {
AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
}
const Register& rn,
const Operand& operand) {
ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.immediate() < 0)) {
- AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, ADD);
} else {
AddSubMacro(rd, rn, operand, SetFlags, SUB);
}
ASSERT(allow_macro_instructions_);
ASSERT(!rd.IsZero());
if (operand.IsImmediate()) {
- Mov(rd, -operand.immediate());
+ Mov(rd, -operand.ImmediateValue());
} else {
Sub(rd, AppropriateZeroRegFor(rd), operand);
}
} else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
fmov(fd, xzr);
} else {
- UseScratchRegisterScope temps(this);
- Register tmp = temps.AcquireX();
- // TODO(all): Use Assembler::ldr(const FPRegister& ft, double imm).
- Mov(tmp, double_to_rawbits(imm));
- Fmov(fd, tmp);
+ Ldr(fd, imm);
}
}
}
-void MacroAssembler::Ldr(const FPRegister& ft, double imm) {
+void MacroAssembler::Ldr(const CPURegister& rt, const Immediate& imm) {
ASSERT(allow_macro_instructions_);
- ldr(ft, imm);
+ ldr(rt, imm);
}
-void MacroAssembler::Ldr(const Register& rt, uint64_t imm) {
+void MacroAssembler::Ldr(const CPURegister& rt, double imm) {
ASSERT(allow_macro_instructions_);
- ASSERT(!rt.IsZero());
- ldr(rt, imm);
+ ASSERT(rt.Is64Bits());
+ ldr(rt, Immediate(double_to_rawbits(imm)));
}
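
// Usage sketch (illustrative, not in the original patch): Fmov above relies
// on this overload to materialize arbitrary doubles from the constant pool:
//
//   __ Ldr(d0, 1.234);  // emits a literal load of double_to_rawbits(1.234)
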
InstructionAccurateScope scope(this);
ASSERT(space.IsImmediate());
// Align to 16 bytes.
- uint64_t imm = RoundUp(space.immediate(), 0x10);
+ uint64_t imm = RoundUp(space.ImmediateValue(), 0x10);
ASSERT(is_uint24(imm));
Register source = StackPointer();
const Operand& rhs,
Condition cond,
Label* label) {
- if (rhs.IsImmediate() && (rhs.immediate() == 0) &&
+ if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) &&
((cond == eq) || (cond == ne))) {
if (cond == eq) {
Cbz(lhs, label);
if (operand.NeedsRelocation(this)) {
Register temp = temps.AcquireX();
- LoadRelocated(temp, operand);
+ Ldr(temp, operand.immediate());
Logical(rd, rn, temp, op);
} else if (operand.IsImmediate()) {
- int64_t immediate = operand.immediate();
+ int64_t immediate = operand.ImmediateValue();
unsigned reg_size = rd.SizeInBits();
ASSERT(rd.Is64Bits() || is_uint32(immediate));
Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
if (operand.NeedsRelocation(this)) {
- LoadRelocated(dst, operand);
+ Ldr(dst, operand.immediate());
} else if (operand.IsImmediate()) {
// Call the macro assembler for generic immediates.
- Mov(dst, operand.immediate());
+ Mov(dst, operand.ImmediateValue());
} else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
// Emit a shift instruction if moving a shifted register. This operation
ASSERT(allow_macro_instructions_);
if (operand.NeedsRelocation(this)) {
- LoadRelocated(rd, operand);
+ Ldr(rd, operand.immediate());
mvn(rd, rd);
} else if (operand.IsImmediate()) {
// Call the macro assembler for generic immediates.
- Mov(rd, ~operand.immediate());
+ Mov(rd, ~operand.ImmediateValue());
} else if (operand.IsExtendedRegister()) {
// Emit two instructions for the extend case. This differs from Mov, as
if (operand.NeedsRelocation(this)) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- LoadRelocated(temp, operand);
+ Ldr(temp, operand.immediate());
ConditionalCompareMacro(rn, temp, nzcv, cond, op);
} else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
- (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
+ (operand.IsImmediate() &&
+ IsImmConditionalCompare(operand.ImmediateValue()))) {
// The immediate can be encoded in the instruction, or the operand is an
// unshifted register: call the assembler.
ConditionalCompare(rn, operand, nzcv, cond, op);
if (operand.IsImmediate()) {
// Immediate argument. Handle special cases of 0, 1 and -1 using the
// zero register.
- int64_t imm = operand.immediate();
+ int64_t imm = operand.ImmediateValue();
Register zr = AppropriateZeroRegFor(rn);
if (imm == 0) {
csel(rd, rn, zr, cond);
} else {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(rn);
- Mov(temp, operand.immediate());
+ Mov(temp, imm);
csel(rd, rn, temp, cond);
}
} else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
if (operand.NeedsRelocation(this)) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- LoadRelocated(temp, operand);
+ Ldr(temp, operand.immediate());
AddSubMacro(rd, rn, temp, S, op);
- } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
- (rn.IsZero() && !operand.IsShiftedRegister()) ||
+ } else if ((operand.IsImmediate() &&
+ !IsImmAddSub(operand.ImmediateValue())) ||
+ (rn.IsZero() && !operand.IsShiftedRegister()) ||
(operand.IsShiftedRegister() && (operand.shift() == ROR))) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(rn);
if (operand.NeedsRelocation(this)) {
Register temp = temps.AcquireX();
- LoadRelocated(temp, operand);
+ Ldr(temp, operand.immediate());
AddSubWithCarryMacro(rd, rn, temp, S, op);
} else if (operand.IsImmediate() ||
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
- ASSERT((total_size.immediate() % 16) == 0);
+ ASSERT((total_size.ImmediateValue() % 16) == 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
- ASSERT((total_size.immediate() % 16) == 0);
+ ASSERT((total_size.ImmediateValue() % 16) == 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
if (offset.IsImmediate()) {
- ASSERT(offset.immediate() >= 0);
+ ASSERT(offset.ImmediateValue() >= 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
Check(le, kStackAccessBelowStackPointer);
void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
if (offset.IsImmediate()) {
- ASSERT(offset.immediate() >= 0);
+ ASSERT(offset.ImmediateValue() >= 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
Check(le, kStackAccessBelowStackPointer);
movk(temp, (imm >> 16) & 0xffff, 16);
movk(temp, (imm >> 32) & 0xffff, 32);
} else {
- LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode));
+ Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
}
Blr(temp);
#ifdef DEBUG
//
// A branch (br) is used rather than a call (blr) because this code replaces
// the frame setup code that would normally preserve lr.
- __ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
+ __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
__ adr(x0, &start);
__ br(ip0);
// IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
inline void Ldpsw(const Register& rt,
const Register& rt2,
const MemOperand& src);
- // Provide both double and float interfaces for FP immediate loads, rather
- // than relying on implicit C++ casts. This allows signalling NaNs to be
- // preserved when the immediate matches the format of fd. Most systems convert
- // signalling NaNs to quiet NaNs when converting between float and double.
- inline void Ldr(const FPRegister& ft, double imm);
- inline void Ldr(const FPRegister& ft, float imm);
- inline void Ldr(const Register& rt, uint64_t imm);
+ // Load a literal from the inline constant pool.
+ inline void Ldr(const CPURegister& rt, const Immediate& imm);
+ // Helper function for double immediate.
+ inline void Ldr(const CPURegister& rt, double imm);
inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
inline void Lsr(const Register& rd, const Register& rn, unsigned shift);