const Register& rn,
const Operand& operand) {
ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
+ IsImmAddSub(-operand.ImmediateValue())) {
AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, SUB);
} else {
AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
const Register& rn,
const Operand& operand) {
ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
+ IsImmAddSub(-operand.ImmediateValue())) {
AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, SUB);
} else {
AddSubMacro(rd, rn, operand, SetFlags, ADD);
const Register& rn,
const Operand& operand) {
ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
+ IsImmAddSub(-operand.ImmediateValue())) {
AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, ADD);
} else {
AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
const Register& rn,
const Operand& operand) {
ASSERT(allow_macro_instructions_);
- if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+ if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
+ IsImmAddSub(-operand.ImmediateValue())) {
AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, ADD);
} else {
AddSubMacro(rd, rn, operand, SetFlags, SUB);
} else {
// Immediate can't be encoded: synthesize using move immediate.
Register temp = temps.AcquireSameSizeAs(rn);
- Mov(temp, immediate);
+ Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
if (rd.Is(csp)) {
// If rd is the stack pointer we cannot use it as the destination
// register so we use the temp register as an intermediate again.
- Logical(temp, rn, temp, op);
+ Logical(temp, rn, imm_operand, op);
Mov(csp, temp);
AssertStackConsistency();
} else {
- Logical(rd, rn, temp, op);
+ Logical(rd, rn, imm_operand, op);
}
}
// applying move-keep operations to move-zero and move-inverted initial
// values.
- unsigned reg_size = rd.SizeInBits();
- unsigned n, imm_s, imm_r;
- if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
- // Immediate can be represented in a move zero instruction. Movz can't
- // write to the stack pointer.
- movz(rd, imm);
- } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
- // Immediate can be represented in a move inverted instruction. Movn can't
- // write to the stack pointer.
- movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
- } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
- // Immediate can be represented in a logical orr instruction.
- LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
- } else {
+ // Try to move the immediate in one instruction, and if that fails, switch to
+ // using multiple instructions.
+ if (!TryOneInstrMoveImmediate(rd, imm)) {
+ unsigned reg_size = rd.SizeInBits();
+
// Generic immediate case. Imm will be represented by
// [imm3, imm2, imm1, imm0], where each imm is 16 bits.
// A move-zero or move-inverted is generated for the first non-zero or
}
+// Attempt to materialize |imm| in |dst| with a single instruction.
+// Returns true (and emits the instruction) on success, false otherwise.
+bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
+                                              int64_t imm) {
+  int reg_size = dst.SizeInBits();
+  unsigned n, imm_s, imm_r;
+
+  if (!dst.IsSP() && IsImmMovz(imm, reg_size)) {
+    // A single movz does the job. Movz cannot target the stack pointer, hence
+    // the IsSP guard.
+    movz(dst, imm);
+    return true;
+  }
+
+  if (!dst.IsSP() && IsImmMovn(imm, reg_size)) {
+    // The bitwise inverse is movn-encodable. Like movz, movn cannot target
+    // the stack pointer. For W registers, mask the inverse to 32 bits.
+    movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
+    return true;
+  }
+
+  if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
+    // The value is a valid logical immediate: orr it with the zero register.
+    LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
+    return true;
+  }
+
+  // No single-instruction encoding exists for this immediate.
+  return false;
+}
+
+
+// Move an immediate into |dst|, preferring encodings that leave part of the
+// work to the shifted-operand form of a following instruction. Returns the
+// operand (possibly a shifted register) that reproduces |imm| when used by
+// that following instruction.
+Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
+                                                  int64_t imm) {
+  int reg_size = dst.SizeInBits();
+
+  // Encode the immediate in a single move instruction, if possible.
+  if (TryOneInstrMoveImmediate(dst, imm)) {
+    // The move was successful; nothing to do here.
+  } else {
+    // Pre-shift the immediate to the least-significant bits of the register.
+    int shift_low = CountTrailingZeros(imm, reg_size);
+    int64_t imm_low = imm >> shift_low;
+
+    // Pre-shift the immediate to the most-significant bits of the register. We
+    // insert set bits in the least-significant bits, as this creates a
+    // different immediate that may be encodable using movn or orr-immediate.
+    // If this new immediate is encodable, the set bits will be eliminated by
+    // the post shift on the following instruction.
+    // Note: the mask must be built from a 64-bit one; shift_high can exceed
+    // 31, so (1 << shift_high) on a plain int would be undefined behavior.
+    int shift_high = CountLeadingZeros(imm, reg_size);
+    int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
+
+    if (TryOneInstrMoveImmediate(dst, imm_low)) {
+      // The new immediate has been moved into the destination's low bits:
+      // return a new leftward-shifting operand.
+      return Operand(dst, LSL, shift_low);
+    } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
+      // The new immediate has been moved into the destination's high bits:
+      // return a new rightward-shifting operand.
+      return Operand(dst, LSR, shift_high);
+    } else {
+      // Use the generic move operation to set up the immediate.
+      Mov(dst, imm);
+    }
+  }
+  // Either the whole immediate is already in dst, or Mov placed it there:
+  // the plain register is the operand.
+  return Operand(dst);
+}
+
+
void MacroAssembler::AddSubMacro(const Register& rd,
const Register& rn,
const Operand& operand,
(operand.IsShiftedRegister() && (operand.shift() == ROR))) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(rn);
- Mov(temp, operand);
- AddSub(rd, rn, temp, S, op);
+ if (operand.IsImmediate()) {
+ Operand imm_operand =
+ MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
+ AddSub(rd, rn, imm_operand, S, op);
+ } else {
+ Mov(temp, operand);
+ AddSub(rd, rn, temp, S, op);
+ }
} else {
AddSub(rd, rn, operand, S, op);
}