const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
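+// For each pair of constants below, (instr & Mask) == Pattern recognizes the
+// opcode, and XOR-ing the instruction with Flip converts between the two
+// complementary forms: mov<->mvn, cmp<->cmn, add<->sub and and<->bic.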
+const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
+const Instr kMovMvnPattern = 0xd * B21;
+const Instr kMovMvnFlip = B22;
+const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
+const Instr kCmpCmnPattern = 0x15 * B20;
+const Instr kCmpCmnFlip = B21;
+const Instr kALUMask = 0x6f * B21;
+const Instr kAddPattern = 0x4 * B21;
+const Instr kSubPattern = 0x2 * B21;
+const Instr kBicPattern = 0xe * B21;
+const Instr kAndPattern = 0x0 * B21;
+const Instr kAddSubFlip = 0x6 * B21;
+const Instr kAndBicFlip = 0xe * B21;
+
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kRdMask = 0x0000f000;
static const int kRdShift = 12;
// Low-level code emission routines depending on the addressing mode.
+// If this returns true then you have to use the rotate_imm and immed_8
+// that it returns, because it may have already changed the instruction
+// to match them!
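+// For example, mov r0, Operand(-1) has no valid immediate encoding, but
+// flipping it to mvn r0, #0 (whose complemented immediate does fit)
+// computes the same result in a single instruction.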
static bool fits_shifter(uint32_t imm32,
uint32_t* rotate_imm,
uint32_t* immed_8,
Instr* instr) {
return true;
}
}
- // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
- if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
- if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
- *instr ^= 0x2*B21;
- return true;
+ // If the opcode is one with a complementary version and the complementary
+ // immediate fits, change the opcode.
+ if (instr != NULL) {
+ if ((*instr & kMovMvnMask) == kMovMvnPattern) {
+ if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= kMovMvnFlip;
+ return true;
+ }
+ } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
+ if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= kCmpCmnFlip;
+ return true;
+ }
+ } else {
+ Instr alu_insn = (*instr & kALUMask);
+ if (alu_insn == kAddPattern ||
+ alu_insn == kSubPattern) {
+ if (fits_shifter(-imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= kAddSubFlip;
+ return true;
+ }
+ } else if (alu_insn == kAndPattern ||
+ alu_insn == kBicPattern) {
+ if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= kAndBicFlip;
+ return true;
+ }
+ }
}
}
return false;
}
+bool Operand::is_single_instruction() const {
+ if (rm_.is_valid()) return true;
+ if (MustUseIp(rmode_)) return false;
+ uint32_t dummy1, dummy2;
+ return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
+}
+
+
void Assembler::addrmod1(Instr instr,
Register rn,
Register rd,
// Return true if this is a register operand.
INLINE(bool is_reg() const);
+ // Return true if this operand fits in one instruction so that no
+ // 2-instruction solution with a load into the ip register is necessary.
+ bool is_single_instruction() const;
+
+ inline int32_t immediate() const {
+ ASSERT(!rm_.is_valid());
+ return imm32_;
+ }
+
Register rm() const { return rm_; }
private:
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
+extern const Instr kMovMvnMask;
+extern const Instr kMovMvnPattern;
+extern const Instr kMovMvnFlip;
+
+extern const Instr kCmpCmnMask;
+extern const Instr kCmpCmnPattern;
+extern const Instr kCmpCmnFlip;
+
+extern const Instr kALUMask;
+extern const Instr kAddPattern;
+extern const Instr kSubPattern;
+extern const Instr kAndPattern;
+extern const Instr kBicPattern;
+extern const Instr kAddSubFlip;
+extern const Instr kAndBicFlip;
class Assembler : public Malloced {
public:
__ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
- __ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
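+ // A heap object pointer has its low bits set to kHeapObjectTag, so
+ // subtracting the tag clears them without needing a full bit mask.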
+ ASSERT(kSmiTag == 0);
+ __ sub(scratch1, scratch1, Operand(kHeapObjectTag));
// Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
FieldMemOperand(result, JSArray::kElementsOffset));
// Clear the heap tag on the elements array.
- __ and_(elements_array_storage,
- elements_array_storage,
- Operand(~kHeapObjectTagMask));
+ ASSERT(kSmiTag == 0);
+ __ sub(elements_array_storage,
+ elements_array_storage,
+ Operand(kHeapObjectTag));
// Initialize the fixed array and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// The field instance sizes contains both pre-allocated property fields and
// in-object properties.
__ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
- __ and_(r6,
- r0,
- Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
- __ add(r3, r3, Operand(r6, LSR, Map::kPreAllocatedPropertyFieldsByte * 8));
- __ and_(r6, r0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
- __ sub(r3, r3, Operand(r6, LSR, Map::kInObjectPropertiesByte * 8), SetCC);
+ __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8);
+ __ add(r3, r3, Operand(r6));
+ __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8);
+ __ sub(r3, r3, Operand(r6), SetCC);
// Done if no extra properties are to be allocated.
__ b(eq, &allocated);
switch (op) {
case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
- case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
+ case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
default: UNREACHABLE();
}
frame_->EmitPush(tos, TypeInfo::Smi());
switch (op) {
case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break;
case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
- case Token::BIT_AND: __ and_(tos, tos, Operand(value)); break;
+ case Token::BIT_AND: __ And(tos, tos, Operand(value)); break;
default: UNREACHABLE();
}
deferred->BindExit();
// Gets the wrong answer for 0, but we already checked for that case above.
__ CountLeadingZeros(source_, mantissa, zeros_);
// Compute exponent and or it into the exponent register.
- // We use mantissa as a scratch register here.
- __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
+ // We use mantissa as a scratch register here. Use a fudge factor to
+ // divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts
+ // that fit in the ARM's constant field.
+ int fudge = 0x400;
+ __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
+ __ add(mantissa, mantissa, Operand(fudge));
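+ // (0x41e splits as 0x400 + 0x1e; each part is an encodable ARM immediate.)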
__ orr(exponent,
exponent,
Operand(mantissa, LSL, HeapNumber::kExponentShift));
bool never_nan_nan) {
Label not_identical;
Label heap_number, return_equal;
- Register exp_mask_reg = r5;
__ cmp(r0, r1);
__ b(ne, ¬_identical);
// The two objects are identical. If we know that one of them isn't NaN then
// we now know they test equal.
if (cc != eq || !never_nan_nan) {
- __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
-
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
// They are both equal and they are not both Smis so both of them are not
// Read top bits of double representation (second word of value).
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
// Test that exponent bits are all set.
- __ and_(r3, r2, Operand(exp_mask_reg));
- __ cmp(r3, Operand(exp_mask_reg));
+ __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r3, Operand(-1));
__ b(ne, &return_equal);
// Shift out flag and all exponent bits, retaining only mantissa.
Register rhs_mantissa = exp_first ? r1 : r0;
Register lhs_mantissa = exp_first ? r3 : r2;
Label one_is_nan, neither_is_nan;
- Label lhs_not_nan_exp_mask_is_loaded;
-
- Register exp_mask_reg = r5;
- __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
- __ and_(r4, lhs_exponent, Operand(exp_mask_reg));
- __ cmp(r4, Operand(exp_mask_reg));
- __ b(ne, &lhs_not_nan_exp_mask_is_loaded);
+ __ Sbfx(r4,
+ lhs_exponent,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r4, Operand(-1));
+ __ b(ne, lhs_not_nan);
__ mov(r4,
Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
SetCC);
__ b(ne, &one_is_nan);
__ bind(lhs_not_nan);
- __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
- __ bind(&lhs_not_nan_exp_mask_is_loaded);
- __ and_(r4, rhs_exponent, Operand(exp_mask_reg));
- __ cmp(r4, Operand(exp_mask_reg));
+ __ Sbfx(r4,
+ rhs_exponent,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r4, Operand(-1));
__ b(ne, &neither_is_nan);
__ mov(r4,
Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
// Get exponent word.
__ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
// Get exponent alone in scratch2.
- __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
+ __ Ubfx(scratch2,
+ scratch,
+ HeapNumber::kExponentShift,
+ HeapNumber::kExponentBits);
// Load dest with zero. We use this either for the final shift or
// for the answer.
__ mov(dest, Operand(0));
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
// the exponent that we are fastest at and also the highest exponent we can
// handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ cmp(scratch2, Operand(non_smi_exponent));
+ const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
+ // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
+ // split it up to avoid a constant pool entry. You can't do that in general
+ // for cmp because of the overflow flag, but we know the exponent is in the
+ // range 0-2047 so there is no overflow.
+ int fudge_factor = 0x400;
+ __ sub(scratch2, scratch2, Operand(fudge_factor));
+ __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
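+ // (Here 0x41d splits as 0x400 + 0x1d, both encodable immediates.)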
// If we have a match of the int32-but-not-Smi exponent then skip some logic.
__ b(eq, &right_exponent);
// If the exponent is higher than that then go to slow case. This catches
// We know the exponent is smaller than 30 (biased). If it is less than
// 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
// it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
+ const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
+ __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
// Dest already has a Smi zero.
__ b(lt, &done);
if (!CpuFeatures::IsSupported(VFP3)) {
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
- // We now have the exponent in dest. Subtract from 30 to get
- // how much to shift down.
- __ rsb(dest, dest, Operand(30));
+ // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to
+ // get how much to shift down.
+ __ rsb(dest, scratch2, Operand(30));
}
__ bind(&right_exponent);
if (CpuFeatures::IsSupported(VFP3)) {
__ eor(r1, r1, Operand(r1, LSR, 16));
__ eor(r1, r1, Operand(r1, LSR, 8));
ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
- if (CpuFeatures::IsSupported(ARMv7)) {
- const int kTranscendentalCacheSizeBits = 9;
- ASSERT_EQ(1 << kTranscendentalCacheSizeBits,
- TranscendentalCache::kCacheSize);
- __ ubfx(r1, r1, 0, kTranscendentalCacheSizeBits);
- } else {
- __ and_(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
- }
+ __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
// r2 = low 32 bits of double value.
// r3 = high 32 bits of double value.
__ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
__ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
__ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
- __ and_(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
+ __ And(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
// We want the smi-tagged index in r0. kArrayIndexValueMask has zeros in
// the low kHashShift bits.
ASSERT(String::kHashShift >= kSmiTagSize);
- __ and_(r3, r3, Operand(String::kArrayIndexValueMask));
+ __ Ubfx(r3, r3, String::kHashShift, String::kArrayIndexValueBits);
// Here we actually clobber the key (r0) which will be used if calling into
// runtime later. However as the new key is the numeric value of a string key
// there is no difference in using either key.
- __ mov(r0, Operand(r3, ASR, String::kHashShift - kSmiTagSize));
+ __ mov(r0, Operand(r3, LSL, kSmiTagSize));
// Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
- // vldr requires offset to be a multiple of 4 so we can not
- // include -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
if (array_type == kExternalFloatArray) {
+ // vldr requires the offset to be a multiple of 4, so we cannot
+ // include -kHeapObjectTag in it.
+ __ sub(r5, r0, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
__ vcvt_f32_f64(s0, d0);
__ vmov(r5, s0);
__ str(r5, MemOperand(r3, r4, LSL, 2));
} else {
- Label done;
-
// Need to perform float-to-int conversion.
- // Test for NaN.
- __ vcmp(d0, d0);
- // Move vector status bits to normal status bits.
- __ vmrs(v8::internal::pc);
- __ mov(r5, Operand(0), LeaveCC, vs); // NaN converts to 0.
- __ b(vs, &done);
-
- // Test whether exponent equal to 0x7FF (infinity or NaN).
- __ vmov(r6, r7, d0);
- __ mov(r5, Operand(0x7FF00000));
- __ and_(r6, r6, Operand(r5));
- __ teq(r6, Operand(r5));
- __ mov(r6, Operand(0), LeaveCC, eq);
+ // Test for NaN or infinity (both give zero).
+ __ ldr(r6, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+
+ // Hoisted load. vldr requires the offset to be a multiple of 4, so we
+ // cannot include -kHeapObjectTag in it.
+ __ sub(r5, r0, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
+
+ __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs and Infinities have all-one exponents so they sign extend to -1.
+ __ cmp(r6, Operand(-1));
+ __ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq);
// Not infinity or NaN simply convert to int.
if (IsElementTypeSigned(array_type)) {
} else {
__ vcvt_u32_f64(s0, d0, ne);
}
-
__ vmov(r5, s0, ne);
- __ bind(&done);
switch (array_type) {
case kExternalByteArray:
case kExternalUnsignedByteArray:
}
+void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
+ Condition cond) {
+ if (!CpuFeatures::IsSupported(ARMv7) || src2.is_single_instruction()) {
+ and_(dst, src1, src2, LeaveCC, cond);
+ return;
+ }
+ int32_t immediate = src2.immediate();
+ if (immediate == 0) {
+ mov(dst, Operand(0), LeaveCC, cond);
+ return;
+ }
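+ // An immediate of the form 2^n - 1 is a mask of the low n bits, which
+ // ubfx can extract in one instruction even though the immediate itself
+ // has no valid encoding.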
+ if (immediate != -1 && IsPowerOf2(immediate + 1) && ((immediate & 1) != 0)) {
+ ubfx(dst, src1, 0, WhichPowerOf2(immediate + 1), cond);
+ return;
+ }
+ and_(dst, src1, src2, LeaveCC, cond);
+}
+
+
+void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
+ Condition cond) {
+ ASSERT(lsb < 32);
+ if (!CpuFeatures::IsSupported(ARMv7)) {
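+ // Build a mask of 'width' set bits starting at bit 'lsb', then shift the
+ // masked field down to bit 0.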
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+ and_(dst, src1, Operand(mask), LeaveCC, cond);
+ if (lsb != 0) {
+ mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
+ }
+ } else {
+ ubfx(dst, src1, lsb, width, cond);
+ }
+}
+
+
+void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
+ Condition cond) {
+ ASSERT(lsb < 32);
+ if (!CpuFeatures::IsSupported(ARMv7)) {
+ int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
+ and_(dst, src1, Operand(mask), LeaveCC, cond);
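+ // Shift the field up to the top bit, then arithmetic-shift it back down
+ // so its top bit is replicated into the sign bits.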
+ int shift_up = 32 - lsb - width;
+ int shift_down = lsb + shift_up;
+ if (shift_up != 0) {
+ mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
+ }
+ if (shift_down != 0) {
+ mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
+ }
+ } else {
+ sbfx(dst, src1, lsb, width, cond);
+ }
+}
+
+
void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
// Empty the const pool.
CheckConstPool(true, true);
Register scratch = no_reg,
Condition cond = al);
+
+ void And(Register dst, Register src1, const Operand& src2,
+ Condition cond = al);
+ void Ubfx(Register dst, Register src, int lsb, int width,
+ Condition cond = al);
+ void Sbfx(Register dst, Register src, int lsb, int width,
+ Condition cond = al);
+
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
// May do nothing if the registers are identical.
static const uint32_t kExponentMask = 0x7ff00000u;
static const uint32_t kMantissaMask = 0xfffffu;
static const int kMantissaBits = 52;
- static const int KExponentBits = 11;
+ static const int kExponentBits = 11;
static const int kExponentBias = 1023;
static const int kExponentShift = 20;
static const int kMantissaBitsInTopWord = 20;
// Flags layout.
static const int kFlagsICStateShift = 0;
static const int kFlagsICInLoopShift = 3;
- static const int kFlagsKindShift = 4;
- static const int kFlagsTypeShift = 8;
+ static const int kFlagsTypeShift = 4;
+ static const int kFlagsKindShift = 7;
static const int kFlagsArgumentsCountShift = 11;
static const int kFlagsICStateMask = 0x00000007; // 00000000111
static const int kFlagsICInLoopMask = 0x00000008; // 00000001000
- static const int kFlagsKindMask = 0x000000F0; // 00011110000
- static const int kFlagsTypeMask = 0x00000700; // 11100000000
+ static const int kFlagsTypeMask = 0x00000070; // 00001110000
+ static const int kFlagsKindMask = 0x00000780; // 11110000000
static const int kFlagsArgumentsCountMask = 0xFFFFF800;
static const int kFlagsNotUsedInLookup =
}
+// X must be a power of 2. Returns the number of trailing zeros.
+template <typename T>
+static inline int WhichPowerOf2(T x) {
+ ASSERT(IsPowerOf2(x));
+ ASSERT(x != 0);
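+ // The only negative bit pattern that passes IsPowerOf2 is the sign bit
+ // itself, 1 << 31, which has 31 trailing zeros.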
+ if (x < 0) return 31;
+ int bits = 0;
+#ifdef DEBUG
+ int original_x = x;
+#endif
+ if (x >= 0x10000) {
+ bits += 16;
+ x >>= 16;
+ }
+ if (x >= 0x100) {
+ bits += 8;
+ x >>= 8;
+ }
+ if (x >= 0x10) {
+ bits += 4;
+ x >>= 4;
+ }
+ switch (x) {
+ default: UNREACHABLE();
+ case 8: bits++; // Fall through.
+ case 4: bits++; // Fall through.
+ case 2: bits++; // Fall through.
+ case 1: break;
+ }
+ ASSERT_EQ(1 << bits, original_x);
+ return bits;
+}
+
+
// The C++ standard leaves the semantics of '>>' undefined for
// negative signed operands. Most implementations do the right thing,
// though.
COMPARE(mvn(r5, Operand(r4), SetCC, cc),
"31f05004 mvnccs r5, r4");
+ // Instructions autotransformed by the assembler.
+ // mov -> mvn.
+ COMPARE(mov(r3, Operand(-1), LeaveCC, al),
+ "e3e03000 mvn r3, #0");
+ COMPARE(mov(r4, Operand(-2), SetCC, al),
+ "e3f04001 mvns r4, #1");
+ COMPARE(mov(r5, Operand(0x0ffffff0), SetCC, ne),
+ "13f052ff mvnnes r5, #-268435441");
+ COMPARE(mov(r6, Operand(-1), LeaveCC, ne),
+ "13e06000 mvnne r6, #0");
+
+ // mvn -> mov.
+ COMPARE(mvn(r3, Operand(-1), LeaveCC, al),
+ "e3a03000 mov r3, #0");
+ COMPARE(mvn(r4, Operand(-2), SetCC, al),
+ "e3b04001 movs r4, #1");
+ COMPARE(mvn(r5, Operand(0x0ffffff0), SetCC, ne),
+ "13b052ff movnes r5, #-268435441");
+ COMPARE(mvn(r6, Operand(-1), LeaveCC, ne),
+ "13a06000 movne r6, #0");
+
+ // and <-> bic.
+ COMPARE(and_(r3, r5, Operand(0xfc03ffff)),
+ "e3c537ff bic r3, r5, #66846720");
+ COMPARE(bic(r3, r5, Operand(0xfc03ffff)),
+ "e20537ff and r3, r5, #66846720");
+
+ // sub <-> add.
+ COMPARE(add(r3, r5, Operand(-1024)),
+ "e2453b01 sub r3, r5, #1024");
+ COMPARE(sub(r3, r5, Operand(-1024)),
+ "e2853b01 add r3, r5, #1024");
+
+ // cmp <-> cmn.
+ COMPARE(cmp(r3, Operand(-1024)),
+ "e3730b01 cmn r3, #1024");
+ COMPARE(cmn(r3, Operand(-1024)),
+ "e3530b01 cmp r3, #1024");
+
// Miscellaneous instructions encoded as type 0.
COMPARE(blx(ip),
"e12fff3c blx ip");