return UsesNarrowValue(Return->getReturnValue());
if (auto *Trunc = dyn_cast<TruncInst>(V))
return UsesNarrowValue(Trunc->getOperand(0));
+ if (auto *ZExt = dyn_cast<ZExtInst>(V))
+ return UsesNarrowValue(ZExt->getOperand(0));
if (auto *ICmp = dyn_cast<ICmpInst>(V))
return ICmp->isSigned();
if (!isa<Instruction>(V) || !isa<IntegerType>(V->getType()))
return nullptr;
- if ((!Promoted.count(V) && !NewInsts.count(V)) || !TruncTysMap.count(V))
+ if ((!Promoted.count(V) && !NewInsts.count(V)) || !TruncTysMap.count(V) ||
+ Leaves.count(V))
return nullptr;
Type *TruncTy = TruncTysMap[V];
}
}
}
- LLVM_DEBUG(dbgs() << "ARM CGP: Mutation complete.\n");
+ LLVM_DEBUG(dbgs() << "ARM CGP: Mutation complete:\n");
}
/// We accept most instructions, as well as Arguments and ConstantInsts. We
isa<LoadInst>(V))
return isSupportedType(V);
- // Currently, Trunc is the only cast we support.
if (auto *Trunc = dyn_cast<TruncInst>(V))
return isSupportedType(Trunc->getOperand(0));
+ if (auto *ZExt = dyn_cast<ZExtInst>(V))
+ return isSupportedType(ZExt->getOperand(0));
+
// Special cases for calls as we need to check for zeroext
// TODO We should accept calls even if they don't have zeroext, as they can
// still be roots.
; CHECK-COMMON-LABEL: promote_i8_sink_i16_2
; CHECK-COMMON: bl dummy_i8
; CHECK-COMMON: adds r0, #1
-; CHECK-COMMON: uxtb r0, r0
+; CHECK-COMMON-NOT: uxt
; CHECK-COMMON: cmp r0
define i16 @promote_i8_sink_i16_2(i8 zeroext %arg0, i8 zeroext %arg1, i16 zeroext %arg2) {
%call = tail call zeroext i8 @dummy_i8(i8 %arg0)
ret i8 %4
}
-; The pass will bail because of the zext, otherwise we'd want something like:
-; ldrb [[LD:r[^ ]+]], [r0]
-; subs [[SUB:r[^ ]+]], [[LD]], #1
-; cmp [[LD]], [[SUB]]
+; The pass performs the transform, but a uxtb will still be inserted to handle
+; the zext to the icmp.
; CHECK-COMMON-LABEL: icmp_i32_zext:
+; CHECK-COMMON: sub
; CHECK-COMMON: uxtb
+; CHECK-COMMON: cmp
define i8 @icmp_i32_zext(i8* %ptr) {
entry:
%gep = getelementptr inbounds i8, i8* %ptr, i32 0
ret i8 %2
}
-; Won't handle zext or sext
+; We don't handle sext
; CHECK-COMMON-LABEL: icmp_sext_zext_store_i8_i16
+; CHECK-COMMON: ldrb
+; CHECK-COMMON: ldrsh
define i32 @icmp_sext_zext_store_i8_i16() {
entry:
%0 = load i8, i8* getelementptr inbounds ([16 x i8], [16 x i8]* @d_uch, i32 0, i32 2), align 1
ret i32 %conv3
}
-; Pass will bail because of the zext, otherwise:
-; ldrb [[LD:r[^ ]+]], [r1]
-; subs [[SUB:r[^ ]+]], #1
-; cmp [[SUB]], #3
; CHECK-COMMON-LABEL: or_icmp_ugt:
-; CHECK-COMMON: uxt
+; CHECK-COMMON: ldrb
+; CHECK-COMMON: sub.w
+; CHECK-COMMON-NOT: uxt
+; CHECK-COMMON: cmp.w
+; CHECK-COMMON-NOT: uxt
+; CHECK-COMMON: cmp
define i1 @or_icmp_ugt(i32 %arg, i8* %ptr) {
entry:
%0 = load i8, i8* %ptr
ret i16 %res
}
-; Pass will bail because of the zext
+; We currently only handle truncs as sinks, so a uxt will still be needed for
+; the icmp ugt instruction.
; CHECK-COMMON-LABEL: urem_trunc_icmps
+; CHECK-COMMON: cmp
; CHECK-COMMON: uxt
+; CHECK-COMMON: cmp
define void @urem_trunc_icmps(i16** %in, i32* %g, i32* %k) {
entry:
%ptr = load i16*, i16** %in, align 4
ret void
}
-; Again, zexts will prevent the transform.
; Check that %exp requires uxth in all cases, and will also be required to
; promote %1 for the call - unless we can generate a uadd16.
; CHECK-COMMON-LABEL: zext_load_sink_call: