return OverflowState;
}
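+
+/// Check whether @p Expr contains an integer constant with a bit width of 64
+/// or more, i.e., a value that may not be expressible in a signed 64-bit type.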
+bool IslExprBuilder::hasLargeInts(isl::ast_expr Expr) {
+  enum isl_ast_expr_type Type = isl_ast_expr_get_type(Expr.get());
+
+  if (Type == isl_ast_expr_id)
+    return false;
+
+  if (Type == isl_ast_expr_int) {
+    isl::val Val = Expr.get_val();
+    APInt APValue = APIntFromVal(Val);
+    auto BitWidth = APValue.getBitWidth();
+    return BitWidth >= 64;
+  }
+
+  assert(Type == isl_ast_expr_op && "Expected isl_ast_expr of type operation");
+
+  int NumArgs = isl_ast_expr_get_op_n_arg(Expr.get());
+
+  for (int i = 0; i < NumArgs; i++) {
+    isl::ast_expr Operand = Expr.get_op_arg(i);
+    if (hasLargeInts(Operand))
+      return true;
+  }
+
+  return false;
+}
+
Value *IslExprBuilder::createBinOp(BinaryOperator::BinaryOps Opc, Value *LHS,
Value *RHS, const Twine &Name) {
// Handle the plain operation (without overflow tracking) first.
/// of this run-time check to false to be conservatively correct,
Value *IslNodeBuilder::createRTC(isl_ast_expr *Condition) {
auto ExprBuilder = getExprBuilder();
+
+  // If the AST expression contains integer constants larger than 64 bits, bail
+  // out. The resulting LLVM-IR would contain operations on types wider than 64
+  // bits. When wrapping intrinsics are used, these are translated to runtime
+  // library calls that are not available on all systems (e.g., Android) and
+  // consequently result in linker errors.
+  if (ExprBuilder.hasLargeInts(isl::manage(isl_ast_expr_copy(Condition)))) {
+    isl_ast_expr_free(Condition);
+    return Builder.getFalse();
+  }
+
ExprBuilder.setTrackOverflow(true);
Value *RTC = ExprBuilder.create(Condition);
if (!RTC->getType()->isIntegerTy(1))
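
For context on the linker errors the bail-out above avoids: arithmetic on integer types wider than 64 bits is not always expanded inline. In particular, once overflow tracking wraps the operations in *.with.overflow intrinsics, a 128-bit multiply may be lowered by the backend into a call to a compiler runtime helper such as __muloti4, and some runtime libraries, for example older Android/AOSP ones, do not provide it, so the build fails at link time rather than at compile time. The following stand-alone C++ sketch is not part of this patch and uses a made-up helper name (mulOverflows); it merely reproduces the pattern that can trigger such a helper call.

    // Illustrative sketch only; not part of the Polly patch. Build with Clang
    // or GCC on a target that provides __int128.
    #include <cstdio>

    // Overflow-checked multiplication on a 128-bit type is turned into an
    // overflow intrinsic, which the backend may expand into a call to a
    // compiler runtime helper (e.g. __muloti4). If the runtime library that is
    // linked in does not provide that helper, the build fails at link time.
    static bool mulOverflows(__int128 A, __int128 B, __int128 *Result) {
      return __builtin_mul_overflow(A, B, Result);
    }

    int main() {
      __int128 Result;
      bool Overflowed = mulOverflows((__int128)1 << 100, 4, &Result);
      std::printf("overflowed: %d\n", Overflowed);
      return 0;
    }

Polly runs into the same situation when a run-time check expression carries constants wider than 64 bits, which is why createRTC now conservatively returns false instead of emitting such code.
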
; RUN: opt %loadPolly -S -polly-codegen < %s | FileCheck %s
;
; The boundary context contains a constant that does not fit in 64 bits. Hence,
-; we will check that we use an appropriaty typed constant, here with 65 bits.
-; An alternative would be to bail out early but that would not be as easy.
-;
-; CHECK: {{.*}} = icmp sle i65 {{.*}}, -9223372036854775810
-;
-; CHECK: polly.start
+; make sure we bail out. On certain systems, e.g., AOSP, no runtime support for
+; 128-bit operations is available, and consequently generating code for such
+; large values might cause linker errors.
;
+; CHECK: br i1 false, label %polly.start, label %bb11.pre_entry_bb
+
target triple = "x86_64-unknown-linux-gnu"
@global = external global i32, align 4
;
; CHECK: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT: [val, ptr] -> { Stmt_for_body[i0] -> MemRef_B[9 + val] };
-; CHECK-NEXT: Execution Context: [val, ptr] -> { : val <= 9223372036854775806 }
+; CHECK-NEXT: Execution Context: [val, ptr] -> { : val <= 32766 }
;
; CHECK: ReadAccess := [Reduction Type: +] [Scalar: 0]
; CHECK-NEXT: [val, ptr] -> { Stmt_for_body[i0] -> MemRef_A[9 + ptr] };
; CHECK-NEXT: [val, ptr] -> { Stmt_for_body[i0] -> MemRef_A[9 + ptr] };
;
; IR: polly.stmt.for.body:
-; IR-NEXT: %p_tmp = ptrtoint i64* %scevgep to i64
-; IR-NEXT: %p_add = add nsw i64 %p_tmp, 1
-; IR-NEXT: %p_arrayidx3 = getelementptr inbounds i64, i64* %A, i64 %p_add
+; IR-NEXT: %p_tmp = ptrtoint i64* %scevgep to i16
+; IR-NEXT: %p_add = add nsw i16 %p_tmp, 1
+; IR-NEXT: %p_arrayidx3 = getelementptr inbounds i64, i64* %A, i16 %p_add
; IR-NEXT: %tmp4_p_scalar_ = load i64, i64* %p_arrayidx3
; IR-NEXT: %p_add4 = add nsw i64 %tmp4_p_scalar_, %polly.preload.tmp3.merge
; IR-NEXT: store i64 %p_add4, i64* %p_arrayidx3
;
; IR: polly.loop_preheader:
-; IR-NEXT: %scevgep = getelementptr i64, i64* %ptr, i64 1
-; IR-NEXT: %26 = add i64 %val, 1
+; IR-NEXT: %scevgep = getelementptr i64, i64* %ptr, i16 1
+; IR-NEXT: %35 = add i16 %val, 1
; IR-NEXT: br label %polly.loop_header
;
;
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target datalayout = "e-p:16:16:16-m:e-i64:64-f80:128-n8:16:16:64-S128"
-define void @f(i64* %A, i64* %B, i64* %ptr, i64 %val) {
+define void @f(i64* %A, i64* %B, i64* %ptr, i16 %val) {
entry:
br label %for.cond
for.body: ; preds = %for.cond
%add.ptr = getelementptr inbounds i64, i64* %ptr, i64 1
- %tmp = ptrtoint i64* %add.ptr to i64
- %add = add nsw i64 %tmp, 1
- %add1 = add nsw i64 %val, 1
- %tmp1 = inttoptr i64 %add1 to i64*
+ %tmp = ptrtoint i64* %add.ptr to i16
+ %add = add nsw i16 %tmp, 1
+ %add1 = add nsw i16 %val, 1
+ %tmp1 = inttoptr i16 %add1 to i64*
%add.ptr2 = getelementptr inbounds i64, i64* %tmp1, i64 1
- %tmp2 = ptrtoint i64* %add.ptr2 to i64
- %arrayidx = getelementptr inbounds i64, i64* %B, i64 %tmp2
+ %tmp2 = ptrtoint i64* %add.ptr2 to i16
+ %arrayidx = getelementptr inbounds i64, i64* %B, i16 %tmp2
%tmp3 = load i64, i64* %arrayidx
- %arrayidx3 = getelementptr inbounds i64, i64* %A, i64 %add
+ %arrayidx3 = getelementptr inbounds i64, i64* %A, i16 %add
%tmp4 = load i64, i64* %arrayidx3
%add4 = add nsw i64 %tmp4, %tmp3
store i64 %add4, i64* %arrayidx3