2 * Copyright 2008-2009 Katholieke Universiteit Leuven
4 * Use of this software is governed by the GNU LGPLv2.1 license
6 * Written by Sven Verdoolaege, K.U.Leuven, Departement
7 * Computerwetenschappen, Celestijnenlaan 200A, B-3001 Leuven, Belgium
10 #include <isl_ctx_private.h>
13 #include <isl_mat_private.h>
14 #include "isl_map_private.h"
15 #include <isl_dim_private.h>
/* Allocate an n_row x n_col matrix in "ctx": one contiguous isl_blk of
 * n_row * n_col isl_ints, plus an array of per-row pointers into it.
 * NOTE(review): this excerpt omits some declarations, error paths and
 * the return statement; code left untouched.
 */
17 struct isl_mat *isl_mat_alloc(struct isl_ctx *ctx,
18 unsigned n_row, unsigned n_col)
23 mat = isl_alloc_type(ctx, struct isl_mat);
28 mat->block = isl_blk_alloc(ctx, n_row * n_col);
29 if (isl_blk_is_error(mat->block))
31 mat->row = isl_alloc_array(ctx, isl_int *, n_row);
35 for (i = 0; i < n_row; ++i)
/* row i starts at offset i * n_col inside the flat data block */
36 mat->row[i] = mat->block.data + i * n_col;
/* error path: release the data block on failure */
48 isl_blk_free(ctx, mat->block);
/* Grow "mat" so it has room for at least n_row rows and n_col columns.
 * If the column capacity (max_col) is insufficient, a fresh matrix is
 * allocated and the old rows are copied into it; otherwise the existing
 * block is extended in place and the row pointers are rebased onto the
 * (possibly relocated) data block.
 * NOTE(review): excerpt omits some error handling and returns.
 */
53 struct isl_mat *isl_mat_extend(struct isl_mat *mat,
54 unsigned n_row, unsigned n_col)
/* already big enough: at most the logical column count needs updating */
62 if (mat->max_col >= n_col && mat->n_row >= n_row) {
63 if (mat->n_col < n_col)
68 if (mat->max_col < n_col) {
69 struct isl_mat *new_mat;
71 if (n_row < mat->n_row)
73 new_mat = isl_mat_alloc(mat->ctx, n_row, n_col);
76 for (i = 0; i < mat->n_row; ++i)
77 isl_seq_cpy(new_mat->row[i], mat->row[i], mat->n_col);
82 mat = isl_mat_cow(mat);
/* remember old base address so row pointers can be rebased below */
86 old = mat->block.data;
87 mat->block = isl_blk_extend(mat->ctx, mat->block, n_row * mat->max_col);
88 if (isl_blk_is_error(mat->block))
90 mat->row = isl_realloc_array(mat->ctx, mat->row, isl_int *, n_row);
/* rebase existing rows preserving their offsets; lay out new rows */
94 for (i = 0; i < mat->n_row; ++i)
95 mat->row[i] = mat->block.data + (mat->row[i] - old);
96 for (i = mat->n_row; i < n_row; ++i)
97 mat->row[i] = mat->block.data + i * mat->max_col;
99 if (mat->n_col < n_col)
/* Create a matrix view onto an existing row array "row", starting at
 * (first_row, first_col).  The data is borrowed (ISL_MAT_BORROWED,
 * empty block), so freeing the view does not free the underlying data.
 */
108 struct isl_mat *isl_mat_sub_alloc(struct isl_ctx *ctx, isl_int **row,
109 unsigned first_row, unsigned n_row, unsigned first_col, unsigned n_col)
114 mat = isl_alloc_type(ctx, struct isl_mat);
117 mat->row = isl_alloc_array(ctx, isl_int *, n_row);
120 for (i = 0; i < n_row; ++i)
121 mat->row[i] = row[first_row+i] + first_col;
127 mat->block = isl_blk_empty();
128 mat->flags = ISL_MAT_BORROWED;
/* Copy an n_row x n_col submatrix: for each row, copy n_col entries
 * from src (starting at src_col) to dst (starting at dst_col).
 */
135 void isl_mat_sub_copy(struct isl_ctx *ctx, isl_int **dst, isl_int **src,
136 unsigned n_row, unsigned dst_col, unsigned src_col, unsigned n_col)
140 for (i = 0; i < n_row; ++i)
141 isl_seq_cpy(dst[i]+dst_col, src[i]+src_col, n_col);
/* Like isl_mat_sub_copy, but store the negation of each source entry. */
144 void isl_mat_sub_neg(struct isl_ctx *ctx, isl_int **dst, isl_int **src,
145 unsigned n_row, unsigned dst_col, unsigned src_col, unsigned n_col)
149 for (i = 0; i < n_row; ++i)
150 isl_seq_neg(dst[i]+dst_col, src[i]+src_col, n_col);
/* Return another reference to "mat".
 * NOTE(review): the body is elided from this excerpt; presumably it
 * bumps the reference count -- confirm against the full source.
 */
153 struct isl_mat *isl_mat_copy(struct isl_mat *mat)
/* Create a deep copy of "mat": fresh allocation, rows copied one by one. */
162 struct isl_mat *isl_mat_dup(struct isl_mat *mat)
165 struct isl_mat *mat2;
169 mat2 = isl_mat_alloc(mat->ctx, mat->n_row, mat->n_col);
172 for (i = 0; i < mat->n_row; ++i)
173 isl_seq_cpy(mat2->row[i], mat->row[i], mat->n_col);
/* Copy-on-write: return "mat" itself when it is the sole, non-borrowed
 * reference; otherwise hand back a private duplicate.
 */
177 struct isl_mat *isl_mat_cow(struct isl_mat *mat)
179 struct isl_mat *mat2;
183 if (mat->ref == 1 && !ISL_F_ISSET(mat, ISL_MAT_BORROWED))
186 mat2 = isl_mat_dup(mat);
/* Release a reference to "mat".  The data block is only freed when the
 * matrix owns it (not ISL_MAT_BORROWED); the ctx reference is dropped.
 * NOTE(review): excerpt omits the refcount check and struct free.
 */
191 void isl_mat_free(struct isl_mat *mat)
199 if (!ISL_F_ISSET(mat, ISL_MAT_BORROWED))
200 isl_blk_free(mat->ctx, mat->block);
201 isl_ctx_deref(mat->ctx);
/* Return the number of rows of "mat", or -1 when "mat" is NULL. */
206 int isl_mat_rows(__isl_keep isl_mat *mat)
208 return mat ? mat->n_row : -1;
/* Return the number of columns of "mat", or -1 when "mat" is NULL. */
211 int isl_mat_cols(__isl_keep isl_mat *mat)
213 return mat ? mat->n_col : -1;
/* Read element (row, col) of "mat" into *v, with bounds checking.
 * Out-of-range indices raise isl_error_invalid via isl_die.
 */
216 int isl_mat_get_element(__isl_keep isl_mat *mat, int row, int col, isl_int *v)
220 if (row < 0 || row >= mat->n_row)
221 isl_die(mat->ctx, isl_error_invalid, "row out of range",
223 if (col < 0 || col >= mat->n_col)
224 isl_die(mat->ctx, isl_error_invalid, "column out of range",
226 isl_int_set(*v, mat->row[row][col]);
/* Set element (row, col) of "mat" to v.  The matrix is first made
 * writable via cow; out-of-range indices raise isl_error_invalid.
 */
230 __isl_give isl_mat *isl_mat_set_element(__isl_take isl_mat *mat,
231 int row, int col, isl_int v)
233 mat = isl_mat_cow(mat);
236 if (row < 0 || row >= mat->n_row)
237 isl_die(mat->ctx, isl_error_invalid, "row out of range",
239 if (col < 0 || col >= mat->n_col)
240 isl_die(mat->ctx, isl_error_invalid, "column out of range",
242 isl_int_set(mat->row[row][col], v);
/* Return the n_row x n_row identity matrix: each row is zero except for
 * a 1 on the diagonal.
 */
249 struct isl_mat *isl_mat_identity(struct isl_ctx *ctx, unsigned n_row)
254 mat = isl_mat_alloc(ctx, n_row, n_row)
257 for (i = 0; i < n_row; ++i) {
258 isl_seq_clr(mat->row[i], i);
259 isl_int_set_si(mat->row[i][i], 1);
260 isl_seq_clr(mat->row[i]+i+1, n_row-(i+1));
/* Compute the matrix-vector product mat * vec.  Requires
 * mat->n_col == vec->size; each result entry is an inner product of a
 * matrix row with the vector.
 */
266 struct isl_vec *isl_mat_vec_product(struct isl_mat *mat, struct isl_vec *vec)
269 struct isl_vec *prod;
274 isl_assert(mat->ctx, mat->n_col == vec->size, goto error);
276 prod = isl_vec_alloc(mat->ctx, mat->n_row);
280 for (i = 0; i < prod->size; ++i)
281 isl_seq_inner_product(mat->row[i], vec->el, vec->size,
282 &prod->block.data[i]);
/* Compute inv(mat) * vec by wrapping "vec" as a one-column matrix and
 * delegating to isl_mat_inverse_product, then unwrapping the result.
 */
292 __isl_give isl_vec *isl_mat_vec_inverse_product(__isl_take isl_mat *mat,
293 __isl_take isl_vec *vec)
295 struct isl_mat *vec_mat;
300 vec_mat = isl_mat_alloc(vec->ctx, vec->size, 1);
303 for (i = 0; i < vec->size; ++i)
304 isl_int_set(vec_mat->row[i][0], vec->el[i]);
305 vec_mat = isl_mat_inverse_product(mat, vec_mat);
/* copy the single result column back into a fresh vector */
309 vec = isl_vec_alloc(vec_mat->ctx, vec_mat->n_row);
311 for (i = 0; i < vec->size; ++i)
312 isl_int_set(vec->el[i], vec_mat->row[i][0]);
313 isl_mat_free(vec_mat);
/* Compute the row-vector/matrix product vec * mat.  Requires
 * mat->n_row == vec->size; result has mat->n_col entries.
 */
321 struct isl_vec *isl_vec_mat_product(struct isl_vec *vec, struct isl_mat *mat)
324 struct isl_vec *prod;
329 isl_assert(mat->ctx, mat->n_row == vec->size, goto error);
331 prod = isl_vec_alloc(mat->ctx, mat->n_col);
335 for (i = 0; i < prod->size; ++i) {
336 isl_int_set_si(prod->el[i], 0);
337 for (j = 0; j < vec->size; ++j)
338 isl_int_addmul(prod->el[i], vec->el[j], mat->row[j][i]);
/* Direct sum of two affine transformations that share a denominator
 * row: row 0 of each input holds the denominator in column 0 and must
 * be zero elsewhere (asserted below).  The common denominator of the
 * result is lcm(left[0][0], right[0][0]); each remaining row combines
 * the (rescaled) constant terms and juxtaposes the rescaled
 * coefficient blocks of both inputs.
 * NOTE(review): excerpt omits some error handling and the restoration
 * ordering around lines 388-389; code left untouched.
 */
349 struct isl_mat *isl_mat_aff_direct_sum(struct isl_mat *left,
350 struct isl_mat *right)
358 isl_assert(left->ctx, left->n_row == right->n_row, goto error);
359 isl_assert(left->ctx, left->n_row >= 1, goto error);
360 isl_assert(left->ctx, left->n_col >= 1, goto error);
361 isl_assert(left->ctx, right->n_col >= 1, goto error);
362 isl_assert(left->ctx,
363 isl_seq_first_non_zero(left->row[0]+1, left->n_col-1) == -1,
365 isl_assert(left->ctx,
366 isl_seq_first_non_zero(right->row[0]+1, right->n_col-1) == -1,
369 sum = isl_mat_alloc(left->ctx, left->n_row, left->n_col + right->n_col - 1);
/* temporarily replace each denominator by its scaling factor */
372 isl_int_lcm(sum->row[0][0], left->row[0][0], right->row[0][0]);
373 isl_int_divexact(left->row[0][0], sum->row[0][0], left->row[0][0]);
374 isl_int_divexact(right->row[0][0], sum->row[0][0], right->row[0][0]);
376 isl_seq_clr(sum->row[0]+1, sum->n_col-1);
377 for (i = 1; i < sum->n_row; ++i) {
378 isl_int_mul(sum->row[i][0], left->row[0][0], left->row[i][0]);
379 isl_int_addmul(sum->row[i][0],
380 right->row[0][0], right->row[i][0]);
381 isl_seq_scale(sum->row[i]+1, left->row[i]+1, left->row[0][0],
383 isl_seq_scale(sum->row[i]+left->n_col,
384 right->row[i]+1, right->row[0][0],
/* restore the original denominators of the inputs */
388 isl_int_divexact(left->row[0][0], sum->row[0][0], left->row[0][0]);
389 isl_int_divexact(right->row[0][0], sum->row[0][0], right->row[0][0]);
/* Column operation for Hermite reduction: swap columns i and j of M
 * (rows >= "row" only), mirror the swap in the column transform *U,
 * and apply the inverse operation (a row swap) to *Q.
 * NOTE(review): guards for NULL U/Q appear elided in this excerpt;
 * callers do pass NULL for them.
 */
399 static void exchange(struct isl_mat *M, struct isl_mat **U,
400 struct isl_mat **Q, unsigned row, unsigned i, unsigned j)
403 for (r = row; r < M->n_row; ++r)
404 isl_int_swap(M->row[r][i], M->row[r][j]);
406 for (r = 0; r < (*U)->n_row; ++r)
407 isl_int_swap((*U)->row[r][i], (*U)->row[r][j]);
410 isl_mat_swap_rows(*Q, i, j);
/* Column operation: col j -= m * col i in M (rows >= "row") and in *U;
 * the inverse operation (row i += m * row j) is applied to *Q so the
 * product relation is preserved.
 */
413 static void subtract(struct isl_mat *M, struct isl_mat **U,
414 struct isl_mat **Q, unsigned row, unsigned i, unsigned j, isl_int m)
417 for (r = row; r < M->n_row; ++r)
418 isl_int_submul(M->row[r][j], m, M->row[r][i]);
420 for (r = 0; r < (*U)->n_row; ++r)
421 isl_int_submul((*U)->row[r][j], m, (*U)->row[r][i]);
424 for (r = 0; r < (*Q)->n_col; ++r)
425 isl_int_addmul((*Q)->row[i][r], m, (*Q)->row[j][r]);
/* Column operation: negate column "col" of M (rows >= "row") and of *U,
 * and negate the corresponding row of *Q (its own inverse).
 */
429 static void oppose(struct isl_mat *M, struct isl_mat **U,
430 struct isl_mat **Q, unsigned row, unsigned col)
433 for (r = row; r < M->n_row; ++r)
434 isl_int_neg(M->row[r][col], M->row[r][col]);
436 for (r = 0; r < (*U)->n_row; ++r)
437 isl_int_neg((*U)->row[r][col], (*U)->row[r][col]);
440 isl_seq_neg((*Q)->row[col], (*Q)->row[col], (*Q)->n_col);
443 /* Given matrix M, compute
448 * with U and Q unimodular matrices and H a matrix in column echelon form
449 * such that on each echelon row the entries in the non-echelon column
450 * are non-negative (if neg == 0) or non-positive (if neg == 1)
451 * and strictly smaller (in absolute value) than the entries in the echelon
453 * If U or Q are NULL, then these matrices are not computed.
/* Compute the left Hermite form of M (see comment above): reduce M to
 * column echelon form H = M U by elementary column operations, tracking
 * the transform in U and its inverse in Q when requested.  For each row,
 * the pivot is the nonzero entry of smallest absolute value; earlier
 * columns are then reduced modulo the pivot (ceil- or floor-division
 * depending on "neg").
 * NOTE(review): excerpt omits declarations, the "first < 0" bailout and
 * temporaries; code left untouched.
 */
455 struct isl_mat *isl_mat_left_hermite(struct isl_mat *M, int neg,
456 struct isl_mat **U, struct isl_mat **Q)
471 *U = isl_mat_identity(M->ctx, M->n_col);
476 *Q = isl_mat_identity(M->ctx, M->n_col);
483 for (row = 0; row < M->n_row; ++row) {
485 first = isl_seq_abs_min_non_zero(M->row[row]+col, M->n_col-col);
490 exchange(M, U, Q, row, first, col);
491 if (isl_int_is_neg(M->row[row][col]))
492 oppose(M, U, Q, row, col);
/* eliminate the remaining entries of this row to the right of col */
494 while ((off = isl_seq_first_non_zero(M->row[row]+first,
495 M->n_col-first)) != -1) {
497 isl_int_fdiv_q(c, M->row[row][first], M->row[row][col]);
498 subtract(M, U, Q, row, col, first, c);
499 if (!isl_int_is_zero(M->row[row][first]))
500 exchange(M, U, Q, row, first, col);
/* normalize the entries in earlier (echelon) columns of this row */
504 for (i = 0; i < col; ++i) {
505 if (isl_int_is_zero(M->row[row][i]))
508 isl_int_cdiv_q(c, M->row[row][i], M->row[row][col]);
510 isl_int_fdiv_q(c, M->row[row][i], M->row[row][col]);
511 if (isl_int_is_zero(c))
513 subtract(M, U, Q, row, col, i, c);
/* Compute a basis of the right kernel (null space) of "mat": bring mat
 * into column echelon form H = mat U, determine the column rank, and
 * return the last n_col - rank columns of U, which map to zero.
 */
532 struct isl_mat *isl_mat_right_kernel(struct isl_mat *mat)
535 struct isl_mat *U = NULL;
538 mat = isl_mat_left_hermite(mat, 0, &U, NULL);
/* rank = number of echelon columns with a nonzero pivot */
542 for (i = 0, rank = 0; rank < mat->n_col; ++rank) {
543 while (i < mat->n_row && isl_int_is_zero(mat->row[i][rank]))
548 K = isl_mat_alloc(U->ctx, U->n_row, U->n_col - rank);
551 isl_mat_sub_copy(K->ctx, K->row, U->row, U->n_row, 0, rank, U->n_col-rank);
/* Turn a linear transformation into an affine one by prepending a row
 * and a column:  [ 1 0 ; 0 mat ].
 */
561 struct isl_mat *isl_mat_lin_to_aff(struct isl_mat *mat)
564 struct isl_mat *mat2;
568 mat2 = isl_mat_alloc(mat->ctx, 1+mat->n_row, 1+mat->n_col);
571 isl_int_set_si(mat2->row[0][0], 1);
572 isl_seq_clr(mat2->row[0]+1, mat->n_col);
573 for (i = 0; i < mat->n_row; ++i) {
574 isl_int_set_si(mat2->row[1+i][0], 0);
575 isl_seq_cpy(mat2->row[1+i]+1, mat->row[i], mat->n_col);
584 /* Given two matrices M1 and M2, return the block matrix
/* Build the block-diagonal matrix [ mat1 0 ; 0 mat2 ]: mat1's rows are
 * padded with zero columns on the right, mat2's rows with zero columns
 * on the left.
 */
589 __isl_give isl_mat *isl_mat_diagonal(__isl_take isl_mat *mat1,
590 __isl_take isl_mat *mat2)
598 mat = isl_mat_alloc(mat1->ctx, mat1->n_row + mat2->n_row,
599 mat1->n_col + mat2->n_col);
602 for (i = 0; i < mat1->n_row; ++i) {
603 isl_seq_cpy(mat->row[i], mat1->row[i], mat1->n_col);
604 isl_seq_clr(mat->row[i] + mat1->n_col, mat2->n_col);
606 for (i = 0; i < mat2->n_row; ++i) {
607 isl_seq_clr(mat->row[mat1->n_row + i], mat1->n_col);
608 isl_seq_cpy(mat->row[mat1->n_row + i] + mat1->n_col,
609 mat2->row[i], mat2->n_col);
/* Return the index of the first of the n_row rows with a nonzero entry
 * in column "col" (return of -1 on no match elided in this excerpt).
 */
620 static int row_first_non_zero(isl_int **row, unsigned n_row, unsigned col)
624 for (i = 0; i < n_row; ++i)
625 if (!isl_int_is_zero(row[i][col]))
/* Return the index of the row whose entry in column "col" has the
 * smallest nonzero absolute value, starting the scan from the first
 * nonzero row.
 */
630 static int row_abs_min_non_zero(isl_int **row, unsigned n_row, unsigned col)
632 int i, min = row_first_non_zero(row, n_row, col);
635 for (i = min + 1; i < n_row; ++i) {
636 if (isl_int_is_zero(row[i][col]))
638 if (isl_int_abs_lt(row[i][col], row[min][col]))
/* Row operation for inverse computation: swap rows i and j of both
 * "left" and "right" so the relation inv(left)*right is preserved.
 */
644 static void inv_exchange(struct isl_mat *left, struct isl_mat *right,
645 unsigned i, unsigned j)
647 left = isl_mat_swap_rows(left, i, j);
648 right = isl_mat_swap_rows(right, i, j);
/* Row operation: negate row "row" of both matrices.  In "left" only the
 * entries from the diagonal onward matter (earlier ones are zero by
 * construction), so only those are negated.
 */
651 static void inv_oppose(
652 struct isl_mat *left, struct isl_mat *right, unsigned row)
654 isl_seq_neg(left->row[row]+row, left->row[row]+row, left->n_col-row);
655 isl_seq_neg(right->row[row], right->row[row], right->n_col);
/* Row operation: row i -= m * row "row" in both matrices (expressed as
 * a combination with coefficient ctx->one; the sign convention of "m"
 * is the caller's).  In "left" only entries from column "row" onward
 * are touched.
 */
658 static void inv_subtract(struct isl_mat *left, struct isl_mat *right,
659 unsigned row, unsigned i, isl_int m)
662 isl_seq_combine(left->row[i]+row,
663 left->ctx->one, left->row[i]+row,
664 m, left->row[row]+row,
666 isl_seq_combine(right->row[i], right->ctx->one, right->row[i],
667 m, right->row[row], right->n_col);
670 /* Compute inv(left)*right
/* Compute inv(left) * right over the integers by fraction-free Gaussian
 * elimination: reduce "left" to diagonal form while applying the same
 * row operations to "right", then rescale every row by the lcm of the
 * diagonal entries.  "left" must be square and have as many rows as
 * "right".  A zero lcm means "left" was singular (asserted).
 * NOTE(review): excerpt omits declarations, isl_int init/clear and
 * cleanup; code left untouched.
 */
672 struct isl_mat *isl_mat_inverse_product(struct isl_mat *left,
673 struct isl_mat *right)
681 isl_assert(left->ctx, left->n_row == left->n_col, goto error);
682 isl_assert(left->ctx, left->n_row == right->n_row, goto error);
684 if (left->n_row == 0) {
689 left = isl_mat_cow(left);
690 right = isl_mat_cow(right);
/* forward elimination: pick the abs-min pivot in each column */
696 for (row = 0; row < left->n_row; ++row) {
697 int pivot, first, i, off;
698 pivot = row_abs_min_non_zero(left->row+row, left->n_row-row, row);
702 isl_assert(left->ctx, pivot >= 0, goto error);
706 inv_exchange(left, right, pivot, row);
707 if (isl_int_is_neg(left->row[row][row]))
708 inv_oppose(left, right, row);
/* clear the rest of this column below the pivot */
710 while ((off = row_first_non_zero(left->row+first,
711 left->n_row-first, row)) != -1) {
713 isl_int_fdiv_q(a, left->row[first][row],
714 left->row[row][row]);
715 inv_subtract(left, right, row, first, a);
716 if (!isl_int_is_zero(left->row[first][row]))
717 inv_exchange(left, right, row, first);
/* back substitution: clear entries above the diagonal */
721 for (i = 0; i < row; ++i) {
722 if (isl_int_is_zero(left->row[i][row]))
724 isl_int_gcd(a, left->row[row][row], left->row[i][row]);
725 isl_int_divexact(b, left->row[i][row], a);
726 isl_int_divexact(a, left->row[row][row], a);
728 isl_seq_combine(left->row[i] + i,
730 b, left->row[row] + i,
732 isl_seq_combine(right->row[i], a, right->row[i],
733 b, right->row[row], right->n_col);
/* scale all rows to the common denominator lcm(diagonal) */
738 isl_int_set(a, left->row[0][0]);
739 for (row = 1; row < left->n_row; ++row)
740 isl_int_lcm(a, a, left->row[row][row]);
741 if (isl_int_is_zero(a)){
743 isl_assert(left->ctx, 0, goto error);
745 for (row = 0; row < left->n_row; ++row) {
746 isl_int_divexact(left->row[row][row], a, left->row[row][row]);
747 if (isl_int_is_one(left->row[row][row]))
749 isl_seq_scale(right->row[row], right->row[row],
750 left->row[row][row], right->n_col);
/* Multiply every entry of column "col" by m, in place. */
762 void isl_mat_col_scale(struct isl_mat *mat, unsigned col, isl_int m)
766 for (i = 0; i < mat->n_row; ++i)
767 isl_int_mul(mat->row[i][col], mat->row[i][col], m);
/* Set column dst to m1 * col src1 + m2 * col src2.  A temporary is used
 * so dst may alias src1 or src2.
 */
770 void isl_mat_col_combine(struct isl_mat *mat, unsigned dst,
771 isl_int m1, unsigned src1, isl_int m2, unsigned src2)
777 for (i = 0; i < mat->n_row; ++i) {
778 isl_int_mul(tmp, m1, mat->row[i][src1]);
779 isl_int_addmul(tmp, m2, mat->row[i][src2]);
780 isl_int_set(mat->row[i][dst], tmp);
/* Compute a right inverse of "mat" (mat * inv = d * I for some integer
 * denominator d): reduce mat to diagonal form by elementary column
 * operations, mirroring each operation on an identity matrix "inv",
 * then rescale columns by the lcm of the diagonal entries.  Mirrors
 * isl_mat_inverse_product but works on columns.
 * NOTE(review): excerpt omits declarations, isl_int init/clear and
 * error cleanup; code left untouched.
 */
785 struct isl_mat *isl_mat_right_inverse(struct isl_mat *mat)
791 mat = isl_mat_cow(mat);
795 inv = isl_mat_identity(mat->ctx, mat->n_col);
796 inv = isl_mat_cow(inv);
802 for (row = 0; row < mat->n_row; ++row) {
803 int pivot, first, i, off;
804 pivot = isl_seq_abs_min_non_zero(mat->row[row]+row, mat->n_col-row);
808 isl_assert(mat->ctx, pivot >= 0, goto error);
812 exchange(mat, &inv, NULL, row, pivot, row);
813 if (isl_int_is_neg(mat->row[row][row]))
814 oppose(mat, &inv, NULL, row, row);
/* clear the remaining entries of this row to the right of the pivot */
816 while ((off = isl_seq_first_non_zero(mat->row[row]+first,
817 mat->n_col-first)) != -1) {
819 isl_int_fdiv_q(a, mat->row[row][first],
821 subtract(mat, &inv, NULL, row, row, first, a);
822 if (!isl_int_is_zero(mat->row[row][first]))
823 exchange(mat, &inv, NULL, row, row, first);
/* clear entries to the left of the diagonal in this row */
827 for (i = 0; i < row; ++i) {
828 if (isl_int_is_zero(mat->row[row][i]))
830 isl_int_gcd(a, mat->row[row][row], mat->row[row][i]);
831 isl_int_divexact(b, mat->row[row][i], a);
832 isl_int_divexact(a, mat->row[row][row], a);
834 isl_mat_col_combine(mat, i, a, i, b, row);
835 isl_mat_col_combine(inv, i, a, i, b, row);
/* scale columns to the common denominator lcm(diagonal) */
840 isl_int_set(a, mat->row[0][0]);
841 for (row = 1; row < mat->n_row; ++row)
842 isl_int_lcm(a, a, mat->row[row][row]);
843 if (isl_int_is_zero(a)){
847 for (row = 0; row < mat->n_row; ++row) {
848 isl_int_divexact(mat->row[row][row], a, mat->row[row][row]);
849 if (isl_int_is_one(mat->row[row][row]))
851 isl_mat_col_scale(inv, row, mat->row[row][row]);
/* Transpose "mat".  A square matrix is transposed in place (after cow)
 * by swapping entries across the diagonal; otherwise a new matrix of
 * the transposed shape is filled element by element.
 */
864 struct isl_mat *isl_mat_transpose(struct isl_mat *mat)
866 struct isl_mat *transpose = NULL;
869 if (mat->n_col == mat->n_row) {
870 mat = isl_mat_cow(mat);
873 for (i = 0; i < mat->n_row; ++i)
874 for (j = i + 1; j < mat->n_col; ++j)
875 isl_int_swap(mat->row[i][j], mat->row[j][i]);
878 transpose = isl_mat_alloc(mat->ctx, mat->n_col, mat->n_row);
881 for (i = 0; i < mat->n_row; ++i)
882 for (j = 0; j < mat->n_col; ++j)
883 isl_int_set(transpose->row[j][i], mat->row[i][j]);
/* Swap columns i and j of "mat" (after cow), with bounds assertions. */
891 struct isl_mat *isl_mat_swap_cols(struct isl_mat *mat, unsigned i, unsigned j)
895 mat = isl_mat_cow(mat);
898 isl_assert(mat->ctx, i < mat->n_col, goto error);
899 isl_assert(mat->ctx, j < mat->n_col, goto error);
901 for (r = 0; r < mat->n_row; ++r)
902 isl_int_swap(mat->row[r][i], mat->row[r][j]);
/* Swap rows i and j of "mat" by exchanging the row pointers (after cow).
 * NOTE(review): this excerpt shows only one assignment of the
 * three-step pointer swap; the temporary and the second assignment are
 * elided, not missing from the real source -- confirm against it.
 */
909 struct isl_mat *isl_mat_swap_rows(struct isl_mat *mat, unsigned i, unsigned j)
915 mat = isl_mat_cow(mat);
919 mat->row[i] = mat->row[j];
/* Compute the matrix product left * right.  Requires
 * left->n_col == right->n_row.  When the inner dimension is zero the
 * result is the zero matrix; otherwise each entry is accumulated with
 * an initial multiply followed by addmuls.
 */
924 struct isl_mat *isl_mat_product(struct isl_mat *left, struct isl_mat *right)
927 struct isl_mat *prod;
931 isl_assert(left->ctx, left->n_col == right->n_row, goto error);
932 prod = isl_mat_alloc(left->ctx, left->n_row, right->n_col);
935 if (left->n_col == 0) {
936 for (i = 0; i < prod->n_row; ++i)
937 isl_seq_clr(prod->row[i], prod->n_col);
940 for (i = 0; i < prod->n_row; ++i) {
941 for (j = 0; j < prod->n_col; ++j) {
942 isl_int_mul(prod->row[i][j],
943 left->row[i][0], right->row[0][j]);
944 for (k = 1; k < left->n_col; ++k)
945 isl_int_addmul(prod->row[i][j],
946 left->row[i][k], right->row[k][j]);
958 /* Replace the variables x in the rows q by x' given by x = M x',
959 * with M the matrix mat.
961 * If the number of new variables is greater than the original
962 * number of variables, then the rows q have already been
963 * preextended. If the new number is smaller, then the coefficients
964 * of the divs, which are not changed, need to be shifted down.
965 * The row q may be the equalities, the inequalities or the
966 * div expressions. In the latter case, has_div is true and
967 * we need to take into account the extra denominator column.
/* Apply the substitution x = M x' (see comment above) to the n rows q:
 * scale the constant column by the denominator M[0][0], multiply the
 * coefficient block by M, and shift the (unchanged) div coefficients
 * down by e = n_row - n_col columns when variables were removed.
 * "has_div" accounts for the extra denominator column of div rows.
 */
969 static int preimage(struct isl_ctx *ctx, isl_int **q, unsigned n,
970 unsigned n_div, int has_div, struct isl_mat *mat)
976 if (mat->n_col >= mat->n_row)
979 e = mat->n_row - mat->n_col;
981 for (i = 0; i < n; ++i)
982 isl_int_mul(q[i][0], q[i][0], mat->row[0][0]);
983 t = isl_mat_sub_alloc(mat->ctx, q, 0, n, has_div, mat->n_row);
984 t = isl_mat_product(t, mat);
987 for (i = 0; i < n; ++i) {
988 isl_seq_swp_or_cpy(q[i] + has_div, t->row[i], t->n_col);
989 isl_seq_cpy(q[i] + has_div + t->n_col,
990 q[i] + has_div + t->n_col + e, n_div);
991 isl_seq_clr(q[i] + has_div + t->n_col + n_div, e);
997 /* Replace the variables x in bset by x' given by x = M x', with
1000 * If there are fewer variables x' than there are x, then we perform
1001 * the transformation in place, which means that, in principle,
1002 * this frees up some extra variables as the number
1003 * of columns remains constant, but we would have to extend
1004 * the div array too as the number of rows in this array is assumed
1005 * to be equal to extra.
/* Apply the substitution x = M x' to a basic set (see comment above):
 * adjust the dimension when the variable count changes, transform the
 * equalities, inequalities and div expressions through preimage(), drop
 * the normalization flags invalidated by the transformation, and
 * re-simplify.  Only parameter-free sets are handled (asserted).
 * NOTE(review): excerpt omits some error handling; code left untouched.
 */
1007 struct isl_basic_set *isl_basic_set_preimage(struct isl_basic_set *bset,
1008 struct isl_mat *mat)
1010 struct isl_ctx *ctx;
1016 bset = isl_basic_set_cow(bset);
1020 isl_assert(ctx, bset->dim->nparam == 0, goto error);
1021 isl_assert(ctx, 1+bset->dim->n_out == mat->n_row, goto error);
1022 isl_assert(ctx, mat->n_col > 0, goto error);
1024 if (mat->n_col > mat->n_row) {
1025 bset = isl_basic_set_extend(bset, 0, mat->n_col-1, 0, 0, 0);
1028 } else if (mat->n_col < mat->n_row) {
1029 bset->dim = isl_dim_cow(bset->dim);
1032 bset->dim->n_out -= mat->n_row - mat->n_col;
1035 if (preimage(ctx, bset->eq, bset->n_eq, bset->n_div, 0,
1036 isl_mat_copy(mat)) < 0)
1039 if (preimage(ctx, bset->ineq, bset->n_ineq, bset->n_div, 0,
1040 isl_mat_copy(mat)) < 0)
/* div rows carry a denominator column, hence has_div = 1 */
1043 if (preimage(ctx, bset->div, bset->n_div, bset->n_div, 1, mat) < 0)
1046 ISL_F_CLR(bset, ISL_BASIC_SET_NO_IMPLICIT);
1047 ISL_F_CLR(bset, ISL_BASIC_SET_NO_REDUNDANT);
1048 ISL_F_CLR(bset, ISL_BASIC_SET_NORMALIZED);
1049 ISL_F_CLR(bset, ISL_BASIC_SET_NORMALIZED_DIVS);
1050 ISL_F_CLR(bset, ISL_BASIC_SET_ALL_EQUALITIES);
1052 bset = isl_basic_set_simplify(bset);
1053 bset = isl_basic_set_finalize(bset);
1059 isl_basic_set_free(bset);
/* Apply the substitution x = M x' to every basic set of "set" and fix
 * up the set's dimension when the variable count changes.
 * NOTE(review): excerpt omits error handling and the mat copy/free
 * around the per-basic-set call; code left untouched.
 */
1063 struct isl_set *isl_set_preimage(struct isl_set *set, struct isl_mat *mat)
1065 struct isl_ctx *ctx;
1068 set = isl_set_cow(set);
1073 for (i = 0; i < set->n; ++i) {
1074 set->p[i] = isl_basic_set_preimage(set->p[i],
1079 if (mat->n_col != mat->n_row) {
1080 set->dim = isl_dim_cow(set->dim);
/* net change in output dimensions: n_col - n_row */
1083 set->dim->n_out += mat->n_col;
1084 set->dim->n_out -= mat->n_row;
1087 ISL_F_CLR(set, ISL_SET_NORMALIZED);
/* Print "mat" to "out" with the given indentation, in a bracketed
 * [[...][...]] layout; a NULL matrix prints "null mat" and an empty
 * one prints "[]".
 */
1095 void isl_mat_dump(struct isl_mat *mat, FILE *out, int indent)
1100 fprintf(out, "%*snull mat\n", indent, "");
1104 if (mat->n_row == 0)
1105 fprintf(out, "%*s[]\n", indent, "");
1107 for (i = 0; i < mat->n_row; ++i) {
/* first row opens the outer bracket; later rows are indented by one */
1109 fprintf(out, "%*s[[", indent, "");
1111 fprintf(out, "%*s[", indent+1, "");
1112 for (j = 0; j < mat->n_col; ++j) {
1115 isl_int_print(out, mat->row[i][j], 0);
1117 if (i == mat->n_row-1)
1118 fprintf(out, "]]\n");
1120 fprintf(out, "]\n");
/* Drop n columns starting at "col" by shifting the trailing columns
 * left in each row (no shift needed when the dropped columns are the
 * last ones).
 */
1124 struct isl_mat *isl_mat_drop_cols(struct isl_mat *mat, unsigned col, unsigned n)
1128 mat = isl_mat_cow(mat);
1132 if (col != mat->n_col-n) {
1133 for (r = 0; r < mat->n_row; ++r)
1134 isl_seq_cpy(mat->row[r]+col, mat->row[r]+col+n,
1135 mat->n_col - col - n);
/* Drop n rows starting at "row" by shifting the trailing row pointers up. */
1141 struct isl_mat *isl_mat_drop_rows(struct isl_mat *mat, unsigned row, unsigned n)
1145 mat = isl_mat_cow(mat);
1149 for (r = row; r+n < mat->n_row; ++r)
1150 mat->row[r] = mat->row[r+n];
/* Insert n (uninitialized) columns at position "col" by allocating a
 * wider matrix and copying the columns before and after the gap.
 */
1156 __isl_give isl_mat *isl_mat_insert_cols(__isl_take isl_mat *mat,
1157 unsigned col, unsigned n)
1166 ext = isl_mat_alloc(mat->ctx, mat->n_row, mat->n_col + n);
1170 isl_mat_sub_copy(mat->ctx, ext->row, mat->row, mat->n_row, 0, 0, col);
1171 isl_mat_sub_copy(mat->ctx, ext->row, mat->row, mat->n_row,
1172 col + n, col, mat->n_col - col);
/* Insert n columns at position "first" and clear them to zero. */
1181 __isl_give isl_mat *isl_mat_insert_zero_cols(__isl_take isl_mat *mat,
1182 unsigned first, unsigned n)
1188 mat = isl_mat_insert_cols(mat, first, n);
1192 for (i = 0; i < mat->n_row; ++i)
1193 isl_seq_clr(mat->row[i] + first, n);
/* Append n zero columns at the end of "mat". */
1198 __isl_give isl_mat *isl_mat_add_zero_cols(__isl_take isl_mat *mat, unsigned n)
1203 return isl_mat_insert_zero_cols(mat, mat->n_col, n);
/* Insert n (uninitialized) rows at position "row" by allocating a
 * taller matrix and copying the rows before and after the gap.
 */
1206 __isl_give isl_mat *isl_mat_insert_rows(__isl_take isl_mat *mat,
1207 unsigned row, unsigned n)
1216 ext = isl_mat_alloc(mat->ctx, mat->n_row + n, mat->n_col);
1220 isl_mat_sub_copy(mat->ctx, ext->row, mat->row, row, 0, 0, mat->n_col);
1221 isl_mat_sub_copy(mat->ctx, ext->row + row + n, mat->row + row,
1222 mat->n_row - row, 0, 0, mat->n_col);
/* Append n (uninitialized) rows at the bottom of "mat". */
1231 __isl_give isl_mat *isl_mat_add_rows(__isl_take isl_mat *mat, unsigned n)
1236 return isl_mat_insert_rows(mat, mat->n_row, n);
/* Column operation: col dst_col -= f * col src_col, in place. */
1239 void isl_mat_col_submul(struct isl_mat *mat,
1240 int dst_col, isl_int f, int src_col)
1244 for (i = 0; i < mat->n_row; ++i)
1245 isl_int_submul(mat->row[i][dst_col], f, mat->row[i][src_col]);
/* Column operation: col dst_col += col src_col, in place. */
1248 void isl_mat_col_add(__isl_keep isl_mat *mat, int dst_col, int src_col)
1255 for (i = 0; i < mat->n_row; ++i)
1256 isl_int_add(mat->row[i][dst_col],
1257 mat->row[i][dst_col], mat->row[i][src_col]);
/* Column operation: col dst_col = f * col src_col (dst may equal src). */
1260 void isl_mat_col_mul(struct isl_mat *mat, int dst_col, isl_int f, int src_col)
1264 for (i = 0; i < mat->n_row; ++i)
1265 isl_int_mul(mat->row[i][dst_col], f, mat->row[i][src_col]);
/* Complete the first "row" rows of the square matrix M to a unimodular
 * matrix: compute the left Hermite form of M (restricted to those
 * rows), check that its echelon diagonal is all ones, and fill the
 * remaining rows of M from the inverse transform Q.
 * NOTE(review): excerpt omits the temporary n_row restriction before
 * the Hermite call and the cleanup; code left untouched.
 */
1268 struct isl_mat *isl_mat_unimodular_complete(struct isl_mat *M, int row)
1271 struct isl_mat *H = NULL, *Q = NULL;
1276 isl_assert(M->ctx, M->n_row == M->n_col, goto error);
1278 H = isl_mat_left_hermite(isl_mat_copy(M), 0, NULL, &Q);
1279 M->n_row = M->n_col;
1282 for (r = 0; r < row; ++r)
1283 isl_assert(M->ctx, isl_int_is_one(H->row[r][r]), goto error);
1284 for (r = row; r < M->n_row; ++r)
1285 isl_seq_cpy(M->row[r], Q->row[r], M->n_col);
/* Stack "top" on top of "bot" (they must have the same column count).
 * When either input has no rows, the other is returned directly
 * (returns elided in this excerpt).
 */
1296 __isl_give isl_mat *isl_mat_concat(__isl_take isl_mat *top,
1297 __isl_take isl_mat *bot)
1299 struct isl_mat *mat;
1304 isl_assert(top->ctx, top->n_col == bot->n_col, goto error);
1305 if (top->n_row == 0) {
1309 if (bot->n_row == 0) {
1314 mat = isl_mat_alloc(top->ctx, top->n_row + bot->n_row, top->n_col);
1317 isl_mat_sub_copy(mat->ctx, mat->row, top->row, top->n_row,
1319 isl_mat_sub_copy(mat->ctx, mat->row + top->n_row, bot->row, bot->n_row,
/* Return whether mat1 and mat2 have the same dimensions and identical
 * entries (row by row, via isl_seq_eq).
 */
1330 int isl_mat_is_equal(__isl_keep isl_mat *mat1, __isl_keep isl_mat *mat2)
1337 if (mat1->n_row != mat2->n_row)
1340 if (mat1->n_col != mat2->n_col)
1343 for (i = 0; i < mat1->n_row; ++i)
1344 if (!isl_seq_eq(mat1->row[i], mat2->row[i], mat1->n_col))
/* Turn "vec" into a 1 x vec->size matrix (copying the elements). */
1350 __isl_give isl_mat *isl_mat_from_row_vec(__isl_take isl_vec *vec)
1352 struct isl_mat *mat;
1356 mat = isl_mat_alloc(vec->ctx, 1, vec->size);
1360 isl_seq_cpy(mat->row[0], vec->el, vec->size);
/* Append the vector "bot" as an extra row at the bottom of "top". */
1369 __isl_give isl_mat *isl_mat_vec_concat(__isl_take isl_mat *top,
1370 __isl_take isl_vec *bot)
1372 return isl_mat_concat(top, isl_mat_from_row_vec(bot));
/* Move the n columns starting at src_col so they start at dst_col,
 * shifting the columns in between accordingly.  Implemented by copying
 * into a fresh matrix in four segments: the untouched prefix, the moved
 * block, the displaced middle, and the untouched suffix; the two
 * branches handle a leftward vs. rightward move.
 * NOTE(review): excerpt omits the early return and cleanup; code left
 * untouched.
 */
1375 __isl_give isl_mat *isl_mat_move_cols(__isl_take isl_mat *mat,
1376 unsigned dst_col, unsigned src_col, unsigned n)
1382 if (n == 0 || dst_col == src_col)
1385 res = isl_mat_alloc(mat->ctx, mat->n_row, mat->n_col);
1389 if (dst_col < src_col) {
1390 isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
1392 isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
1393 dst_col, src_col, n);
1394 isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
1395 dst_col + n, dst_col, src_col - dst_col);
1396 isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
1397 src_col + n, src_col + n,
1398 res->n_col - src_col - n);
1400 isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
1402 isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
1403 src_col, src_col + n, dst_col - src_col);
1404 isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
1405 dst_col, src_col, n);
1406 isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
1407 dst_col + n, dst_col + n,
1408 res->n_col - dst_col - n);
/* Store in *gcd the gcd of all entries of "mat" (0 for a zero or empty
 * matrix), by folding per-row gcds into the accumulator.
 */
1418 void isl_mat_gcd(__isl_keep isl_mat *mat, isl_int *gcd)
1423 isl_int_set_si(*gcd, 0);
1428 for (i = 0; i < mat->n_row; ++i) {
1429 isl_seq_gcd(mat->row[i], mat->n_col, &g);
1430 isl_int_gcd(*gcd, *gcd, g);
/* Divide every entry of "mat" by m (assumed to divide all entries). */
1435 __isl_give isl_mat *isl_mat_scale_down(__isl_take isl_mat *mat, isl_int m)
1442 for (i = 0; i < mat->n_row; ++i)
1443 isl_seq_scale_down(mat->row[i], mat->row[i], m, mat->n_col);
1448 __isl_give isl_mat *isl_mat_normalize(__isl_take isl_mat *mat)
1456 isl_mat_gcd(mat, &gcd);
1457 mat = isl_mat_scale_down(mat, gcd);