/*
 * Copyright 2008-2009 Katholieke Universiteit Leuven
 *
 * Use of this software is governed by the GNU LGPLv2.1 license
 *
 * Written by Sven Verdoolaege, K.U.Leuven, Departement
 * Computerwetenschappen, Celestijnenlaan 200A, B-3001 Leuven, Belgium
 */

#include <isl_ctx_private.h>
#include <isl_mat_private.h>
#include "isl_map_private.h"
#include <isl_dim_private.h>
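
/* Allocate an n_row x n_col matrix of isl_int.
 * All entries are stored in a single memory block and each mat->row[i]
 * points at the start of row i inside that block.
 */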
struct isl_mat *isl_mat_alloc(struct isl_ctx *ctx,
	unsigned n_row, unsigned n_col)
	mat = isl_alloc_type(ctx, struct isl_mat);
	mat->block = isl_blk_alloc(ctx, n_row * n_col);
	if (isl_blk_is_error(mat->block))
	mat->row = isl_alloc_array(ctx, isl_int *, n_row);
	for (i = 0; i < n_row; ++i)
		mat->row[i] = mat->block.data + i * n_col;
	isl_blk_free(ctx, mat->block);
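
/* Grow "mat" to at least n_row rows and n_col columns.
 * If the current allocation is already large enough, only the bookkeeping
 * is updated; if more columns are needed, a fresh matrix is allocated and
 * the old entries are copied; otherwise the underlying block is extended
 * and the row pointers are fixed up.
 */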
struct isl_mat *isl_mat_extend(struct isl_mat *mat,
	unsigned n_row, unsigned n_col)
	if (mat->max_col >= n_col && mat->n_row >= n_row) {
		if (mat->n_col < n_col)
	if (mat->max_col < n_col) {
		struct isl_mat *new_mat;

		if (n_row < mat->n_row)
		new_mat = isl_mat_alloc(mat->ctx, n_row, n_col);
		for (i = 0; i < mat->n_row; ++i)
			isl_seq_cpy(new_mat->row[i], mat->row[i], mat->n_col);
	mat = isl_mat_cow(mat);
	old = mat->block.data;
	mat->block = isl_blk_extend(mat->ctx, mat->block, n_row * mat->max_col);
	if (isl_blk_is_error(mat->block))
	row = isl_realloc_array(mat->ctx, mat->row, isl_int *, n_row);
	for (i = 0; i < mat->n_row; ++i)
		mat->row[i] = mat->block.data + (mat->row[i] - old);
	for (i = mat->n_row; i < n_row; ++i)
		mat->row[i] = mat->block.data + i * mat->max_col;
	if (mat->n_col < n_col)
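
/* Create a matrix that borrows its rows from an existing row array:
 * row i of the result points at row[first_row + i] + first_col.
 * No data is copied; the block is left empty and the matrix is marked
 * ISL_MAT_BORROWED so that the shared data is never freed through it.
 */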
struct isl_mat *isl_mat_sub_alloc(struct isl_ctx *ctx, isl_int **row,
	unsigned first_row, unsigned n_row, unsigned first_col, unsigned n_col)
	mat = isl_alloc_type(ctx, struct isl_mat);
	mat->row = isl_alloc_array(ctx, isl_int *, n_row);
	for (i = 0; i < n_row; ++i)
		mat->row[i] = row[first_row+i] + first_col;
	mat->block = isl_blk_empty();
	mat->flags = ISL_MAT_BORROWED;

void isl_mat_sub_copy(struct isl_ctx *ctx, isl_int **dst, isl_int **src,
	unsigned n_row, unsigned dst_col, unsigned src_col, unsigned n_col)
	for (i = 0; i < n_row; ++i)
		isl_seq_cpy(dst[i]+dst_col, src[i]+src_col, n_col);

void isl_mat_sub_neg(struct isl_ctx *ctx, isl_int **dst, isl_int **src,
	unsigned n_row, unsigned dst_col, unsigned src_col, unsigned n_col)
	for (i = 0; i < n_row; ++i)
		isl_seq_neg(dst[i]+dst_col, src[i]+src_col, n_col);

struct isl_mat *isl_mat_copy(struct isl_mat *mat)

struct isl_mat *isl_mat_dup(struct isl_mat *mat)
	struct isl_mat *mat2;

	mat2 = isl_mat_alloc(mat->ctx, mat->n_row, mat->n_col);
	for (i = 0; i < mat->n_row; ++i)
		isl_seq_cpy(mat2->row[i], mat->row[i], mat->n_col);
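
/* Return a matrix that can safely be modified: if "mat" is shared
 * (reference count above one) or borrows its rows, return a duplicate,
 * otherwise return "mat" itself.
 */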
struct isl_mat *isl_mat_cow(struct isl_mat *mat)
	struct isl_mat *mat2;

	if (mat->ref == 1 && !ISL_F_ISSET(mat, ISL_MAT_BORROWED))
	mat2 = isl_mat_dup(mat);

void isl_mat_free(struct isl_mat *mat)
	if (!ISL_F_ISSET(mat, ISL_MAT_BORROWED))
		isl_blk_free(mat->ctx, mat->block);
	isl_ctx_deref(mat->ctx);

int isl_mat_rows(__isl_keep isl_mat *mat)
	return mat ? mat->n_row : -1;

int isl_mat_cols(__isl_keep isl_mat *mat)
	return mat ? mat->n_col : -1;

int isl_mat_get_element(__isl_keep isl_mat *mat, int row, int col, isl_int *v)
	if (row < 0 || row >= mat->n_row)
		isl_die(mat->ctx, isl_error_invalid, "row out of range",
	if (col < 0 || col >= mat->n_col)
		isl_die(mat->ctx, isl_error_invalid, "column out of range",
	isl_int_set(*v, mat->row[row][col]);

__isl_give isl_mat *isl_mat_set_element(__isl_take isl_mat *mat,
	int row, int col, isl_int v)
	mat = isl_mat_cow(mat);
	if (row < 0 || row >= mat->n_row)
		isl_die(mat->ctx, isl_error_invalid, "row out of range",
	if (col < 0 || col >= mat->n_col)
		isl_die(mat->ctx, isl_error_invalid, "column out of range",
	isl_int_set(mat->row[row][col], v);

struct isl_mat *isl_mat_identity(struct isl_ctx *ctx, unsigned n_row)
	mat = isl_mat_alloc(ctx, n_row, n_row);
	for (i = 0; i < n_row; ++i) {
		isl_seq_clr(mat->row[i], i);
		isl_int_set_si(mat->row[i][i], 1);
		isl_seq_clr(mat->row[i]+i+1, n_row-(i+1));

struct isl_vec *isl_mat_vec_product(struct isl_mat *mat, struct isl_vec *vec)
	struct isl_vec *prod;

	isl_assert(mat->ctx, mat->n_col == vec->size, goto error);
	prod = isl_vec_alloc(mat->ctx, mat->n_row);
	for (i = 0; i < prod->size; ++i)
		isl_seq_inner_product(mat->row[i], vec->el, vec->size,
					&prod->block.data[i]);

__isl_give isl_vec *isl_mat_vec_inverse_product(__isl_take isl_mat *mat,
	__isl_take isl_vec *vec)
	struct isl_mat *vec_mat;

	vec_mat = isl_mat_alloc(vec->ctx, vec->size, 1);
	for (i = 0; i < vec->size; ++i)
		isl_int_set(vec_mat->row[i][0], vec->el[i]);
	vec_mat = isl_mat_inverse_product(mat, vec_mat);
	vec = isl_vec_alloc(vec_mat->ctx, vec_mat->n_row);
	for (i = 0; i < vec->size; ++i)
		isl_int_set(vec->el[i], vec_mat->row[i][0]);
	isl_mat_free(vec_mat);

struct isl_vec *isl_vec_mat_product(struct isl_vec *vec, struct isl_mat *mat)
	struct isl_vec *prod;

	isl_assert(mat->ctx, mat->n_row == vec->size, goto error);
	prod = isl_vec_alloc(mat->ctx, mat->n_col);
	for (i = 0; i < prod->size; ++i) {
		isl_int_set_si(prod->el[i], 0);
		for (j = 0; j < vec->size; ++j)
			isl_int_addmul(prod->el[i], vec->el[j], mat->row[j][i]);
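
/* Given two matrices of affine expressions, each storing its denominator
 * in position [0][0] with the rest of its first row zero, compute the sums
 * of corresponding expressions over the concatenation of the two variable
 * sets: the denominators are combined into their lcm, the constant terms
 * are added and the variable coefficients of both inputs are placed side
 * by side, rescaled accordingly.
 */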
struct isl_mat *isl_mat_aff_direct_sum(struct isl_mat *left,
	struct isl_mat *right)
	isl_assert(left->ctx, left->n_row == right->n_row, goto error);
	isl_assert(left->ctx, left->n_row >= 1, goto error);
	isl_assert(left->ctx, left->n_col >= 1, goto error);
	isl_assert(left->ctx, right->n_col >= 1, goto error);
	isl_assert(left->ctx,
	    isl_seq_first_non_zero(left->row[0]+1, left->n_col-1) == -1,
	isl_assert(left->ctx,
	    isl_seq_first_non_zero(right->row[0]+1, right->n_col-1) == -1,

	sum = isl_mat_alloc(left->ctx, left->n_row, left->n_col + right->n_col - 1);
	isl_int_lcm(sum->row[0][0], left->row[0][0], right->row[0][0]);
	isl_int_divexact(left->row[0][0], sum->row[0][0], left->row[0][0]);
	isl_int_divexact(right->row[0][0], sum->row[0][0], right->row[0][0]);

	isl_seq_clr(sum->row[0]+1, sum->n_col-1);
	for (i = 1; i < sum->n_row; ++i) {
		isl_int_mul(sum->row[i][0], left->row[0][0], left->row[i][0]);
		isl_int_addmul(sum->row[i][0],
				right->row[0][0], right->row[i][0]);
		isl_seq_scale(sum->row[i]+1, left->row[i]+1, left->row[0][0],
		isl_seq_scale(sum->row[i]+left->n_col,
				right->row[i]+1, right->row[0][0],

	isl_int_divexact(left->row[0][0], sum->row[0][0], left->row[0][0]);
	isl_int_divexact(right->row[0][0], sum->row[0][0], right->row[0][0]);

static void exchange(struct isl_mat *M, struct isl_mat **U,
	struct isl_mat **Q, unsigned row, unsigned i, unsigned j)
	for (r = row; r < M->n_row; ++r)
		isl_int_swap(M->row[r][i], M->row[r][j]);
	for (r = 0; r < (*U)->n_row; ++r)
		isl_int_swap((*U)->row[r][i], (*U)->row[r][j]);
	isl_mat_swap_rows(*Q, i, j);

static void subtract(struct isl_mat *M, struct isl_mat **U,
	struct isl_mat **Q, unsigned row, unsigned i, unsigned j, isl_int m)
	for (r = row; r < M->n_row; ++r)
		isl_int_submul(M->row[r][j], m, M->row[r][i]);
	for (r = 0; r < (*U)->n_row; ++r)
		isl_int_submul((*U)->row[r][j], m, (*U)->row[r][i]);
	for (r = 0; r < (*Q)->n_col; ++r)
		isl_int_addmul((*Q)->row[i][r], m, (*Q)->row[j][r]);

static void oppose(struct isl_mat *M, struct isl_mat **U,
	struct isl_mat **Q, unsigned row, unsigned col)
	for (r = row; r < M->n_row; ++r)
		isl_int_neg(M->row[r][col], M->row[r][col]);
	for (r = 0; r < (*U)->n_row; ++r)
		isl_int_neg((*U)->row[r][col], (*U)->row[r][col]);
	isl_seq_neg((*Q)->row[col], (*Q)->row[col], (*Q)->n_col);

/* Given matrix M, compute
 *
 *		M U = H
 *		M   = H Q
 *
 * with U and Q unimodular matrices and H a matrix in column echelon form
 * such that on each echelon row the entries in the non-echelon column
 * are non-negative (if neg == 0) or non-positive (if neg == 1)
 * and strictly smaller (in absolute value) than the entries in the echelon
 * column.
 * If U or Q are NULL, then these matrices are not computed.
 */
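/* For a single row, the column echelon form amounts to a gcd computation.
 * For example, with neg == 0,
 *
 *	M = [ 2 3 ]
 *
 * gives
 *
 *	H = [ 1 0 ]	U = [ -1  3 ]	Q = [ 2 3 ]
 *			    [  1 -2 ]	    [ 1 1 ]
 *
 * so that M U = H, M = H Q and Q is the inverse of U.
 */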
struct isl_mat *isl_mat_left_hermite(struct isl_mat *M, int neg,
	struct isl_mat **U, struct isl_mat **Q)
		*U = isl_mat_identity(M->ctx, M->n_col);
		*Q = isl_mat_identity(M->ctx, M->n_col);
	for (row = 0; row < M->n_row; ++row) {
		first = isl_seq_abs_min_non_zero(M->row[row]+col, M->n_col-col);
		exchange(M, U, Q, row, first, col);
		if (isl_int_is_neg(M->row[row][col]))
			oppose(M, U, Q, row, col);
		while ((off = isl_seq_first_non_zero(M->row[row]+first,
					M->n_col-first)) != -1) {
			isl_int_fdiv_q(c, M->row[row][first], M->row[row][col]);
			subtract(M, U, Q, row, col, first, c);
			if (!isl_int_is_zero(M->row[row][first]))
				exchange(M, U, Q, row, first, col);
		for (i = 0; i < col; ++i) {
			if (isl_int_is_zero(M->row[row][i]))
				continue;
			if (neg)
				isl_int_cdiv_q(c, M->row[row][i], M->row[row][col]);
			else
				isl_int_fdiv_q(c, M->row[row][i], M->row[row][col]);
			if (isl_int_is_zero(c))
				continue;
			subtract(M, U, Q, row, col, i, c);
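
/* Return a matrix whose columns span the right kernel of "mat", i.e.,
 * a matrix K such that mat K = 0.  It consists of the columns of the
 * unimodular factor U that correspond to the zero columns of the
 * column Hermite form mat U = H.
 */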
struct isl_mat *isl_mat_right_kernel(struct isl_mat *mat)
	struct isl_mat *U = NULL;

	mat = isl_mat_left_hermite(mat, 0, &U, NULL);
	for (i = 0, rank = 0; rank < mat->n_col; ++rank) {
		while (i < mat->n_row && isl_int_is_zero(mat->row[i][rank]))
	K = isl_mat_alloc(U->ctx, U->n_row, U->n_col - rank);
	isl_mat_sub_copy(K->ctx, K->row, U->row, U->n_row, 0, rank, U->n_col-rank);
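
/* Extend a matrix describing a linear mapping to one describing the same
 * mapping in affine form, i.e., return the matrix
 *
 *	[ 1   0  ]
 *	[ 0  mat ]
 */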
struct isl_mat *isl_mat_lin_to_aff(struct isl_mat *mat)
	struct isl_mat *mat2;

	mat2 = isl_mat_alloc(mat->ctx, 1+mat->n_row, 1+mat->n_col);
	isl_int_set_si(mat2->row[0][0], 1);
	isl_seq_clr(mat2->row[0]+1, mat->n_col);
	for (i = 0; i < mat->n_row; ++i) {
		isl_int_set_si(mat2->row[1+i][0], 0);
		isl_seq_cpy(mat2->row[1+i]+1, mat->row[i], mat->n_col);

/* Given two matrices M1 and M2, return the block matrix
 *
 *	[ M1  0  ]
 *	[ 0   M2 ]
 */
__isl_give isl_mat *isl_mat_diagonal(__isl_take isl_mat *mat1,
	__isl_take isl_mat *mat2)
	mat = isl_mat_alloc(mat1->ctx, mat1->n_row + mat2->n_row,
			    mat1->n_col + mat2->n_col);
	for (i = 0; i < mat1->n_row; ++i) {
		isl_seq_cpy(mat->row[i], mat1->row[i], mat1->n_col);
		isl_seq_clr(mat->row[i] + mat1->n_col, mat2->n_col);
	for (i = 0; i < mat2->n_row; ++i) {
		isl_seq_clr(mat->row[mat1->n_row + i], mat1->n_col);
		isl_seq_cpy(mat->row[mat1->n_row + i] + mat1->n_col,
			    mat2->row[i], mat2->n_col);

static int row_first_non_zero(isl_int **row, unsigned n_row, unsigned col)
	for (i = 0; i < n_row; ++i)
		if (!isl_int_is_zero(row[i][col]))

static int row_abs_min_non_zero(isl_int **row, unsigned n_row, unsigned col)
	int i, min = row_first_non_zero(row, n_row, col);

	for (i = min + 1; i < n_row; ++i) {
		if (isl_int_is_zero(row[i][col]))
		if (isl_int_abs_lt(row[i][col], row[min][col]))

static void inv_exchange(struct isl_mat *left, struct isl_mat *right,
	unsigned i, unsigned j)
	left = isl_mat_swap_rows(left, i, j);
	right = isl_mat_swap_rows(right, i, j);

static void inv_oppose(
	struct isl_mat *left, struct isl_mat *right, unsigned row)
	isl_seq_neg(left->row[row]+row, left->row[row]+row, left->n_col-row);
	isl_seq_neg(right->row[row], right->row[row], right->n_col);

static void inv_subtract(struct isl_mat *left, struct isl_mat *right,
	unsigned row, unsigned i, isl_int m)
	isl_seq_combine(left->row[i]+row,
			left->ctx->one, left->row[i]+row,
			m, left->row[row]+row,
	isl_seq_combine(right->row[i], right->ctx->one, right->row[i],
			m, right->row[row], right->n_col);

/* Compute inv(left) * right
 */
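/* The reduction below works entirely over the integers: in each column the
 * remaining entry of smallest absolute value is picked as pivot (keeping
 * intermediate values small), "left" is brought into diagonal form with the
 * same row operations applied to "right", and the rows of "right" are then
 * rescaled by lcm(diagonal) / diagonal entry, so that the result equals
 * inv(left) * right up to that common positive factor.  "left" has to be
 * square and non-singular.
 */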
struct isl_mat *isl_mat_inverse_product(struct isl_mat *left,
	struct isl_mat *right)
	isl_assert(left->ctx, left->n_row == left->n_col, goto error);
	isl_assert(left->ctx, left->n_row == right->n_row, goto error);

	if (left->n_row == 0) {

	left = isl_mat_cow(left);
	right = isl_mat_cow(right);

	for (row = 0; row < left->n_row; ++row) {
		int pivot, first, i, off;
		pivot = row_abs_min_non_zero(left->row+row, left->n_row-row, row);
		isl_assert(left->ctx, pivot >= 0, goto error);
		inv_exchange(left, right, pivot, row);
		if (isl_int_is_neg(left->row[row][row]))
			inv_oppose(left, right, row);
		while ((off = row_first_non_zero(left->row+first,
					left->n_row-first, row)) != -1) {
			isl_int_fdiv_q(a, left->row[first][row],
					left->row[row][row]);
			inv_subtract(left, right, row, first, a);
			if (!isl_int_is_zero(left->row[first][row]))
				inv_exchange(left, right, row, first);
		for (i = 0; i < row; ++i) {
			if (isl_int_is_zero(left->row[i][row]))
			isl_int_gcd(a, left->row[row][row], left->row[i][row]);
			isl_int_divexact(b, left->row[i][row], a);
			isl_int_divexact(a, left->row[row][row], a);
			isl_seq_combine(left->row[i] + i,
					b, left->row[row] + i,
			isl_seq_combine(right->row[i], a, right->row[i],
					b, right->row[row], right->n_col);

	isl_int_set(a, left->row[0][0]);
	for (row = 1; row < left->n_row; ++row)
		isl_int_lcm(a, a, left->row[row][row]);
	if (isl_int_is_zero(a)) {
		isl_assert(left->ctx, 0, goto error);
	for (row = 0; row < left->n_row; ++row) {
		isl_int_divexact(left->row[row][row], a, left->row[row][row]);
		if (isl_int_is_one(left->row[row][row]))
		isl_seq_scale(right->row[row], right->row[row],
				left->row[row][row], right->n_col);

void isl_mat_col_scale(struct isl_mat *mat, unsigned col, isl_int m)
	for (i = 0; i < mat->n_row; ++i)
		isl_int_mul(mat->row[i][col], mat->row[i][col], m);

void isl_mat_col_combine(struct isl_mat *mat, unsigned dst,
	isl_int m1, unsigned src1, isl_int m2, unsigned src2)
	for (i = 0; i < mat->n_row; ++i) {
		isl_int_mul(tmp, m1, mat->row[i][src1]);
		isl_int_addmul(tmp, m2, mat->row[i][src2]);
		isl_int_set(mat->row[i][dst], tmp);
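
/* Compute a right inverse of "mat" up to a scalar factor: the columns of
 * "mat" are combined, with the same operations mirrored on an identity
 * matrix "inv", until each row of "mat" has a single positive entry on the
 * diagonal, and the columns of "inv" are then rescaled so that the product
 * of the original "mat" with the result is a positive multiple of the
 * identity (padded with zero columns if "mat" has more columns than rows).
 */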
struct isl_mat *isl_mat_right_inverse(struct isl_mat *mat)
	mat = isl_mat_cow(mat);
	inv = isl_mat_identity(mat->ctx, mat->n_col);
	inv = isl_mat_cow(inv);

	for (row = 0; row < mat->n_row; ++row) {
		int pivot, first, i, off;
		pivot = isl_seq_abs_min_non_zero(mat->row[row]+row, mat->n_col-row);
		isl_assert(mat->ctx, pivot >= 0, goto error);
		exchange(mat, &inv, NULL, row, pivot, row);
		if (isl_int_is_neg(mat->row[row][row]))
			oppose(mat, &inv, NULL, row, row);
		while ((off = isl_seq_first_non_zero(mat->row[row]+first,
					mat->n_col-first)) != -1) {
			isl_int_fdiv_q(a, mat->row[row][first],
			subtract(mat, &inv, NULL, row, row, first, a);
			if (!isl_int_is_zero(mat->row[row][first]))
				exchange(mat, &inv, NULL, row, row, first);
		for (i = 0; i < row; ++i) {
			if (isl_int_is_zero(mat->row[row][i]))
			isl_int_gcd(a, mat->row[row][row], mat->row[row][i]);
			isl_int_divexact(b, mat->row[row][i], a);
			isl_int_divexact(a, mat->row[row][row], a);
			isl_mat_col_combine(mat, i, a, i, b, row);
			isl_mat_col_combine(inv, i, a, i, b, row);

	isl_int_set(a, mat->row[0][0]);
	for (row = 1; row < mat->n_row; ++row)
		isl_int_lcm(a, a, mat->row[row][row]);
	if (isl_int_is_zero(a)) {
	for (row = 0; row < mat->n_row; ++row) {
		isl_int_divexact(mat->row[row][row], a, mat->row[row][row]);
		if (isl_int_is_one(mat->row[row][row]))
		isl_mat_col_scale(inv, row, mat->row[row][row]);

struct isl_mat *isl_mat_transpose(struct isl_mat *mat)
	struct isl_mat *transpose = NULL;

	if (mat->n_col == mat->n_row) {
		mat = isl_mat_cow(mat);
		for (i = 0; i < mat->n_row; ++i)
			for (j = i + 1; j < mat->n_col; ++j)
				isl_int_swap(mat->row[i][j], mat->row[j][i]);
	transpose = isl_mat_alloc(mat->ctx, mat->n_col, mat->n_row);
	for (i = 0; i < mat->n_row; ++i)
		for (j = 0; j < mat->n_col; ++j)
			isl_int_set(transpose->row[j][i], mat->row[i][j]);

struct isl_mat *isl_mat_swap_cols(struct isl_mat *mat, unsigned i, unsigned j)
	mat = isl_mat_cow(mat);
	isl_assert(mat->ctx, i < mat->n_col, goto error);
	isl_assert(mat->ctx, j < mat->n_col, goto error);
	for (r = 0; r < mat->n_row; ++r)
		isl_int_swap(mat->row[r][i], mat->row[r][j]);

struct isl_mat *isl_mat_swap_rows(struct isl_mat *mat, unsigned i, unsigned j)
	mat = isl_mat_cow(mat);
	mat->row[i] = mat->row[j];

struct isl_mat *isl_mat_product(struct isl_mat *left, struct isl_mat *right)
	struct isl_mat *prod;

	isl_assert(left->ctx, left->n_col == right->n_row, goto error);
	prod = isl_mat_alloc(left->ctx, left->n_row, right->n_col);
	if (left->n_col == 0) {
		for (i = 0; i < prod->n_row; ++i)
			isl_seq_clr(prod->row[i], prod->n_col);
	for (i = 0; i < prod->n_row; ++i) {
		for (j = 0; j < prod->n_col; ++j) {
			isl_int_mul(prod->row[i][j],
				    left->row[i][0], right->row[0][j]);
			for (k = 1; k < left->n_col; ++k)
				isl_int_addmul(prod->row[i][j],
					       left->row[i][k], right->row[k][j]);

/* Replace the variables x in the rows q by x' given by x = M x',
 * with M the matrix mat.
 *
 * If the number of new variables is greater than the original
 * number of variables, then the rows q have already been
 * preextended.  If the new number is smaller, then the coefficients
 * of the divs, which are not changed, need to be shifted down.
 * The rows q may be the equalities, the inequalities or the
 * div expressions.  In the latter case, has_div is true and
 * we need to take into account the extra denominator column.
 */
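/* Each row q[i] is laid out as [ constant, variables..., divs... ],
 * preceded by an extra denominator entry when has_div is set; the has_div
 * offset below skips that extra column when the coefficient block is
 * multiplied by mat.
 */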
static int preimage(struct isl_ctx *ctx, isl_int **q, unsigned n,
	unsigned n_div, int has_div, struct isl_mat *mat)
	if (mat->n_col >= mat->n_row)
		e = 0;
	else
		e = mat->n_row - mat->n_col;
	for (i = 0; i < n; ++i)
		isl_int_mul(q[i][0], q[i][0], mat->row[0][0]);
	t = isl_mat_sub_alloc(mat->ctx, q, 0, n, has_div, mat->n_row);
	t = isl_mat_product(t, mat);
	for (i = 0; i < n; ++i) {
		isl_seq_swp_or_cpy(q[i] + has_div, t->row[i], t->n_col);
		isl_seq_cpy(q[i] + has_div + t->n_col,
			    q[i] + has_div + t->n_col + e, n_div);
		isl_seq_clr(q[i] + has_div + t->n_col + n_div, e);

/* Replace the variables x in bset by x' given by x = M x', with
 * M the matrix mat.
 *
 * If there are fewer variables x' than there are x, then we perform
 * the transformation in place, which means that, in principle,
 * this frees up some extra variables as the number
 * of columns remains constant, but we would have to extend
 * the div array too as the number of rows in this array is assumed
 * to be equal to extra.
 */
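/* Note that mat is expected in affine form: it has one more row than bset
 * has set dimensions, the extra (first) row corresponding to the constant
 * term, with mat->row[0][0] acting as the denominator of the
 * transformation in preimage() above.
 */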
struct isl_basic_set *isl_basic_set_preimage(struct isl_basic_set *bset,
	struct isl_mat *mat)
	struct isl_ctx *ctx;

	bset = isl_basic_set_cow(bset);
	isl_assert(ctx, bset->dim->nparam == 0, goto error);
	isl_assert(ctx, 1+bset->dim->n_out == mat->n_row, goto error);
	isl_assert(ctx, mat->n_col > 0, goto error);

	if (mat->n_col > mat->n_row) {
		bset = isl_basic_set_extend(bset, 0, mat->n_col-1, 0, 0, 0);
	} else if (mat->n_col < mat->n_row) {
		bset->dim = isl_dim_cow(bset->dim);
		bset->dim->n_out -= mat->n_row - mat->n_col;

	if (preimage(ctx, bset->eq, bset->n_eq, bset->n_div, 0,
			isl_mat_copy(mat)) < 0)
	if (preimage(ctx, bset->ineq, bset->n_ineq, bset->n_div, 0,
			isl_mat_copy(mat)) < 0)
	if (preimage(ctx, bset->div, bset->n_div, bset->n_div, 1, mat) < 0)

	ISL_F_CLR(bset, ISL_BASIC_SET_NO_IMPLICIT);
	ISL_F_CLR(bset, ISL_BASIC_SET_NO_REDUNDANT);
	ISL_F_CLR(bset, ISL_BASIC_SET_NORMALIZED);
	ISL_F_CLR(bset, ISL_BASIC_SET_NORMALIZED_DIVS);
	ISL_F_CLR(bset, ISL_BASIC_SET_ALL_EQUALITIES);

	bset = isl_basic_set_simplify(bset);
	bset = isl_basic_set_finalize(bset);

	isl_basic_set_free(bset);

struct isl_set *isl_set_preimage(struct isl_set *set, struct isl_mat *mat)
	struct isl_ctx *ctx;

	set = isl_set_cow(set);
	for (i = 0; i < set->n; ++i) {
		set->p[i] = isl_basic_set_preimage(set->p[i],
	if (mat->n_col != mat->n_row) {
		set->dim = isl_dim_cow(set->dim);
		set->dim->n_out += mat->n_col;
		set->dim->n_out -= mat->n_row;
	ISL_F_CLR(set, ISL_SET_NORMALIZED);

void isl_mat_dump(struct isl_mat *mat, FILE *out, int indent)
		fprintf(out, "%*snull mat\n", indent, "");

	if (mat->n_row == 0)
		fprintf(out, "%*s[]\n", indent, "");

	for (i = 0; i < mat->n_row; ++i) {
		if (!i)
			fprintf(out, "%*s[[", indent, "");
		else
			fprintf(out, "%*s[", indent+1, "");
		for (j = 0; j < mat->n_col; ++j) {
			isl_int_print(out, mat->row[i][j], 0);
		if (i == mat->n_row-1)
			fprintf(out, "]]\n");
		else
			fprintf(out, "]\n");

struct isl_mat *isl_mat_drop_cols(struct isl_mat *mat, unsigned col, unsigned n)
	mat = isl_mat_cow(mat);
	if (col != mat->n_col-n) {
		for (r = 0; r < mat->n_row; ++r)
			isl_seq_cpy(mat->row[r]+col, mat->row[r]+col+n,
				    mat->n_col - col - n);

struct isl_mat *isl_mat_drop_rows(struct isl_mat *mat, unsigned row, unsigned n)
	mat = isl_mat_cow(mat);
	for (r = row; r+n < mat->n_row; ++r)
		mat->row[r] = mat->row[r+n];

__isl_give isl_mat *isl_mat_insert_cols(__isl_take isl_mat *mat,
	unsigned col, unsigned n)
	ext = isl_mat_alloc(mat->ctx, mat->n_row, mat->n_col + n);
	isl_mat_sub_copy(mat->ctx, ext->row, mat->row, mat->n_row, 0, 0, col);
	isl_mat_sub_copy(mat->ctx, ext->row, mat->row, mat->n_row,
			 col + n, col, mat->n_col - col);

__isl_give isl_mat *isl_mat_insert_zero_cols(__isl_take isl_mat *mat,
	unsigned first, unsigned n)
	mat = isl_mat_insert_cols(mat, first, n);
	for (i = 0; i < mat->n_row; ++i)
		isl_seq_clr(mat->row[i] + first, n);

__isl_give isl_mat *isl_mat_add_zero_cols(__isl_take isl_mat *mat, unsigned n)
	return isl_mat_insert_zero_cols(mat, mat->n_col, n);

__isl_give isl_mat *isl_mat_insert_rows(__isl_take isl_mat *mat,
	unsigned row, unsigned n)
	ext = isl_mat_alloc(mat->ctx, mat->n_row + n, mat->n_col);
	isl_mat_sub_copy(mat->ctx, ext->row, mat->row, row, 0, 0, mat->n_col);
	isl_mat_sub_copy(mat->ctx, ext->row + row + n, mat->row + row,
			 mat->n_row - row, 0, 0, mat->n_col);

__isl_give isl_mat *isl_mat_add_rows(__isl_take isl_mat *mat, unsigned n)
	return isl_mat_insert_rows(mat, mat->n_row, n);

void isl_mat_col_submul(struct isl_mat *mat,
	int dst_col, isl_int f, int src_col)
	for (i = 0; i < mat->n_row; ++i)
		isl_int_submul(mat->row[i][dst_col], f, mat->row[i][src_col]);

void isl_mat_col_add(__isl_keep isl_mat *mat, int dst_col, int src_col)
	for (i = 0; i < mat->n_row; ++i)
		isl_int_add(mat->row[i][dst_col],
			    mat->row[i][dst_col], mat->row[i][src_col]);

void isl_mat_col_mul(struct isl_mat *mat, int dst_col, isl_int f, int src_col)
	for (i = 0; i < mat->n_row; ++i)
		isl_int_mul(mat->row[i][dst_col], f, mat->row[i][src_col]);
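
/* Complete the first "row" rows of the square matrix M to a unimodular
 * matrix: the column Hermite decomposition of these rows is computed and
 * the remaining rows of M are filled in with the corresponding rows of the
 * unimodular factor Q.
 */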
struct isl_mat *isl_mat_unimodular_complete(struct isl_mat *M, int row)
	struct isl_mat *H = NULL, *Q = NULL;

	isl_assert(M->ctx, M->n_row == M->n_col, goto error);
	H = isl_mat_left_hermite(isl_mat_copy(M), 0, NULL, &Q);
	M->n_row = M->n_col;
	for (r = 0; r < row; ++r)
		isl_assert(M->ctx, isl_int_is_one(H->row[r][r]), goto error);
	for (r = row; r < M->n_row; ++r)
		isl_seq_cpy(M->row[r], Q->row[r], M->n_col);

__isl_give isl_mat *isl_mat_concat(__isl_take isl_mat *top,
	__isl_take isl_mat *bot)
	struct isl_mat *mat;

	isl_assert(top->ctx, top->n_col == bot->n_col, goto error);
	if (top->n_row == 0) {
	if (bot->n_row == 0) {

	mat = isl_mat_alloc(top->ctx, top->n_row + bot->n_row, top->n_col);
	isl_mat_sub_copy(mat->ctx, mat->row, top->row, top->n_row,
	isl_mat_sub_copy(mat->ctx, mat->row + top->n_row, bot->row, bot->n_row,

int isl_mat_is_equal(__isl_keep isl_mat *mat1, __isl_keep isl_mat *mat2)
	if (mat1->n_row != mat2->n_row)
	if (mat1->n_col != mat2->n_col)
	for (i = 0; i < mat1->n_row; ++i)
		if (!isl_seq_eq(mat1->row[i], mat2->row[i], mat1->n_col))

__isl_give isl_mat *isl_mat_from_row_vec(__isl_take isl_vec *vec)
	struct isl_mat *mat;

	mat = isl_mat_alloc(vec->ctx, 1, vec->size);
	isl_seq_cpy(mat->row[0], vec->el, vec->size);

__isl_give isl_mat *isl_mat_vec_concat(__isl_take isl_mat *top,
	__isl_take isl_vec *bot)
	return isl_mat_concat(top, isl_mat_from_row_vec(bot));

__isl_give isl_mat *isl_mat_move_cols(__isl_take isl_mat *mat,
	unsigned dst_col, unsigned src_col, unsigned n)
	if (n == 0 || dst_col == src_col)
	res = isl_mat_alloc(mat->ctx, mat->n_row, mat->n_col);
	if (dst_col < src_col) {
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
				 dst_col, src_col, n);
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
				 dst_col + n, dst_col, src_col - dst_col);
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
				 src_col + n, src_col + n,
				 res->n_col - src_col - n);
	} else {
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
				 src_col, src_col + n, dst_col - src_col);
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
				 dst_col, src_col, n);
		isl_mat_sub_copy(res->ctx, res->row, mat->row, mat->n_row,
				 dst_col + n, dst_col + n,
				 res->n_col - dst_col - n);

void isl_mat_gcd(__isl_keep isl_mat *mat, isl_int *gcd)
	isl_int_set_si(*gcd, 0);
	for (i = 0; i < mat->n_row; ++i) {
		isl_seq_gcd(mat->row[i], mat->n_col, &g);
		isl_int_gcd(*gcd, *gcd, g);

__isl_give isl_mat *isl_mat_scale_down(__isl_take isl_mat *mat, isl_int m)
	for (i = 0; i < mat->n_row; ++i)
		isl_seq_scale_down(mat->row[i], mat->row[i], m, mat->n_col);
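
/* Divide all entries of "mat" by the greatest common divisor of all of
 * its entries.
 */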
__isl_give isl_mat *isl_mat_normalize(__isl_take isl_mat *mat)
	isl_mat_gcd(mat, &gcd);
	mat = isl_mat_scale_down(mat, gcd);