2 * Copyright 2005-2007 Universiteit Leiden
3 * Copyright 2008-2009 Katholieke Universiteit Leuven
4 * Copyright 2010 INRIA Saclay
6 * Use of this software is governed by the GNU LGPLv2.1 license
8 * Written by Sven Verdoolaege, Leiden Institute of Advanced Computer Science,
9 * Universiteit Leiden, Niels Bohrweg 1, 2333 CA Leiden, The Netherlands
10 * and K.U.Leuven, Departement Computerwetenschappen, Celestijnenlaan 200A,
11 * B-3001 Leuven, Belgium
12 * and INRIA Saclay - Ile-de-France, Parc Club Orsay Universite,
13 * ZAC des vignes, 4 rue Jacques Monod, 91893 Orsay, France
20 /* A private structure to keep track of a mapping together with
21 * a user-specified identifier and a boolean indicating whether
22 * the map represents a must or may access/dependence.
24 struct isl_labeled_map {
/* NOTE(review): the member declarations are not visible in this excerpt;
 * later code accesses fields .map (isl_map *), .data (void *) and
 * .must (int flag) of this struct.
 */
30 /* A structure containing the input for dependence analysis:
32 * - n_must + n_may (<= max_source) sources
33 * - a function for determining the relative order of sources and sink
34 * The must sources are placed before the may sources.
36 * domain_map is an auxiliary map that maps the sink access relation
37 * to the domain of this access relation.
39 struct isl_access_info {
/* The sink access together with its user data. */
41 struct isl_labeled_map sink;
/* User callback that returns the shared nesting level / textual order
 * of two accesses (see its uses in access_sort_cmp and below).
 */
42 isl_access_level_before level_before;
/* Trailing array of sources, declared with one element and
 * over-allocated in isl_access_info_alloc (pre-C99 "struct hack";
 * a flexible array member would be the modern equivalent).
 */
46 struct isl_labeled_map source[1];
49 /* A structure containing the output of dependence analysis:
50 * - n_source dependences
51 * - a wrapped subset of the sink for which definitely no source could be found
52 * - a wrapped subset of the sink for which possibly no source could be found
/* NOTE(review): the "struct isl_flow {" opener and the n_source member
 * are not visible in this excerpt.
 */
55 isl_set *must_no_source;
56 isl_set *may_no_source;
/* Array of n_source dependence relations; must dependences precede
 * may dependences for each must source (see isl_flow_alloc).
 */
58 struct isl_labeled_map *dep;
61 /* Construct an isl_access_info structure and fill it up with
62 * the given data. The number of sources is set to 0.
64 __isl_give isl_access_info *isl_access_info_alloc(__isl_take isl_map *sink,
65 void *sink_user, isl_access_level_before fn, int max_source)
68 struct isl_access_info *acc;
73 ctx = isl_map_get_ctx(sink);
74 isl_assert(ctx, max_source >= 0, goto error);
/* Allocate the struct plus max_source - 1 extra isl_labeled_map slots:
 * source[] is declared with one element and over-allocated here.
 */
76 acc = isl_calloc(ctx, struct isl_access_info,
77 sizeof(struct isl_access_info) +
78 (max_source - 1) * sizeof(struct isl_labeled_map));
83 acc->sink.data = sink_user;
84 acc->level_before = fn;
85 acc->max_source = max_source;
/* NOTE(review): NULL checks on sink/acc, the acc->sink.map assignment,
 * the return statement and the error label are not visible in this excerpt.
 */
95 /* Free the given isl_access_info structure.
/* Releases the auxiliary domain_map, the sink map and every source map
 * (both must and may sources share the same array).
 */
97 void isl_access_info_free(__isl_take isl_access_info *acc)
/* NOTE(review): the NULL guard and the final free(acc) are not visible
 * in this excerpt.
 */
103 isl_map_free(acc->domain_map);
104 isl_map_free(acc->sink.map);
105 for (i = 0; i < acc->n_must + acc->n_may; ++i)
106 isl_map_free(acc->source[i].map);
/* Return the isl_ctx to which "acc" belongs, or NULL on NULL input. */
110 isl_ctx *isl_access_info_get_ctx(__isl_keep isl_access_info *acc)
112 return acc ? isl_map_get_ctx(acc->sink.map) : NULL;
115 /* Add another source to an isl_access_info structure, making
116 * sure the "must" sources are placed before the "may" sources.
117 * This function may be called at most max_source times on a
118 * given isl_access_info structure, with max_source as specified
119 * in the call to isl_access_info_alloc that constructed the structure.
121 __isl_give isl_access_info *isl_access_info_add_source(
122 __isl_take isl_access_info *acc, __isl_take isl_map *source,
123 int must, void *source_user)
129 ctx = isl_map_get_ctx(acc->sink.map);
130 isl_assert(ctx, acc->n_must + acc->n_may < acc->max_source, goto error);
/* Must source: move the first may source (if any) to the end of the
 * array to free up slot n_must for the new must source.
 */
134 acc->source[acc->n_must + acc->n_may] =
135 acc->source[acc->n_must];
136 acc->source[acc->n_must].map = source;
137 acc->source[acc->n_must].data = source_user;
138 acc->source[acc->n_must].must = 1;
/* May source: simply append after all existing sources. */
141 acc->source[acc->n_must + acc->n_may].map = source;
142 acc->source[acc->n_must + acc->n_may].data = source_user;
143 acc->source[acc->n_must + acc->n_may].must = 0;
/* Error path: consumes both "source" and "acc" as per __isl_take. */
149 isl_map_free(source);
150 isl_access_info_free(acc);
/* NOTE(review): the if/else around the two branches, the counter
 * increments (n_must/n_may), the success return and the error label
 * are not visible in this excerpt.
 */
154 /* A temporary structure used while sorting the accesses in an isl_access_info.
156 struct isl_access_sort_info {
157 struct isl_map *source_map;
/* NOTE(review): a source_data member is also used by access_sort_cmp
 * but its declaration is not visible in this excerpt.
 */
159 struct isl_access_info *acc;
162 /* Return -n, 0 or n (with n a positive value), depending on whether
163 * the source access identified by p1 should be sorted before, together
164 * or after that identified by p2.
166 * If p1 and p2 share a different number of levels with the sink,
167 * then the one with the lowest number of shared levels should be
169 * If they both share no levels, then the order is irrelevant.
170 * Otherwise, if p1 appears before p2, then it should be sorted first.
171 * For more generic initial schedules, it is possible that neither
172 * p1 nor p2 appears before the other, or at least not in any obvious way.
173 * We therefore also check if p2 appears before p1, in which case p2
174 * should be sorted first.
175 * If not, we try to order the two statements based on the description
176 * of the iteration domains. This results in an arbitrary, but fairly
179 static int access_sort_cmp(const void *p1, const void *p2)
181 const struct isl_access_sort_info *i1, *i2;
184 i1 = (const struct isl_access_sort_info *) p1;
185 i2 = (const struct isl_access_sort_info *) p2;
/* Compare the number of levels each source shares with the sink. */
187 level1 = i1->acc->level_before(i1->source_data, i1->acc->sink.data);
188 level2 = i2->acc->level_before(i2->source_data, i2->acc->sink.data);
190 if (level1 != level2 || !level1)
191 return level1 - level2;
/* Same (non-zero) shared level: fall back to mutual textual order. */
193 level1 = i1->acc->level_before(i1->source_data, i2->source_data);
197 level2 = i1->acc->level_before(i2->source_data, i1->source_data);
/* Last resort: arbitrary but deterministic order based on the hash
 * of the iteration-domain description.
 */
201 h1 = isl_map_get_hash(i1->source_map);
202 h2 = isl_map_get_hash(i2->source_map);
203 return h1 > h2 ? 1 : h1 < h2 ? -1 : 0;
206 /* Sort the must source accesses in order of increasing number of shared
207 * levels with the sink access.
208 * Source accesses with the same number of shared levels are sorted
209 * in their textual order.
211 static __isl_give isl_access_info *isl_access_info_sort_sources(
212 __isl_take isl_access_info *acc)
216 struct isl_access_sort_info *array;
/* Nothing to sort with fewer than two must sources. */
220 if (acc->n_must <= 1)
223 ctx = isl_map_get_ctx(acc->sink.map);
224 array = isl_alloc_array(ctx, struct isl_access_sort_info, acc->n_must);
/* Copy must sources into the temporary array, qsort with
 * access_sort_cmp, then copy the sorted order back.
 */
228 for (i = 0; i < acc->n_must; ++i) {
229 array[i].source_map = acc->source[i].map;
230 array[i].source_data = acc->source[i].data;
234 qsort(array, acc->n_must, sizeof(struct isl_access_sort_info),
237 for (i = 0; i < acc->n_must; ++i) {
238 acc->source[i].map = array[i].source_map;
239 acc->source[i].data = array[i].source_data;
/* Error path: the __isl_take argument is consumed on failure. */
246 isl_access_info_free(acc);
/* NOTE(review): the array[i].acc assignment, free(array), the success
 * return and the error label are not visible in this excerpt.
 */
250 /* Align the parameters of the two spaces if needed and then call
253 static __isl_give isl_space *space_align_and_join(__isl_take isl_space *left,
254 __isl_take isl_space *right)
/* Fast path: parameters already match, join directly. */
256 if (isl_space_match(left, isl_dim_param, right, isl_dim_param))
257 return isl_space_join(left, right);
/* Align each side against a copy of the other before joining. */
259 left = isl_space_align_params(left, isl_space_copy(right));
260 right = isl_space_align_params(right, isl_space_copy(left));
261 return isl_space_join(left, right);
264 /* Initialize an empty isl_flow structure corresponding to a given
265 * isl_access_info structure.
266 * For each must access, two dependences are created (initialized
267 * to the empty relation), one for the resulting must dependences
268 * and one for the resulting may dependences. May accesses can
269 * only lead to may dependences, so only one dependence is created
271 * This function is private as isl_flow structures are only supposed
272 * to be created by isl_access_info_compute_flow.
274 static __isl_give isl_flow *isl_flow_alloc(__isl_keep isl_access_info *acc)
278 struct isl_flow *dep;
283 ctx = isl_map_get_ctx(acc->sink.map);
284 dep = isl_calloc_type(ctx, struct isl_flow);
/* 2 slots per must source (must + may) followed by 1 slot per may source. */
288 dep->dep = isl_calloc_array(ctx, struct isl_labeled_map,
289 2 * acc->n_must + acc->n_may);
293 dep->n_source = 2 * acc->n_must + acc->n_may;
294 for (i = 0; i < acc->n_must; ++i) {
/* Dependence space: source iterations -> sink iterations. */
296 dim = space_align_and_join(
297 isl_map_get_space(acc->source[i].map),
298 isl_space_reverse(isl_map_get_space(acc->sink.map)));
299 dep->dep[2 * i].map = isl_map_empty(dim);
300 dep->dep[2 * i + 1].map = isl_map_copy(dep->dep[2 * i].map);
301 dep->dep[2 * i].data = acc->source[i].data;
302 dep->dep[2 * i + 1].data = acc->source[i].data;
303 dep->dep[2 * i].must = 1;
304 dep->dep[2 * i + 1].must = 0;
305 if (!dep->dep[2 * i].map || !dep->dep[2 * i + 1].map)
/* May sources occupy slots 2 * n_must .. 2 * n_must + n_may - 1
 * (index acc->n_must + i with i starting at acc->n_must).
 */
308 for (i = acc->n_must; i < acc->n_must + acc->n_may; ++i) {
310 dim = space_align_and_join(
311 isl_map_get_space(acc->source[i].map),
312 isl_space_reverse(isl_map_get_space(acc->sink.map)));
313 dep->dep[acc->n_must + i].map = isl_map_empty(dim);
314 dep->dep[acc->n_must + i].data = acc->source[i].data;
315 dep->dep[acc->n_must + i].must = 0;
316 if (!dep->dep[acc->n_must + i].map)
/* NOTE(review): goto error statements, the success return and the
 * error label/cleanup are not visible in this excerpt.
 */
326 /* Iterate over all sources and for each resulting flow dependence
327 * that is not empty, call the user specified function.
328 * The second argument in this function call identifies the source,
329 * while the third argument correspond to the final argument of
330 * the isl_flow_foreach call.
332 int isl_flow_foreach(__isl_keep isl_flow *deps,
333 int (*fn)(__isl_take isl_map *dep, int must, void *dep_user, void *user),
/* Skip empty dependences; abort the iteration when fn reports failure. */
341 for (i = 0; i < deps->n_source; ++i) {
342 if (isl_map_plain_is_empty(deps->dep[i].map))
344 if (fn(isl_map_copy(deps->dep[i].map), deps->dep[i].must,
345 deps->dep[i].data, user) < 0)
/* NOTE(review): the "continue"/error returns and the final return are
 * not visible in this excerpt.
 */
352 /* Return a copy of the subset of the sink for which no source could be found.
/* "must" selects the definitely-unsourced subset; otherwise the
 * possibly-unsourced subset is returned. Both are stored wrapped and
 * unwrapped here before returning.
 */
354 __isl_give isl_map *isl_flow_get_no_source(__isl_keep isl_flow *deps, int must)
360 return isl_set_unwrap(isl_set_copy(deps->must_no_source));
362 return isl_set_unwrap(isl_set_copy(deps->may_no_source));
/* Free the given isl_flow structure: both no-source sets and every
 * dependence map.
 * NOTE(review): the NULL guard, free(deps->dep) and free(deps) are not
 * visible in this excerpt.
 */
365 void isl_flow_free(__isl_take isl_flow *deps)
371 isl_set_free(deps->must_no_source);
372 isl_set_free(deps->may_no_source);
374 for (i = 0; i < deps->n_source; ++i)
375 isl_map_free(deps->dep[i].map);
/* Return the isl_ctx to which "deps" belongs, or NULL on NULL input. */
381 isl_ctx *isl_flow_get_ctx(__isl_keep isl_flow *deps)
383 return deps ? isl_set_get_ctx(deps->must_no_source) : NULL;
386 /* Return a map that enforces that the domain iteration occurs after
387 * the range iteration at the given level.
388 * If level is odd, then the domain iteration should occur after
389 * the target iteration in their shared level/2 outermost loops.
390 * In this case we simply need to enforce that these outermost
391 * loop iterations are the same.
392 * If level is even, then the loop iterator of the domain should
393 * be greater than the loop iterator of the range at the last
394 * of the level/2 shared loops, i.e., loop level/2 - 1.
396 static __isl_give isl_map *after_at_level(__isl_take isl_space *dim, int level)
398 struct isl_basic_map *bmap;
/* Odd level: equality on the level/2 outermost dimensions. */
401 bmap = isl_basic_map_equal(dim, level/2);
/* Even level: strictly greater at dimension level/2 - 1. */
403 bmap = isl_basic_map_more_at(dim, level/2 - 1);
405 return isl_map_from_basic_map(bmap);
/* NOTE(review): the if/else distinguishing odd from even levels is not
 * visible in this excerpt.
 */
408 /* Compute the last iteration of must source j that precedes the sink
409 * at the given level for sink iterations in set_C.
410 * The subset of set_C for which no such iteration can be found is returned
413 static struct isl_map *last_source(struct isl_access_info *acc,
414 struct isl_set *set_C,
415 int j, int level, struct isl_set **empty)
417 struct isl_map *read_map;
418 struct isl_map *write_map;
419 struct isl_map *dep_map;
420 struct isl_map *after;
421 struct isl_map *result;
/* Build sink -> source-j candidate pairs accessing the same element. */
423 read_map = isl_map_copy(acc->sink.map);
424 write_map = isl_map_copy(acc->source[j].map);
425 write_map = isl_map_reverse(write_map);
426 dep_map = isl_map_apply_range(read_map, write_map);
/* Restrict to source iterations preceding the sink at "level". */
427 after = after_at_level(isl_map_get_space(dep_map), level);
428 dep_map = isl_map_intersect(dep_map, after);
/* Lexicographic maximum picks the *last* preceding source iteration;
 * "empty" collects the sink iterations with no such source.
 */
429 result = isl_map_partial_lexmax(dep_map, set_C, empty);
430 result = isl_map_reverse(result);
435 /* For a given mapping between iterations of must source j and iterations
436 * of the sink, compute the last iteration of must source k preceding
437 * the sink at level before_level for any of the sink iterations,
438 * but following the corresponding iteration of must source j at level
441 static struct isl_map *last_later_source(struct isl_access_info *acc,
442 struct isl_map *old_map,
443 int j, int before_level,
444 int k, int after_level,
445 struct isl_set **empty)
448 struct isl_set *set_C;
449 struct isl_map *read_map;
450 struct isl_map *write_map;
451 struct isl_map *dep_map;
452 struct isl_map *after_write;
453 struct isl_map *before_read;
454 struct isl_map *result;
/* Restrict attention to the sink iterations covered by old_map. */
456 set_C = isl_map_range(isl_map_copy(old_map));
457 read_map = isl_map_copy(acc->sink.map);
458 write_map = isl_map_copy(acc->source[k].map);
460 write_map = isl_map_reverse(write_map);
461 dep_map = isl_map_apply_range(read_map, write_map);
/* Source k must follow source j at after_level... */
462 dim = space_align_and_join(isl_map_get_space(acc->source[k].map),
463 isl_space_reverse(isl_map_get_space(acc->source[j].map)));
464 after_write = after_at_level(dim, after_level);
465 after_write = isl_map_apply_range(after_write, old_map);
466 after_write = isl_map_reverse(after_write);
467 dep_map = isl_map_intersect(dep_map, after_write);
/* ...and still precede the sink at before_level. */
468 before_read = after_at_level(isl_map_get_space(dep_map), before_level);
469 dep_map = isl_map_intersect(dep_map, before_read);
/* Take the last such iteration of source k; "empty" holds the sink
 * iterations for which none exists.
 */
470 result = isl_map_partial_lexmax(dep_map, set_C, empty);
471 result = isl_map_reverse(result);
476 /* Given a shared_level between two accesses, return 1 if
477 * the first can precede the second at the requested target_level.
478 * If the target level is odd, i.e., refers to a statement level
479 * dimension, then first needs to precede second at the requested
480 * level, i.e., shared_level must be equal to target_level.
481 * If the target level is even, then the two loops should share
482 * at least the requested number of outer loops.
484 static int can_precede_at_level(int shared_level, int target_level)
486 if (shared_level < target_level)
488 if ((target_level % 2) && shared_level > target_level)
/* NOTE(review): the return statements (0 on the failing checks, 1
 * otherwise) are not visible in this excerpt.
 */
493 /* Given a possible flow dependence temp_rel[j] between source j and the sink
494 * at level sink_level, remove those elements for which
495 * there is an iteration of another source k < j that is closer to the sink.
496 * The flow dependences temp_rel[k] are updated with the improved sources.
497 * Any improved source needs to precede the sink at the same level
498 * and needs to follow source j at the same or a deeper level.
499 * The lower this level, the later the execution date of source k.
500 * We therefore consider lower levels first.
502 * If temp_rel[j] is empty, then there can be no improvement and
503 * we return immediately.
505 static int intermediate_sources(__isl_keep isl_access_info *acc,
506 struct isl_map **temp_rel, int j, int sink_level)
/* depth = deepest possible level: 2 * (loop depth of source j) + 1. */
509 int depth = 2 * isl_map_dim(acc->source[j].map, isl_dim_in) + 1;
511 if (isl_map_plain_is_empty(temp_rel[j]))
/* Only sources k < j can be closer: sources were sorted such that
 * earlier entries share fewer levels with the sink.
 */
514 for (k = j - 1; k >= 0; --k) {
516 plevel = acc->level_before(acc->source[k].data, acc->sink.data);
517 if (!can_precede_at_level(plevel, sink_level))
520 plevel2 = acc->level_before(acc->source[j].data,
521 acc->source[k].data);
523 for (level = sink_level; level <= depth; ++level) {
525 struct isl_set *trest;
526 struct isl_map *copy;
528 if (!can_precede_at_level(plevel2, level))
/* T = last iterations of source k strictly between source j and the
 * sink; move the improved part from temp_rel[j] to temp_rel[k].
 */
531 copy = isl_map_copy(temp_rel[j]);
532 T = last_later_source(acc, copy, j, sink_level, k,
534 if (isl_map_plain_is_empty(T)) {
539 temp_rel[j] = isl_map_intersect_range(temp_rel[j], trest);
540 temp_rel[k] = isl_map_union_disjoint(temp_rel[k], T);
/* NOTE(review): early returns, "continue" statements and loop closing
 * braces are not visible in this excerpt.
 */
547 /* Compute all iterations of may source j that precedes the sink at the given
548 * level for sink iterations in set_C.
550 static __isl_give isl_map *all_sources(__isl_keep isl_access_info *acc,
551 __isl_take isl_set *set_C, int j, int level)
/* Unlike last_source, no lexmax is taken: a may access contributes
 * *all* of its preceding iterations, not just the last one.
 */
558 read_map = isl_map_copy(acc->sink.map);
559 read_map = isl_map_intersect_domain(read_map, set_C);
/* May sources are stored after the n_must must sources. */
560 write_map = isl_map_copy(acc->source[acc->n_must + j].map);
561 write_map = isl_map_reverse(write_map);
562 dep_map = isl_map_apply_range(read_map, write_map);
563 after = after_at_level(isl_map_get_space(dep_map), level);
564 dep_map = isl_map_intersect(dep_map, after);
566 return isl_map_reverse(dep_map);
569 /* For a given mapping between iterations of must source k and iterations
570 * of the sink, compute all iterations of may source j preceding
571 * the sink at level before_level for any of the sink iterations,
572 * but following the corresponding iteration of must source k at level
575 static __isl_give isl_map *all_later_sources(__isl_keep isl_access_info *acc,
576 __isl_keep isl_map *old_map,
577 int j, int before_level, int k, int after_level)
584 isl_map *after_write;
585 isl_map *before_read;
/* Same structure as last_later_source, but for a may source and
 * without taking a lexicographic maximum at the end.
 */
587 set_C = isl_map_range(isl_map_copy(old_map));
588 read_map = isl_map_copy(acc->sink.map);
589 read_map = isl_map_intersect_domain(read_map, set_C);
590 write_map = isl_map_copy(acc->source[acc->n_must + j].map);
592 write_map = isl_map_reverse(write_map);
593 dep_map = isl_map_apply_range(read_map, write_map);
/* NOTE(review): plain isl_space_join is used here (not
 * space_align_and_join as in last_later_source) — presumably the
 * parameters are already aligned at this point; verify upstream.
 */
594 dim = isl_space_join(isl_map_get_space(acc->source[acc->n_must + j].map),
595 isl_space_reverse(isl_map_get_space(acc->source[k].map)));
596 after_write = after_at_level(dim, after_level);
597 after_write = isl_map_apply_range(after_write, old_map);
598 after_write = isl_map_reverse(after_write);
599 dep_map = isl_map_intersect(dep_map, after_write);
600 before_read = after_at_level(isl_map_get_space(dep_map), before_level);
601 dep_map = isl_map_intersect(dep_map, before_read);
602 return isl_map_reverse(dep_map);
605 /* Given the must and may dependence relations for the must accesses
606 * for level sink_level, check if there are any accesses of may access j
607 * that occur in between and return their union.
608 * If some of these accesses are intermediate with respect to
609 * (previously thought to be) must dependences, then these
610 * must dependences are turned into may dependences.
612 static __isl_give isl_map *all_intermediate_sources(
613 __isl_keep isl_access_info *acc, __isl_take isl_map *map,
614 struct isl_map **must_rel, struct isl_map **may_rel,
615 int j, int sink_level)
618 int depth = 2 * isl_map_dim(acc->source[acc->n_must + j].map,
621 for (k = 0; k < acc->n_must; ++k) {
/* Nothing to do for must sources with no dependences at this level. */
624 if (isl_map_plain_is_empty(may_rel[k]) &&
625 isl_map_plain_is_empty(must_rel[k]))
628 plevel = acc->level_before(acc->source[k].data,
629 acc->source[acc->n_must + j].data);
631 for (level = sink_level; level <= depth; ++level) {
636 if (!can_precede_at_level(plevel, level))
/* May source iterations between may_rel[k] and the sink only add
 * may dependences.
 */
639 copy = isl_map_copy(may_rel[k]);
640 T = all_later_sources(acc, copy, j, sink_level, k, level);
641 map = isl_map_union(map, T);
/* May source iterations between must_rel[k] and the sink additionally
 * demote the affected must dependences to may dependences.
 */
643 copy = isl_map_copy(must_rel[k]);
644 T = all_later_sources(acc, copy, j, sink_level, k, level);
645 ran = isl_map_range(isl_map_copy(T));
646 map = isl_map_union(map, T);
647 may_rel[k] = isl_map_union_disjoint(may_rel[k],
648 isl_map_intersect_range(isl_map_copy(must_rel[k]),
650 T = isl_map_from_domain_and_range(
652 isl_space_domain(isl_map_get_space(must_rel[k]))),
654 must_rel[k] = isl_map_subtract(must_rel[k], T);
/* NOTE(review): several intermediate lines (isl_set copies, continue
 * statements, closing braces and the final return of "map") are not
 * visible in this excerpt.
 */
661 /* Compute dependences for the case where all accesses are "may"
662 * accesses, which boils down to computing memory based dependences.
663 * The generic algorithm would also work in this case, but it would
664 * be overkill to use it.
666 static __isl_give isl_flow *compute_mem_based_dependences(
667 __isl_keep isl_access_info *acc)
674 res = isl_flow_alloc(acc);
/* Initially every sink iteration is unsourced ("mustdo"); "maydo"
 * starts as a copy and the difference is computed at the end.
 */
678 mustdo = isl_map_domain(isl_map_copy(acc->sink.map));
679 maydo = isl_set_copy(mustdo);
681 for (i = 0; i < acc->n_may; ++i) {
/* plevel's low bit tells whether the source textually precedes the
 * sink; choose <= vs < on the shared prefix accordingly.
 */
688 plevel = acc->level_before(acc->source[i].data, acc->sink.data);
689 is_before = plevel & 1;
692 dim = isl_map_get_space(res->dep[i].map);
694 before = isl_map_lex_le_first(dim, plevel);
696 before = isl_map_lex_lt_first(dim, plevel);
697 dep = isl_map_apply_range(isl_map_copy(acc->source[i].map),
698 isl_map_reverse(isl_map_copy(acc->sink.map)));
699 dep = isl_map_intersect(dep, before);
700 mustdo = isl_set_subtract(mustdo,
701 isl_map_range(isl_map_copy(dep)));
702 res->dep[i].map = isl_map_union(res->dep[i].map, dep);
/* Anything removed from mustdo has at least one may source. */
705 res->may_no_source = isl_set_subtract(maydo, isl_set_copy(mustdo));
706 res->must_no_source = mustdo;
/* NOTE(review): NULL checks, the plevel shift (presumably plevel >>= 1
 * or similar), the loop closing brace and the return/error paths are
 * not visible in this excerpt.
 */
711 /* Compute dependences for the case where there is at least one
714 * The core algorithm considers all levels in which a source may precede
715 * the sink, where a level may either be a statement level or a loop level.
716 * The outermost statement level is 1, the first loop level is 2, etc...
717 * The algorithm basically does the following:
718 * for all levels l of the read access from innermost to outermost
719 * for all sources w that may precede the sink access at that level
720 * compute the last iteration of the source that precedes the sink access
722 * add result to possible last accesses at level l of source w
723 * for all sources w2 that we haven't considered yet at this level that may
724 * also precede the sink access
725 * for all levels l2 of w from l to innermost
726 * for all possible last accesses dep of w at l
727 * compute last iteration of w2 between the source and sink
729 * add result to possible last accesses at level l of write w2
730 * and replace possible last accesses dep by the remainder
733 * The above algorithm is applied to the must access. During the course
734 * of the algorithm, we keep track of sink iterations that still
735 * need to be considered. These iterations are split into those that
736 * haven't been matched to any source access (mustdo) and those that have only
737 * been matched to may accesses (maydo).
738 * At the end of each level, we also consider the may accesses.
739 * In particular, we consider may accesses that precede the remaining
740 * sink iterations, moving elements from mustdo to maydo when appropriate,
741 * and may accesses that occur between a must source and a sink of any
742 * dependences found at the current level, turning must dependences into
743 * may dependences when appropriate.
746 static __isl_give isl_flow *compute_val_based_dependences(
747 __isl_keep isl_access_info *acc)
751 isl_set *mustdo = NULL;
752 isl_set *maydo = NULL;
755 isl_map **must_rel = NULL;
756 isl_map **may_rel = NULL;
761 res = isl_flow_alloc(acc);
764 ctx = isl_map_get_ctx(acc->sink.map);
766 depth = 2 * isl_map_dim(acc->sink.map, isl_dim_in) + 1;
767 mustdo = isl_map_domain(isl_map_copy(acc->sink.map));
768 maydo = isl_set_empty_like(mustdo);
769 if (!mustdo || !maydo)
771 if (isl_set_plain_is_empty(mustdo))
/* Per-level scratch relations, one pair per must source. */
774 must_rel = isl_alloc_array(ctx, struct isl_map *, acc->n_must);
775 may_rel = isl_alloc_array(ctx, struct isl_map *, acc->n_must);
776 if (!must_rel || !may_rel)
/* Main loop: innermost level first (see the comment above). */
779 for (level = depth; level >= 1; --level) {
780 for (j = acc->n_must-1; j >=0; --j) {
781 must_rel[j] = isl_map_empty_like(res->dep[j].map);
782 may_rel[j] = isl_map_copy(must_rel[j]);
/* Last preceding iteration of each must source at this level,
 * refined by closer intermediate sources.
 */
785 for (j = acc->n_must - 1; j >= 0; --j) {
787 struct isl_set *rest;
790 plevel = acc->level_before(acc->source[j].data,
792 if (!can_precede_at_level(plevel, level))
795 T = last_source(acc, mustdo, j, level, &rest);
796 must_rel[j] = isl_map_union_disjoint(must_rel[j], T);
799 intermediate_sources(acc, must_rel, j, level);
801 T = last_source(acc, maydo, j, level, &rest);
802 may_rel[j] = isl_map_union_disjoint(may_rel[j], T);
805 intermediate_sources(acc, may_rel, j, level);
807 if (isl_set_plain_is_empty(mustdo) &&
808 isl_set_plain_is_empty(maydo))
/* Remaining must sources may still improve earlier results. */
811 for (j = j - 1; j >= 0; --j) {
814 plevel = acc->level_before(acc->source[j].data,
816 if (!can_precede_at_level(plevel, level))
819 intermediate_sources(acc, must_rel, j, level);
820 intermediate_sources(acc, may_rel, j, level);
/* Now handle the may sources at this level. */
823 for (j = 0; j < acc->n_may; ++j) {
828 plevel = acc->level_before(acc->source[acc->n_must + j].data,
830 if (!can_precede_at_level(plevel, level))
833 T = all_sources(acc, isl_set_copy(maydo), j, level);
834 res->dep[2 * acc->n_must + j].map =
835 isl_map_union(res->dep[2 * acc->n_must + j].map, T);
836 T = all_sources(acc, isl_set_copy(mustdo), j, level);
837 ran = isl_map_range(isl_map_copy(T));
838 res->dep[2 * acc->n_must + j].map =
839 isl_map_union(res->dep[2 * acc->n_must + j].map, T);
/* Sink iterations sourced only by a may access migrate from
 * mustdo to maydo.
 */
840 mustdo = isl_set_subtract(mustdo, isl_set_copy(ran));
841 maydo = isl_set_union_disjoint(maydo, ran);
843 T = res->dep[2 * acc->n_must + j].map;
844 T = all_intermediate_sources(acc, T, must_rel, may_rel,
846 res->dep[2 * acc->n_must + j].map = T;
/* Fold this level's results into the final must/may relations. */
849 for (j = acc->n_must - 1; j >= 0; --j) {
850 res->dep[2 * j].map =
851 isl_map_union_disjoint(res->dep[2 * j].map,
853 res->dep[2 * j + 1].map =
854 isl_map_union_disjoint(res->dep[2 * j + 1].map,
858 if (isl_set_plain_is_empty(mustdo) &&
859 isl_set_plain_is_empty(maydo))
866 res->must_no_source = mustdo;
867 res->may_no_source = maydo;
871 isl_set_free(mustdo);
/* NOTE(review): many interior lines (gotos, breaks, frees of the
 * scratch arrays, closing braces, the return and the rest of the
 * error path) are not visible in this excerpt.
 */
878 /* Given a "sink" access, a list of n "source" accesses,
879 * compute for each iteration of the sink access
880 * and for each element accessed by that iteration,
881 * the source access in the list that last accessed the
882 * element accessed by the sink access before this sink access.
883 * Each access is given as a map from the loop iterators
884 * to the array indices.
885 * The result is a list of n relations between source and sink
886 * iterations and a subset of the domain of the sink access,
887 * corresponding to those iterations that access an element
888 * not previously accessed.
890 * To deal with multi-valued sink access relations, the sink iteration
891 * domain is first extended with dimensions that correspond to the data
892 * space. After the computation is finished, these extra dimensions are
893 * projected out again.
895 __isl_give isl_flow *isl_access_info_compute_flow(__isl_take isl_access_info *acc)
898 struct isl_flow *res = NULL;
/* Extend the sink domain with the data space (range_map) and keep
 * domain_map around to project the extra dimensions out afterwards.
 */
903 acc->domain_map = isl_map_domain_map(isl_map_copy(acc->sink.map));
904 acc->sink.map = isl_map_range_map(acc->sink.map);
/* No must sources: cheap memory-based analysis suffices. */
908 if (acc->n_must == 0)
909 res = compute_mem_based_dependences(acc);
911 acc = isl_access_info_sort_sources(acc);
912 res = compute_val_based_dependences(acc);
/* Undo the domain extension on every resulting dependence. */
917 for (j = 0; j < res->n_source; ++j) {
918 res->dep[j].map = isl_map_apply_range(res->dep[j].map,
919 isl_map_copy(acc->domain_map));
920 if (!res->dep[j].map)
923 if (!res->must_no_source || !res->may_no_source)
926 isl_access_info_free(acc);
929 isl_access_info_free(acc);
/* NOTE(review): the else branch, gotos, success return and error
 * label are not visible in this excerpt; the duplicated
 * isl_access_info_free calls belong to the success and error paths
 * respectively.
 */
935 /* Keep track of some information about a schedule for a given
936 * access. In particular, keep track of which dimensions
937 * have a constant value and of the actual constant values.
939 struct isl_sched_info {
/* NOTE(review): the members (int *is_cst; isl_vec *cst;) are not
 * visible in this excerpt; sched_info_alloc allocates both.
 */
/* Free an isl_sched_info structure.
 * NOTE(review): the NULL guard, free(info->is_cst) and free(info) are
 * not visible in this excerpt.
 */
944 static void sched_info_free(__isl_take struct isl_sched_info *info)
948 isl_vec_free(info->cst);
953 /* Extract information on the constant dimensions of the schedule
954 * for a given access. The "map" is of the form
958 * with S the schedule domain, D the iteration domain and A the data domain.
960 static __isl_give struct isl_sched_info *sched_info_alloc(
961 __isl_keep isl_map *map)
965 struct isl_sched_info *info;
/* The domain of "map" wraps [S -> D]; its input dimensions are the
 * schedule dimensions we inspect.
 */
971 dim = isl_space_unwrap(isl_space_domain(isl_map_get_space(map)));
974 n = isl_space_dim(dim, isl_dim_in);
977 ctx = isl_map_get_ctx(map);
978 info = isl_alloc_type(ctx, struct isl_sched_info);
981 info->is_cst = isl_alloc_array(ctx, int, n);
982 info->cst = isl_vec_alloc(ctx, n);
983 if (!info->is_cst || !info->cst)
/* Record, per schedule dimension, whether it is fixed and its value. */
986 for (i = 0; i < n; ++i)
987 info->is_cst[i] = isl_map_plain_is_fixed(map, isl_dim_in, i,
992 sched_info_free(info);
/* NOTE(review): NULL checks, the isl_space_free, the success return,
 * the error label and the constant-value destination argument are not
 * visible in this excerpt.
 */
/* State shared by the per-sink callbacks of isl_union_map_compute_flow:
 * the source unions, the accumulated results, and per-invocation
 * scratch (sink/source schedule info and the isl_access_info being
 * assembled).
 */
996 struct isl_compute_flow_data {
997 isl_union_map *must_source;
998 isl_union_map *may_source;
999 isl_union_map *must_dep;
1000 isl_union_map *may_dep;
1001 isl_union_map *must_no_source;
1002 isl_union_map *may_no_source;
/* NOTE(review): members "dim", "count" and "must", used by the
 * callbacks below, are not visible in this excerpt.
 */
1007 struct isl_sched_info *sink_info;
1008 struct isl_sched_info **source_info;
1009 isl_access_info *accesses;
/* Callback for isl_union_map_foreach_map: count the source accesses
 * whose data space equals data->dim (the current sink's array).
 * NOTE(review): the increment of data->count, isl_map_free(map) and
 * the return are not visible in this excerpt.
 */
1012 static int count_matching_array(__isl_take isl_map *map, void *user)
1016 struct isl_compute_flow_data *data;
1018 data = (struct isl_compute_flow_data *)user;
1020 dim = isl_space_range(isl_map_get_space(map));
1022 eq = isl_space_is_equal(dim, data->dim);
1024 isl_space_free(dim);
/* Callback for isl_union_map_foreach_map: for each source access on
 * the current sink's array, record its schedule info and add it to the
 * isl_access_info under construction (as must or may, per data->must).
 */
1035 static int collect_matching_array(__isl_take isl_map *map, void *user)
1039 struct isl_sched_info *info;
1040 struct isl_compute_flow_data *data;
1042 data = (struct isl_compute_flow_data *)user;
1044 dim = isl_space_range(isl_map_get_space(map));
1046 eq = isl_space_is_equal(dim, data->dim);
1048 isl_space_free(dim);
1057 info = sched_info_alloc(map);
1058 data->source_info[data->count] = info;
/* "map" is consumed by isl_access_info_add_source; "info" serves as
 * the user identifier passed back to the before() callback.
 */
1060 data->accesses = isl_access_info_add_source(data->accesses,
1061 map, data->must, info);
/* NOTE(review): the early-exit for non-matching arrays, error
 * handling, data->count increment and return are not visible in this
 * excerpt.
 */
1071 /* Determine the shared nesting level and the "textual order" of
1072 * the given accesses.
1074 * We first determine the minimal schedule dimension for both accesses.
1076 * If among those dimensions, we can find one where both have a fixed
1077 * value and if moreover those values are different, then the previous
1078 * dimension is the last shared nesting level and the textual order
1079 * is determined based on the order of the fixed values.
1080 * If no such fixed values can be found, then we set the shared
1081 * nesting level to the minimal schedule dimension, with no textual ordering.
1083 static int before(void *first, void *second)
1085 struct isl_sched_info *info1 = first;
1086 struct isl_sched_info *info2 = second;
1090 n1 = info1->cst->size;
1091 n2 = info2->cst->size;
1096 for (i = 0; i < n1; ++i) {
1097 if (!info1->is_cst[i])
1099 if (!info2->is_cst[i])
1101 if (isl_int_eq(info1->cst->el[i], info2->cst->el[i]))
/* Encoding: 2 * level + (1 if first textually precedes second).
 * The low bit is consumed by compute_mem_based_dependences.
 */
1103 return 2 * i + isl_int_lt(info1->cst->el[i], info2->cst->el[i]);
/* NOTE(review): the n1/n2 minimum computation, "continue" statements
 * and the fallback return (2 * n1) are not visible in this excerpt.
 */
1109 /* Given a sink access, look for all the source accesses that access
1110 * the same array and perform dataflow analysis on them using
1111 * isl_access_info_compute_flow.
1113 static int compute_flow(__isl_take isl_map *map, void *user)
1117 struct isl_compute_flow_data *data;
1120 data = (struct isl_compute_flow_data *)user;
1122 ctx = isl_map_get_ctx(map);
1124 data->accesses = NULL;
1125 data->sink_info = NULL;
1126 data->source_info = NULL;
/* Current sink's data space, used by the matching callbacks. */
1128 data->dim = isl_space_range(isl_map_get_space(map));
/* First pass: count matching sources to size the access info. */
1130 if (isl_union_map_foreach_map(data->must_source,
1131 &count_matching_array, data) < 0)
1133 if (isl_union_map_foreach_map(data->may_source,
1134 &count_matching_array, data) < 0)
1137 data->sink_info = sched_info_alloc(map);
1138 data->source_info = isl_calloc_array(ctx, struct isl_sched_info *,
1141 data->accesses = isl_access_info_alloc(isl_map_copy(map),
1142 data->sink_info, &before, data->count)
1143 if (!data->sink_info || !data->source_info || !data->accesses)
/* Second pass: actually collect the matching sources (must, then may;
 * data->must is presumably toggled between the two calls — the
 * assignments are not visible here).
 */
1147 if (isl_union_map_foreach_map(data->must_source,
1148 &collect_matching_array, data) < 0)
1151 if (isl_union_map_foreach_map(data->may_source,
1152 &collect_matching_array, data) < 0)
1155 flow = isl_access_info_compute_flow(data->accesses);
1156 data->accesses = NULL;
/* Fold the per-sink results into the accumulated unions. */
1161 data->must_no_source = isl_union_map_union(data->must_no_source,
1162 isl_union_map_from_map(isl_flow_get_no_source(flow, 1)));
1163 data->may_no_source = isl_union_map_union(data->may_no_source,
1164 isl_union_map_from_map(isl_flow_get_no_source(flow, 0)));
1166 for (i = 0; i < flow->n_source; ++i) {
1168 dep = isl_union_map_from_map(isl_map_copy(flow->dep[i].map));
1169 if (flow->dep[i].must)
1170 data->must_dep = isl_union_map_union(data->must_dep, dep);
1172 data->may_dep = isl_union_map_union(data->may_dep, dep);
1175 isl_flow_free(flow);
/* Normal cleanup of the per-sink scratch state. */
1177 sched_info_free(data->sink_info);
1178 if (data->source_info) {
1179 for (i = 0; i < data->count; ++i)
1180 sched_info_free(data->source_info[i]);
1181 free(data->source_info);
1183 isl_space_free(data->dim);
/* Error path performs the same cleanup plus the pending access info. */
1188 isl_access_info_free(data->accesses);
1189 sched_info_free(data->sink_info);
1190 if (data->source_info) {
1191 for (i = 0; i < data->count; ++i)
1192 sched_info_free(data->source_info[i]);
1193 free(data->source_info);
1195 isl_space_free(data->dim);
/* NOTE(review): gotos, the count reset, data->must toggling, map
 * frees, returns and the error label are not visible in this excerpt.
 */
1201 /* Given a collection of "sink" and "source" accesses,
1202 * compute for each iteration of a sink access
1203 * and for each element accessed by that iteration,
1204 * the source access in the list that last accessed the
1205 * element accessed by the sink access before this sink access.
1206 * Each access is given as a map from the loop iterators
1207 * to the array indices.
1208 * The result is a relations between source and sink
1209 * iterations and a subset of the domain of the sink accesses,
1210 * corresponding to those iterations that access an element
1211 * not previously accessed.
1213 * We first prepend the schedule dimensions to the domain
1214 * of the accesses so that we can easily compare their relative order.
1215 * Then we consider each sink access individually in compute_flow.
1217 int isl_union_map_compute_flow(__isl_take isl_union_map *sink,
1218 __isl_take isl_union_map *must_source,
1219 __isl_take isl_union_map *may_source,
1220 __isl_take isl_union_map *schedule,
1221 __isl_give isl_union_map **must_dep, __isl_give isl_union_map **may_dep,
1222 __isl_give isl_union_map **must_no_source,
1223 __isl_give isl_union_map **may_no_source)
1226 isl_union_map *range_map = NULL;
1227 struct isl_compute_flow_data data;
/* Align the parameters of all four inputs to a common space. */
1229 sink = isl_union_map_align_params(sink,
1230 isl_union_map_get_space(must_source));
1231 sink = isl_union_map_align_params(sink,
1232 isl_union_map_get_space(may_source));
1233 sink = isl_union_map_align_params(sink,
1234 isl_union_map_get_space(schedule));
1235 dim = isl_union_map_get_space(sink);
1236 must_source = isl_union_map_align_params(must_source, isl_space_copy(dim));
1237 may_source = isl_union_map_align_params(may_source, isl_space_copy(dim));
1238 schedule = isl_union_map_align_params(schedule, isl_space_copy(dim));
/* Prepend the schedule to each access domain: D -> [S -> D], so that
 * relative execution order can be compared; range_map is kept to map
 * results back to the original iteration domains.
 */
1240 schedule = isl_union_map_reverse(schedule);
1241 range_map = isl_union_map_range_map(schedule);
1242 schedule = isl_union_map_reverse(isl_union_map_copy(range_map));
1243 sink = isl_union_map_apply_domain(sink, isl_union_map_copy(schedule));
1244 must_source = isl_union_map_apply_domain(must_source,
1245 isl_union_map_copy(schedule));
1246 may_source = isl_union_map_apply_domain(may_source, schedule);
/* Only requested outputs get (empty) accumulators; NULL pointers mean
 * the caller does not want that result.
 */
1248 data.must_source = must_source;
1249 data.may_source = may_source;
1250 data.must_dep = must_dep ?
1251 isl_union_map_empty(isl_space_copy(dim)) : NULL;
1252 data.may_dep = may_dep ? isl_union_map_empty(isl_space_copy(dim)) : NULL;
1253 data.must_no_source = must_no_source ?
1254 isl_union_map_empty(isl_space_copy(dim)) : NULL;
1255 data.may_no_source = may_no_source ?
1256 isl_union_map_empty(isl_space_copy(dim)) : NULL;
1258 isl_space_free(dim);
/* Run the analysis once per sink access. */
1260 if (isl_union_map_foreach_map(sink, &compute_flow, &data) < 0)
1263 isl_union_map_free(sink);
1264 isl_union_map_free(must_source);
1265 isl_union_map_free(may_source);
/* Map both sides of the dependences back from the schedule-extended
 * domains to the original iteration domains.
 */
1268 data.must_dep = isl_union_map_apply_domain(data.must_dep,
1269 isl_union_map_copy(range_map));
1270 data.must_dep = isl_union_map_apply_range(data.must_dep,
1271 isl_union_map_copy(range_map));
1272 *must_dep = data.must_dep;
1275 data.may_dep = isl_union_map_apply_domain(data.may_dep,
1276 isl_union_map_copy(range_map));
1277 data.may_dep = isl_union_map_apply_range(data.may_dep,
1278 isl_union_map_copy(range_map));
1279 *may_dep = data.may_dep;
1281 if (must_no_source) {
1282 data.must_no_source = isl_union_map_apply_domain(
1283 data.must_no_source, isl_union_map_copy(range_map));
1284 *must_no_source = data.must_no_source;
1286 if (may_no_source) {
1287 data.may_no_source = isl_union_map_apply_domain(
1288 data.may_no_source, isl_union_map_copy(range_map));
1289 *may_no_source = data.may_no_source;
1292 isl_union_map_free(range_map);
/* Error path: free everything and NULL out the requested outputs. */
1296 isl_union_map_free(range_map);
1297 isl_union_map_free(sink);
1298 isl_union_map_free(must_source);
1299 isl_union_map_free(may_source);
1300 isl_union_map_free(data.must_dep);
1301 isl_union_map_free(data.may_dep);
1302 isl_union_map_free(data.must_no_source);
1303 isl_union_map_free(data.may_no_source);
1310 *must_no_source = NULL;
1312 *may_no_source = NULL;
/* NOTE(review): the if (must_dep)/if (may_dep) guards, goto error,
 * success return 0, the error label, the remaining *must_dep/*may_dep
 * NULL assignments and return -1 are not visible in this excerpt.
 */