Target supports fully-masked (also known as fully-predicated) loops,
so that vector loops can handle partial as well as full vectors.
+@item vect_masked_load
+Target supports vector masked loads.
+
@item vect_masked_store
Target supports vector masked stores.
return 0;
}
-/* XFAILed because of the fix for PR97307 which sinks the load of a[i], preventing
- if-conversion to happen. */
-/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 2 "vect" { xfail *-*-* } } } */
-/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 1 "vect" } } */
+/* Since the fix for PR97307, which sinks the load of a[i], preventing
+   if-conversion from happening, targets that cannot do masked loads only
+   vectorize the inline copy.  */
+/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 2 "vect" { target vect_masked_load } } } */
+/* { dg-final { scan-tree-dump-times "LOOP VECTORIZED" 1 "vect" { target { ! vect_masked_load } } } } */
/* { dg-final { scan-tree-dump-times "optimizing condition reduction with FOLD_EXTRACT_LAST" 2 "vect" { target vect_fold_extract_last } } } */
/* { dg-final { scan-tree-dump-not "condition expression based on integer induction." "vect" } } */
|| [istarget aarch64*-*-*] }}]
}
+# Return 1 if the target supports vector masked loads.
+# Currently true when AVX is available, on AArch64 with SVE, and on amdgcn.
+
+proc check_effective_target_vect_masked_load { } {
+    return [expr { [check_avx_available]
+		   || [check_effective_target_aarch64_sve]
+		   || [istarget amdgcn*-*-*] } ]
+}
+
# Return 1 if the target supports vector masked stores.
proc check_effective_target_vect_masked_store { } {