cwrite (new_file_flag, bp_out, w);
opened += new_file_flag;
new_file_flag = !max_files || (opened < max_files);
+ if (!new_file_flag && ignorable (errno))
+ {
+ /* If the filter is no longer accepting input, stop reading. */
+ n_read = 0;
+ break;
+ }
bp_out += w;
to_read -= w;
to_write = n_bytes;
lines_rr (uintmax_t k, uintmax_t n, char *buf, size_t bufsize)
{
bool wrapped = false;
+ bool wrote = false;
bool file_limit;
size_t i_file;
of_t *files IF_LINT (= NULL);
else if (fwrite (bp, to_write, 1, files[i_file].ofile) != 1
&& ! ignorable (errno))
error (EXIT_FAILURE, errno, "%s", files[i_file].of_name);
+ if (! ignorable (errno))
+ wrote = true;
+
if (file_limit)
{
if (fclose (files[i_file].ofile) != 0 && ! ignorable (errno))
if (next && ++i_file == n)
{
wrapped = true;
+ /* If no filter accepted any input during the last cycle through the files, stop reading. */
+ if (! wrote)
+ goto no_filters;
+ wrote = false;
i_file = 0;
}
}
}
}
+no_filters:
/* Ensure all files created, so that any existing files are truncated,
and to signal any waiting fifo consumers.
Also, close any open file descriptors.
# where they would result in a non-zero exit from split.
yes | head -n200K | split -b1G --filter='head -c1 >/dev/null' || fail=1
+# Ensure that endless input is discarded once all filters have finished
+timeout 10 yes | split --filter="head -c1 >/dev/null" -n r/1 || fail=1
+timeout 10 split --filter="head -c1 >/dev/null" -n 1 /dev/zero || fail=1
+
Exit $fail