/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include "kerncompat.h"
#include "androidcompat.h"

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <uuid/uuid.h>

static const char * const scrub_cmd_group_usage[] = {
	"btrfs scrub <command> [options] <path>|<device>",
	NULL
};

#define SCRUB_DATA_FILE "/var/lib/btrfs/scrub.status"
#define SCRUB_PROGRESS_SOCKET_PATH "/var/lib/btrfs/scrub.progress"
#define SCRUB_FILE_VERSION_PREFIX "scrub status"
#define SCRUB_FILE_VERSION "1"
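
/*
 * Layout of the status file written below: a "scrub status:1" version header
 * followed by one line per device of the form
 *	<fsid>:<devid>|<key>:<value>|<key>:<value>|...
 * (see scrub_write_file() and scrub_read_file()).
 */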

/* TBD: replace with #include "linux/ioprio.h" in some years */
#if !defined (IOPRIO_H)
#define IOPRIO_WHO_PROCESS 1
#define IOPRIO_CLASS_SHIFT 13
#define IOPRIO_PRIO_VALUE(class, data) \
		(((class) << IOPRIO_CLASS_SHIFT) | (data))
#define IOPRIO_CLASS_IDLE 3
#endif
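
/*
 * e.g. IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0) == (3 << 13) == 0x6000,
 * the value scrub_one_dev() passes to ioprio_set() by default
 */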

struct scrub_progress {
	struct btrfs_ioctl_scrub_args scrub_args;
	struct scrub_stats stats;
	struct scrub_file_record *resumed;
	pthread_mutex_t progress_mutex;

struct scrub_file_record {
	u8 fsid[BTRFS_FSID_SIZE];
	struct scrub_stats stats;
	struct btrfs_scrub_progress p;

struct scrub_progress_cycle {
	struct btrfs_ioctl_fs_info_args *fi;
	struct scrub_progress *progress;
	struct scrub_progress *shared_progress;
	pthread_mutex_t *write_mutex;

struct scrub_fs_stat {
	struct btrfs_scrub_progress p;
	struct scrub_stats s;
};
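
/* dump every counter of a scrub run verbatim, one value per line */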
static void print_scrub_full(struct btrfs_scrub_progress *sp)
{
	printf("\tdata_extents_scrubbed: %lld\n", sp->data_extents_scrubbed);
	printf("\ttree_extents_scrubbed: %lld\n", sp->tree_extents_scrubbed);
	printf("\tdata_bytes_scrubbed: %lld\n", sp->data_bytes_scrubbed);
	printf("\ttree_bytes_scrubbed: %lld\n", sp->tree_bytes_scrubbed);
	printf("\tread_errors: %lld\n", sp->read_errors);
	printf("\tcsum_errors: %lld\n", sp->csum_errors);
	printf("\tverify_errors: %lld\n", sp->verify_errors);
	printf("\tno_csum: %lld\n", sp->no_csum);
	printf("\tcsum_discards: %lld\n", sp->csum_discards);
	printf("\tsuper_errors: %lld\n", sp->super_errors);
	printf("\tmalloc_errors: %lld\n", sp->malloc_errors);
	printf("\tuncorrectable_errors: %lld\n", sp->uncorrectable_errors);
	printf("\tunverified_errors: %lld\n", sp->unverified_errors);
	printf("\tcorrected_errors: %lld\n", sp->corrected_errors);
	printf("\tlast_physical: %lld\n", sp->last_physical);
}

#define PRINT_SCRUB_ERROR(test, desc) do {	\
	if (test)				\
		printf(" %s=%llu", desc, test);	\
} while (0)

static void print_scrub_summary(struct btrfs_scrub_progress *p)
{
	u64 err_cnt;
	u64 err_cnt2;

	err_cnt = p->read_errors +
			p->csum_errors +
			p->verify_errors +
			p->super_errors;

	err_cnt2 = p->corrected_errors + p->uncorrectable_errors;

	if (p->malloc_errors)
		printf("*** WARNING: memory allocation failed while scrubbing. "
			"results may be inaccurate\n");

	printf("\ttotal bytes scrubbed: %s with %llu errors\n",
		pretty_size(p->data_bytes_scrubbed + p->tree_bytes_scrubbed),
		max(err_cnt, err_cnt2));

	if (err_cnt || err_cnt2) {
		printf("\terror details:");
		PRINT_SCRUB_ERROR(p->read_errors, "read");
		PRINT_SCRUB_ERROR(p->super_errors, "super");
		PRINT_SCRUB_ERROR(p->verify_errors, "verify");
		PRINT_SCRUB_ERROR(p->csum_errors, "csum");
		printf("\n");
		printf("\tcorrected errors: %llu, uncorrectable errors: %llu, "
			"unverified errors: %llu\n", p->corrected_errors,
			p->uncorrectable_errors, p->unverified_errors);
	}
}
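
/*
 * Fold one device's counters and stats into a filesystem-wide scrub_fs_stat:
 * plain counters are summed, t_start/t_resumed take the earliest non-zero
 * value, duration/canceled the largest, and finished the minimum, so the
 * filesystem counts as finished only once every device has finished.
 */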
#define _SCRUB_FS_STAT(p, name, fs_stat) do {	\
	fs_stat->p.name += p->name;	\
} while (0)

#define _SCRUB_FS_STAT_MIN(ss, name, fs_stat)	\
do {	\
	if (fs_stat->s.name > ss->name) {	\
		fs_stat->s.name = ss->name;	\
	}	\
} while (0)

#define _SCRUB_FS_STAT_ZMIN(ss, name, fs_stat)	\
do {	\
	if (!fs_stat->s.name || fs_stat->s.name > ss->name) {	\
		fs_stat->s.name = ss->name;	\
	}	\
} while (0)

#define _SCRUB_FS_STAT_ZMAX(ss, name, fs_stat)	\
do {	\
	if (!(fs_stat)->s.name || (fs_stat)->s.name < (ss)->name) {	\
		(fs_stat)->s.name = (ss)->name;	\
	}	\
} while (0)

static void add_to_fs_stat(struct btrfs_scrub_progress *p,
		struct scrub_stats *ss,
		struct scrub_fs_stat *fs_stat)
{
	_SCRUB_FS_STAT(p, data_extents_scrubbed, fs_stat);
	_SCRUB_FS_STAT(p, tree_extents_scrubbed, fs_stat);
	_SCRUB_FS_STAT(p, data_bytes_scrubbed, fs_stat);
	_SCRUB_FS_STAT(p, tree_bytes_scrubbed, fs_stat);
	_SCRUB_FS_STAT(p, read_errors, fs_stat);
	_SCRUB_FS_STAT(p, csum_errors, fs_stat);
	_SCRUB_FS_STAT(p, verify_errors, fs_stat);
	_SCRUB_FS_STAT(p, no_csum, fs_stat);
	_SCRUB_FS_STAT(p, csum_discards, fs_stat);
	_SCRUB_FS_STAT(p, super_errors, fs_stat);
	_SCRUB_FS_STAT(p, malloc_errors, fs_stat);
	_SCRUB_FS_STAT(p, uncorrectable_errors, fs_stat);
	_SCRUB_FS_STAT(p, corrected_errors, fs_stat);
	_SCRUB_FS_STAT(p, last_physical, fs_stat);
	_SCRUB_FS_STAT_ZMIN(ss, t_start, fs_stat);
	_SCRUB_FS_STAT_ZMIN(ss, t_resumed, fs_stat);
	_SCRUB_FS_STAT_ZMAX(ss, duration, fs_stat);
	_SCRUB_FS_STAT_ZMAX(ss, canceled, fs_stat);
	_SCRUB_FS_STAT_MIN(ss, finished, fs_stat);
}

static void init_fs_stat(struct scrub_fs_stat *fs_stat)
{
	memset(fs_stat, 0, sizeof(*fs_stat));
	fs_stat->s.finished = 1;
}
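
/*
 * Print when the scrub started or resumed and for how long it has been
 * running, or after how long it finished, was canceled or was interrupted.
 */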
static void _print_scrub_ss(struct scrub_stats *ss)
	if (!ss || !ss->t_start) {
		printf("\tno stats available\n");

		localtime_r(&ss->t_resumed, &tm);
		strftime(t, sizeof(t), "%c", &tm);
		t[sizeof(t) - 1] = '\0';
		printf("\tscrub resumed at %s", t);

		localtime_r(&ss->t_start, &tm);
		strftime(t, sizeof(t), "%c", &tm);
		t[sizeof(t) - 1] = '\0';
		printf("\tscrub started at %s", t);

	seconds = ss->duration;
	hours = ss->duration / (60 * 60);
	gmtime_r(&seconds, &tm);
	strftime(t, sizeof(t), "%M:%S", &tm);
		printf(", running for %02u:%s\n", hours, t);
	else if (ss->canceled)
		printf(" and was aborted after %02u:%s\n", hours, t);
	else if (ss->finished)
		printf(" and finished after %02u:%s\n", hours, t);
		printf(", interrupted after %02u:%s, not running\n",

static void print_scrub_dev(struct btrfs_ioctl_dev_info_args *di,
		struct btrfs_scrub_progress *p, int raw,
		const char *append, struct scrub_stats *ss)
	printf("scrub device %s (id %llu) %s\n", di->path, di->devid,
		append ? append : "");

		print_scrub_summary(p);

static void print_fs_stat(struct scrub_fs_stat *fs_stat, int raw)
	_print_scrub_ss(&fs_stat->s);

		print_scrub_full(&fs_stat->p);
		print_scrub_summary(&fs_stat->p);

static void free_history(struct scrub_file_record **last_scrubs)
	struct scrub_file_record **l = last_scrubs;

/*
 * cancels a running scrub and makes the master process record the current
 * progress status before exiting.
 */
static int cancel_fd = -1;
static void scrub_sigint_record_progress(int signal)
	ret = ioctl(cancel_fd, BTRFS_IOC_SCRUB_CANCEL, NULL);
	if (ret < 0)
		perror("Scrub cancel failed");

static int scrub_handle_sigint_parent(void)
{
	struct sigaction sa = {
		.sa_handler = SIG_IGN,
		.sa_flags = SA_RESTART,
	};

	return sigaction(SIGINT, &sa, NULL);
}

static int scrub_handle_sigint_child(int fd)
{
	struct sigaction sa = {
		.sa_handler = fd == -1 ? SIG_DFL : scrub_sigint_record_progress,
	};

	cancel_fd = fd;
	return sigaction(SIGINT, &sa, NULL);
}
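
/*
 * Build the per-filesystem data file name from fn_base, the fsid (passed as
 * fn_local) and an optional temporary suffix; used for both the status file
 * and the progress socket path.
 */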
static int scrub_datafile(const char *fn_base, const char *fn_local,
		const char *fn_tmp, char *datafile, int size)
	datafile[end + 1] = '\0';
	strncpy(datafile, fn_base, end);
	ret = strlen(datafile);

	strncpy(datafile + ret + 1, fn_local, end - ret - 1);
	ret = strlen(datafile);

	strncpy(datafile + ret + 1, fn_tmp, end - ret - 1);
	ret = strlen(datafile);
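
/*
 * Open the given data file and take an exclusive, non-blocking flock() on it
 * so that concurrent invocations cannot clobber each other's records.
 */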
static int scrub_open_file(const char *datafile, int m)
	fd = open(datafile, m, 0600);

	ret = flock(fd, LOCK_EX|LOCK_NB);

static int scrub_open_file_r(const char *fn_base, const char *fn_local)
	char datafile[PATH_MAX];

	ret = scrub_datafile(fn_base, fn_local, NULL,
				datafile, sizeof(datafile));
	return scrub_open_file(datafile, O_RDONLY);

static int scrub_open_file_w(const char *fn_base, const char *fn_local,
	char datafile[PATH_MAX];

	ret = scrub_datafile(fn_base, fn_local, tmp,
				datafile, sizeof(datafile));
	return scrub_open_file(datafile, O_WRONLY|O_CREAT);

static int scrub_rename_file(const char *fn_base, const char *fn_local,
	char datafile_old[PATH_MAX];
	char datafile_new[PATH_MAX];

	ret = scrub_datafile(fn_base, fn_local, tmp,
				datafile_old, sizeof(datafile_old));
	ret = scrub_datafile(fn_base, fn_local, NULL,
				datafile_new, sizeof(datafile_new));
	ret = rename(datafile_old, datafile_new);
	return ret ? -errno : 0;

#define _SCRUB_KVREAD(ret, i, name, avail, l, dest) if (ret == 0) {	  \
	ret = scrub_kvread(i, sizeof(#name), avail, l, #name, dest.name); \
}

/*
 * returns 0 if the key did not match (nothing was read)
 *         1 if the key did match (success)
 *        -1 if the key did match and an error occurred
 */
static int scrub_kvread(int *i, int len, int avail, const char *buf,
		const char *key, u64 *dest)
	if (*i + len + 1 < avail && strncmp(&buf[*i], key, len - 1) == 0) {
		for (j = 0; isdigit(buf[*i + j]) && *i + j < avail; ++j)
			;
		*dest = atoll(&buf[*i]);

#define _SCRUB_INVALID do {	\
	if (report_errors)	\
		warning("invalid data on line %d pos "	\
			"%d state %d (near \"%.*s\") at %s:%d",	\
			lineno, i, state, 20 > avail ? avail : 20,	\
			l + i, __FILE__, __LINE__);	\
} while (0)
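
/*
 * Parse the status file with a small per-character state machine.  The state
 * numbers used below mean: 0 version header, 1 start of line (allocate a new
 * record), 2 skip leading whitespace, 3 fsid, 4 device id, 5 key/value
 * pairs, 6 after a number, 99 skip the rest of the line.
 */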
static struct scrub_file_record **scrub_read_file(int fd, int report_errors)
	char empty_uuid[BTRFS_FSID_SIZE] = {0};
	struct scrub_file_record **p = NULL;

		old_avail = avail - i;
		BUG_ON(old_avail < 0);
		memmove(l, l + i, old_avail);
		avail = read(fd, l + old_avail, sizeof(l) - old_avail);
		if (avail == 0 && old_avail == 0) {
				memcmp(p[curr]->fsid, empty_uuid, BTRFS_FSID_SIZE) == 0) {
			} else if (curr == -1) {
				p = ERR_PTR(-ENODATA);
			return ERR_PTR(-errno);

		case 0: /* start of file */
			ret = scrub_kvread(&i,
				sizeof(SCRUB_FILE_VERSION_PREFIX), avail, l,
				SCRUB_FILE_VERSION_PREFIX, &version);
			if (version != atoll(SCRUB_FILE_VERSION))
				return ERR_PTR(-ENOTSUP);
		case 1: /* start of line, alloc */
			/*
			 * this state makes sure we have a complete line in
			 * further processing, so we don't need wrap-tracking
			 */
			if (!eof && !memchr(l + i, '\n', avail - i))
			if (curr > -1 && memcmp(p[curr]->fsid, empty_uuid,
						BTRFS_FSID_SIZE) == 0) {
			p = realloc(p, (curr + 2) * sizeof(*p));
				return ERR_PTR(-errno);
			p[curr] = malloc(sizeof(**p));
				return ERR_PTR(-errno);
			memset(p[curr], 0, sizeof(**p));
		case 2: /* start of line, skip space */
			while (isspace(l[i]) && i < avail) {
				(!eof && !memchr(l + i, '\n', avail - i)))
		case 3: /* read fsid */
			for (j = 0; l[i + j] != ':' && i + j < avail; ++j)
				;
			if (i + j + 1 >= avail)
			if (j != BTRFS_UUID_UNPARSED_SIZE - 1)
			ret = uuid_parse(l + i, p[curr]->fsid);
		case 4: /* read dev id */
			for (j = 0; isdigit(l[i + j]) && i + j < avail; ++j)
				;
			if (j == 0 || i + j + 1 >= avail)
			p[curr]->devid = atoll(&l[i]);
		case 5: /* read key/value pair */
			_SCRUB_KVREAD(ret, &i, data_extents_scrubbed, avail, l,
					&p[curr]->p);
			_SCRUB_KVREAD(ret, &i, tree_extents_scrubbed, avail, l,
					&p[curr]->p);
			_SCRUB_KVREAD(ret, &i, data_bytes_scrubbed, avail, l,
					&p[curr]->p);
			_SCRUB_KVREAD(ret, &i, tree_bytes_scrubbed, avail, l,
					&p[curr]->p);
			_SCRUB_KVREAD(ret, &i, read_errors, avail, l,
					&p[curr]->p);
			_SCRUB_KVREAD(ret, &i, csum_errors, avail, l,
					&p[curr]->p);
			_SCRUB_KVREAD(ret, &i, verify_errors, avail, l,
					&p[curr]->p);
			_SCRUB_KVREAD(ret, &i, no_csum, avail, l,
					&p[curr]->p);
			_SCRUB_KVREAD(ret, &i, csum_discards, avail, l,
					&p[curr]->p);
			_SCRUB_KVREAD(ret, &i, super_errors, avail, l,
					&p[curr]->p);
			_SCRUB_KVREAD(ret, &i, malloc_errors, avail, l,
					&p[curr]->p);
			_SCRUB_KVREAD(ret, &i, uncorrectable_errors, avail, l,
					&p[curr]->p);
			_SCRUB_KVREAD(ret, &i, corrected_errors, avail, l,
					&p[curr]->p);
			_SCRUB_KVREAD(ret, &i, last_physical, avail, l,
					&p[curr]->p);
			_SCRUB_KVREAD(ret, &i, finished, avail, l,
					(u64 *)&p[curr]->stats);
			_SCRUB_KVREAD(ret, &i, t_start, avail, l,
					(u64 *)&p[curr]->stats);
			_SCRUB_KVREAD(ret, &i, t_resumed, avail, l,
					(u64 *)&p[curr]->stats);
			_SCRUB_KVREAD(ret, &i, duration, avail, l,
					(u64 *)&p[curr]->stats);
			_SCRUB_KVREAD(ret, &i, canceled, avail, l,
					(u64 *)&p[curr]->stats);
		case 6: /* after number */
			else if (l[i] == '\n')
		case 99: /* skip rest of line */
			if (l[i - 1] == '\n') {

static int scrub_write_buf(int fd, const void *data, int len)
	ret = write(fd, data, len);

static int scrub_writev(int fd, char *buf, int max, const char *fmt, ...)
	__attribute__ ((format (printf, 4, 5)));
static int scrub_writev(int fd, char *buf, int max, const char *fmt, ...)
	ret = vsnprintf(buf, max, fmt, args);
	return scrub_write_buf(fd, buf, ret);
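
/*
 * For a scrub that was resumed, report totals for the whole operation: add
 * the counters recorded for the interrupted part of the run to those of the
 * current part and stitch the start/resume times and durations together.
 */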
#define _SCRUB_SUM(dest, data, name) dest->scrub_args.progress.name =	\
	data->resumed->p.name + data->scrub_args.progress.name

static struct scrub_progress *scrub_resumed_stats(struct scrub_progress *data,
		struct scrub_progress *dest)
{
	if (!data->resumed || data->skip)
		return data;

	_SCRUB_SUM(dest, data, data_extents_scrubbed);
	_SCRUB_SUM(dest, data, tree_extents_scrubbed);
	_SCRUB_SUM(dest, data, data_bytes_scrubbed);
	_SCRUB_SUM(dest, data, tree_bytes_scrubbed);
	_SCRUB_SUM(dest, data, read_errors);
	_SCRUB_SUM(dest, data, csum_errors);
	_SCRUB_SUM(dest, data, verify_errors);
	_SCRUB_SUM(dest, data, no_csum);
	_SCRUB_SUM(dest, data, csum_discards);
	_SCRUB_SUM(dest, data, super_errors);
	_SCRUB_SUM(dest, data, malloc_errors);
	_SCRUB_SUM(dest, data, uncorrectable_errors);
	_SCRUB_SUM(dest, data, corrected_errors);
	_SCRUB_SUM(dest, data, last_physical);
	dest->stats.canceled = data->stats.canceled;
	dest->stats.finished = data->stats.finished;
	dest->stats.t_resumed = data->stats.t_start;
	dest->stats.t_start = data->resumed->stats.t_start;
	dest->stats.duration = data->resumed->stats.duration +
							data->stats.duration;
	dest->scrub_args.devid = data->scrub_args.devid;

	return dest;
}

#define _SCRUB_KVWRITE(fd, buf, name, use)		\
	scrub_kvwrite(fd, buf, sizeof(buf), #name,	\
			use->scrub_args.progress.name)

#define _SCRUB_KVWRITE_STATS(fd, buf, name, use)	\
	scrub_kvwrite(fd, buf, sizeof(buf), #name,	\
			use->stats.name)

static int scrub_kvwrite(int fd, char *buf, int max, const char *key, u64 val)
{
	return scrub_writev(fd, buf, max, "|%s:%lld", key, val);
}

static int scrub_write_file(int fd, const char *fsid,
		struct scrub_progress *data, int n)
	struct scrub_progress local;
	struct scrub_progress *use;

	/* each -1 is to subtract one \0 byte, the + 2 is for ':' and '\n' */
	ret = scrub_write_buf(fd, SCRUB_FILE_VERSION_PREFIX ":"
				SCRUB_FILE_VERSION "\n",
				(sizeof(SCRUB_FILE_VERSION_PREFIX) - 1) +
				(sizeof(SCRUB_FILE_VERSION) - 1) + 2);

	for (i = 0; i < n; ++i) {
		use = scrub_resumed_stats(&data[i], &local);
		if (scrub_write_buf(fd, fsid, strlen(fsid)) ||
		    scrub_write_buf(fd, ":", 1) ||
		    scrub_writev(fd, buf, sizeof(buf), "%lld",
				 use->scrub_args.devid) ||
		    scrub_write_buf(fd, buf, ret) ||
		    _SCRUB_KVWRITE(fd, buf, data_extents_scrubbed, use) ||
		    _SCRUB_KVWRITE(fd, buf, tree_extents_scrubbed, use) ||
		    _SCRUB_KVWRITE(fd, buf, data_bytes_scrubbed, use) ||
		    _SCRUB_KVWRITE(fd, buf, tree_bytes_scrubbed, use) ||
		    _SCRUB_KVWRITE(fd, buf, read_errors, use) ||
		    _SCRUB_KVWRITE(fd, buf, csum_errors, use) ||
		    _SCRUB_KVWRITE(fd, buf, verify_errors, use) ||
		    _SCRUB_KVWRITE(fd, buf, no_csum, use) ||
		    _SCRUB_KVWRITE(fd, buf, csum_discards, use) ||
		    _SCRUB_KVWRITE(fd, buf, super_errors, use) ||
		    _SCRUB_KVWRITE(fd, buf, malloc_errors, use) ||
		    _SCRUB_KVWRITE(fd, buf, uncorrectable_errors, use) ||
		    _SCRUB_KVWRITE(fd, buf, corrected_errors, use) ||
		    _SCRUB_KVWRITE(fd, buf, last_physical, use) ||
		    _SCRUB_KVWRITE_STATS(fd, buf, t_start, use) ||
		    _SCRUB_KVWRITE_STATS(fd, buf, t_resumed, use) ||
		    _SCRUB_KVWRITE_STATS(fd, buf, duration, use) ||
		    _SCRUB_KVWRITE_STATS(fd, buf, canceled, use) ||
		    _SCRUB_KVWRITE_STATS(fd, buf, finished, use) ||
		    scrub_write_buf(fd, "\n", 1)) {

static int scrub_write_progress(pthread_mutex_t *m, const char *fsid,
		struct scrub_progress *data, int n)
	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);

	ret = pthread_mutex_lock(m);

	fd = scrub_open_file_w(SCRUB_DATA_FILE, fsid, "tmp");
	err = scrub_write_file(fd, fsid, data, n);
	err = scrub_rename_file(SCRUB_DATA_FILE, fsid, "tmp");

	ret = pthread_mutex_unlock(m);

	ret = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old);
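
/*
 * Per-device worker thread: lower the I/O priority of the calling thread,
 * then issue BTRFS_IOC_SCRUB, which normally returns only once the scrub of
 * this device has finished, failed or been canceled.
 */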
static void *scrub_one_dev(void *ctx)
	struct scrub_progress *sp = ctx;

	sp->stats.canceled = 0;
	sp->stats.duration = 0;
	sp->stats.finished = 0;

	ret = syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
			IOPRIO_PRIO_VALUE(sp->ioprio_class,
				sp->ioprio_classdata));
	if (ret)
		warning("setting ioprio failed: %s (ignored)",
			strerror(errno));

	ret = ioctl(sp->fd, BTRFS_IOC_SCRUB, &sp->scrub_args);
	gettimeofday(&tv, NULL);
	sp->stats.duration = tv.tv_sec - sp->stats.t_start;
	sp->stats.canceled = !!ret;
	sp->ioctl_errno = errno;
	ret = pthread_mutex_lock(&sp->progress_mutex);
	if (ret)
		return ERR_PTR(-ret);
	sp->stats.finished = 1;
	ret = pthread_mutex_unlock(&sp->progress_mutex);
	if (ret)
		return ERR_PTR(-ret);

static void *progress_one_dev(void *ctx)
	struct scrub_progress *sp = ctx;

	sp->ret = ioctl(sp->fd, BTRFS_IOC_SCRUB_PROGRESS, &sp->scrub_args);
	sp->ioctl_errno = errno;

/* nb: returns a negative errno via ERR_PTR */
static void *scrub_progress_cycle(void *ctx)
	int perr = 0;	/* positive / pthread error returns */
	char fsid[BTRFS_UUID_UNPARSED_SIZE];
	struct scrub_progress *sp;
	struct scrub_progress *sp_last;
	struct scrub_progress *sp_shared;
	struct scrub_progress_cycle *spc = ctx;
	int ndev = spc->fi->num_devices;
	struct pollfd accept_poll_fd = {
	struct pollfd write_poll_fd = {
	struct sockaddr_un peer;
	socklen_t peer_size = sizeof(peer);

	perr = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);

	uuid_unparse(spc->fi->fsid, fsid);

	for (i = 0; i < ndev; ++i) {
		sp = &spc->progress[i];
		sp_last = &spc->progress[i + ndev];
		sp_shared = &spc->shared_progress[i];
		sp->scrub_args.devid = sp_last->scrub_args.devid =
						sp_shared->scrub_args.devid;
		sp->fd = sp_last->fd = spc->fdmnt;
		sp->stats.t_start = sp_last->stats.t_start =
						sp_shared->stats.t_start;
		sp->resumed = sp_last->resumed = sp_shared->resumed;
		sp->skip = sp_last->skip = sp_shared->skip;
		sp->stats.finished = sp_last->stats.finished =
						sp_shared->stats.finished;

		ret = poll(&accept_poll_fd, 1, 5 * 1000);
		peer_fd = accept(spc->prg_fd, (struct sockaddr *)&peer,
		gettimeofday(&tv, NULL);
		for (i = 0; i < ndev; ++i) {
			sp = &spc->progress[this * ndev + i];
			sp_last = &spc->progress[last * ndev + i];
			sp_shared = &spc->shared_progress[i];
			if (sp->stats.finished)
			progress_one_dev(sp);
			sp->stats.duration = tv.tv_sec - sp->stats.t_start;
			if (sp->ioctl_errno != ENOTCONN &&
			    sp->ioctl_errno != ENODEV) {
				ret = -sp->ioctl_errno;
			/*
			 * scrub finished or device removed, check the
			 * finished flag. if unset, just use the last
			 * result we got for the current write and go
			 * on. flag should be set on next cycle, then.
			 */
			perr = pthread_setcancelstate(
					PTHREAD_CANCEL_DISABLE, &old);
			perr = pthread_mutex_lock(&sp_shared->progress_mutex);
			if (!sp_shared->stats.finished) {
				perr = pthread_mutex_unlock(
						&sp_shared->progress_mutex);
				perr = pthread_setcancelstate(
						PTHREAD_CANCEL_ENABLE, &old);
				memcpy(sp, sp_last, sizeof(*sp));
			perr = pthread_mutex_unlock(&sp_shared->progress_mutex);
			perr = pthread_setcancelstate(
					PTHREAD_CANCEL_ENABLE, &old);
			memcpy(sp, sp_shared, sizeof(*sp));
			memcpy(sp_last, sp_shared, sizeof(*sp));

		write_poll_fd.fd = peer_fd;
		ret = poll(&write_poll_fd, 1, 0);
			ret = scrub_write_file(
				&spc->progress[this * ndev], ndev);

		if (!spc->do_record)
		ret = scrub_write_progress(spc->write_mutex, fsid,
				&spc->progress[this * ndev], ndev);

	return ERR_PTR(ret);
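
/*
 * Return the record of the most recent scrub of the given device from the
 * parsed status file, or NULL if there is none.
 */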
static struct scrub_file_record *last_dev_scrub(
		struct scrub_file_record *const *const past_scrubs, u64 devid)
	if (!past_scrubs || IS_ERR(past_scrubs))
		return NULL;

	for (i = 0; past_scrubs[i]; ++i)
		if (past_scrubs[i]->devid == devid)
			return past_scrubs[i];

static int mkdir_p(char *path)
	for (i = 1; i < strlen(path); ++i) {
			ret = mkdir(path, 0777);
			if (ret && errno != EEXIST)

static int is_scrub_running_on_fs(struct btrfs_ioctl_fs_info_args *fi_args,
		struct btrfs_ioctl_dev_info_args *di_args,
		struct scrub_file_record **past_scrubs)
	if (!fi_args || !di_args || !past_scrubs)

	for (i = 0; i < fi_args->num_devices; i++) {
		struct scrub_file_record *sfr =
			last_dev_scrub(past_scrubs, di_args[i].devid);

		if (!(sfr->stats.finished || sfr->stats.canceled))
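
/*
 * Ask the kernel whether a scrub is currently running on any of the given
 * devices by probing each one with BTRFS_IOC_SCRUB_PROGRESS.
 */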
static int is_scrub_running_in_kernel(int fd,
		struct btrfs_ioctl_dev_info_args *di_args, u64 max_devices)
	struct scrub_progress sp;

	for (i = 0; i < max_devices; i++) {
		memset(&sp, 0, sizeof(sp));
		sp.scrub_args.devid = di_args[i].devid;
		ret = ioctl(fd, BTRFS_IOC_SCRUB_PROGRESS, &sp.scrub_args);

static const char * const cmd_scrub_start_usage[];
static const char * const cmd_scrub_resume_usage[];

static int scrub_start(int argc, char **argv, int resume)
	int e_uncorrectable = 0;
	int e_correctable = 0;
	int do_background = 1;
	int do_stats_per_dev = 0;
	int ioprio_class = IOPRIO_CLASS_IDLE;
	int ioprio_classdata = 0;
	struct btrfs_ioctl_fs_info_args fi_args;
	struct btrfs_ioctl_dev_info_args *di_args = NULL;
	struct scrub_progress *sp = NULL;
	struct scrub_fs_stat fs_stat;
	struct sockaddr_un addr = {
		.sun_family = AF_UNIX,
	};
	pthread_t *t_devs = NULL;
	struct scrub_file_record **past_scrubs = NULL;
	struct scrub_file_record *last_scrub = NULL;
	char *datafile = strdup(SCRUB_DATA_FILE);
	char fsid[BTRFS_UUID_UNPARSED_SIZE];
	char sock_path[PATH_MAX] = "";
	struct scrub_progress_cycle spc;
	pthread_mutex_t spc_write_mutex = PTHREAD_MUTEX_INITIALIZER;
	DIR *dirstream = NULL;
	int nothing_to_resume = 0;

	while ((c = getopt(argc, argv, "BdqrRc:n:f")) != -1) {
			do_stats_per_dev = 1;
			ioprio_class = (int)strtol(optarg, NULL, 10);
			ioprio_classdata = (int)strtol(optarg, NULL, 10);
			usage(resume ? cmd_scrub_resume_usage :
				cmd_scrub_start_usage);

	/* try to catch most error cases before forking */

	if (check_argc_exact(argc - optind, 1)) {
		usage(resume ? cmd_scrub_resume_usage :
			cmd_scrub_start_usage);

	spc.progress = NULL;
	if (do_quiet && do_print)

	if (mkdir_p(datafile)) {
		warning_on(!do_quiet,
	"cannot create scrub data file, mkdir %s failed: %s. Status recording disabled",
			datafile, strerror(errno));

	path = argv[optind];

	fdmnt = open_path_or_dev_mnt(path, &dirstream, !do_quiet);

	ret = get_fs_info(path, &fi_args, &di_args);
			"getting dev info for scrub failed: %s",

	if (!fi_args.num_devices) {
		error_on(!do_quiet, "no devices found");

	uuid_unparse(fi_args.fsid, fsid);
	fdres = scrub_open_file_r(SCRUB_DATA_FILE, fsid);
	if (fdres < 0 && fdres != -ENOENT) {
		warning_on(!do_quiet, "failed to open status file: %s",
	} else if (fdres >= 0) {
		past_scrubs = scrub_read_file(fdres, !do_quiet);
		if (IS_ERR(past_scrubs))
			warning_on(!do_quiet, "failed to read status file: %s",
				strerror(-PTR_ERR(past_scrubs)));

	/*
	 * Check for stale information in the status file, ie. if it's
	 * canceled=0, finished=0 but no scrub is running.
	 */
	if (!is_scrub_running_in_kernel(fdmnt, di_args, fi_args.num_devices))

	/*
	 * check whether any involved device is already busy running a
	 * scrub. This would cause damaged status messages and the state
	 * "aborted" without the explanation that a scrub was already
	 * running. Therefore check it first, prevent it and give some
	 * feedback to the user if scrub is already running.
	 * Note that if scrub is started with a block device as the
	 * parameter, only that particular block device is checked. It
	 * is a normal mode of operation to start scrub on multiple
	 * single devices, there is no reason to prevent this.
	 */
	if (!force && is_scrub_running_on_fs(&fi_args, di_args, past_scrubs)) {
			"Scrub is already running.\n"
			"To cancel use 'btrfs scrub cancel %s'.\n"
			"To see the status use 'btrfs scrub status [-d] %s'",

	t_devs = malloc(fi_args.num_devices * sizeof(*t_devs));
	sp = calloc(fi_args.num_devices, sizeof(*sp));
	spc.progress = calloc(fi_args.num_devices * 2, sizeof(*spc.progress));

	if (!t_devs || !sp || !spc.progress) {
		error_on(!do_quiet, "scrub failed: %s", strerror(errno));

	for (i = 0; i < fi_args.num_devices; ++i) {
		devid = di_args[i].devid;
		ret = pthread_mutex_init(&sp[i].progress_mutex, NULL);
			error_on(!do_quiet, "pthread_mutex_init failed: %s",
		last_scrub = last_dev_scrub(past_scrubs, devid);
		sp[i].scrub_args.devid = devid;

		if (resume && last_scrub && (last_scrub->stats.canceled ||
					     !last_scrub->stats.finished)) {
			sp[i].scrub_args.start = last_scrub->p.last_physical;
			sp[i].resumed = last_scrub;
		} else if (resume) {
			sp[i].resumed = last_scrub;
			sp[i].scrub_args.start = 0ll;
			sp[i].resumed = NULL;

		sp[i].scrub_args.end = (u64)-1ll;
		sp[i].scrub_args.flags = readonly ? BTRFS_SCRUB_READONLY : 0;
		sp[i].ioprio_class = ioprio_class;
		sp[i].ioprio_classdata = ioprio_classdata;

	if (!n_start && !n_resume) {
			printf("scrub: nothing to resume for %s, fsid %s\n",
		nothing_to_resume = 1;

	ret = prg_fd = socket(AF_UNIX, SOCK_STREAM, 0);
		ret = scrub_datafile(SCRUB_PROGRESS_SOCKET_PATH, fsid, NULL,
					sock_path, sizeof(sock_path));
		/* ignore EOVERFLOW, try using a shorter path for the socket */
		addr.sun_path[sizeof(addr.sun_path) - 1] = '\0';
		strncpy(addr.sun_path, sock_path, sizeof(addr.sun_path) - 1);
		ret = bind(prg_fd, (struct sockaddr *)&addr, sizeof(addr));
		if (ret != -1 || errno != EADDRINUSE)

		/*
		 * bind failed with EADDRINUSE. so let's see if anyone answers
		 * when we make a call to the socket ...
		 */
		ret = connect(prg_fd, (struct sockaddr *)&addr, sizeof(addr));
		if (!ret || errno != ECONNREFUSED) {
			/* ... yes, so scrub must be running. error out */
			error("scrub already running");

		/*
		 * ... no, this means someone left us alone with an unused
		 * socket in the file system. remove it and try again.
		 */
		ret = unlink(sock_path);

		ret = listen(prg_fd, 100);
			warning_on(!do_quiet,
	"failed to open the progress status socket at %s: %s. Progress cannot be queried",
				sock_path[0] ? sock_path :
					SCRUB_PROGRESS_SOCKET_PATH, strerror(errno));

		/* write all-zero progress file for a start */
		ret = scrub_write_progress(&spc_write_mutex, fsid, sp,
					   fi_args.num_devices);
			warning_on(!do_quiet,
	"failed to write the progress status file: %s. Status recording disabled",

	if (do_background) {
			error_on(!do_quiet, "cannot scrub, fork failed: %s",

		scrub_handle_sigint_parent();
			printf("scrub %s on %s, fsid %s (pid=%d)\n",
				n_start ? "started" : "resumed",
			error_on(!do_quiet, "wait failed (ret=%d): %s",
				ret, strerror(errno));
		if (!WIFEXITED(stat) || WEXITSTATUS(stat)) {
			error_on(!do_quiet, "scrub process failed");
			err = WIFEXITED(stat) ? WEXITSTATUS(stat) : -1;

	scrub_handle_sigint_child(fdmnt);

	for (i = 0; i < fi_args.num_devices; ++i) {
			sp[i].scrub_args.progress = sp[i].resumed->p;
			sp[i].stats = sp[i].resumed->stats;
			sp[i].stats.finished = 1;

		devid = di_args[i].devid;
		gettimeofday(&tv, NULL);
		sp[i].stats.t_start = tv.tv_sec;
		ret = pthread_create(&t_devs[i], NULL,
					scrub_one_dev, &sp[i]);
			error("creating scrub_one_dev[%llu] thread failed: %s",
				devid, strerror(ret));

	spc.prg_fd = prg_fd;
	spc.do_record = do_record;
	spc.write_mutex = &spc_write_mutex;
	spc.shared_progress = sp;
	ret = pthread_create(&t_prog, NULL, scrub_progress_cycle, &spc);
			error("creating progress thread failed: %s",

	for (i = 0; i < fi_args.num_devices; ++i) {
		devid = di_args[i].devid;
		ret = pthread_join(t_devs[i], NULL);
			error("pthread_join failed for scrub_one_dev[%llu]: %s",
				devid, strerror(ret));

		switch (sp[i].ioctl_errno) {
			warning("device %lld not present",
			error("scrubbing %s failed for device id %lld: ret=%d, errno=%d (%s)",
				sp[i].ret, sp[i].ioctl_errno,
				strerror(sp[i].ioctl_errno));

		if (sp[i].scrub_args.progress.uncorrectable_errors > 0)
		if (sp[i].scrub_args.progress.corrected_errors > 0
		    || sp[i].scrub_args.progress.unverified_errors > 0)

		const char *append = "done";
		if (!do_stats_per_dev)
			init_fs_stat(&fs_stat);
		for (i = 0; i < fi_args.num_devices; ++i) {
			if (do_stats_per_dev) {
				print_scrub_dev(&di_args[i],
						&sp[i].scrub_args.progress,
						sp[i].ret ? "canceled" : "done",
					append = "canceled";
				add_to_fs_stat(&sp[i].scrub_args.progress,
						&sp[i].stats, &fs_stat);

		if (!do_stats_per_dev) {
			printf("scrub %s for %s\n", append, fsid);
			print_fs_stat(&fs_stat, print_raw);

	ret = pthread_cancel(t_prog);
	ret = pthread_join(t_prog, &terr);

	/* check for errors from the handling of the progress thread */
	if (do_print && ret) {
		error("progress thread handling failed: %s",

	/* check for errors returned from the progress thread itself */
	if (do_print && terr && terr != PTHREAD_CANCELED)
		error("recording progress failed: %s",
			strerror(-PTR_ERR(terr)));

		ret = scrub_write_progress(&spc_write_mutex, fsid, sp,
					   fi_args.num_devices);
		if (ret && do_print)
			error("failed to record the result: %s",

	scrub_handle_sigint_child(-1);

	free_history(past_scrubs);

	close_file_or_dir(fdmnt, dirstream);

	if (nothing_to_resume)
	if (e_uncorrectable) {
		error_on(!do_quiet, "there are uncorrectable errors");
		warning_on(!do_quiet,
			"errors detected during scrubbing, corrected");

static const char * const cmd_scrub_start_usage[] = {
	"btrfs scrub start [-BdqrRf] [-c ioprio_class -n ioprio_classdata] <path>|<device>",
	"Start a new scrub. If a scrub is already running, the new one fails.",
	"-B do not background",
	"-d stats per device (-B only)",
	"-r read only mode",
	"-R raw print mode, print full data instead of summary",
	"-c set ioprio class (see ionice(1) manpage)",
	"-n set ioprio classdata (see ionice(1) manpage)",
	"-f force starting new scrub even if a scrub is already running",
	" this is useful when scrub stats record file is damaged",
	NULL
};

static int cmd_scrub_start(int argc, char **argv)
{
	return scrub_start(argc, argv, 0);
}

static const char * const cmd_scrub_cancel_usage[] = {
	"btrfs scrub cancel <path>|<device>",
	"Cancel a running scrub",
	NULL
};

static int cmd_scrub_cancel(int argc, char **argv)
	DIR *dirstream = NULL;

	clean_args_no_options(argc, argv, cmd_scrub_cancel_usage);

	if (check_argc_exact(argc - optind, 1))
		usage(cmd_scrub_cancel_usage);

	path = argv[optind];

	fdmnt = open_path_or_dev_mnt(path, &dirstream, 1);

	ret = ioctl(fdmnt, BTRFS_IOC_SCRUB_CANCEL, NULL);

		error("scrub cancel failed on %s: %s", path,
			errno == ENOTCONN ? "not running" : strerror(errno));
		if (errno == ENOTCONN)

	printf("scrub cancelled\n");

	close_file_or_dir(fdmnt, dirstream);

static const char * const cmd_scrub_resume_usage[] = {
	"btrfs scrub resume [-BdqrR] [-c ioprio_class -n ioprio_classdata] <path>|<device>",
	"Resume previously canceled or interrupted scrub",
	"-B do not background",
	"-d stats per device (-B only)",
	"-r read only mode",
	"-R raw print mode, print full data instead of summary",
	"-c set ioprio class (see ionice(1) manpage)",
	"-n set ioprio classdata (see ionice(1) manpage)",
	NULL
};

static int cmd_scrub_resume(int argc, char **argv)
{
	return scrub_start(argc, argv, 1);
}

static const char * const cmd_scrub_status_usage[] = {
	"btrfs scrub status [-dR] <path>|<device>",
	"Show status of running or finished scrub",
	"-d stats per device",
	"-R print raw stats",
	NULL
};

static int cmd_scrub_status(int argc, char **argv)
	struct btrfs_ioctl_fs_info_args fi_args;
	struct btrfs_ioctl_dev_info_args *di_args = NULL;
	struct scrub_file_record **past_scrubs = NULL;
	struct scrub_file_record *last_scrub;
	struct scrub_fs_stat fs_stat;
	struct sockaddr_un addr = {
		.sun_family = AF_UNIX,
	};
	int do_stats_per_dev = 0;
	char fsid[BTRFS_UUID_UNPARSED_SIZE];
	DIR *dirstream = NULL;

	while ((c = getopt(argc, argv, "dR")) != -1) {
			do_stats_per_dev = 1;
			usage(cmd_scrub_status_usage);

	if (check_argc_exact(argc - optind, 1))
		usage(cmd_scrub_status_usage);

	path = argv[optind];

	fdmnt = open_path_or_dev_mnt(path, &dirstream, 1);

	ret = get_fs_info(path, &fi_args, &di_args);
		error("getting dev info for scrub failed: %s",

	if (!fi_args.num_devices) {
		error("no devices found");

	uuid_unparse(fi_args.fsid, fsid);

	fdres = socket(AF_UNIX, SOCK_STREAM, 0);
		error("failed to create socket to receive progress information: %s",

	scrub_datafile(SCRUB_PROGRESS_SOCKET_PATH, fsid,
			NULL, addr.sun_path, sizeof(addr.sun_path));
	/* ignore EOVERFLOW, just use shorter name and hope for the best */
	addr.sun_path[sizeof(addr.sun_path) - 1] = '\0';
	ret = connect(fdres, (struct sockaddr *)&addr, sizeof(addr));
		fdres = scrub_open_file_r(SCRUB_DATA_FILE, fsid);
		if (fdres < 0 && fdres != -ENOENT) {
			warning("failed to open status file: %s",

	past_scrubs = scrub_read_file(fdres, 1);
	if (IS_ERR(past_scrubs))
		warning("failed to read status: %s",
			strerror(-PTR_ERR(past_scrubs)));

	in_progress = is_scrub_running_in_kernel(fdmnt, di_args, fi_args.num_devices);

	printf("scrub status for %s\n", fsid);

	if (do_stats_per_dev) {
		for (i = 0; i < fi_args.num_devices; ++i) {
			last_scrub = last_dev_scrub(past_scrubs,
				print_scrub_dev(&di_args[i], NULL, print_raw,
			last_scrub->stats.in_progress = in_progress;
			print_scrub_dev(&di_args[i], &last_scrub->p, print_raw,
					last_scrub->stats.finished ?
						"history" : "status",
					&last_scrub->stats);
		init_fs_stat(&fs_stat);
		fs_stat.s.in_progress = in_progress;
		for (i = 0; i < fi_args.num_devices; ++i) {
			last_scrub = last_dev_scrub(past_scrubs,
			add_to_fs_stat(&last_scrub->p, &last_scrub->stats,
		print_fs_stat(&fs_stat, print_raw);

	free_history(past_scrubs);

	close_file_or_dir(fdmnt, dirstream);

static const char scrub_cmd_group_info[] =
	"verify checksums of data and metadata";

const struct cmd_group scrub_cmd_group = {
	scrub_cmd_group_usage, scrub_cmd_group_info, {
		{ "start", cmd_scrub_start, cmd_scrub_start_usage, NULL, 0 },
		{ "cancel", cmd_scrub_cancel, cmd_scrub_cancel_usage, NULL, 0 },
		{ "resume", cmd_scrub_resume, cmd_scrub_resume_usage, NULL, 0 },
		{ "status", cmd_scrub_status, cmd_scrub_status_usage, NULL, 0 },

int cmd_scrub(int argc, char **argv)
{
	return handle_command_group(&scrub_cmd_group, argc, argv);
}