/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include "kerncompat.h"
#include "androidcompat.h"

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <uuid/uuid.h>

#define SCRUB_DATA_FILE "/var/lib/btrfs/scrub.status"
#define SCRUB_PROGRESS_SOCKET_PATH "/var/lib/btrfs/scrub.progress"
#define SCRUB_FILE_VERSION_PREFIX "scrub status"
#define SCRUB_FILE_VERSION "1"
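
/*
 * On-disk layout of SCRUB_DATA_FILE as produced by scrub_write_file() and
 * parsed by scrub_read_file() below (illustrative example, values made up):
 *
 *   scrub status:1
 *   <FSID>:<devid>|data_extents_scrubbed:1234|...|duration:120|canceled:0|finished:1
 *
 * One record per device, key/value pairs separated by '|'.
 */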
/* TBD: replace with #include "linux/ioprio.h" in some years */
#if !defined (IOPRIO_H)
#define IOPRIO_WHO_PROCESS 1
#define IOPRIO_CLASS_SHIFT 13
#define IOPRIO_PRIO_VALUE(class, data) \
		(((class) << IOPRIO_CLASS_SHIFT) | (data))
#define IOPRIO_CLASS_IDLE 3
#endif
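
/*
 * Per-device scrub state: ioctl arguments, accumulated statistics and the
 * record resumed from an earlier run. Shared between the per-device worker
 * thread and the progress reporting code, hence the mutex.
 */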
struct scrub_progress {
	struct btrfs_ioctl_scrub_args scrub_args;
	struct scrub_stats stats;
	struct scrub_file_record *resumed;
	pthread_mutex_t progress_mutex;
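
/* one record of the on-disk status file: fsid, per-device stats and progress */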
struct scrub_file_record {
	u8 fsid[BTRFS_FSID_SIZE];
	struct scrub_stats stats;
	struct btrfs_scrub_progress p;
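
/* context passed to the scrub_progress_cycle() thread */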
struct scrub_progress_cycle {
	struct btrfs_ioctl_fs_info_args *fi;
	struct scrub_progress *progress;
	struct scrub_progress *shared_progress;
	pthread_mutex_t *write_mutex;
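
/* filesystem-wide totals, aggregated over all devices of one filesystem */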
struct scrub_fs_stat {
	struct btrfs_scrub_progress p;
	struct scrub_stats s;

static void print_scrub_full(struct btrfs_scrub_progress *sp)
	printf("\tdata_extents_scrubbed: %lld\n", sp->data_extents_scrubbed);
	printf("\ttree_extents_scrubbed: %lld\n", sp->tree_extents_scrubbed);
	printf("\tdata_bytes_scrubbed: %lld\n", sp->data_bytes_scrubbed);
	printf("\ttree_bytes_scrubbed: %lld\n", sp->tree_bytes_scrubbed);
	printf("\tread_errors: %lld\n", sp->read_errors);
	printf("\tcsum_errors: %lld\n", sp->csum_errors);
	printf("\tverify_errors: %lld\n", sp->verify_errors);
	printf("\tno_csum: %lld\n", sp->no_csum);
	printf("\tcsum_discards: %lld\n", sp->csum_discards);
	printf("\tsuper_errors: %lld\n", sp->super_errors);
	printf("\tmalloc_errors: %lld\n", sp->malloc_errors);
	printf("\tuncorrectable_errors: %lld\n", sp->uncorrectable_errors);
	printf("\tunverified_errors: %lld\n", sp->unverified_errors);
	printf("\tcorrected_errors: %lld\n", sp->corrected_errors);
	printf("\tlast_physical: %lld\n", sp->last_physical);
#define PRINT_SCRUB_ERROR(test, desc) do {	\
		printf(" %s=%llu", desc, test);	\

static void print_scrub_summary(struct btrfs_scrub_progress *p)
	err_cnt = p->read_errors +
	err_cnt2 = p->corrected_errors + p->uncorrectable_errors;

	if (p->malloc_errors)
		printf("*** WARNING: memory allocation failed while scrubbing. "
		       "results may be inaccurate\n");

	printf("\ttotal bytes scrubbed: %s with %llu errors\n",
	       pretty_size(p->data_bytes_scrubbed + p->tree_bytes_scrubbed),
	       max(err_cnt, err_cnt2));

	if (err_cnt || err_cnt2) {
		printf("\terror details:");
		PRINT_SCRUB_ERROR(p->read_errors, "read");
		PRINT_SCRUB_ERROR(p->super_errors, "super");
		PRINT_SCRUB_ERROR(p->verify_errors, "verify");
		PRINT_SCRUB_ERROR(p->csum_errors, "csum");
		printf("\tcorrected errors: %llu, uncorrectable errors: %llu, "
		       "unverified errors: %llu\n", p->corrected_errors,
		       p->uncorrectable_errors, p->unverified_errors);
#define _SCRUB_FS_STAT(p, name, fs_stat) do {	\
	fs_stat->p.name += p->name;	\

#define _SCRUB_FS_STAT_MIN(ss, name, fs_stat)	\
	if (fs_stat->s.name > ss->name) {	\
		fs_stat->s.name = ss->name;	\

#define _SCRUB_FS_STAT_ZMIN(ss, name, fs_stat)	\
	if (!fs_stat->s.name || fs_stat->s.name > ss->name) {	\
		fs_stat->s.name = ss->name;	\

#define _SCRUB_FS_STAT_ZMAX(ss, name, fs_stat)	\
	if (!(fs_stat)->s.name || (fs_stat)->s.name < (ss)->name) {	\
		(fs_stat)->s.name = (ss)->name;	\
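
/*
 * Fold one device's numbers into the filesystem-wide stat: the progress
 * counters are summed, t_start/t_resumed keep the earliest non-zero value,
 * duration and canceled keep the largest value, and finished stays set only
 * if every device finished.
 */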
static void add_to_fs_stat(struct btrfs_scrub_progress *p,
			   struct scrub_stats *ss,
			   struct scrub_fs_stat *fs_stat)
	_SCRUB_FS_STAT(p, data_extents_scrubbed, fs_stat);
	_SCRUB_FS_STAT(p, tree_extents_scrubbed, fs_stat);
	_SCRUB_FS_STAT(p, data_bytes_scrubbed, fs_stat);
	_SCRUB_FS_STAT(p, tree_bytes_scrubbed, fs_stat);
	_SCRUB_FS_STAT(p, read_errors, fs_stat);
	_SCRUB_FS_STAT(p, csum_errors, fs_stat);
	_SCRUB_FS_STAT(p, verify_errors, fs_stat);
	_SCRUB_FS_STAT(p, no_csum, fs_stat);
	_SCRUB_FS_STAT(p, csum_discards, fs_stat);
	_SCRUB_FS_STAT(p, super_errors, fs_stat);
	_SCRUB_FS_STAT(p, malloc_errors, fs_stat);
	_SCRUB_FS_STAT(p, uncorrectable_errors, fs_stat);
	_SCRUB_FS_STAT(p, corrected_errors, fs_stat);
	_SCRUB_FS_STAT(p, last_physical, fs_stat);
	_SCRUB_FS_STAT_ZMIN(ss, t_start, fs_stat);
	_SCRUB_FS_STAT_ZMIN(ss, t_resumed, fs_stat);
	_SCRUB_FS_STAT_ZMAX(ss, duration, fs_stat);
	_SCRUB_FS_STAT_ZMAX(ss, canceled, fs_stat);
	_SCRUB_FS_STAT_MIN(ss, finished, fs_stat);
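
/*
 * finished starts out as 1 so that _SCRUB_FS_STAT_MIN() above can clear it
 * as soon as a single device reports an unfinished scrub.
 */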
static void init_fs_stat(struct scrub_fs_stat *fs_stat)
	memset(fs_stat, 0, sizeof(*fs_stat));
	fs_stat->s.finished = 1;
static void _print_scrub_ss(struct scrub_stats *ss)
	if (!ss || !ss->t_start) {
		printf("\tno stats available\n");

	localtime_r(&ss->t_resumed, &tm);
	strftime(t, sizeof(t), "%c", &tm);
	t[sizeof(t) - 1] = '\0';
	printf("\tscrub resumed at %s", t);

	localtime_r(&ss->t_start, &tm);
	strftime(t, sizeof(t), "%c", &tm);
	t[sizeof(t) - 1] = '\0';
	printf("\tscrub started at %s", t);

	seconds = ss->duration;
	hours = ss->duration / (60 * 60);
	gmtime_r(&seconds, &tm);
	strftime(t, sizeof(t), "%M:%S", &tm);
	if (ss->in_progress)
		printf(", running for %02u:%s\n", hours, t);
	else if (ss->canceled)
		printf(" and was aborted after %02u:%s\n", hours, t);
	else if (ss->finished)
		printf(" and finished after %02u:%s\n", hours, t);
	else
		printf(", interrupted after %02u:%s, not running\n",
static void print_scrub_dev(struct btrfs_ioctl_dev_info_args *di,
			    struct btrfs_scrub_progress *p, int raw,
			    const char *append, struct scrub_stats *ss)
	printf("scrub device %s (id %llu) %s\n", di->path, di->devid,
	       append ? append : "");
	print_scrub_summary(p);

static void print_fs_stat(struct scrub_fs_stat *fs_stat, int raw)
	_print_scrub_ss(&fs_stat->s);
		print_scrub_full(&fs_stat->p);
		print_scrub_summary(&fs_stat->p);

static void free_history(struct scrub_file_record **last_scrubs)
	struct scrub_file_record **l = last_scrubs;
/*
 * cancels a running scrub and makes the master process record the current
 * progress status before exiting.
 */
static int cancel_fd = -1;
static void scrub_sigint_record_progress(int signal)
	ret = ioctl(cancel_fd, BTRFS_IOC_SCRUB_CANCEL, NULL);
		perror("Scrub cancel failed");

static int scrub_handle_sigint_parent(void)
	struct sigaction sa = {
		.sa_handler = SIG_IGN,
		.sa_flags = SA_RESTART,

	return sigaction(SIGINT, &sa, NULL);

static int scrub_handle_sigint_child(int fd)
	struct sigaction sa = {
		.sa_handler = fd == -1 ? SIG_DFL : scrub_sigint_record_progress,

	return sigaction(SIGINT, &sa, NULL);
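
/*
 * Build the status file path in 'datafile' from fn_base, fn_local and an
 * optional fn_tmp suffix; callers pass e.g. SCRUB_DATA_FILE plus the fsid,
 * and "tmp" while the file is being rewritten (see scrub_write_progress()).
 */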
static int scrub_datafile(const char *fn_base, const char *fn_local,
			  const char *fn_tmp, char *datafile, int size)
	datafile[end + 1] = '\0';
	strncpy(datafile, fn_base, end);
	ret = strlen(datafile);

	strncpy(datafile + ret + 1, fn_local, end - ret - 1);
	ret = strlen(datafile);

	strncpy(datafile + ret + 1, fn_tmp, end - ret - 1);
	ret = strlen(datafile);

static int scrub_open_file(const char *datafile, int m)
	fd = open(datafile, m, 0600);

	ret = flock(fd, LOCK_EX|LOCK_NB);

static int scrub_open_file_r(const char *fn_base, const char *fn_local)
	char datafile[PATH_MAX];

	ret = scrub_datafile(fn_base, fn_local, NULL,
			     datafile, sizeof(datafile));
	return scrub_open_file(datafile, O_RDONLY);

static int scrub_open_file_w(const char *fn_base, const char *fn_local,
	char datafile[PATH_MAX];

	ret = scrub_datafile(fn_base, fn_local, tmp,
			     datafile, sizeof(datafile));
	return scrub_open_file(datafile, O_WRONLY|O_CREAT);

static int scrub_rename_file(const char *fn_base, const char *fn_local,
	char datafile_old[PATH_MAX];
	char datafile_new[PATH_MAX];

	ret = scrub_datafile(fn_base, fn_local, tmp,
			     datafile_old, sizeof(datafile_old));
	ret = scrub_datafile(fn_base, fn_local, NULL,
			     datafile_new, sizeof(datafile_new));
	ret = rename(datafile_old, datafile_new);
	return ret ? -errno : 0;
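
/*
 * Note: 'dest' is pasted textually by the macro below, so a call like
 *   _SCRUB_KVREAD(ret, &i, read_errors, avail, l, &p[curr]->p);
 * expands to scrub_kvread(..., &p[curr]->p.read_errors), i.e. a pointer to
 * the individual counter.
 */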
#define _SCRUB_KVREAD(ret, i, name, avail, l, dest) if (ret == 0) {	\
	ret = scrub_kvread(i, sizeof(#name), avail, l, #name, dest.name); \
/*
 * returns 0 if the key did not match (nothing was read)
 *         1 if the key did match (success)
 *        -1 if the key did match and an error occurred
 */
static int scrub_kvread(int *i, int len, int avail, const char *buf,
			const char *key, u64 *dest)
	if (*i + len + 1 < avail && strncmp(&buf[*i], key, len - 1) == 0) {
		for (j = 0; *i + j < avail && isdigit(buf[*i + j]); ++j)
			;
		*dest = atoll(&buf[*i]);
#define _SCRUB_INVALID do {	\
		warning("invalid data on line %d pos "	\
			"%d state %d (near \"%.*s\") at %s:%d",	\
			lineno, i, state, 20 > avail ? avail : 20,	\
			l + i, __FILE__, __LINE__);	\
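
/*
 * Parse the status file into a NULL-terminated array of scrub_file_record
 * pointers (or an ERR_PTR on failure). The switch below is a line-oriented
 * state machine: state 0 checks the version header, 1/2 start a new line,
 * 3 reads the fsid, 4 the devid, 5 the key/value pairs, 6 handles the
 * character after a number and 99 skips to the end of the line.
 */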
static struct scrub_file_record **scrub_read_file(int fd, int report_errors)
	char empty_uuid[BTRFS_FSID_SIZE] = {0};
	struct scrub_file_record **p = NULL;

		old_avail = avail - i;
			error("scrub record file corrupted near byte %d", i);
			return ERR_PTR(-EINVAL);
		memmove(l, l + i, old_avail);
		avail = read(fd, l + old_avail, sizeof(l) - old_avail);
		if (avail == 0 && old_avail == 0) {
				memcmp(p[curr]->fsid, empty_uuid, BTRFS_FSID_SIZE) == 0) {
			} else if (curr == -1) {
				p = ERR_PTR(-ENODATA);
			return ERR_PTR(-errno);
		case 0: /* start of file */
			ret = scrub_kvread(&i,
				sizeof(SCRUB_FILE_VERSION_PREFIX), avail, l,
				SCRUB_FILE_VERSION_PREFIX, &version);
			if (version != atoll(SCRUB_FILE_VERSION))
				return ERR_PTR(-ENOTSUP);
		case 1: /* start of line, alloc */
			/*
			 * this state makes sure we have a complete line in
			 * further processing, so we don't need wrap-tracking
			 */
			if (!eof && !memchr(l + i, '\n', avail - i))
			if (curr > -1 && memcmp(p[curr]->fsid, empty_uuid,
						BTRFS_FSID_SIZE) == 0) {
			p = realloc(p, (curr + 2) * sizeof(*p));
				return ERR_PTR(-errno);
			p[curr] = malloc(sizeof(**p));
				return ERR_PTR(-errno);
			memset(p[curr], 0, sizeof(**p));
		case 2: /* start of line, skip space */
			while (i < avail && isspace(l[i])) {
			    (!eof && !memchr(l + i, '\n', avail - i)))
		case 3: /* read fsid */
			for (j = 0; i + j < avail && l[i + j] != ':'; ++j)
			if (i + j + 1 >= avail)
			if (j != BTRFS_UUID_UNPARSED_SIZE - 1)
			ret = uuid_parse(l + i, p[curr]->fsid);
		case 4: /* read dev id */
			for (j = 0; i + j < avail && isdigit(l[i + j]); ++j)
			if (j == 0 || i + j + 1 >= avail)
			p[curr]->devid = atoll(&l[i]);
		case 5: /* read key/value pair */
			_SCRUB_KVREAD(ret, &i, data_extents_scrubbed, avail, l,
			_SCRUB_KVREAD(ret, &i, tree_extents_scrubbed, avail, l,
			_SCRUB_KVREAD(ret, &i, data_bytes_scrubbed, avail, l,
			_SCRUB_KVREAD(ret, &i, tree_bytes_scrubbed, avail, l,
			_SCRUB_KVREAD(ret, &i, read_errors, avail, l,
			_SCRUB_KVREAD(ret, &i, csum_errors, avail, l,
			_SCRUB_KVREAD(ret, &i, verify_errors, avail, l,
			_SCRUB_KVREAD(ret, &i, no_csum, avail, l,
			_SCRUB_KVREAD(ret, &i, csum_discards, avail, l,
			_SCRUB_KVREAD(ret, &i, super_errors, avail, l,
			_SCRUB_KVREAD(ret, &i, malloc_errors, avail, l,
			_SCRUB_KVREAD(ret, &i, uncorrectable_errors, avail, l,
			_SCRUB_KVREAD(ret, &i, corrected_errors, avail, l,
			_SCRUB_KVREAD(ret, &i, last_physical, avail, l,
			_SCRUB_KVREAD(ret, &i, finished, avail, l,
			_SCRUB_KVREAD(ret, &i, t_start, avail, l,
				      (u64 *)&p[curr]->stats);
			_SCRUB_KVREAD(ret, &i, t_resumed, avail, l,
				      (u64 *)&p[curr]->stats);
			_SCRUB_KVREAD(ret, &i, duration, avail, l,
				      (u64 *)&p[curr]->stats);
			_SCRUB_KVREAD(ret, &i, canceled, avail, l,
		case 6: /* after number */
			else if (l[i] == '\n')
		case 99: /* skip rest of line */
			if (l[i - 1] == '\n') {
			error("internal error: unknown parser state %d near byte %d",
			return ERR_PTR(-EINVAL);
static int scrub_write_buf(int fd, const void *data, int len)
	ret = write(fd, data, len);

static int scrub_writev(int fd, char *buf, int max, const char *fmt, ...)
			__attribute__ ((format (printf, 4, 5)));
static int scrub_writev(int fd, char *buf, int max, const char *fmt, ...)
	ret = vsnprintf(buf, max, fmt, args);
	return scrub_write_buf(fd, buf, ret);
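
/*
 * When a scrub was resumed, report the totals: add the counters saved from
 * the interrupted run (data->resumed) to what the current run produced.
 */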
#define _SCRUB_SUM(dest, data, name) dest->scrub_args.progress.name =	\
			data->resumed->p.name + data->scrub_args.progress.name

static struct scrub_progress *scrub_resumed_stats(struct scrub_progress *data,
						  struct scrub_progress *dest)
	if (!data->resumed || data->skip)

	_SCRUB_SUM(dest, data, data_extents_scrubbed);
	_SCRUB_SUM(dest, data, tree_extents_scrubbed);
	_SCRUB_SUM(dest, data, data_bytes_scrubbed);
	_SCRUB_SUM(dest, data, tree_bytes_scrubbed);
	_SCRUB_SUM(dest, data, read_errors);
	_SCRUB_SUM(dest, data, csum_errors);
	_SCRUB_SUM(dest, data, verify_errors);
	_SCRUB_SUM(dest, data, no_csum);
	_SCRUB_SUM(dest, data, csum_discards);
	_SCRUB_SUM(dest, data, super_errors);
	_SCRUB_SUM(dest, data, malloc_errors);
	_SCRUB_SUM(dest, data, uncorrectable_errors);
	_SCRUB_SUM(dest, data, corrected_errors);
	_SCRUB_SUM(dest, data, last_physical);
	dest->stats.canceled = data->stats.canceled;
	dest->stats.finished = data->stats.finished;
	dest->stats.t_resumed = data->stats.t_start;
	dest->stats.t_start = data->resumed->stats.t_start;
	dest->stats.duration = data->resumed->stats.duration +
			       data->stats.duration;
	dest->scrub_args.devid = data->scrub_args.devid;

#define _SCRUB_KVWRITE(fd, buf, name, use)		\
	scrub_kvwrite(fd, buf, sizeof(buf), #name,	\
		      use->scrub_args.progress.name)

#define _SCRUB_KVWRITE_STATS(fd, buf, name, use)	\
	scrub_kvwrite(fd, buf, sizeof(buf), #name,	\

static int scrub_kvwrite(int fd, char *buf, int max, const char *key, u64 val)
	return scrub_writev(fd, buf, max, "|%s:%lld", key, val);
static int scrub_write_file(int fd, const char *fsid,
			    struct scrub_progress *data, int n)
	struct scrub_progress local;
	struct scrub_progress *use;

	/* each -1 is to subtract one \0 byte, the + 2 is for ':' and '\n' */
	ret = scrub_write_buf(fd, SCRUB_FILE_VERSION_PREFIX ":"
			      SCRUB_FILE_VERSION "\n",
			      (sizeof(SCRUB_FILE_VERSION_PREFIX) - 1) +
			      (sizeof(SCRUB_FILE_VERSION) - 1) + 2);

	for (i = 0; i < n; ++i) {
		use = scrub_resumed_stats(&data[i], &local);
		if (scrub_write_buf(fd, fsid, strlen(fsid)) ||
		    scrub_write_buf(fd, ":", 1) ||
		    scrub_writev(fd, buf, sizeof(buf), "%lld",
				 use->scrub_args.devid) ||
		    scrub_write_buf(fd, buf, ret) ||
		    _SCRUB_KVWRITE(fd, buf, data_extents_scrubbed, use) ||
		    _SCRUB_KVWRITE(fd, buf, tree_extents_scrubbed, use) ||
		    _SCRUB_KVWRITE(fd, buf, data_bytes_scrubbed, use) ||
		    _SCRUB_KVWRITE(fd, buf, tree_bytes_scrubbed, use) ||
		    _SCRUB_KVWRITE(fd, buf, read_errors, use) ||
		    _SCRUB_KVWRITE(fd, buf, csum_errors, use) ||
		    _SCRUB_KVWRITE(fd, buf, verify_errors, use) ||
		    _SCRUB_KVWRITE(fd, buf, no_csum, use) ||
		    _SCRUB_KVWRITE(fd, buf, csum_discards, use) ||
		    _SCRUB_KVWRITE(fd, buf, super_errors, use) ||
		    _SCRUB_KVWRITE(fd, buf, malloc_errors, use) ||
		    _SCRUB_KVWRITE(fd, buf, uncorrectable_errors, use) ||
		    _SCRUB_KVWRITE(fd, buf, corrected_errors, use) ||
		    _SCRUB_KVWRITE(fd, buf, last_physical, use) ||
		    _SCRUB_KVWRITE_STATS(fd, buf, t_start, use) ||
		    _SCRUB_KVWRITE_STATS(fd, buf, t_resumed, use) ||
		    _SCRUB_KVWRITE_STATS(fd, buf, duration, use) ||
		    _SCRUB_KVWRITE_STATS(fd, buf, canceled, use) ||
		    _SCRUB_KVWRITE_STATS(fd, buf, finished, use) ||
		    scrub_write_buf(fd, "\n", 1)) {
static int scrub_write_progress(pthread_mutex_t *m, const char *fsid,
				struct scrub_progress *data, int n)
	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);

	ret = pthread_mutex_lock(m);

	fd = scrub_open_file_w(SCRUB_DATA_FILE, fsid, "tmp");
	err = scrub_write_file(fd, fsid, data, n);
	err = scrub_rename_file(SCRUB_DATA_FILE, fsid, "tmp");

	ret = pthread_mutex_unlock(m);

	ret = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old);
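
/*
 * Per-device worker thread: set the requested io priority (idle by default)
 * and run BTRFS_IOC_SCRUB, which only returns once the scrub of this device
 * has finished, failed or been canceled; then record duration and status.
 */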
static void *scrub_one_dev(void *ctx)
	struct scrub_progress *sp = ctx;

	sp->stats.canceled = 0;
	sp->stats.duration = 0;
	sp->stats.finished = 0;

	ret = syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		      IOPRIO_PRIO_VALUE(sp->ioprio_class,
					sp->ioprio_classdata));
		warning("setting ioprio failed: %s (ignored)",

	ret = ioctl(sp->fd, BTRFS_IOC_SCRUB, &sp->scrub_args);
	gettimeofday(&tv, NULL);
	sp->stats.duration = tv.tv_sec - sp->stats.t_start;
	sp->stats.canceled = !!ret;
	sp->ioctl_errno = errno;
	ret = pthread_mutex_lock(&sp->progress_mutex);
		return ERR_PTR(-ret);
	sp->stats.finished = 1;
	ret = pthread_mutex_unlock(&sp->progress_mutex);
		return ERR_PTR(-ret);
static void *progress_one_dev(void *ctx)
	struct scrub_progress *sp = ctx;

	sp->ret = ioctl(sp->fd, BTRFS_IOC_SCRUB_PROGRESS, &sp->scrub_args);
	sp->ioctl_errno = errno;
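
/*
 * Progress thread: periodically polls BTRFS_IOC_SCRUB_PROGRESS for every
 * device, serves status requests arriving on the progress socket and, if
 * recording is enabled, rewrites the status file via scrub_write_progress().
 */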
/* nb: returns a negative errno via ERR_PTR */
static void *scrub_progress_cycle(void *ctx)
	int perr = 0;	/* positive / pthread error returns */
	char fsid[BTRFS_UUID_UNPARSED_SIZE];
	struct scrub_progress *sp;
	struct scrub_progress *sp_last;
	struct scrub_progress *sp_shared;
	struct scrub_progress_cycle *spc = ctx;
	int ndev = spc->fi->num_devices;
	struct pollfd accept_poll_fd = {
	struct pollfd write_poll_fd = {
	struct sockaddr_un peer;
	socklen_t peer_size = sizeof(peer);

	perr = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);

	uuid_unparse(spc->fi->fsid, fsid);

	for (i = 0; i < ndev; ++i) {
		sp = &spc->progress[i];
		sp_last = &spc->progress[i + ndev];
		sp_shared = &spc->shared_progress[i];
		sp->scrub_args.devid = sp_last->scrub_args.devid =
						sp_shared->scrub_args.devid;
		sp->fd = sp_last->fd = spc->fdmnt;
		sp->stats.t_start = sp_last->stats.t_start =
						sp_shared->stats.t_start;
		sp->resumed = sp_last->resumed = sp_shared->resumed;
		sp->skip = sp_last->skip = sp_shared->skip;
		sp->stats.finished = sp_last->stats.finished =
						sp_shared->stats.finished;

	ret = poll(&accept_poll_fd, 1, 5 * 1000);
	peer_fd = accept(spc->prg_fd, (struct sockaddr *)&peer,
	gettimeofday(&tv, NULL);
	for (i = 0; i < ndev; ++i) {
		sp = &spc->progress[this * ndev + i];
		sp_last = &spc->progress[last * ndev + i];
		sp_shared = &spc->shared_progress[i];
		if (sp->stats.finished)
		progress_one_dev(sp);
		sp->stats.duration = tv.tv_sec - sp->stats.t_start;
			if (sp->ioctl_errno != ENOTCONN &&
			    sp->ioctl_errno != ENODEV) {
				ret = -sp->ioctl_errno;
			/*
			 * scrub finished or device removed, check the
			 * finished flag. if unset, just use the last
			 * result we got for the current write and go
			 * on. flag should be set on next cycle, then.
			 */
			perr = pthread_setcancelstate(
					PTHREAD_CANCEL_DISABLE, &old);
			perr = pthread_mutex_lock(&sp_shared->progress_mutex);
			if (!sp_shared->stats.finished) {
				perr = pthread_mutex_unlock(
					&sp_shared->progress_mutex);
				perr = pthread_setcancelstate(
					PTHREAD_CANCEL_ENABLE, &old);
				memcpy(sp, sp_last, sizeof(*sp));
			perr = pthread_mutex_unlock(&sp_shared->progress_mutex);
			perr = pthread_setcancelstate(
					PTHREAD_CANCEL_ENABLE, &old);
			memcpy(sp, sp_shared, sizeof(*sp));
			memcpy(sp_last, sp_shared, sizeof(*sp));
	write_poll_fd.fd = peer_fd;
	ret = poll(&write_poll_fd, 1, 0);
		ret = scrub_write_file(
			&spc->progress[this * ndev], ndev);
	if (!spc->do_record)
	ret = scrub_write_progress(spc->write_mutex, fsid,
				   &spc->progress[this * ndev], ndev);

	return ERR_PTR(ret);
static struct scrub_file_record *last_dev_scrub(
		struct scrub_file_record *const *const past_scrubs, u64 devid)
	if (!past_scrubs || IS_ERR(past_scrubs))

	for (i = 0; past_scrubs[i]; ++i)
		if (past_scrubs[i]->devid == devid)
			return past_scrubs[i];

static int mkdir_p(char *path)
	for (i = 1; i < strlen(path); ++i) {
		ret = mkdir(path, 0777);
		if (ret && errno != EEXIST)
static int is_scrub_running_on_fs(struct btrfs_ioctl_fs_info_args *fi_args,
				  struct btrfs_ioctl_dev_info_args *di_args,
				  struct scrub_file_record **past_scrubs)
	if (!fi_args || !di_args || !past_scrubs)

	for (i = 0; i < fi_args->num_devices; i++) {
		struct scrub_file_record *sfr =
			last_dev_scrub(past_scrubs, di_args[i].devid);

		if (!(sfr->stats.finished || sfr->stats.canceled))

static int is_scrub_running_in_kernel(int fd,
		struct btrfs_ioctl_dev_info_args *di_args, u64 max_devices)
	struct scrub_progress sp;

	for (i = 0; i < max_devices; i++) {
		memset(&sp, 0, sizeof(sp));
		sp.scrub_args.devid = di_args[i].devid;
		ret = ioctl(fd, BTRFS_IOC_SCRUB_PROGRESS, &sp.scrub_args);
static const char * const cmd_scrub_start_usage[];
static const char * const cmd_scrub_resume_usage[];
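
/*
 * Shared implementation of 'scrub start' and 'scrub resume': read any
 * previous status records, refuse to start if a scrub already runs (unless
 * forced), fork into the background by default, then start one
 * scrub_one_dev() thread per device plus the progress thread and write the
 * final result to the status file.
 */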
static int scrub_start(int argc, char **argv, int resume)
	int e_uncorrectable = 0;
	int e_correctable = 0;
	int do_background = 1;
	int do_stats_per_dev = 0;
	int ioprio_class = IOPRIO_CLASS_IDLE;
	int ioprio_classdata = 0;
	struct btrfs_ioctl_fs_info_args fi_args;
	struct btrfs_ioctl_dev_info_args *di_args = NULL;
	struct scrub_progress *sp = NULL;
	struct scrub_fs_stat fs_stat;
	struct sockaddr_un addr = {
		.sun_family = AF_UNIX,
	pthread_t *t_devs = NULL;
	struct scrub_file_record **past_scrubs = NULL;
	struct scrub_file_record *last_scrub = NULL;
	char *datafile = strdup(SCRUB_DATA_FILE);
	char fsid[BTRFS_UUID_UNPARSED_SIZE];
	char sock_path[PATH_MAX] = "";
	struct scrub_progress_cycle spc;
	pthread_mutex_t spc_write_mutex = PTHREAD_MUTEX_INITIALIZER;
	DIR *dirstream = NULL;
	int nothing_to_resume = 0;
	while ((c = getopt(argc, argv, "BdqrRc:n:f")) != -1) {
			do_stats_per_dev = 1;
			ioprio_class = (int)strtol(optarg, NULL, 10);
			ioprio_classdata = (int)strtol(optarg, NULL, 10);
			usage(resume ? cmd_scrub_resume_usage :
				cmd_scrub_start_usage);

	/* try to catch most error cases before forking */

	if (check_argc_exact(argc - optind, 1)) {
		usage(resume ? cmd_scrub_resume_usage :
			cmd_scrub_start_usage);

	spc.progress = NULL;
	if (do_quiet && do_print)

	if (mkdir_p(datafile)) {
		warning_on(!do_quiet,
	"cannot create scrub data file, mkdir %s failed: %s. Status recording disabled",
			datafile, strerror(errno));
	path = argv[optind];

	fdmnt = open_path_or_dev_mnt(path, &dirstream, !do_quiet);

	ret = get_fs_info(path, &fi_args, &di_args);
			"getting dev info for scrub failed: %s",

	if (!fi_args.num_devices) {
		error_on(!do_quiet, "no devices found");

	uuid_unparse(fi_args.fsid, fsid);
	fdres = scrub_open_file_r(SCRUB_DATA_FILE, fsid);
	if (fdres < 0 && fdres != -ENOENT) {
		warning_on(!do_quiet, "failed to open status file: %s",
	} else if (fdres >= 0) {
		past_scrubs = scrub_read_file(fdres, !do_quiet);
		if (IS_ERR(past_scrubs))
			warning_on(!do_quiet, "failed to read status file: %s",
				   strerror(-PTR_ERR(past_scrubs)));
	/*
	 * Check for stale information in the status file, i.e. if it's
	 * canceled=0, finished=0 but no scrub is running.
	 */
	if (!is_scrub_running_in_kernel(fdmnt, di_args, fi_args.num_devices))

	/*
	 * check whether any involved device is already busy running a
	 * scrub. This would cause damaged status messages and the state
	 * "aborted" without the explanation that a scrub was already
	 * running. Therefore check it first, prevent it and give some
	 * feedback to the user if scrub is already running.
	 * Note that if scrub is started with a block device as the
	 * parameter, only that particular block device is checked. It
	 * is a normal mode of operation to start scrub on multiple
	 * single devices, there is no reason to prevent this.
	 */
	if (!force && is_scrub_running_on_fs(&fi_args, di_args, past_scrubs)) {
			"Scrub is already running.\n"
			"To cancel use 'btrfs scrub cancel %s'.\n"
			"To see the status use 'btrfs scrub status [-d] %s'",

	t_devs = malloc(fi_args.num_devices * sizeof(*t_devs));
	sp = calloc(fi_args.num_devices, sizeof(*sp));
	spc.progress = calloc(fi_args.num_devices * 2, sizeof(*spc.progress));
	if (!t_devs || !sp || !spc.progress) {
		error_on(!do_quiet, "scrub failed: %s", strerror(errno));
	for (i = 0; i < fi_args.num_devices; ++i) {
		devid = di_args[i].devid;
		ret = pthread_mutex_init(&sp[i].progress_mutex, NULL);
			error_on(!do_quiet, "pthread_mutex_init failed: %s",
		last_scrub = last_dev_scrub(past_scrubs, devid);
		sp[i].scrub_args.devid = devid;

		if (resume && last_scrub && (last_scrub->stats.canceled ||
					     !last_scrub->stats.finished)) {
			sp[i].scrub_args.start = last_scrub->p.last_physical;
			sp[i].resumed = last_scrub;
		} else if (resume) {
			sp[i].resumed = last_scrub;
			sp[i].scrub_args.start = 0ll;
			sp[i].resumed = NULL;
		sp[i].scrub_args.end = (u64)-1ll;
		sp[i].scrub_args.flags = readonly ? BTRFS_SCRUB_READONLY : 0;
		sp[i].ioprio_class = ioprio_class;
		sp[i].ioprio_classdata = ioprio_classdata;

	if (!n_start && !n_resume) {
			printf("scrub: nothing to resume for %s, fsid %s\n",
		nothing_to_resume = 1;
	ret = prg_fd = socket(AF_UNIX, SOCK_STREAM, 0);
		ret = scrub_datafile(SCRUB_PROGRESS_SOCKET_PATH, fsid, NULL,
				     sock_path, sizeof(sock_path));
		/* ignore EOVERFLOW, try using a shorter path for the socket */
		addr.sun_path[sizeof(addr.sun_path) - 1] = '\0';
		strncpy(addr.sun_path, sock_path, sizeof(addr.sun_path) - 1);
		ret = bind(prg_fd, (struct sockaddr *)&addr, sizeof(addr));
		if (ret != -1 || errno != EADDRINUSE)

		/*
		 * bind failed with EADDRINUSE. so let's see if anyone answers
		 * when we make a call to the socket ...
		 */
		ret = connect(prg_fd, (struct sockaddr *)&addr, sizeof(addr));
		if (!ret || errno != ECONNREFUSED) {
			/* ... yes, so scrub must be running. error out */
			error("scrub already running");

		/*
		 * ... no, this means someone left us alone with an unused
		 * socket in the file system. remove it and try again.
		 */
		ret = unlink(sock_path);
	ret = listen(prg_fd, 100);
		warning_on(!do_quiet,
	"failed to open the progress status socket at %s: %s. Progress cannot be queried",
			   sock_path[0] ? sock_path :
			   SCRUB_PROGRESS_SOCKET_PATH, strerror(errno));

	/* write all-zero progress file for a start */
	ret = scrub_write_progress(&spc_write_mutex, fsid, sp,
				   fi_args.num_devices);
		warning_on(!do_quiet,
	"failed to write the progress status file: %s. Status recording disabled",
	if (do_background) {
			error_on(!do_quiet, "cannot scrub, fork failed: %s",

		scrub_handle_sigint_parent();
			printf("scrub %s on %s, fsid %s (pid=%d)\n",
			       n_start ? "started" : "resumed",
			error_on(!do_quiet, "wait failed (ret=%d): %s",
				 ret, strerror(errno));
		if (!WIFEXITED(stat) || WEXITSTATUS(stat)) {
			error_on(!do_quiet, "scrub process failed");
			err = WIFEXITED(stat) ? WEXITSTATUS(stat) : -1;
	scrub_handle_sigint_child(fdmnt);

	for (i = 0; i < fi_args.num_devices; ++i) {
			sp[i].scrub_args.progress = sp[i].resumed->p;
			sp[i].stats = sp[i].resumed->stats;
			sp[i].stats.finished = 1;

		devid = di_args[i].devid;
		gettimeofday(&tv, NULL);
		sp[i].stats.t_start = tv.tv_sec;
		ret = pthread_create(&t_devs[i], NULL,
				     scrub_one_dev, &sp[i]);
			error("creating scrub_one_dev[%llu] thread failed: %s",
			      devid, strerror(ret));

	spc.prg_fd = prg_fd;
	spc.do_record = do_record;
	spc.write_mutex = &spc_write_mutex;
	spc.shared_progress = sp;
	ret = pthread_create(&t_prog, NULL, scrub_progress_cycle, &spc);
		error("creating progress thread failed: %s",
	for (i = 0; i < fi_args.num_devices; ++i) {
		devid = di_args[i].devid;
		ret = pthread_join(t_devs[i], NULL);
			error("pthread_join failed for scrub_one_dev[%llu]: %s",
			      devid, strerror(ret));

		switch (sp[i].ioctl_errno) {
			warning("device %lld not present",
			error("scrubbing %s failed for device id %lld: ret=%d, errno=%d (%s)",
			      sp[i].ret, sp[i].ioctl_errno,
			      strerror(sp[i].ioctl_errno));

		if (sp[i].scrub_args.progress.uncorrectable_errors > 0)
		if (sp[i].scrub_args.progress.corrected_errors > 0
		    || sp[i].scrub_args.progress.unverified_errors > 0)
	const char *append = "done";

	if (!do_stats_per_dev)
		init_fs_stat(&fs_stat);
	for (i = 0; i < fi_args.num_devices; ++i) {
		if (do_stats_per_dev) {
			print_scrub_dev(&di_args[i],
					&sp[i].scrub_args.progress,
					sp[i].ret ? "canceled" : "done",
				append = "canceled";
			add_to_fs_stat(&sp[i].scrub_args.progress,
				       &sp[i].stats, &fs_stat);

	if (!do_stats_per_dev) {
		printf("scrub %s for %s\n", append, fsid);
		print_fs_stat(&fs_stat, print_raw);
	ret = pthread_cancel(t_prog);
		ret = pthread_join(t_prog, &terr);

	/* check for errors from the handling of the progress thread */
	if (do_print && ret) {
		error("progress thread handling failed: %s",

	/* check for errors returned from the progress thread itself */
	if (do_print && terr && terr != PTHREAD_CANCELED)
		error("recording progress failed: %s",
		      strerror(-PTR_ERR(terr)));

		ret = scrub_write_progress(&spc_write_mutex, fsid, sp,
					   fi_args.num_devices);
		if (ret && do_print)
			error("failed to record the result: %s",

	scrub_handle_sigint_child(-1);

	free_history(past_scrubs);

	close_file_or_dir(fdmnt, dirstream);

	if (nothing_to_resume)
	if (e_uncorrectable) {
		error_on(!do_quiet, "there are uncorrectable errors");
		warning_on(!do_quiet,
			   "errors detected during scrubbing, corrected");
static const char * const cmd_scrub_start_usage[] = {
	"btrfs scrub start [-BdqrRf] [-c ioprio_class -n ioprio_classdata] <path>|<device>",
	"Start a new scrub. If a scrub is already running, the new one fails.",
	"-B do not background",
	"-d stats per device (-B only)",
	"-r read only mode",
	"-R raw print mode, print full data instead of summary",
	"-c set ioprio class (see ionice(1) manpage)",
	"-n set ioprio classdata (see ionice(1) manpage)",
	"-f force starting new scrub even if a scrub is already running",
	"   this is useful when scrub stats record file is damaged",

static int cmd_scrub_start(int argc, char **argv)
	return scrub_start(argc, argv, 0);
static const char * const cmd_scrub_cancel_usage[] = {
	"btrfs scrub cancel <path>|<device>",
	"Cancel a running scrub",

static int cmd_scrub_cancel(int argc, char **argv)
	DIR *dirstream = NULL;

	clean_args_no_options(argc, argv, cmd_scrub_cancel_usage);

	if (check_argc_exact(argc - optind, 1))
		usage(cmd_scrub_cancel_usage);

	path = argv[optind];

	fdmnt = open_path_or_dev_mnt(path, &dirstream, 1);

	ret = ioctl(fdmnt, BTRFS_IOC_SCRUB_CANCEL, NULL);
		error("scrub cancel failed on %s: %s", path,
		      errno == ENOTCONN ? "not running" : strerror(errno));
		if (errno == ENOTCONN)

	printf("scrub cancelled\n");

	close_file_or_dir(fdmnt, dirstream);
static const char * const cmd_scrub_resume_usage[] = {
	"btrfs scrub resume [-BdqrR] [-c ioprio_class -n ioprio_classdata] <path>|<device>",
	"Resume previously canceled or interrupted scrub",
	"-B do not background",
	"-d stats per device (-B only)",
	"-r read only mode",
	"-R raw print mode, print full data instead of summary",
	"-c set ioprio class (see ionice(1) manpage)",
	"-n set ioprio classdata (see ionice(1) manpage)",

static int cmd_scrub_resume(int argc, char **argv)
	return scrub_start(argc, argv, 1);
static const char * const cmd_scrub_status_usage[] = {
	"btrfs scrub status [-dR] <path>|<device>",
	"Show status of running or finished scrub",
	"-d stats per device",
	"-R print raw stats",

static int cmd_scrub_status(int argc, char **argv)
	struct btrfs_ioctl_fs_info_args fi_args;
	struct btrfs_ioctl_dev_info_args *di_args = NULL;
	struct scrub_file_record **past_scrubs = NULL;
	struct scrub_file_record *last_scrub;
	struct scrub_fs_stat fs_stat;
	struct sockaddr_un addr = {
		.sun_family = AF_UNIX,
	int do_stats_per_dev = 0;
	char fsid[BTRFS_UUID_UNPARSED_SIZE];
	DIR *dirstream = NULL;
	while ((c = getopt(argc, argv, "dR")) != -1) {
			do_stats_per_dev = 1;
			usage(cmd_scrub_status_usage);

	if (check_argc_exact(argc - optind, 1))
		usage(cmd_scrub_status_usage);

	path = argv[optind];

	fdmnt = open_path_or_dev_mnt(path, &dirstream, 1);

	ret = get_fs_info(path, &fi_args, &di_args);
		error("getting dev info for scrub failed: %s",

	if (!fi_args.num_devices) {
		error("no devices found");

	uuid_unparse(fi_args.fsid, fsid);

	fdres = socket(AF_UNIX, SOCK_STREAM, 0);
		error("failed to create socket to receive progress information: %s",

	scrub_datafile(SCRUB_PROGRESS_SOCKET_PATH, fsid,
		       NULL, addr.sun_path, sizeof(addr.sun_path));
	/* ignore EOVERFLOW, just use shorter name and hope for the best */
	addr.sun_path[sizeof(addr.sun_path) - 1] = '\0';
	ret = connect(fdres, (struct sockaddr *)&addr, sizeof(addr));
		fdres = scrub_open_file_r(SCRUB_DATA_FILE, fsid);
		if (fdres < 0 && fdres != -ENOENT) {
			warning("failed to open status file: %s",

	past_scrubs = scrub_read_file(fdres, 1);
	if (IS_ERR(past_scrubs))
		warning("failed to read status: %s",
			strerror(-PTR_ERR(past_scrubs)));

	in_progress = is_scrub_running_in_kernel(fdmnt, di_args, fi_args.num_devices);

	printf("scrub status for %s\n", fsid);
	if (do_stats_per_dev) {
		for (i = 0; i < fi_args.num_devices; ++i) {
			last_scrub = last_dev_scrub(past_scrubs,
				print_scrub_dev(&di_args[i], NULL, print_raw,
			last_scrub->stats.in_progress = in_progress;
			print_scrub_dev(&di_args[i], &last_scrub->p, print_raw,
					last_scrub->stats.finished ?
						"history" : "status",
					&last_scrub->stats);
		init_fs_stat(&fs_stat);
		fs_stat.s.in_progress = in_progress;
		for (i = 0; i < fi_args.num_devices; ++i) {
			last_scrub = last_dev_scrub(past_scrubs,
			add_to_fs_stat(&last_scrub->p, &last_scrub->stats,
		print_fs_stat(&fs_stat, print_raw);

	free_history(past_scrubs);

	close_file_or_dir(fdmnt, dirstream);
static const char scrub_cmd_group_info[] =
"verify checksums of data and metadata";

const struct cmd_group scrub_cmd_group = {
	scrub_cmd_group_usage, scrub_cmd_group_info, {
		{ "start", cmd_scrub_start, cmd_scrub_start_usage, NULL, 0 },
		{ "cancel", cmd_scrub_cancel, cmd_scrub_cancel_usage, NULL, 0 },
		{ "resume", cmd_scrub_resume, cmd_scrub_resume_usage, NULL, 0 },
		{ "status", cmd_scrub_status, cmd_scrub_status_usage, NULL, 0 },

int cmd_scrub(int argc, char **argv)
	return handle_command_group(&scrub_cmd_group, argc, argv);