2 * Copyright (C) 2011 STRATO. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include "kerncompat.h"
20 #include "androidcompat.h"
22 #include <sys/ioctl.h>
25 #include <sys/types.h>
26 #include <sys/socket.h>
28 #include <sys/syscall.h>
31 #include <uuid/uuid.h>
49 static const char * const scrub_cmd_group_usage[] = {
50 "btrfs scrub <command> [options] <path>|<device>",
54 #define SCRUB_DATA_FILE "/var/lib/btrfs/scrub.status"
55 #define SCRUB_PROGRESS_SOCKET_PATH "/var/lib/btrfs/scrub.progress"
56 #define SCRUB_FILE_VERSION_PREFIX "scrub status"
57 #define SCRUB_FILE_VERSION "1"
68 /* TBD: replace with #include "linux/ioprio.h" in some years */
69 #if !defined (IOPRIO_H)
70 #define IOPRIO_WHO_PROCESS 1
71 #define IOPRIO_CLASS_SHIFT 13
72 #define IOPRIO_PRIO_VALUE(class, data) \
73 (((class) << IOPRIO_CLASS_SHIFT) | (data))
74 #define IOPRIO_CLASS_IDLE 3
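/*
 * Illustrative sketch (the function name is made up for this example):
 * how the macros above are combined when the scrub worker threads later
 * drop to idle I/O priority. IOPRIO_PRIO_VALUE() packs the scheduling
 * class into the bits above IOPRIO_CLASS_SHIFT, so
 * IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0) is (3 << 13) | 0 == 0x6000,
 * and the value is passed to ioprio_set(2).
 */
static inline int scrub_example_set_idle_ioprio(void)
{
	/* with IOPRIO_WHO_PROCESS, who == 0 means the calling thread */
	return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		       IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
}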
77 struct scrub_progress {
78 struct btrfs_ioctl_scrub_args scrub_args;
82 struct scrub_stats stats;
83 struct scrub_file_record *resumed;
85 pthread_mutex_t progress_mutex;
90 struct scrub_file_record {
91 u8 fsid[BTRFS_FSID_SIZE];
93 struct scrub_stats stats;
94 struct btrfs_scrub_progress p;
97 struct scrub_progress_cycle {
101 struct btrfs_ioctl_fs_info_args *fi;
102 struct scrub_progress *progress;
103 struct scrub_progress *shared_progress;
104 pthread_mutex_t *write_mutex;
107 struct scrub_fs_stat {
108 struct btrfs_scrub_progress p;
109 struct scrub_stats s;
113 static void print_scrub_full(struct btrfs_scrub_progress *sp)
115 printf("\tdata_extents_scrubbed: %lld\n", sp->data_extents_scrubbed);
116 printf("\ttree_extents_scrubbed: %lld\n", sp->tree_extents_scrubbed);
117 printf("\tdata_bytes_scrubbed: %lld\n", sp->data_bytes_scrubbed);
118 printf("\ttree_bytes_scrubbed: %lld\n", sp->tree_bytes_scrubbed);
119 printf("\tread_errors: %lld\n", sp->read_errors);
120 printf("\tcsum_errors: %lld\n", sp->csum_errors);
121 printf("\tverify_errors: %lld\n", sp->verify_errors);
122 printf("\tno_csum: %lld\n", sp->no_csum);
123 printf("\tcsum_discards: %lld\n", sp->csum_discards);
124 printf("\tsuper_errors: %lld\n", sp->super_errors);
125 printf("\tmalloc_errors: %lld\n", sp->malloc_errors);
126 printf("\tuncorrectable_errors: %lld\n", sp->uncorrectable_errors);
127 printf("\tunverified_errors: %lld\n", sp->unverified_errors);
128 printf("\tcorrected_errors: %lld\n", sp->corrected_errors);
129 printf("\tlast_physical: %lld\n", sp->last_physical);
132 #define PRINT_SCRUB_ERROR(test, desc) do { \
134 printf(" %s=%llu", desc, test); \
137 static void print_scrub_summary(struct btrfs_scrub_progress *p)
142 err_cnt = p->read_errors +
147 err_cnt2 = p->corrected_errors + p->uncorrectable_errors;
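/*
 * err_cnt sums the errors by how they were detected, err_cnt2 by how they
 * were resolved (corrected + uncorrectable). The two should normally agree;
 * the larger one is printed below so the summary never under-reports.
 */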
149 if (p->malloc_errors)
150 printf("*** WARNING: memory allocation failed while scrubbing. "
151 "results may be inaccurate\n");
153 printf("\ttotal bytes scrubbed: %s with %llu errors\n",
154 pretty_size(p->data_bytes_scrubbed + p->tree_bytes_scrubbed),
155 max(err_cnt, err_cnt2));
157 if (err_cnt || err_cnt2) {
158 printf("\terror details:");
159 PRINT_SCRUB_ERROR(p->read_errors, "read");
160 PRINT_SCRUB_ERROR(p->super_errors, "super");
161 PRINT_SCRUB_ERROR(p->verify_errors, "verify");
162 PRINT_SCRUB_ERROR(p->csum_errors, "csum");
164 printf("\tcorrected errors: %llu, uncorrectable errors: %llu, "
165 "unverified errors: %llu\n", p->corrected_errors,
166 p->uncorrectable_errors, p->unverified_errors);
170 #define _SCRUB_FS_STAT(p, name, fs_stat) do { \
171 fs_stat->p.name += p->name; \
174 #define _SCRUB_FS_STAT_MIN(ss, name, fs_stat) \
176 if (fs_stat->s.name > ss->name) { \
177 fs_stat->s.name = ss->name; \
181 #define _SCRUB_FS_STAT_ZMIN(ss, name, fs_stat) \
183 if (!fs_stat->s.name || fs_stat->s.name > ss->name) { \
184 fs_stat->s.name = ss->name; \
188 #define _SCRUB_FS_STAT_ZMAX(ss, name, fs_stat) \
190 if (!(fs_stat)->s.name || (fs_stat)->s.name < (ss)->name) { \
191 (fs_stat)->s.name = (ss)->name; \
195 static void add_to_fs_stat(struct btrfs_scrub_progress *p,
196 struct scrub_stats *ss,
197 struct scrub_fs_stat *fs_stat)
199 _SCRUB_FS_STAT(p, data_extents_scrubbed, fs_stat);
200 _SCRUB_FS_STAT(p, tree_extents_scrubbed, fs_stat);
201 _SCRUB_FS_STAT(p, data_bytes_scrubbed, fs_stat);
202 _SCRUB_FS_STAT(p, tree_bytes_scrubbed, fs_stat);
203 _SCRUB_FS_STAT(p, read_errors, fs_stat);
204 _SCRUB_FS_STAT(p, csum_errors, fs_stat);
205 _SCRUB_FS_STAT(p, verify_errors, fs_stat);
206 _SCRUB_FS_STAT(p, no_csum, fs_stat);
207 _SCRUB_FS_STAT(p, csum_discards, fs_stat);
208 _SCRUB_FS_STAT(p, super_errors, fs_stat);
209 _SCRUB_FS_STAT(p, malloc_errors, fs_stat);
210 _SCRUB_FS_STAT(p, uncorrectable_errors, fs_stat);
211 _SCRUB_FS_STAT(p, corrected_errors, fs_stat);
212 _SCRUB_FS_STAT(p, last_physical, fs_stat);
213 _SCRUB_FS_STAT_ZMIN(ss, t_start, fs_stat);
214 _SCRUB_FS_STAT_ZMIN(ss, t_resumed, fs_stat);
215 _SCRUB_FS_STAT_ZMAX(ss, duration, fs_stat);
216 _SCRUB_FS_STAT_ZMAX(ss, canceled, fs_stat);
217 _SCRUB_FS_STAT_MIN(ss, finished, fs_stat);
220 static void init_fs_stat(struct scrub_fs_stat *fs_stat)
222 memset(fs_stat, 0, sizeof(*fs_stat));
223 fs_stat->s.finished = 1;
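/*
 * Seed 'finished' with 1: add_to_fs_stat() folds the per-device value in
 * with _SCRUB_FS_STAT_MIN(), so the filesystem-wide state remains
 * "finished" only if every device reports finished.
 */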
226 static void _print_scrub_ss(struct scrub_stats *ss)
233 if (!ss || !ss->t_start) {
234 printf("\tno stats available\n");
238 localtime_r(&ss->t_resumed, &tm);
239 strftime(t, sizeof(t), "%c", &tm);
240 t[sizeof(t) - 1] = '\0';
241 printf("\tscrub resumed at %s", t);
243 localtime_r(&ss->t_start, &tm);
244 strftime(t, sizeof(t), "%c", &tm);
245 t[sizeof(t) - 1] = '\0';
246 printf("\tscrub started at %s", t);
249 seconds = ss->duration;
250 hours = ss->duration / (60 * 60);
251 gmtime_r(&seconds, &tm);
252 strftime(t, sizeof(t), "%M:%S", &tm);
254 printf(", running for %02u:%s\n", hours, t);
255 else if (ss->canceled)
256 printf(" and was aborted after %02u:%s\n", hours, t);
257 else if (ss->finished)
258 printf(" and finished after %02u:%s\n", hours, t);
260 printf(", interrupted after %02u:%s, not running\n",
264 static void print_scrub_dev(struct btrfs_ioctl_dev_info_args *di,
265 struct btrfs_scrub_progress *p, int raw,
266 const char *append, struct scrub_stats *ss)
268 printf("scrub device %s (id %llu) %s\n", di->path, di->devid,
269 append ? append : "");
277 print_scrub_summary(p);
281 static void print_fs_stat(struct scrub_fs_stat *fs_stat, int raw)
283 _print_scrub_ss(&fs_stat->s);
286 print_scrub_full(&fs_stat->p);
288 print_scrub_summary(&fs_stat->p);
291 static void free_history(struct scrub_file_record **last_scrubs)
293 struct scrub_file_record **l = last_scrubs;
302 * cancels a running scrub and makes the master process record the current
303 * progress status before exiting.
305 static int cancel_fd = -1;
306 static void scrub_sigint_record_progress(int signal)
310 ret = ioctl(cancel_fd, BTRFS_IOC_SCRUB_CANCEL, NULL);
312 perror("Scrub cancel failed");
315 static int scrub_handle_sigint_parent(void)
317 struct sigaction sa = {
318 .sa_handler = SIG_IGN,
319 .sa_flags = SA_RESTART,
322 return sigaction(SIGINT, &sa, NULL);
325 static int scrub_handle_sigint_child(int fd)
327 struct sigaction sa = {
328 .sa_handler = fd == -1 ? SIG_DFL : scrub_sigint_record_progress,
332 return sigaction(SIGINT, &sa, NULL);
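/*
 * Build the per-filesystem name used for both the status file and the
 * progress socket: fn_base (e.g. SCRUB_DATA_FILE) with the filesystem's
 * fsid (fn_local) appended and, optionally, a temporary-file suffix
 * (fn_tmp). Returns 0 on success or -EOVERFLOW when the result does not
 * fit into 'datafile' of the given size.
 */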
335 static int scrub_datafile(const char *fn_base, const char *fn_local,
336 const char *fn_tmp, char *datafile, int size)
341 datafile[end + 1] = '\0';
342 strncpy(datafile, fn_base, end);
343 ret = strlen(datafile);
349 strncpy(datafile + ret + 1, fn_local, end - ret - 1);
350 ret = strlen(datafile);
357 strncpy(datafile + ret + 1, fn_tmp, end - ret - 1);
358 ret = strlen(datafile);
367 static int scrub_open_file(const char *datafile, int m)
372 fd = open(datafile, m, 0600);
376 ret = flock(fd, LOCK_EX|LOCK_NB);
386 static int scrub_open_file_r(const char *fn_base, const char *fn_local)
389 char datafile[PATH_MAX];
390 ret = scrub_datafile(fn_base, fn_local, NULL,
391 datafile, sizeof(datafile));
394 return scrub_open_file(datafile, O_RDONLY);
397 static int scrub_open_file_w(const char *fn_base, const char *fn_local,
401 char datafile[PATH_MAX];
402 ret = scrub_datafile(fn_base, fn_local, tmp,
403 datafile, sizeof(datafile));
406 return scrub_open_file(datafile, O_WRONLY|O_CREAT);
409 static int scrub_rename_file(const char *fn_base, const char *fn_local,
413 char datafile_old[PATH_MAX];
414 char datafile_new[PATH_MAX];
415 ret = scrub_datafile(fn_base, fn_local, tmp,
416 datafile_old, sizeof(datafile_old));
419 ret = scrub_datafile(fn_base, fn_local, NULL,
420 datafile_new, sizeof(datafile_new));
423 ret = rename(datafile_old, datafile_new);
424 return ret ? -errno : 0;
427 #define _SCRUB_KVREAD(ret, i, name, avail, l, dest) if (ret == 0) { \
428 ret = scrub_kvread(i, sizeof(#name), avail, l, #name, dest.name); \
432 * returns 0 if the key did not match (nothing was read)
433 * 1 if the key did match (success)
434 * -1 if the key did match and an error occurred
436 static int scrub_kvread(int *i, int len, int avail, const char *buf,
437 const char *key, u64 *dest)
441 if (*i + len + 1 < avail && strncmp(&buf[*i], key, len - 1) == 0) {
446 for (j = 0; *i + j < avail && isdigit(buf[*i + j]); ++j)
450 *dest = atoll(&buf[*i]);
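/*
 * Example: with key "csum_errors" and the buffer positioned at
 * "csum_errors:3|...", the key matches, the digit run is scanned and
 * *dest becomes 3; the match is then reported by returning 1 as
 * documented in the comment above scrub_kvread().
 */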
458 #define _SCRUB_INVALID do { \
460 warning("invalid data on line %d pos " \
461 "%d state %d (near \"%.*s\") at %s:%d", \
462 lineno, i, state, 20 > avail ? avail : 20, \
463 l + i, __FILE__, __LINE__); \
467 static struct scrub_file_record **scrub_read_file(int fd, int report_errors)
480 char empty_uuid[BTRFS_FSID_SIZE] = {0};
481 struct scrub_file_record **p = NULL;
484 old_avail = avail - i;
486 error("scrub record file corrupted near byte %d", i);
487 return ERR_PTR(-EINVAL);
490 memmove(l, l + i, old_avail);
491 avail = read(fd, l + old_avail, sizeof(l) - old_avail);
494 if (avail == 0 && old_avail == 0) {
496 memcmp(p[curr]->fsid, empty_uuid, BTRFS_FSID_SIZE) == 0) {
498 } else if (curr == -1) {
499 p = ERR_PTR(-ENODATA);
505 return ERR_PTR(-errno);
514 case 0: /* start of file */
515 ret = scrub_kvread(&i,
516 sizeof(SCRUB_FILE_VERSION_PREFIX), avail, l,
517 SCRUB_FILE_VERSION_PREFIX, &version);
520 if (version != atoll(SCRUB_FILE_VERSION))
521 return ERR_PTR(-ENOTSUP);
524 case 1: /* start of line, alloc */
526 * this state makes sure we have a complete line in
527 * further processing, so we don't need wrap-tracking
530 if (!eof && !memchr(l + i, '\n', avail - i))
533 if (curr > -1 && memcmp(p[curr]->fsid, empty_uuid,
534 BTRFS_FSID_SIZE) == 0) {
540 p = realloc(p, (curr + 2) * sizeof(*p));
543 return ERR_PTR(-errno);
545 p[curr] = malloc(sizeof(**p));
548 return ERR_PTR(-errno);
550 memset(p[curr], 0, sizeof(**p));
554 case 2: /* start of line, skip space */
555 while (i < avail && isspace(l[i])) {
561 (!eof && !memchr(l + i, '\n', avail - i)))
565 case 3: /* read fsid */
568 for (j = 0; i + j < avail && l[i + j] != ':'; ++j)
570 if (i + j + 1 >= avail)
572 if (j != BTRFS_UUID_UNPARSED_SIZE - 1)
575 ret = uuid_parse(l + i, p[curr]->fsid);
581 case 4: /* read dev id */
582 for (j = 0; i + j < avail && isdigit(l[i + j]); ++j)
584 if (j == 0 || i + j + 1 >= avail)
586 p[curr]->devid = atoll(&l[i]);
590 case 5: /* read key/value pair */
592 _SCRUB_KVREAD(ret, &i, data_extents_scrubbed, avail, l,
594 _SCRUB_KVREAD(ret, &i, tree_extents_scrubbed, avail, l,
596 _SCRUB_KVREAD(ret, &i, data_bytes_scrubbed, avail, l,
598 _SCRUB_KVREAD(ret, &i, tree_bytes_scrubbed, avail, l,
600 _SCRUB_KVREAD(ret, &i, read_errors, avail, l,
602 _SCRUB_KVREAD(ret, &i, csum_errors, avail, l,
604 _SCRUB_KVREAD(ret, &i, verify_errors, avail, l,
606 _SCRUB_KVREAD(ret, &i, no_csum, avail, l,
608 _SCRUB_KVREAD(ret, &i, csum_discards, avail, l,
610 _SCRUB_KVREAD(ret, &i, super_errors, avail, l,
612 _SCRUB_KVREAD(ret, &i, malloc_errors, avail, l,
614 _SCRUB_KVREAD(ret, &i, uncorrectable_errors, avail, l,
616 _SCRUB_KVREAD(ret, &i, corrected_errors, avail, l,
618 _SCRUB_KVREAD(ret, &i, last_physical, avail, l,
620 _SCRUB_KVREAD(ret, &i, finished, avail, l,
622 _SCRUB_KVREAD(ret, &i, t_start, avail, l,
623 (u64 *)&p[curr]->stats);
624 _SCRUB_KVREAD(ret, &i, t_resumed, avail, l,
625 (u64 *)&p[curr]->stats);
626 _SCRUB_KVREAD(ret, &i, duration, avail, l,
627 (u64 *)&p[curr]->stats);
628 _SCRUB_KVREAD(ret, &i, canceled, avail, l,
634 case 6: /* after number */
637 else if (l[i] == '\n')
643 case 99: /* skip rest of line */
648 if (l[i - 1] == '\n') {
655 error("internal error: unknown parser state %d near byte %d",
657 return ERR_PTR(-EINVAL);
662 static int scrub_write_buf(int fd, const void *data, int len)
665 ret = write(fd, data, len);
669 static int scrub_writev(int fd, char *buf, int max, const char *fmt, ...)
670 __attribute__ ((format (printf, 4, 5)));
671 static int scrub_writev(int fd, char *buf, int max, const char *fmt, ...)
677 ret = vsnprintf(buf, max, fmt, args);
681 return scrub_write_buf(fd, buf, ret);
684 #define _SCRUB_SUM(dest, data, name) dest->scrub_args.progress.name = \
685 data->resumed->p.name + data->scrub_args.progress.name
687 static struct scrub_progress *scrub_resumed_stats(struct scrub_progress *data,
688 struct scrub_progress *dest)
690 if (!data->resumed || data->skip)
693 _SCRUB_SUM(dest, data, data_extents_scrubbed);
694 _SCRUB_SUM(dest, data, tree_extents_scrubbed);
695 _SCRUB_SUM(dest, data, data_bytes_scrubbed);
696 _SCRUB_SUM(dest, data, tree_bytes_scrubbed);
697 _SCRUB_SUM(dest, data, read_errors);
698 _SCRUB_SUM(dest, data, csum_errors);
699 _SCRUB_SUM(dest, data, verify_errors);
700 _SCRUB_SUM(dest, data, no_csum);
701 _SCRUB_SUM(dest, data, csum_discards);
702 _SCRUB_SUM(dest, data, super_errors);
703 _SCRUB_SUM(dest, data, malloc_errors);
704 _SCRUB_SUM(dest, data, uncorrectable_errors);
705 _SCRUB_SUM(dest, data, corrected_errors);
706 _SCRUB_SUM(dest, data, last_physical);
707 dest->stats.canceled = data->stats.canceled;
708 dest->stats.finished = data->stats.finished;
709 dest->stats.t_resumed = data->stats.t_start;
710 dest->stats.t_start = data->resumed->stats.t_start;
711 dest->stats.duration = data->resumed->stats.duration +
712 data->stats.duration;
713 dest->scrub_args.devid = data->scrub_args.devid;
717 #define _SCRUB_KVWRITE(fd, buf, name, use) \
718 scrub_kvwrite(fd, buf, sizeof(buf), #name, \
719 use->scrub_args.progress.name)
721 #define _SCRUB_KVWRITE_STATS(fd, buf, name, use) \
722 scrub_kvwrite(fd, buf, sizeof(buf), #name, \
725 static int scrub_kvwrite(int fd, char *buf, int max, const char *key, u64 val)
727 return scrub_writev(fd, buf, max, "|%s:%lld", key, val);
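/*
 * For illustration (values made up): the status data written below, and
 * parsed back by scrub_read_file() above, looks like
 *
 *   scrub status:1
 *   <fsid>:<devid>|data_extents_scrubbed:1234|tree_extents_scrubbed:56|...|duration:120|canceled:0|finished:1
 *
 * i.e. one version header, then one line per device: the unparsed fsid,
 * a ':', the decimal devid, followed by the "|key:value" pairs emitted
 * by scrub_kvwrite().
 */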
730 static int scrub_write_file(int fd, const char *fsid,
731 struct scrub_progress *data, int n)
736 struct scrub_progress local;
737 struct scrub_progress *use;
742 /* each -1 is to subtract one \0 byte, the + 2 is for ':' and '\n' */
743 ret = scrub_write_buf(fd, SCRUB_FILE_VERSION_PREFIX ":"
744 SCRUB_FILE_VERSION "\n",
745 (sizeof(SCRUB_FILE_VERSION_PREFIX) - 1) +
746 (sizeof(SCRUB_FILE_VERSION) - 1) + 2);
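/*
 * Worked out: sizeof("scrub status") - 1 is 12, sizeof("1") - 1 is 1,
 * plus 2 for the ':' and '\n' gives 15, which is exactly
 * strlen("scrub status:1\n").
 */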
750 for (i = 0; i < n; ++i) {
751 use = scrub_resumed_stats(&data[i], &local);
752 if (scrub_write_buf(fd, fsid, strlen(fsid)) ||
753 scrub_write_buf(fd, ":", 1) ||
754 scrub_writev(fd, buf, sizeof(buf), "%lld",
755 use->scrub_args.devid) ||
756 scrub_write_buf(fd, buf, ret) ||
757 _SCRUB_KVWRITE(fd, buf, data_extents_scrubbed, use) ||
758 _SCRUB_KVWRITE(fd, buf, tree_extents_scrubbed, use) ||
759 _SCRUB_KVWRITE(fd, buf, data_bytes_scrubbed, use) ||
760 _SCRUB_KVWRITE(fd, buf, tree_bytes_scrubbed, use) ||
761 _SCRUB_KVWRITE(fd, buf, read_errors, use) ||
762 _SCRUB_KVWRITE(fd, buf, csum_errors, use) ||
763 _SCRUB_KVWRITE(fd, buf, verify_errors, use) ||
764 _SCRUB_KVWRITE(fd, buf, no_csum, use) ||
765 _SCRUB_KVWRITE(fd, buf, csum_discards, use) ||
766 _SCRUB_KVWRITE(fd, buf, super_errors, use) ||
767 _SCRUB_KVWRITE(fd, buf, malloc_errors, use) ||
768 _SCRUB_KVWRITE(fd, buf, uncorrectable_errors, use) ||
769 _SCRUB_KVWRITE(fd, buf, corrected_errors, use) ||
770 _SCRUB_KVWRITE(fd, buf, last_physical, use) ||
771 _SCRUB_KVWRITE_STATS(fd, buf, t_start, use) ||
772 _SCRUB_KVWRITE_STATS(fd, buf, t_resumed, use) ||
773 _SCRUB_KVWRITE_STATS(fd, buf, duration, use) ||
774 _SCRUB_KVWRITE_STATS(fd, buf, canceled, use) ||
775 _SCRUB_KVWRITE_STATS(fd, buf, finished, use) ||
776 scrub_write_buf(fd, "\n", 1)) {
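/*
 * Write the progress of all devices to the status file. The update is kept
 * consistent for readers and concurrent writers: thread cancellation is
 * disabled around the critical section, the data is written under the mutex
 * into a temporary ("tmp") file, and that file is then rename()d over the
 * real per-fsid status file, so readers never see a partially written
 * status.
 */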
784 static int scrub_write_progress(pthread_mutex_t *m, const char *fsid,
785 struct scrub_progress *data, int n)
792 ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
798 ret = pthread_mutex_lock(m);
804 fd = scrub_open_file_w(SCRUB_DATA_FILE, fsid, "tmp");
809 err = scrub_write_file(fd, fsid, data, n);
812 err = scrub_rename_file(SCRUB_DATA_FILE, fsid, "tmp");
823 ret = pthread_mutex_unlock(m);
828 ret = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old);
836 static void *scrub_one_dev(void *ctx)
838 struct scrub_progress *sp = ctx;
842 sp->stats.canceled = 0;
843 sp->stats.duration = 0;
844 sp->stats.finished = 0;
846 ret = syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
847 IOPRIO_PRIO_VALUE(sp->ioprio_class,
848 sp->ioprio_classdata));
850 warning("setting ioprio failed: %m (ignored)");
852 ret = ioctl(sp->fd, BTRFS_IOC_SCRUB, &sp->scrub_args);
853 gettimeofday(&tv, NULL);
855 sp->stats.duration = tv.tv_sec - sp->stats.t_start;
856 sp->stats.canceled = !!ret;
857 sp->ioctl_errno = errno;
858 ret = pthread_mutex_lock(&sp->progress_mutex);
860 return ERR_PTR(-ret);
861 sp->stats.finished = 1;
862 ret = pthread_mutex_unlock(&sp->progress_mutex);
864 return ERR_PTR(-ret);
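/*
 * The thread callbacks in this file report failures by encoding a negative
 * errno into the returned pointer with ERR_PTR(); the joiner can decode it
 * with IS_ERR()/PTR_ERR(), as scrub_start() does for the progress thread
 * further down.
 */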
869 static void *progress_one_dev(void *ctx)
871 struct scrub_progress *sp = ctx;
873 sp->ret = ioctl(sp->fd, BTRFS_IOC_SCRUB_PROGRESS, &sp->scrub_args);
874 sp->ioctl_errno = errno;
879 /* nb: returns a negative errno via ERR_PTR */
880 static void *scrub_progress_cycle(void *ctx)
883 int perr = 0; /* positive / pthread error returns */
886 char fsid[BTRFS_UUID_UNPARSED_SIZE];
887 struct scrub_progress *sp;
888 struct scrub_progress *sp_last;
889 struct scrub_progress *sp_shared;
891 struct scrub_progress_cycle *spc = ctx;
892 int ndev = spc->fi->num_devices;
896 struct pollfd accept_poll_fd = {
901 struct pollfd write_poll_fd = {
905 struct sockaddr_un peer;
906 socklen_t peer_size = sizeof(peer);
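/*
 * Rough flow of this progress thread: wait up to five seconds for a client
 * on the progress socket; refresh each device's counters via
 * BTRFS_IOC_SCRUB_PROGRESS into the "this" half of the 2 * ndev progress
 * array, falling back to the "last" half for a device whose scrub just
 * finished; then stream the snapshot to the connected client in the
 * status-file format and, when recording is enabled, also write it to the
 * status file under write_mutex.
 */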
908 perr = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
912 uuid_unparse(spc->fi->fsid, fsid);
914 for (i = 0; i < ndev; ++i) {
915 sp = &spc->progress[i];
916 sp_last = &spc->progress[i + ndev];
917 sp_shared = &spc->shared_progress[i];
918 sp->scrub_args.devid = sp_last->scrub_args.devid =
919 sp_shared->scrub_args.devid;
920 sp->fd = sp_last->fd = spc->fdmnt;
921 sp->stats.t_start = sp_last->stats.t_start =
922 sp_shared->stats.t_start;
923 sp->resumed = sp_last->resumed = sp_shared->resumed;
924 sp->skip = sp_last->skip = sp_shared->skip;
925 sp->stats.finished = sp_last->stats.finished =
926 sp_shared->stats.finished;
930 ret = poll(&accept_poll_fd, 1, 5 * 1000);
936 peer_fd = accept(spc->prg_fd, (struct sockaddr *)&peer,
938 gettimeofday(&tv, NULL);
941 for (i = 0; i < ndev; ++i) {
942 sp = &spc->progress[this * ndev + i];
943 sp_last = &spc->progress[last * ndev + i];
944 sp_shared = &spc->shared_progress[i];
945 if (sp->stats.finished)
947 progress_one_dev(sp);
948 sp->stats.duration = tv.tv_sec - sp->stats.t_start;
951 if (sp->ioctl_errno != ENOTCONN &&
952 sp->ioctl_errno != ENODEV) {
953 ret = -sp->ioctl_errno;
957 * The scrub has finished or the device was removed; check the
958 * finished flag. If it is not set yet, reuse the last
959 * result we got for the current write and carry
960 * on; the flag should be set on the next cycle.
962 perr = pthread_setcancelstate(
963 PTHREAD_CANCEL_DISABLE, &old);
966 perr = pthread_mutex_lock(&sp_shared->progress_mutex);
969 if (!sp_shared->stats.finished) {
970 perr = pthread_mutex_unlock(
971 &sp_shared->progress_mutex);
974 perr = pthread_setcancelstate(
975 PTHREAD_CANCEL_ENABLE, &old);
978 memcpy(sp, sp_last, sizeof(*sp));
981 perr = pthread_mutex_unlock(&sp_shared->progress_mutex);
984 perr = pthread_setcancelstate(
985 PTHREAD_CANCEL_ENABLE, &old);
988 memcpy(sp, sp_shared, sizeof(*sp));
989 memcpy(sp_last, sp_shared, sizeof(*sp));
992 write_poll_fd.fd = peer_fd;
993 ret = poll(&write_poll_fd, 1, 0);
999 ret = scrub_write_file(
1001 &spc->progress[this * ndev], ndev);
1008 if (!spc->do_record)
1010 ret = scrub_write_progress(spc->write_mutex, fsid,
1011 &spc->progress[this * ndev], ndev);
1020 return ERR_PTR(ret);
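/*
 * Illustrative sketch (not part of the original code; the function name is
 * made up): a minimal client for the progress socket served by the cycle
 * thread above. It resolves the per-fsid socket name with scrub_datafile(),
 * connects, and dumps whatever the running scrub's master process streams
 * back (the same key/value records as the status file). Error handling is
 * reduced to the bare minimum; fsid is the unparsed filesystem UUID string.
 */
static inline int scrub_example_dump_progress(const char *fsid)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	char buf[4096];
	ssize_t n;
	int fd;

	if (scrub_datafile(SCRUB_PROGRESS_SOCKET_PATH, fsid, NULL,
			   addr.sun_path, sizeof(addr.sun_path)))
		return -EINVAL;	/* socket name did not fit, give up */

	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd < 0)
		return -errno;

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		int err = -errno;	/* typically: no scrub running */

		close(fd);
		return err;
	}

	/* the master process answers with the current progress records */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return n < 0 ? -EIO : 0;
}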
1023 static struct scrub_file_record *last_dev_scrub(
1024 struct scrub_file_record *const *const past_scrubs, u64 devid)
1028 if (!past_scrubs || IS_ERR(past_scrubs))
1031 for (i = 0; past_scrubs[i]; ++i)
1032 if (past_scrubs[i]->devid == devid)
1033 return past_scrubs[i];
1038 static int mkdir_p(char *path)
1043 for (i = 1; i < strlen(path); ++i) {
1047 ret = mkdir(path, 0777);
1048 if (ret && errno != EEXIST)
1056 static int is_scrub_running_on_fs(struct btrfs_ioctl_fs_info_args *fi_args,
1057 struct btrfs_ioctl_dev_info_args *di_args,
1058 struct scrub_file_record **past_scrubs)
1062 if (!fi_args || !di_args || !past_scrubs)
1065 for (i = 0; i < fi_args->num_devices; i++) {
1066 struct scrub_file_record *sfr =
1067 last_dev_scrub(past_scrubs, di_args[i].devid);
1071 if (!(sfr->stats.finished || sfr->stats.canceled))
1077 static int is_scrub_running_in_kernel(int fd,
1078 struct btrfs_ioctl_dev_info_args *di_args, u64 max_devices)
1080 struct scrub_progress sp;
1084 for (i = 0; i < max_devices; i++) {
1085 memset(&sp, 0, sizeof(sp));
1086 sp.scrub_args.devid = di_args[i].devid;
1087 ret = ioctl(fd, BTRFS_IOC_SCRUB_PROGRESS, &sp.scrub_args);
1095 static const char * const cmd_scrub_start_usage[];
1096 static const char * const cmd_scrub_resume_usage[];
1098 static int scrub_start(int argc, char **argv, int resume)
1108 int e_uncorrectable = 0;
1109 int e_correctable = 0;
1112 int do_background = 1;
1118 int do_stats_per_dev = 0;
1119 int ioprio_class = IOPRIO_CLASS_IDLE;
1120 int ioprio_classdata = 0;
1124 struct btrfs_ioctl_fs_info_args fi_args;
1125 struct btrfs_ioctl_dev_info_args *di_args = NULL;
1126 struct scrub_progress *sp = NULL;
1127 struct scrub_fs_stat fs_stat;
1129 struct sockaddr_un addr = {
1130 .sun_family = AF_UNIX,
1132 pthread_t *t_devs = NULL;
1134 struct scrub_file_record **past_scrubs = NULL;
1135 struct scrub_file_record *last_scrub = NULL;
1136 char *datafile = strdup(SCRUB_DATA_FILE);
1137 char fsid[BTRFS_UUID_UNPARSED_SIZE];
1138 char sock_path[PATH_MAX] = "";
1139 struct scrub_progress_cycle spc;
1140 pthread_mutex_t spc_write_mutex = PTHREAD_MUTEX_INITIALIZER;
1143 DIR *dirstream = NULL;
1145 int nothing_to_resume = 0;
1147 while ((c = getopt(argc, argv, "BdqrRc:n:f")) != -1) {
1155 do_stats_per_dev = 1;
1167 ioprio_class = (int)strtol(optarg, NULL, 10);
1170 ioprio_classdata = (int)strtol(optarg, NULL, 10);
1177 usage(resume ? cmd_scrub_resume_usage :
1178 cmd_scrub_start_usage);
1182 /* try to catch most error cases before forking */
1184 if (check_argc_exact(argc - optind, 1)) {
1185 usage(resume ? cmd_scrub_resume_usage :
1186 cmd_scrub_start_usage);
1189 spc.progress = NULL;
1190 if (do_quiet && do_print)
1193 if (mkdir_p(datafile)) {
1194 warning_on(!do_quiet,
1195 "cannot create scrub data file, mkdir %s failed: %m. Status recording disabled",
1201 path = argv[optind];
1203 fdmnt = open_path_or_dev_mnt(path, &dirstream, !do_quiet);
1207 ret = get_fs_info(path, &fi_args, &di_args);
1210 "getting dev info for scrub failed: %s",
1215 if (!fi_args.num_devices) {
1216 error_on(!do_quiet, "no devices found");
1221 uuid_unparse(fi_args.fsid, fsid);
1222 fdres = scrub_open_file_r(SCRUB_DATA_FILE, fsid);
1223 if (fdres < 0 && fdres != -ENOENT) {
1224 warning_on(!do_quiet, "failed to open status file: %s",
1226 } else if (fdres >= 0) {
1227 past_scrubs = scrub_read_file(fdres, !do_quiet);
1228 if (IS_ERR(past_scrubs))
1229 warning_on(!do_quiet, "failed to read status file: %s",
1230 strerror(-PTR_ERR(past_scrubs)));
1235 * Check for stale information in the status file, i.e. it reports
1236 * canceled=0, finished=0 but no scrub is actually running.
1238 if (!is_scrub_running_in_kernel(fdmnt, di_args, fi_args.num_devices))
1242 * Check whether any involved device is already busy running a
1243 * scrub. Starting anyway would garble the status messages and
1244 * leave the state "aborted" without any explanation that a scrub
1245 * was already running. Therefore check first, prevent it, and give
1246 * the user some feedback if a scrub is already running.
1247 * Note that if scrub is started with a block device as the
1248 * parameter, only that particular block device is checked. It
1249 * is a normal mode of operation to start scrub on multiple
1250 * single devices; there is no reason to prevent this.
1252 if (!force && is_scrub_running_on_fs(&fi_args, di_args, past_scrubs)) {
1254 "Scrub is already running.\n"
1255 "To cancel use 'btrfs scrub cancel %s'.\n"
1256 "To see the status use 'btrfs scrub status [-d] %s'",
1262 t_devs = malloc(fi_args.num_devices * sizeof(*t_devs));
1263 sp = calloc(fi_args.num_devices, sizeof(*sp));
1264 spc.progress = calloc(fi_args.num_devices * 2, sizeof(*spc.progress));
1266 if (!t_devs || !sp || !spc.progress) {
1267 error_on(!do_quiet, "scrub failed: %m");
1272 for (i = 0; i < fi_args.num_devices; ++i) {
1273 devid = di_args[i].devid;
1274 ret = pthread_mutex_init(&sp[i].progress_mutex, NULL);
1276 error_on(!do_quiet, "pthread_mutex_init failed: %s",
1281 last_scrub = last_dev_scrub(past_scrubs, devid);
1282 sp[i].scrub_args.devid = devid;
1284 if (resume && last_scrub && (last_scrub->stats.canceled ||
1285 !last_scrub->stats.finished)) {
1287 sp[i].scrub_args.start = last_scrub->p.last_physical;
1288 sp[i].resumed = last_scrub;
1289 } else if (resume) {
1292 sp[i].resumed = last_scrub;
1296 sp[i].scrub_args.start = 0ll;
1297 sp[i].resumed = NULL;
1300 sp[i].scrub_args.end = (u64)-1ll;
1301 sp[i].scrub_args.flags = readonly ? BTRFS_SCRUB_READONLY : 0;
1302 sp[i].ioprio_class = ioprio_class;
1303 sp[i].ioprio_classdata = ioprio_classdata;
1306 if (!n_start && !n_resume) {
1308 printf("scrub: nothing to resume for %s, fsid %s\n",
1310 nothing_to_resume = 1;
1314 ret = prg_fd = socket(AF_UNIX, SOCK_STREAM, 0);
1316 ret = scrub_datafile(SCRUB_PROGRESS_SOCKET_PATH, fsid, NULL,
1317 sock_path, sizeof(sock_path));
1318 /* ignore EOVERFLOW, try using a shorter path for the socket */
1319 addr.sun_path[sizeof(addr.sun_path) - 1] = '\0';
1320 strncpy(addr.sun_path, sock_path, sizeof(addr.sun_path) - 1);
1321 ret = bind(prg_fd, (struct sockaddr *)&addr, sizeof(addr));
1322 if (ret != -1 || errno != EADDRINUSE)
1325 * bind failed with EADDRINUSE, so let's see if anyone answers
1326 * when we make a call to the socket ...
1328 ret = connect(prg_fd, (struct sockaddr *)&addr, sizeof(addr));
1329 if (!ret || errno != ECONNREFUSED) {
1330 /* ... yes, so scrub must be running. error out */
1331 error("scrub already running");
1337 * ... no, which means someone left behind an unused, stale
1338 * socket in the file system. Remove it and try again.
1340 ret = unlink(sock_path);
1343 ret = listen(prg_fd, 100);
1345 warning_on(!do_quiet,
1346 "failed to open the progress status socket at %s: %m. Progress cannot be queried",
1347 sock_path[0] ? sock_path :
1348 SCRUB_PROGRESS_SOCKET_PATH);
1358 /* write all-zero progress file for a start */
1359 ret = scrub_write_progress(&spc_write_mutex, fsid, sp,
1360 fi_args.num_devices);
1362 warning_on(!do_quiet,
1363 "failed to write the progress status file: %s. Status recording disabled",
1369 if (do_background) {
1372 error_on(!do_quiet, "cannot scrub, fork failed: %m");
1379 scrub_handle_sigint_parent();
1381 printf("scrub %s on %s, fsid %s (pid=%d)\n",
1382 n_start ? "started" : "resumed",
1390 error_on(!do_quiet, "wait failed (ret=%d): %m",
1395 if (!WIFEXITED(stat) || WEXITSTATUS(stat)) {
1396 error_on(!do_quiet, "scrub process failed");
1397 err = WIFEXITED(stat) ? WEXITSTATUS(stat) : -1;
1405 scrub_handle_sigint_child(fdmnt);
1407 for (i = 0; i < fi_args.num_devices; ++i) {
1409 sp[i].scrub_args.progress = sp[i].resumed->p;
1410 sp[i].stats = sp[i].resumed->stats;
1412 sp[i].stats.finished = 1;
1415 devid = di_args[i].devid;
1416 gettimeofday(&tv, NULL);
1417 sp[i].stats.t_start = tv.tv_sec;
1418 ret = pthread_create(&t_devs[i], NULL,
1419 scrub_one_dev, &sp[i]);
1422 error("creating scrub_one_dev[%llu] thread failed: %s",
1423 devid, strerror(ret));
1430 spc.prg_fd = prg_fd;
1431 spc.do_record = do_record;
1432 spc.write_mutex = &spc_write_mutex;
1433 spc.shared_progress = sp;
1435 ret = pthread_create(&t_prog, NULL, scrub_progress_cycle, &spc);
1438 error("creating progress thread failed: %s",
1445 for (i = 0; i < fi_args.num_devices; ++i) {
1448 devid = di_args[i].devid;
1449 ret = pthread_join(t_devs[i], NULL);
1452 error("pthread_join failed for scrub_one_dev[%llu]: %s",
1453 devid, strerror(ret));
1458 switch (sp[i].ioctl_errno) {
1461 warning("device %lld not present",
1469 error("scrubbing %s failed for device id %lld: ret=%d, errno=%d (%s)",
1471 sp[i].ret, sp[i].ioctl_errno,
1472 strerror(sp[i].ioctl_errno));
1477 if (sp[i].scrub_args.progress.uncorrectable_errors > 0)
1479 if (sp[i].scrub_args.progress.corrected_errors > 0
1480 || sp[i].scrub_args.progress.unverified_errors > 0)
1485 const char *append = "done";
1486 if (!do_stats_per_dev)
1487 init_fs_stat(&fs_stat);
1488 for (i = 0; i < fi_args.num_devices; ++i) {
1489 if (do_stats_per_dev) {
1490 print_scrub_dev(&di_args[i],
1491 &sp[i].scrub_args.progress,
1493 sp[i].ret ? "canceled" : "done",
1497 append = "canceled";
1498 add_to_fs_stat(&sp[i].scrub_args.progress,
1499 &sp[i].stats, &fs_stat);
1502 if (!do_stats_per_dev) {
1503 printf("scrub %s for %s\n", append, fsid);
1504 print_fs_stat(&fs_stat, print_raw);
1508 ret = pthread_cancel(t_prog);
1510 ret = pthread_join(t_prog, &terr);
1512 /* check for errors from the handling of the progress thread */
1513 if (do_print && ret) {
1514 error("progress thread handling failed: %s",
1518 /* check for errors returned from the progress thread itself */
1519 if (do_print && terr && terr != PTHREAD_CANCELED)
1520 error("recording progress failed: %s",
1521 strerror(-PTR_ERR(terr)));
1524 ret = scrub_write_progress(&spc_write_mutex, fsid, sp,
1525 fi_args.num_devices);
1526 if (ret && do_print)
1527 error("failed to record the result: %s",
1531 scrub_handle_sigint_child(-1);
1534 free_history(past_scrubs);
1544 close_file_or_dir(fdmnt, dirstream);
1548 if (nothing_to_resume)
1550 if (e_uncorrectable) {
1551 error_on(!do_quiet, "there are uncorrectable errors");
1555 warning_on(!do_quiet,
1556 "errors detected during scrubbing, corrected");
1561 static const char * const cmd_scrub_start_usage[] = {
1562 "btrfs scrub start [-BdqrRf] [-c ioprio_class -n ioprio_classdata] <path>|<device>",
1563 "Start a new scrub. If a scrub is already running, the new one fails.",
1565 "-B do not background",
1566 "-d stats per device (-B only)",
1568 "-r read only mode",
1569 "-R raw print mode, print full data instead of summary",
1570 "-c set ioprio class (see ionice(1) manpage)",
1571 "-n set ioprio classdata (see ionice(1) manpage)",
1572 "-f force starting new scrub even if a scrub is already running",
1573 " this is useful when scrub stats record file is damaged",
1577 static int cmd_scrub_start(int argc, char **argv)
1579 return scrub_start(argc, argv, 0);
1582 static const char * const cmd_scrub_cancel_usage[] = {
1583 "btrfs scrub cancel <path>|<device>",
1584 "Cancel a running scrub",
1588 static int cmd_scrub_cancel(int argc, char **argv)
1593 DIR *dirstream = NULL;
1595 clean_args_no_options(argc, argv, cmd_scrub_cancel_usage);
1597 if (check_argc_exact(argc - optind, 1))
1598 usage(cmd_scrub_cancel_usage);
1600 path = argv[optind];
1602 fdmnt = open_path_or_dev_mnt(path, &dirstream, 1);
1608 ret = ioctl(fdmnt, BTRFS_IOC_SCRUB_CANCEL, NULL);
1611 error("scrub cancel failed on %s: %s", path,
1612 errno == ENOTCONN ? "not running" : strerror(errno));
1613 if (errno == ENOTCONN)
1621 printf("scrub cancelled\n");
1624 close_file_or_dir(fdmnt, dirstream);
1628 static const char * const cmd_scrub_resume_usage[] = {
1629 "btrfs scrub resume [-BdqrR] [-c ioprio_class -n ioprio_classdata] <path>|<device>",
1630 "Resume previously canceled or interrupted scrub",
1632 "-B do not background",
1633 "-d stats per device (-B only)",
1635 "-r read only mode",
1636 "-R raw print mode, print full data instead of summary",
1637 "-c set ioprio class (see ionice(1) manpage)",
1638 "-n set ioprio classdata (see ionice(1) manpage)",
1642 static int cmd_scrub_resume(int argc, char **argv)
1644 return scrub_start(argc, argv, 1);
1647 static const char * const cmd_scrub_status_usage[] = {
1648 "btrfs scrub status [-dR] <path>|<device>",
1649 "Show status of running or finished scrub",
1651 "-d stats per device",
1652 "-R print raw stats",
1656 static int cmd_scrub_status(int argc, char **argv)
1659 struct btrfs_ioctl_fs_info_args fi_args;
1660 struct btrfs_ioctl_dev_info_args *di_args = NULL;
1661 struct scrub_file_record **past_scrubs = NULL;
1662 struct scrub_file_record *last_scrub;
1663 struct scrub_fs_stat fs_stat;
1664 struct sockaddr_un addr = {
1665 .sun_family = AF_UNIX,
1672 int do_stats_per_dev = 0;
1674 char fsid[BTRFS_UUID_UNPARSED_SIZE];
1677 DIR *dirstream = NULL;
1679 while ((c = getopt(argc, argv, "dR")) != -1) {
1682 do_stats_per_dev = 1;
1689 usage(cmd_scrub_status_usage);
1693 if (check_argc_exact(argc - optind, 1))
1694 usage(cmd_scrub_status_usage);
1696 path = argv[optind];
1698 fdmnt = open_path_or_dev_mnt(path, &dirstream, 1);
1702 ret = get_fs_info(path, &fi_args, &di_args);
1704 error("getting dev info for scrub failed: %s",
1709 if (!fi_args.num_devices) {
1710 error("no devices found");
1715 uuid_unparse(fi_args.fsid, fsid);
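/*
 * Two possible sources for the status: if a background scrub is running,
 * its master process serves live progress over the per-fsid unix socket;
 * otherwise fall back to the last state recorded in the status file under
 * SCRUB_DATA_FILE.
 */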
1717 fdres = socket(AF_UNIX, SOCK_STREAM, 0);
1719 error("failed to create socket to receive progress information: %m");
1723 scrub_datafile(SCRUB_PROGRESS_SOCKET_PATH, fsid,
1724 NULL, addr.sun_path, sizeof(addr.sun_path));
1725 /* ignore EOVERFLOW, just use shorter name and hope for the best */
1726 addr.sun_path[sizeof(addr.sun_path) - 1] = '\0';
1727 ret = connect(fdres, (struct sockaddr *)&addr, sizeof(addr));
1730 fdres = scrub_open_file_r(SCRUB_DATA_FILE, fsid);
1731 if (fdres < 0 && fdres != -ENOENT) {
1732 warning("failed to open status file: %s",
1740 past_scrubs = scrub_read_file(fdres, 1);
1741 if (IS_ERR(past_scrubs))
1742 warning("failed to read status: %s",
1743 strerror(-PTR_ERR(past_scrubs)));
1745 in_progress = is_scrub_running_in_kernel(fdmnt, di_args, fi_args.num_devices);
1747 printf("scrub status for %s\n", fsid);
1749 if (do_stats_per_dev) {
1750 for (i = 0; i < fi_args.num_devices; ++i) {
1751 last_scrub = last_dev_scrub(past_scrubs,
1754 print_scrub_dev(&di_args[i], NULL, print_raw,
1758 last_scrub->stats.in_progress = in_progress;
1759 print_scrub_dev(&di_args[i], &last_scrub->p, print_raw,
1760 last_scrub->stats.finished ?
1761 "history" : "status",
1762 &last_scrub->stats);
1765 init_fs_stat(&fs_stat);
1766 fs_stat.s.in_progress = in_progress;
1767 for (i = 0; i < fi_args.num_devices; ++i) {
1768 last_scrub = last_dev_scrub(past_scrubs,
1772 add_to_fs_stat(&last_scrub->p, &last_scrub->stats,
1775 print_fs_stat(&fs_stat, print_raw);
1779 free_history(past_scrubs);
1783 close_file_or_dir(fdmnt, dirstream);
1788 static const char scrub_cmd_group_info[] =
1789 "verify checksums of data and metadata";
1791 const struct cmd_group scrub_cmd_group = {
1792 scrub_cmd_group_usage, scrub_cmd_group_info, {
1793 { "start", cmd_scrub_start, cmd_scrub_start_usage, NULL, 0 },
1794 { "cancel", cmd_scrub_cancel, cmd_scrub_cancel_usage, NULL, 0 },
1795 { "resume", cmd_scrub_resume, cmd_scrub_resume_usage, NULL, 0 },
1796 { "status", cmd_scrub_status, cmd_scrub_status_usage, NULL, 0 },
1801 int cmd_scrub(int argc, char **argv)
1803 return handle_command_group(&scrub_cmd_group, argc, argv);