// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
        "writethrough",
        "writeback",
        "writearound",
        "none",
        NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
        "auto",
        "always",
        NULL
};

static const char * const cache_replacement_policies[] = {
        "lru",
        "fifo",
        "random",
        NULL
};

static const char * const error_actions[] = {
        "unregister",
        "panic",
        NULL
};

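/*
 * Illustrative usage (device name assumed, not part of this file): these
 * NULL-terminated lists back the multiple-choice sysfs attributes below.
 * Reading such an attribute prints every option with the active one in
 * brackets; writing one of the strings selects it:
 *
 *   $ cat /sys/block/bcache0/bcache/cache_mode
 *   [writethrough] writeback writearound none
 *   $ echo writeback > /sys/block/bcache0/bcache/cache_mode
 */
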
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
read_attribute(backing_dev_name);
read_attribute(backing_dev_uuid);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
read_attribute(cutoff_writeback);
read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(gc_after_writeback);
rw_attribute(size);

static ssize_t bch_snprint_string_list(char *buf,
                                       size_t size,
                                       const char * const list[],
                                       size_t selected)
{
        char *out = buf;
        size_t i;

        for (i = 0; list[i]; i++)
                out += snprintf(out, buf + size - out,
                                i == selected ? "[%s] " : "%s ", list[i]);

        /* replace the trailing space with a newline */
        out[-1] = '\n';
        return out - buf;
}

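/*
 * For instance (illustrative only), with bch_cache_modes and selected == 1
 * the buffer ends up holding:
 *
 *   "writethrough [writeback] writearound none\n"
 */
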
SHOW(__bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
        int wb = dc->writeback_running;

#define var(stat)               (dc->stat)

        if (attr == &sysfs_cache_mode)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               bch_cache_modes,
                                               BDEV_CACHE_MODE(&dc->sb));

        if (attr == &sysfs_stop_when_cache_set_failed)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               bch_stop_on_failure_modes,
                                               dc->stop_when_cache_set_failed);

        sysfs_printf(data_csum,         "%i", dc->disk.data_csum);
        var_printf(verify,              "%i");
        var_printf(bypass_torture_test, "%i");
        var_printf(writeback_metadata,  "%i");
        var_printf(writeback_running,   "%i");
        var_print(writeback_delay);
        var_print(writeback_percent);
        sysfs_hprint(writeback_rate,
                     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
        sysfs_hprint(io_errors,         atomic_read(&dc->io_errors));
        sysfs_printf(io_error_limit,    "%i", dc->error_limit);
        sysfs_printf(io_disable,        "%i", dc->io_disable);
        var_print(writeback_rate_update_seconds);
        var_print(writeback_rate_i_term_inverse);
        var_print(writeback_rate_p_term_inverse);
        var_print(writeback_rate_minimum);

        if (attr == &sysfs_writeback_rate_debug) {
                char rate[20];
                char dirty[20];
                char target[20];
                char proportional[20];
                char integral[20];
                char change[20];
                s64 next_io;

                /*
                 * Except for dirty and target, other values should
                 * be 0 if writeback is not running.
                 */
                bch_hprint(rate,
                           wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
                              : 0);
                bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
                bch_hprint(target, dc->writeback_rate_target << 9);
                bch_hprint(proportional,
                           wb ? dc->writeback_rate_proportional << 9 : 0);
                bch_hprint(integral,
                           wb ? dc->writeback_rate_integral_scaled << 9 : 0);
                bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
                next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
                                         NSEC_PER_MSEC) : 0;

                return sprintf(buf,
                               "rate:\t\t%s/sec\n"
                               "dirty:\t\t%s\n"
                               "target:\t\t%s\n"
                               "proportional:\t%s\n"
                               "integral:\t%s\n"
                               "change:\t\t%s/sec\n"
                               "next io:\t%llims\n",
                               rate, dirty, target, proportional,
                               integral, change, next_io);
        }

        sysfs_hprint(dirty_data,
                     bcache_dev_sectors_dirty(&dc->disk) << 9);

        sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9);
        var_printf(partial_stripes_expensive, "%u");

        var_hprint(sequential_cutoff);
        var_hprint(readahead);

        sysfs_print(running,            atomic_read(&dc->running));
        sysfs_print(state,              states[BDEV_STATE(&dc->sb)]);

        if (attr == &sysfs_label) {
                memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
                /* the label may not be NUL terminated; terminate at max size */
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

        if (attr == &sysfs_backing_dev_name) {
                snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
                strcat(buf, "\n");
                return strlen(buf);
        }

        if (attr == &sysfs_backing_dev_uuid) {
                /* convert binary uuid into 36-byte string plus '\0' */
                snprintf(buf, 36 + 1, "%pU", dc->sb.uuid);
                strcat(buf, "\n");
                return strlen(buf);
        }

#undef var
        return 0;
}
SHOW_LOCKED(bch_cached_dev)

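/*
 * Sketch of the writeback_rate_debug output produced above; the values are
 * invented for illustration, only the shape matches the format string:
 *
 *   rate:           488.2M/sec
 *   dirty:          14.5G
 *   target:         16.4G
 *   proportional:   -392.6k
 *   integral:       84.9k
 *   change:         -26.2k/sec
 *   next io:        17ms
 */
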
STORE(__cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        ssize_t v;
        struct cache_set *c;
        struct kobj_uevent_env *env;

#define d_strtoul(var)          sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)  sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)         sysfs_hatoi(var, dc->var)

        sysfs_strtoul(data_csum,        dc->disk.data_csum);
        d_strtoul(verify);
        sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
        sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
        sysfs_strtoul_bool(writeback_running, dc->writeback_running);
        sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);

        sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
                            0, bch_cutoff_writeback);

        if (attr == &sysfs_writeback_rate) {
                ssize_t ret;
                long int v = atomic_long_read(&dc->writeback_rate.rate);

                ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

                if (!ret) {
                        atomic_long_set(&dc->writeback_rate.rate, v);
                        ret = size;
                }

                return ret;
        }

        sysfs_strtoul_clamp(writeback_rate_update_seconds,
                            dc->writeback_rate_update_seconds,
                            1, WRITEBACK_RATE_UPDATE_SECS_MAX);
        sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
                            dc->writeback_rate_i_term_inverse,
                            1, UINT_MAX);
        sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
                            dc->writeback_rate_p_term_inverse,
                            1, UINT_MAX);
        d_strtoul_nonzero(writeback_rate_minimum);

        sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

        if (attr == &sysfs_io_disable) {
                int v = strtoul_or_return(buf);

                dc->io_disable = v ? 1 : 0;
        }

        sysfs_strtoul_clamp(sequential_cutoff,
                            dc->sequential_cutoff,
                            0, UINT_MAX);
        d_strtoi_h(readahead);

        if (attr == &sysfs_clear_stats)
                bch_cache_accounting_clear(&dc->accounting);

        if (attr == &sysfs_running &&
            strtoul_or_return(buf))
                bch_cached_dev_run(dc);

        if (attr == &sysfs_cache_mode) {
                v = __sysfs_match_string(bch_cache_modes, -1, buf);
                if (v < 0)
                        return v;

                if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
                        SET_BDEV_CACHE_MODE(&dc->sb, v);
                        bch_write_bdev_super(dc, NULL);
                }
        }

        if (attr == &sysfs_stop_when_cache_set_failed) {
                v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
                if (v < 0)
                        return v;

                dc->stop_when_cache_set_failed = v;
        }

        if (attr == &sysfs_label) {
                if (size > SB_LABEL_SIZE)
                        return -EINVAL;
                memcpy(dc->sb.label, buf, size);
                if (size < SB_LABEL_SIZE)
                        dc->sb.label[size] = '\0';
                if (size && dc->sb.label[size - 1] == '\n')
                        dc->sb.label[size - 1] = '\0';
                bch_write_bdev_super(dc, NULL);
                if (dc->disk.c) {
                        memcpy(dc->disk.c->uuids[dc->disk.id].label,
                               buf, SB_LABEL_SIZE);
                        bch_uuid_write(dc->disk.c);
                }

                env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
                if (!env)
                        return -ENOMEM;
                add_uevent_var(env, "DRIVER=bcache");
                add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
                add_uevent_var(env, "CACHED_LABEL=%s", buf);
                kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
                                   KOBJ_CHANGE,
                                   env->envp);
                kfree(env);
        }

        if (attr == &sysfs_attach) {
                uint8_t set_uuid[16];

                if (bch_parse_uuid(buf, set_uuid) < 16)
                        return -EINVAL;

                v = -ENOENT;
                list_for_each_entry(c, &bch_cache_sets, list) {
                        v = bch_cached_dev_attach(dc, c, set_uuid);
                        if (!v)
                                return size;
                }
                if (v == -ENOENT)
                        pr_err("Can't attach %s: cache set not found", buf);
                return v;
        }

        if (attr == &sysfs_detach && dc->disk.c)
                bch_cached_dev_detach(dc);

        if (attr == &sysfs_stop)
                bcache_device_stop(&dc->disk);

        return size;
}

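/*
 * Example attach flow from user space (device name and cache set UUID
 * assumed): writing a set's UUID to the attach file walks bch_cache_sets
 * above and binds the backing device to the matching cache set:
 *
 *   echo <cset-uuid> > /sys/block/bcache0/bcache/attach
 */
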
STORE(bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);

        mutex_lock(&bch_register_lock);
        size = __cached_dev_store(kobj, attr, buf, size);

        if (attr == &sysfs_writeback_running) {
                /* dc->writeback_running changed in __cached_dev_store() */
                if (IS_ERR_OR_NULL(dc->writeback_thread)) {
                        /*
                         * reject setting it to 1 via sysfs if the writeback
                         * kthread is not created yet.
                         */
                        if (dc->writeback_running) {
                                dc->writeback_running = false;
                                pr_err("%s: failed to run non-existent writeback thread",
                                       dc->disk.disk->disk_name);
                        }
                } else
                        /*
                         * the writeback kthread will check whether
                         * dc->writeback_running is true or false.
                         */
                        bch_writeback_queue(dc);
        }

        if (attr == &sysfs_writeback_percent)
                if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
                        schedule_delayed_work(&dc->writeback_rate_update,
                                      dc->writeback_rate_update_seconds * HZ);

        mutex_unlock(&bch_register_lock);
        return size;
}

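/*
 * E.g. (path assumed) writing the dirty-data target as a percentage of the
 * cached device also kicks the rate-update worker scheduled above:
 *
 *   echo 10 > /sys/block/bcache0/bcache/writeback_percent
 */
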
static struct attribute *bch_cached_dev_files[] = {
        &sysfs_attach,
        &sysfs_detach,
        &sysfs_stop,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_cache_mode,
        &sysfs_stop_when_cache_set_failed,
        &sysfs_writeback_metadata,
        &sysfs_writeback_running,
        &sysfs_writeback_delay,
        &sysfs_writeback_percent,
        &sysfs_writeback_rate,
        &sysfs_writeback_rate_update_seconds,
        &sysfs_writeback_rate_i_term_inverse,
        &sysfs_writeback_rate_p_term_inverse,
        &sysfs_writeback_rate_minimum,
        &sysfs_writeback_rate_debug,
        &sysfs_io_errors,
        &sysfs_io_error_limit,
        &sysfs_io_disable,
        &sysfs_dirty_data,
        &sysfs_stripe_size,
        &sysfs_partial_stripes_expensive,
        &sysfs_sequential_cutoff,
        &sysfs_clear_stats,
        &sysfs_running,
        &sysfs_state,
        &sysfs_label,
        &sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_bypass_torture_test,
#endif
        &sysfs_backing_dev_name,
        &sysfs_backing_dev_uuid,
        NULL
};
KTYPE(bch_cached_dev);

SHOW(__bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        sysfs_printf(data_csum, "%i", d->data_csum);
        sysfs_hprint(size,      u->sectors << 9);

        if (attr == &sysfs_label) {
                memcpy(buf, u->label, SB_LABEL_SIZE);
                /* the label may not be NUL terminated; terminate at max size */
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

        return 0;
}
SHOW_LOCKED(bch_flash_dev)

STORE(__bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        sysfs_strtoul(data_csum, d->data_csum);

        if (attr == &sysfs_size) {
                uint64_t v;

                strtoi_h_or_return(buf, v);

                u->sectors = v >> 9;
                bch_uuid_write(d->c);
                set_capacity(d->disk, u->sectors);
        }

        if (attr == &sysfs_label) {
                memcpy(u->label, buf, SB_LABEL_SIZE);
                bch_uuid_write(d->c);
        }

        if (attr == &sysfs_unregister) {
                set_bit(BCACHE_DEV_DETACHING, &d->flags);
                bcache_device_stop(d);
        }

        return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
        &sysfs_unregister,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_label,
        &sysfs_size,
        NULL
};
KTYPE(bch_flash_dev);

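/*
 * Example (size takes the usual strtoi_h suffixes; UUID path assumed):
 * creating a thinly provisioned flash-only volume on a cache set:
 *
 *   echo 100G > /sys/fs/bcache/<cset-uuid>/flash_vol_create
 */
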
struct bset_stats_op {
        struct btree_op op;
        size_t nodes;
        struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
        struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

        op->nodes++;
        bch_btree_keys_stats(&b->keys, &op->stats);

        return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
        struct bset_stats_op op;
        int ret;

        memset(&op, 0, sizeof(op));
        bch_btree_op_init(&op.op, -1);

        ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
        if (ret < 0)
                return ret;

        return snprintf(buf, PAGE_SIZE,
                        "btree nodes:         %zu\n"
                        "written sets:        %zu\n"
                        "unwritten sets:      %zu\n"
                        "written key bytes:   %zu\n"
                        "unwritten key bytes: %zu\n"
                        "floats:              %zu\n"
                        "failed:              %zu\n",
                        op.nodes,
                        op.stats.sets_written, op.stats.sets_unwritten,
                        op.stats.bytes_written, op.stats.bytes_unwritten,
                        op.stats.floats, op.stats.failed);
}

static unsigned int bch_root_usage(struct cache_set *c)
{
        unsigned int bytes = 0;
        struct bkey *k;
        struct btree *b;
        struct btree_iter iter;

        goto lock_root;

        /*
         * The root may be replaced while we wait for the read lock, so
         * retry until the node we locked is still the root.
         */
        do {
                rw_unlock(false, b);
lock_root:
                b = c->root;
                rw_lock(false, b, b->level);
        } while (b != c->root);

        for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
                bytes += bkey_bytes(k);

        rw_unlock(false, b);

        return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
        size_t ret = 0;
        struct btree *b;

        mutex_lock(&c->bucket_lock);
        list_for_each_entry(b, &c->btree_cache, list)
                ret += 1 << (b->keys.page_order + PAGE_SHIFT);

        mutex_unlock(&c->bucket_lock);
        return ret;
}

static unsigned int bch_cache_max_chain(struct cache_set *c)
{
        unsigned int ret = 0;
        struct hlist_head *h;

        mutex_lock(&c->bucket_lock);

        for (h = c->bucket_hash;
             h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
             h++) {
                unsigned int i = 0;
                struct hlist_node *p;

                hlist_for_each(p, h)
                        i++;

                ret = max(ret, i);
        }

        mutex_unlock(&c->bucket_lock);
        return ret;
}

static unsigned int bch_btree_used(struct cache_set *c)
{
        return div64_u64(c->gc_stats.key_bytes * 100,
                         (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
        return c->gc_stats.nkeys
                ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
                : 0;
}

SHOW(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);

        sysfs_print(synchronous,                CACHE_SYNC(&c->sb));
        sysfs_print(journal_delay_ms,           c->journal_delay_ms);
        sysfs_hprint(bucket_size,               bucket_bytes(c));
        sysfs_hprint(block_size,                block_bytes(c));
        sysfs_print(tree_depth,                 c->root->level);
        sysfs_print(root_usage_percent,         bch_root_usage(c));

        sysfs_hprint(btree_cache_size,          bch_cache_size(c));
        sysfs_print(btree_cache_max_chain,      bch_cache_max_chain(c));
        sysfs_print(cache_available_percent,    100 - c->gc_stats.in_use);

        sysfs_print_time_stats(&c->btree_gc_time,       btree_gc, sec, ms);
        sysfs_print_time_stats(&c->btree_split_time,    btree_split, sec, us);
        sysfs_print_time_stats(&c->sort.time,           btree_sort, ms, us);
        sysfs_print_time_stats(&c->btree_read_time,     btree_read, ms, us);

        sysfs_print(btree_used_percent, bch_btree_used(c));
        sysfs_print(btree_nodes,        c->gc_stats.nodes);
        sysfs_hprint(average_key_size,  bch_average_key_size(c));

        sysfs_print(cache_read_races,
                    atomic_long_read(&c->cache_read_races));

        sysfs_print(reclaim,
                    atomic_long_read(&c->reclaim));

        sysfs_print(flush_write,
                    atomic_long_read(&c->flush_write));

        sysfs_print(retry_flush_write,
                    atomic_long_read(&c->retry_flush_write));

        sysfs_print(writeback_keys_done,
                    atomic_long_read(&c->writeback_keys_done));
        sysfs_print(writeback_keys_failed,
                    atomic_long_read(&c->writeback_keys_failed));

        if (attr == &sysfs_errors)
                return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
                                               c->on_error);

        /* See count_io_errors() for why 88 */
        sysfs_print(io_error_halflife,  c->error_decay * 88);
        sysfs_print(io_error_limit,     c->error_limit);

        sysfs_hprint(congested,
                     ((uint64_t) bch_get_congested(c)) << 9);
        sysfs_print(congested_read_threshold_us,
                    c->congested_read_threshold_us);
        sysfs_print(congested_write_threshold_us,
                    c->congested_write_threshold_us);

        sysfs_print(cutoff_writeback, bch_cutoff_writeback);
        sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);

        sysfs_print(active_journal_entries,     fifo_used(&c->journal.pin));
        sysfs_printf(verify,                    "%i", c->verify);
        sysfs_printf(key_merging_disabled,      "%i", c->key_merging_disabled);
        sysfs_printf(expensive_debug_checks,
                     "%i", c->expensive_debug_checks);
        sysfs_printf(gc_always_rewrite,         "%i", c->gc_always_rewrite);
        sysfs_printf(btree_shrinker_disabled,   "%i", c->shrinker_disabled);
        sysfs_printf(copy_gc_enabled,           "%i", c->copy_gc_enabled);
        sysfs_printf(gc_after_writeback,        "%i", c->gc_after_writeback);
        sysfs_printf(io_disable,                "%i",
                     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

        if (attr == &sysfs_bset_tree_stats)
                return bch_bset_print_stats(c, buf);

        return 0;
}
SHOW_LOCKED(bch_cache_set)

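/*
 * Typical spot checks against a registered cache set (paths assumed):
 *
 *   cat /sys/fs/bcache/<cset-uuid>/cache_available_percent
 *   cat /sys/fs/bcache/<cset-uuid>/internal/btree_cache_size
 */
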
STORE(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);
        ssize_t v;

        if (attr == &sysfs_unregister)
                bch_cache_set_unregister(c);

        if (attr == &sysfs_stop)
                bch_cache_set_stop(c);

        if (attr == &sysfs_synchronous) {
                bool sync = strtoul_or_return(buf);

                if (sync != CACHE_SYNC(&c->sb)) {
                        SET_CACHE_SYNC(&c->sb, sync);
                        bcache_write_super(c);
                }
        }

        if (attr == &sysfs_flash_vol_create) {
                int r;
                uint64_t v;

                strtoi_h_or_return(buf, v);

                r = bch_flash_dev_create(c, v);
                if (r)
                        return r;
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&c->writeback_keys_done, 0);
                atomic_long_set(&c->writeback_keys_failed, 0);

                memset(&c->gc_stats, 0, sizeof(struct gc_stat));
                bch_cache_accounting_clear(&c->accounting);
        }

        if (attr == &sysfs_trigger_gc)
                force_wake_up_gc(c);

        if (attr == &sysfs_prune_cache) {
                struct shrink_control sc;

                sc.gfp_mask = GFP_KERNEL;
                sc.nr_to_scan = strtoul_or_return(buf);
                c->shrink.scan_objects(&c->shrink, &sc);
        }

        sysfs_strtoul_clamp(congested_read_threshold_us,
                            c->congested_read_threshold_us,
                            0, UINT_MAX);
        sysfs_strtoul_clamp(congested_write_threshold_us,
                            c->congested_write_threshold_us,
                            0, UINT_MAX);

        if (attr == &sysfs_errors) {
                v = __sysfs_match_string(error_actions, -1, buf);
                if (v < 0)
                        return v;

                c->on_error = v;
        }

        if (attr == &sysfs_io_error_limit)
                c->error_limit = strtoul_or_return(buf);

        /* See count_io_errors() for why 88 */
        if (attr == &sysfs_io_error_halflife)
                c->error_decay = strtoul_or_return(buf) / 88;

        if (attr == &sysfs_io_disable) {
                v = strtoul_or_return(buf);

                if (v) {
                        if (test_and_set_bit(CACHE_SET_IO_DISABLE,
                                             &c->flags))
                                pr_warn("CACHE_SET_IO_DISABLE already set");
                } else {
                        if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
                                                &c->flags))
                                pr_warn("CACHE_SET_IO_DISABLE already cleared");
                }
        }

        sysfs_strtoul(journal_delay_ms,         c->journal_delay_ms);
        sysfs_strtoul_bool(verify,              c->verify);
        sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
        sysfs_strtoul(expensive_debug_checks,   c->expensive_debug_checks);
        sysfs_strtoul_bool(gc_always_rewrite,   c->gc_always_rewrite);
        sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
        sysfs_strtoul_bool(copy_gc_enabled,     c->copy_gc_enabled);
        /*
         * Writing gc_after_writeback here may overwrite an already set
         * BCH_DO_AUTO_GC; that is harmless, because the flag will simply
         * be set again on the next opportunity.
         */
        sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

        return size;
}
STORE_LOCKED(bch_cache_set)

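/*
 * Examples (paths assumed): force a garbage-collection pass, or ask the
 * btree-cache shrinker handled above to drop a number of cached nodes:
 *
 *   echo 1 > /sys/fs/bcache/<cset-uuid>/internal/trigger_gc
 *   echo 128 > /sys/fs/bcache/<cset-uuid>/internal/prune_cache
 */
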
SHOW(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);

        return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);

        return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
        &sysfs_unregister,
        &sysfs_stop,
        &sysfs_synchronous,
        &sysfs_journal_delay_ms,
        &sysfs_flash_vol_create,

        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_tree_depth,
        &sysfs_root_usage_percent,
        &sysfs_btree_cache_size,
        &sysfs_cache_available_percent,

        &sysfs_average_key_size,

        &sysfs_errors,
        &sysfs_io_error_limit,
        &sysfs_io_error_halflife,
        &sysfs_congested,
        &sysfs_congested_read_threshold_us,
        &sysfs_congested_write_threshold_us,
        &sysfs_clear_stats,
        NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
        &sysfs_active_journal_entries,

        sysfs_time_stats_attribute_list(btree_gc, sec, ms)
        sysfs_time_stats_attribute_list(btree_split, sec, us)
        sysfs_time_stats_attribute_list(btree_sort, ms, us)
        sysfs_time_stats_attribute_list(btree_read, ms, us)

        &sysfs_btree_nodes,
        &sysfs_btree_used_percent,
        &sysfs_btree_cache_max_chain,

        &sysfs_bset_tree_stats,
        &sysfs_cache_read_races,
        &sysfs_reclaim,
        &sysfs_flush_write,
        &sysfs_retry_flush_write,
        &sysfs_writeback_keys_done,
        &sysfs_writeback_keys_failed,

        &sysfs_trigger_gc,
        &sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_key_merging_disabled,
        &sysfs_expensive_debug_checks,
#endif
        &sysfs_gc_always_rewrite,
        &sysfs_btree_shrinker_disabled,
        &sysfs_copy_gc_enabled,
        &sysfs_gc_after_writeback,
        &sysfs_io_disable,
        &sysfs_cutoff_writeback,
        &sysfs_cutoff_writeback_sync,
        NULL
};
KTYPE(bch_cache_set_internal);

/* sort() comparator: orders bucket priorities highest first */
static int __bch_cache_cmp(const void *l, const void *r)
{
        return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);

        sysfs_hprint(bucket_size,       bucket_bytes(ca));
        sysfs_hprint(block_size,        block_bytes(ca));
        sysfs_print(nbuckets,           ca->sb.nbuckets);
        sysfs_print(discard,            ca->discard);
        sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
        sysfs_hprint(btree_written,
                     atomic_long_read(&ca->btree_sectors_written) << 9);
        sysfs_hprint(metadata_written,
                     (atomic_long_read(&ca->meta_sectors_written) +
                      atomic_long_read(&ca->btree_sectors_written)) << 9);

        sysfs_print(io_errors,
                    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

        if (attr == &sysfs_cache_replacement_policy)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               cache_replacement_policies,
                                               CACHE_REPLACEMENT(&ca->sb));

        if (attr == &sysfs_priority_stats) {
                struct bucket *b;
                size_t n = ca->sb.nbuckets, i;
                size_t unused = 0, available = 0, dirty = 0, meta = 0;
                uint64_t sum = 0;
                /* Compute 31 quantiles */
                uint16_t q[31], *p, *cached;
                ssize_t ret;

                cached = p = vmalloc(array_size(sizeof(uint16_t),
                                                ca->sb.nbuckets));
                if (!p)
                        return -ENOMEM;

                mutex_lock(&ca->set->bucket_lock);
                for_each_bucket(b, ca) {
                        if (!GC_SECTORS_USED(b))
                                unused++;
                        if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
                                available++;
                        if (GC_MARK(b) == GC_MARK_DIRTY)
                                dirty++;
                        if (GC_MARK(b) == GC_MARK_METADATA)
                                meta++;
                }

                for (i = ca->sb.first_bucket; i < n; i++)
                        p[i] = ca->buckets[i].prio;
                mutex_unlock(&ca->set->bucket_lock);

                sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

                /* priorities sort descending, so zero prios sit at the end */
                while (n &&
                       !cached[n - 1])
                        --n;

                unused = ca->sb.nbuckets - n;

                while (cached < p + n &&
                       *cached == BTREE_PRIO)
                        cached++, n--;

                for (i = 0; i < n; i++)
                        sum += INITIAL_PRIO - cached[i];

                if (n)
                        do_div(sum, n);

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        q[i] = INITIAL_PRIO - cached[n * (i + 1) /
                                (ARRAY_SIZE(q) + 1)];

                vfree(p);

                ret = scnprintf(buf, PAGE_SIZE,
                                "Unused:        %zu%%\n"
                                "Clean:         %zu%%\n"
                                "Dirty:         %zu%%\n"
                                "Metadata:      %zu%%\n"
                                "Average:       %llu\n"
                                "Sectors per Q: %zu\n"
                                "Quantiles:     [",
                                unused * 100 / (size_t) ca->sb.nbuckets,
                                available * 100 / (size_t) ca->sb.nbuckets,
                                dirty * 100 / (size_t) ca->sb.nbuckets,
                                meta * 100 / (size_t) ca->sb.nbuckets, sum,
                                n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                         "%u ", q[i]);
                ret--;
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

                return ret;
        }

        return 0;
}
SHOW_LOCKED(bch_cache)

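/*
 * Sketch of the priority_stats output assembled above; the numbers are
 * invented for illustration and the quantile list (31 values) is shortened:
 *
 *   Unused:        12%
 *   Clean:         71%
 *   Dirty:         16%
 *   Metadata:      1%
 *   Average:       924
 *   Sectors per Q: 64270
 *   Quantiles:     [0 1 3 ... 6442]
 */
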
STORE(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);
        ssize_t v;

        if (attr == &sysfs_discard) {
                bool v = strtoul_or_return(buf);

                if (blk_queue_discard(bdev_get_queue(ca->bdev)))
                        ca->discard = v;

                if (v != CACHE_DISCARD(&ca->sb)) {
                        SET_CACHE_DISCARD(&ca->sb, v);
                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_cache_replacement_policy) {
                v = __sysfs_match_string(cache_replacement_policies, -1, buf);
                if (v < 0)
                        return v;

                if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
                        mutex_lock(&ca->set->bucket_lock);
                        SET_CACHE_REPLACEMENT(&ca->sb, v);
                        mutex_unlock(&ca->set->bucket_lock);

                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&ca->sectors_written, 0);
                atomic_long_set(&ca->btree_sectors_written, 0);
                atomic_long_set(&ca->meta_sectors_written, 0);
                atomic_set(&ca->io_count, 0);
                atomic_set(&ca->io_errors, 0);
        }

        return size;
}
STORE_LOCKED(bch_cache)

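/*
 * E.g. (path assumed) enabling discards on a cache device; the handler
 * above also persists the setting in the superblock:
 *
 *   echo 1 > /sys/fs/bcache/<cset-uuid>/cache0/discard
 */
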
static struct attribute *bch_cache_files[] = {
        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_nbuckets,
        &sysfs_priority_stats,
        &sysfs_discard,
        &sysfs_written,
        &sysfs_btree_written,
        &sysfs_metadata_written,
        &sysfs_io_errors,
        &sysfs_clear_stats,
        &sysfs_cache_replacement_policy,
        NULL
};
KTYPE(bch_cache);