// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};

write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);

sysfs_time_stats_attribute(btree_gc, sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort, ms, us);
sysfs_time_stats_attribute(btree_read, ms, us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);

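/*
 * Print @list into @buf, space separated, with the @selected entry in
 * brackets (e.g. "writethrough [writeback] writearound none") and the
 * trailing space replaced by a newline.  Returns the number of bytes
 * written.
 */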
static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		/* scnprintf(): on truncation, out can't advance past buf + size */
		out += scnprintf(out, buf + size - out,
				 i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}

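/*
 * SHOW()/STORE() (defined in sysfs.h) expand to the sysfs show/store
 * handlers; each sysfs_print()/sysfs_printf()/var_*() line below compares
 * @attr against the corresponding sysfs_<name> attribute and returns
 * early on a match.
 */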
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum, "%i", dc->disk.data_csum);
	var_printf(verify, "%i");
	var_printf(bypass_torture_test, "%i");
	var_printf(writeback_metadata, "%i");
	var_printf(writeback_running, "%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit, "%i", dc->error_limit);
	sysfs_printf(io_disable, "%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);

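	/*
	 * writeback_rate_debug is a multi-line summary of the writeback
	 * rate controller (proportional and integral terms); sector counts
	 * are converted to bytes (<< 9) and pretty-printed by bch_hprint().
	 */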
	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive, "%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running, atomic_read(&dc->running));
	sysfs_print(state, states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';	/* terminate the copied label before strcat() */
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

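/*
 * Store side of the cached device attributes: each sysfs_strtoul()-style
 * helper parses @buf into the matching field and returns on a match, so
 * at most one attribute is updated per write.
 */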
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum, dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
			    dc->writeback_rate_i_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_minimum,
			    dc->writeback_rate_minimum,
			    1, UINT_MAX);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE,
				   env->envp);
		kfree(env);
	}

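	/*
	 * Attach takes the UUID of a cache set: try each registered cache
	 * set and stop at the first successful attach; -ENOENT is returned
	 * when no set with that UUID exists.
	 */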
	if (attr == &sysfs_attach) {
		uint8_t set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	/*
	 * Only set BCACHE_DEV_WB_RUNNING when the cached device is
	 * attached to a cache set; otherwise it doesn't make sense.
	 */
	if (attr == &sysfs_writeback_percent)
		if ((dc->disk.c != NULL) &&
		    (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
			schedule_delayed_work(&dc->writeback_rate_update,
					      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_io_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);

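/*
 * Flash-only volumes are provisioned directly from the cache set; their
 * size and label live in the cache set's uuid entry rather than in a
 * backing device superblock.
 */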
SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum, "%i", d->data_csum);
	sysfs_hprint(size, u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';	/* terminate the copied label before strcat() */
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum, d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);

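/*
 * Accumulator for the btree walk in bch_bset_print_stats():
 * bch_btree_map_nodes() invokes bch_btree_bset_stats() once per btree
 * node, which counts the node and merges its bset stats into the totals.
 */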
struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:\t\t%zu\n"
			"written sets:\t\t%zu\n"
			"unwritten sets:\t\t%zu\n"
			"written key bytes:\t%zu\n"
			"unwritten key bytes:\t%zu\n"
			"floats:\t\t\t%zu\n"
			"failed:\t\t\t%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

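/*
 * Percentage of the root node occupied by keys.  The root may be
 * replaced while we wait for the read lock, so retry until the node we
 * locked is still c->root.
 */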
static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

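/* Length of the longest chain in the btree node hash table */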
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

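/* Percentage of btree bytes holding live keys, from the last GC's stats */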
static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous, CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms, c->journal_delay_ms);
	sysfs_hprint(bucket_size, bucket_bytes(c));
	sysfs_hprint(block_size, block_bytes(c));
	sysfs_print(tree_depth, c->root->level);
	sysfs_print(root_usage_percent, bch_root_usage(c));

	sysfs_hprint(btree_cache_size, bch_cache_size(c));
	sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c));
	sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);

	sysfs_print(btree_used_percent, bch_btree_used(c));
	sysfs_print(btree_nodes, c->gc_stats.nodes);
	sysfs_hprint(average_key_size, bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(retry_flush_write,
		    atomic_long_read(&c->retry_flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors() for why 88 */
	sysfs_print(io_error_halflife, c->error_decay * 88);
	sysfs_print(io_error_limit, c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
	sysfs_printf(verify, "%i", c->verify);
	sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
	sysfs_printf(io_disable, "%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done, 0);
		atomic_long_set(&c->writeback_keys_failed, 0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc) {
		/*
		 * The garbage collection thread only runs when
		 * sectors_to_gc < 0; writing to the trigger_gc sysfs entry
		 * is a request to force garbage collection.  Setting
		 * c->sectors_to_gc to -1 here gives gc_should_run() a
		 * chance to let the gc thread run.  Only "a chance",
		 * because c->sectors_to_gc may be set to another positive
		 * value before gc_should_run() is reached, so writing
		 * trigger_gc does not guarantee that the gc thread
		 * actually runs.
		 */
		atomic_set(&c->sectors_to_gc, -1);
		wake_up_gc(c);
	}

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife) {
		unsigned long v = 0;
		ssize_t ret;

		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
		if (!ret) {
			c->error_decay = v / 88;
			return size;
		}
		return ret;
	}

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}

	sysfs_strtoul(journal_delay_ms, c->journal_delay_ms);
	sysfs_strtoul(verify, c->verify);
	sysfs_strtoul(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks, c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)

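/*
 * The "internal" kobject is a subdirectory of the cache set's kobject;
 * its show/store handlers simply forward to the cache set's, so both
 * directories share one implementation.
 */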
SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_flush_write,
	&sysfs_retry_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_io_disable,
	NULL
};
KTYPE(bch_cache_set_internal);

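/* Comparator for sort(): descending, so highest priorities sort first */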
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size, bucket_bytes(ca));
	sysfs_hprint(block_size, block_bytes(ca));
	sysfs_print(nbuckets, ca->sb.nbuckets);
	sysfs_print(discard, ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

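	/*
	 * priority_stats classifies every bucket under bucket_lock, then
	 * sorts the cached buckets' priorities in descending order and
	 * reports the average plus 31 quantiles of INITIAL_PRIO - prio.
	 */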
	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		/*
		 * Zeroed allocation: entries below sb.first_bucket are
		 * never filled in by the loop below, so they must read as
		 * zero to be trimmed off as unused after the sort.
		 */
		cached = p = vzalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:\t\t%zu%%\n"
				"Clean:\t\t%zu%%\n"
				"Dirty:\t\t%zu%%\n"
				"Metadata:\t%zu%%\n"
				"Average:\t%llu\n"
				"Sectors per Q:\t%zu\n"
				"Quantiles:\t[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);