/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"

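/*
 * Print the names of the bits set in @flags, separated by '|'. Bits without
 * an entry in @flag_name are printed as their numeric bit position instead.
 */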
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
                          const char *const *flag_name, int flag_name_count)
{
        bool sep = false;
        int i;

        for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
                if (!(flags & BIT(i)))
                        continue;
                if (sep)
                        seq_puts(m, "|");
                sep = true;
                if (i < flag_name_count && flag_name[i])
                        seq_puts(m, flag_name[i]);
                else
                        seq_printf(m, "%d", i);
        }
        return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(QUEUED),
        QUEUE_FLAG_NAME(STOPPED),
        QUEUE_FLAG_NAME(DYING),
        QUEUE_FLAG_NAME(BYPASS),
        QUEUE_FLAG_NAME(BIDI),
        QUEUE_FLAG_NAME(NOMERGES),
        QUEUE_FLAG_NAME(SAME_COMP),
        QUEUE_FLAG_NAME(FAIL_IO),
        QUEUE_FLAG_NAME(STACKABLE),
        QUEUE_FLAG_NAME(NONROT),
        QUEUE_FLAG_NAME(IO_STAT),
        QUEUE_FLAG_NAME(DISCARD),
        QUEUE_FLAG_NAME(NOXMERGES),
        QUEUE_FLAG_NAME(ADD_RANDOM),
        QUEUE_FLAG_NAME(SECERASE),
        QUEUE_FLAG_NAME(SAME_FORCE),
        QUEUE_FLAG_NAME(DEAD),
        QUEUE_FLAG_NAME(INIT_DONE),
        QUEUE_FLAG_NAME(NO_SG_MERGE),
        QUEUE_FLAG_NAME(POLL),
        QUEUE_FLAG_NAME(WC),
        QUEUE_FLAG_NAME(FUA),
        QUEUE_FLAG_NAME(FLUSH_NQ),
        QUEUE_FLAG_NAME(DAX),
        QUEUE_FLAG_NAME(STATS),
        QUEUE_FLAG_NAME(POLL_STATS),
        QUEUE_FLAG_NAME(REGISTERED),
        QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
        QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;

        blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
                       ARRAY_SIZE(blk_queue_flag_name));
        seq_puts(m, "\n");
        return 0;
}

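/*
 * Let user space poke the queue: "run" runs the hardware queues, "start"
 * restarts stopped hardware queues and "kick" kicks the requeue list; any
 * other input fails with -EINVAL.
 */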
static ssize_t queue_state_write(void *data, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct request_queue *q = data;
        char opbuf[16] = { }, *op;

        /*
         * The "state" attribute is removed after blk_cleanup_queue() has
         * called blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set
         * to avoid triggering a use-after-free.
         */
        if (blk_queue_dead(q))
                return -ENOENT;

        if (count >= sizeof(opbuf)) {
                pr_err("%s: operation too long\n", __func__);
                goto inval;
        }

        if (copy_from_user(opbuf, buf, count))
                return -EFAULT;
        op = strstrip(opbuf);
        if (strcmp(op, "run") == 0) {
                blk_mq_run_hw_queues(q, true);
        } else if (strcmp(op, "start") == 0) {
                blk_mq_start_stopped_hw_queues(q, true);
        } else if (strcmp(op, "kick") == 0) {
                blk_mq_kick_requeue_list(q);
        } else {
                pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
                pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
                return -EINVAL;
        }
        return count;
}

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
        if (stat->nr_samples) {
                seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
                           stat->nr_samples, stat->mean, stat->min, stat->max);
        } else {
                seq_puts(m, "samples=0");
        }
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        int i;

        for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
                seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

        return 0;
}

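/* Writing anything to "write_hints" ignores the input and clears all of the
 * per-hint counters. */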
static ssize_t queue_write_hint_store(void *data, const char __user *buf,
                                      size_t count, loff_t *ppos)
{
        struct request_queue *q = data;
        int i;

        for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
                q->write_hints[i] = 0;

        return count;
}

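/*
 * Poll statistics are kept in read/write pairs: poll_stat[2*i] holds reads
 * and poll_stat[2*i+1] holds writes of size 2^(9+i) bytes (512 << i).
 */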
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        int bucket;

        for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
                seq_printf(m, "read  (%d Bytes): ", 1 << (9+bucket));
                print_stat(m, &q->poll_stat[2*bucket]);
                seq_puts(m, "\n");

                seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
                print_stat(m, &q->poll_stat[2*bucket+1]);
                seq_puts(m, "\n");
        }
        return 0;
}

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
        HCTX_STATE_NAME(STOPPED),
        HCTX_STATE_NAME(TAG_ACTIVE),
        HCTX_STATE_NAME(SCHED_RESTART),
        HCTX_STATE_NAME(TAG_WAITING),
        HCTX_STATE_NAME(START_ON_RUN),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        blk_flags_show(m, hctx->state, hctx_state_name,
                       ARRAY_SIZE(hctx_state_name));
        seq_puts(m, "\n");
        return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
        BLK_TAG_ALLOC_NAME(FIFO),
        BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
        HCTX_FLAG_NAME(SHOULD_MERGE),
        HCTX_FLAG_NAME(TAG_SHARED),
        HCTX_FLAG_NAME(SG_MERGE),
        HCTX_FLAG_NAME(BLOCKING),
        HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME

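/*
 * The tag allocation policy is encoded in a bitfield of hctx->flags; XOR the
 * policy bits back out so that blk_flags_show() only sees the remaining
 * per-hctx flag bits.
 */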
static int hctx_flags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

        seq_puts(m, "alloc_policy=");
        if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
            alloc_policy_name[alloc_policy])
                seq_puts(m, alloc_policy_name[alloc_policy]);
        else
                seq_printf(m, "%d", alloc_policy);
        seq_puts(m, " ");
        blk_flags_show(m,
                       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
                       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
        seq_puts(m, "\n");
        return 0;
}

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
        REQ_OP_NAME(READ),
        REQ_OP_NAME(WRITE),
        REQ_OP_NAME(FLUSH),
        REQ_OP_NAME(DISCARD),
        REQ_OP_NAME(ZONE_REPORT),
        REQ_OP_NAME(SECURE_ERASE),
        REQ_OP_NAME(ZONE_RESET),
        REQ_OP_NAME(WRITE_SAME),
        REQ_OP_NAME(WRITE_ZEROES),
        REQ_OP_NAME(SCSI_IN),
        REQ_OP_NAME(SCSI_OUT),
        REQ_OP_NAME(DRV_IN),
        REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
        CMD_FLAG_NAME(FAILFAST_DEV),
        CMD_FLAG_NAME(FAILFAST_TRANSPORT),
        CMD_FLAG_NAME(FAILFAST_DRIVER),
        CMD_FLAG_NAME(SYNC),
        CMD_FLAG_NAME(META),
        CMD_FLAG_NAME(PRIO),
        CMD_FLAG_NAME(NOMERGE),
        CMD_FLAG_NAME(IDLE),
        CMD_FLAG_NAME(INTEGRITY),
        CMD_FLAG_NAME(FUA),
        CMD_FLAG_NAME(PREFLUSH),
        CMD_FLAG_NAME(RAHEAD),
        CMD_FLAG_NAME(BACKGROUND),
        CMD_FLAG_NAME(NOUNMAP),
        CMD_FLAG_NAME(NOWAIT),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
        RQF_NAME(SORTED),
        RQF_NAME(STARTED),
        RQF_NAME(QUEUED),
        RQF_NAME(SOFTBARRIER),
        RQF_NAME(FLUSH_SEQ),
        RQF_NAME(MIXED_MERGE),
        RQF_NAME(MQ_INFLIGHT),
        RQF_NAME(DONTPREP),
        RQF_NAME(PREEMPT),
        RQF_NAME(COPY_USER),
        RQF_NAME(FAILED),
        RQF_NAME(QUIET),
        RQF_NAME(ELVPRIV),
        RQF_NAME(IO_STAT),
        RQF_NAME(ALLOCED),
        RQF_NAME(PM),
        RQF_NAME(HASHED),
        RQF_NAME(STATS),
        RQF_NAME(SPECIAL_PAYLOAD),
};
#undef RQF_NAME

#define RQAF_NAME(name) [REQ_ATOM_##name] = #name
static const char *const rqaf_name[] = {
        RQAF_NAME(COMPLETE),
        RQAF_NAME(STARTED),
        RQAF_NAME(POLL_SLEPT),
};
#undef RQAF_NAME

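/*
 * Dump one request: operation, command flags, request flags, atomic flags,
 * tags, and any driver-specific state exposed via the ->show_rq() hook.
 */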
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
        const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
        const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

        seq_printf(m, "%p {.op=", rq);
        if (op < ARRAY_SIZE(op_name) && op_name[op])
                seq_printf(m, "%s", op_name[op]);
        else
                seq_printf(m, "%d", op);
        seq_puts(m, ", .cmd_flags=");
        blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
                       ARRAY_SIZE(cmd_flag_name));
        seq_puts(m, ", .rq_flags=");
        blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
                       ARRAY_SIZE(rqf_name));
        seq_puts(m, ", .atomic_flags=");
        blk_flags_show(m, rq->atomic_flags, rqaf_name, ARRAY_SIZE(rqaf_name));
        seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
                   rq->internal_tag);
        if (mq_ops->show_rq)
                mq_ops->show_rq(m, rq);
        seq_puts(m, "}\n");
        return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
        return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
        __acquires(&q->requeue_lock)
{
        struct request_queue *q = m->private;

        spin_lock_irq(&q->requeue_lock);
        return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct request_queue *q = m->private;

        return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
        __releases(&q->requeue_lock)
{
        struct request_queue *q = m->private;

        spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
        .start = queue_requeue_list_start,
        .next = queue_requeue_list_next,
        .stop = queue_requeue_list_stop,
        .show = blk_mq_debugfs_rq_show,
};

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_lock(&hctx->lock);
        return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
        __releases(&hctx->lock)
{
        struct blk_mq_hw_ctx *hctx = m->private;

        spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
        .start = hctx_dispatch_start,
        .next = hctx_dispatch_next,
        .stop = hctx_dispatch_stop,
        .show = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
        struct seq_file *m;
        struct blk_mq_hw_ctx *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call.
 */
static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
        const struct show_busy_params *params = data;

        if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
            test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
                __blk_mq_debugfs_rq_show(params->m,
                                         list_entry_rq(&rq->queuelist));
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct show_busy_params params = { .m = m, .hctx = hctx };

        blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
                                &params);

        return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        sbitmap_bitmap_show(&hctx->ctx_map, m);
        return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
                                     struct blk_mq_tags *tags)
{
        seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
        seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
        seq_printf(m, "active_queues=%d\n",
                   atomic_read(&tags->active_queues));

        seq_puts(m, "\nbitmap_tags:\n");
        sbitmap_queue_show(&tags->bitmap_tags, m);

        if (tags->nr_reserved_tags) {
                seq_puts(m, "\nbreserved_tags:\n");
                sbitmap_queue_show(&tags->breserved_tags, m);
        }
}

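/*
 * hctx->tags and hctx->sched_tags may be freed while the queue is being
 * reconfigured (e.g. by blk_mq_update_nr_hw_queues()), so the dump functions
 * below hold q->sysfs_lock, taken interruptibly, to keep the tag sets stable.
 */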
static int hctx_tags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                blk_mq_debugfs_tags_show(m, hctx->tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->tags)
                sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                blk_mq_debugfs_tags_show(m, hctx->sched_tags);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct request_queue *q = hctx->queue;
        int res;

        res = mutex_lock_interruptible(&q->sysfs_lock);
        if (res)
                goto out;
        if (hctx->sched_tags)
                sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
        mutex_unlock(&q->sysfs_lock);

out:
        return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "considered=%lu\n", hctx->poll_considered);
        seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
        seq_printf(m, "success=%lu\n", hctx->poll_success);
        return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
        return count;
}

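/*
 * hctx->dispatched[] is a histogram of how many requests a single queue run
 * dispatched. Buckets are labeled with powers of two; the final row (marked
 * with '+') aggregates all larger batch sizes.
 */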
static int hctx_dispatched_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        int i;

        seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
                unsigned int d = 1U << (i - 1);

                seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
        }

        seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
        return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
                                     size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;
        int i;

        for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
                hctx->dispatched[i] = 0;
        return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%lu\n", hctx->queued);
        return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
                                 size_t count, loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->queued = 0;
        return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%lu\n", hctx->run);
        return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
                              loff_t *ppos)
{
        struct blk_mq_hw_ctx *hctx = data;

        hctx->run = 0;
        return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;

        seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
        return 0;
}

static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
        __acquires(&ctx->lock)
{
        struct blk_mq_ctx *ctx = m->private;

        spin_lock(&ctx->lock);
        return seq_list_start(&ctx->rq_list, *pos);
}

static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct blk_mq_ctx *ctx = m->private;

        return seq_list_next(v, &ctx->rq_list, pos);
}

static void ctx_rq_list_stop(struct seq_file *m, void *v)
        __releases(&ctx->lock)
{
        struct blk_mq_ctx *ctx = m->private;

        spin_unlock(&ctx->lock);
}

static const struct seq_operations ctx_rq_list_seq_ops = {
        .start = ctx_rq_list_start,
        .next = ctx_rq_list_next,
        .stop = ctx_rq_list_stop,
        .show = blk_mq_debugfs_rq_show,
};

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
        return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
                                    size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
        return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu\n", ctx->rq_merged);
        return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_merged = 0;
        return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
        struct blk_mq_ctx *ctx = data;

        seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
        return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
                                   size_t count, loff_t *ppos)
{
        struct blk_mq_ctx *ctx = data;

        ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
        return count;
}

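/*
 * For the file operations below, inode->i_private holds the
 * blk_mq_debugfs_attr entry while the object being exposed (queue, hctx or
 * ctx) lives in the parent directory's inode; see debugfs_create_files().
 */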
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
        const struct blk_mq_debugfs_attr *attr = m->private;
        void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

        return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
                                    size_t count, loff_t *ppos)
{
        struct seq_file *m = file->private_data;
        const struct blk_mq_debugfs_attr *attr = m->private;
        void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

        /*
         * Attributes that only implement .seq_ops are read-only and 'attr' is
         * the same as 'data' in this case.
         */
        if (attr == data || !attr->write)
                return -EPERM;

        return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
        const struct blk_mq_debugfs_attr *attr = inode->i_private;
        void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
        struct seq_file *m;
        int ret;

        if (attr->seq_ops) {
                ret = seq_open(file, attr->seq_ops);
                if (!ret) {
                        m = file->private_data;
                        m->private = data;
                }
                return ret;
        }

        if (WARN_ON_ONCE(!attr->show))
                return -EPERM;

        return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
        const struct blk_mq_debugfs_attr *attr = inode->i_private;

        if (attr->show)
                return single_release(inode, file);
        else
                return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
        .open = blk_mq_debugfs_open,
        .read = seq_read,
        .write = blk_mq_debugfs_write,
        .llseek = seq_lseek,
        .release = blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
        {"poll_stat", 0400, queue_poll_stat_show},
        {"requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops},
        {"state", 0600, queue_state_show, queue_state_write},
        {"write_hints", 0600, queue_write_hint_show, queue_write_hint_store},
        {},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
        {"state", 0400, hctx_state_show},
        {"flags", 0400, hctx_flags_show},
        {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
        {"busy", 0400, hctx_busy_show},
        {"ctx_map", 0400, hctx_ctx_map_show},
        {"tags", 0400, hctx_tags_show},
        {"tags_bitmap", 0400, hctx_tags_bitmap_show},
        {"sched_tags", 0400, hctx_sched_tags_show},
        {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
        {"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
        {"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
        {"queued", 0600, hctx_queued_show, hctx_queued_write},
        {"run", 0600, hctx_run_show, hctx_run_write},
        {"active", 0400, hctx_active_show},
        {},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
        {"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
        {"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
        {"merged", 0600, ctx_merged_show, ctx_merged_write},
        {"completed", 0600, ctx_completed_show, ctx_completed_write},
        {},
};

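/*
 * Stash @data (the queue, hctx or ctx being exposed) in the parent inode so
 * the shared file operations can retrieve it, then create one debugfs file
 * per attribute.
 */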
static bool debugfs_create_files(struct dentry *parent, void *data,
                                 const struct blk_mq_debugfs_attr *attr)
{
        d_inode(parent)->i_private = data;

        for (; attr->name; attr++) {
                if (!debugfs_create_file(attr->name, attr->mode, parent,
                                         (void *)attr, &blk_mq_debugfs_fops))
                        return false;
        }
        return true;
}

int blk_mq_debugfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!blk_debugfs_root)
                return -ENOENT;

        q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
                                            blk_debugfs_root);
        if (!q->debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(q->debugfs_dir, q,
                                  blk_mq_debugfs_queue_attrs))
                goto err;

        /*
         * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
         * didn't exist yet (because we don't know what to name the directory
         * until the queue is registered to a gendisk).
         */
        if (q->elevator && !q->sched_debugfs_dir)
                blk_mq_debugfs_register_sched(q);

        /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
                        goto err;
                if (q->elevator && !hctx->sched_debugfs_dir &&
                    blk_mq_debugfs_register_sched_hctx(q, hctx))
                        goto err;
        }

        return 0;

err:
        blk_mq_debugfs_unregister(q);
        return -ENOMEM;
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
        debugfs_remove_recursive(q->debugfs_dir);
        q->sched_debugfs_dir = NULL;
        q->debugfs_dir = NULL;
}

static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
                                       struct blk_mq_ctx *ctx)
{
        struct dentry *ctx_dir;
        char name[20];

        snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
        ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
        if (!ctx_dir)
                return -ENOMEM;

        if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
                return -ENOMEM;

        return 0;
}

int blk_mq_debugfs_register_hctx(struct request_queue *q,
                                 struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        char name[20];
        int i;

        if (!q->debugfs_dir)
                return -ENOENT;

        snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
        hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
        if (!hctx->debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(hctx->debugfs_dir, hctx,
                                  blk_mq_debugfs_hctx_attrs))
                goto err;

        hctx_for_each_ctx(hctx, ctx, i) {
                if (blk_mq_debugfs_register_ctx(hctx, ctx))
                        goto err;
        }

        return 0;

err:
        blk_mq_debugfs_unregister_hctx(hctx);
        return -ENOMEM;
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        debugfs_remove_recursive(hctx->debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
        hctx->debugfs_dir = NULL;
}

int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i) {
                if (blk_mq_debugfs_register_hctx(q, hctx))
                        return -ENOMEM;
        }

        return 0;
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_debugfs_unregister_hctx(hctx);
}

int blk_mq_debugfs_register_sched(struct request_queue *q)
{
        struct elevator_type *e = q->elevator->type;

        if (!q->debugfs_dir)
                return -ENOENT;

        if (!e->queue_debugfs_attrs)
                return 0;

        q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
        if (!q->sched_debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(q->sched_debugfs_dir, q,
                                  e->queue_debugfs_attrs))
                goto err;

        return 0;

err:
        blk_mq_debugfs_unregister_sched(q);
        return -ENOMEM;
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
        debugfs_remove_recursive(q->sched_debugfs_dir);
        q->sched_debugfs_dir = NULL;
}

int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
                                       struct blk_mq_hw_ctx *hctx)
{
        struct elevator_type *e = q->elevator->type;

        if (!hctx->debugfs_dir)
                return -ENOENT;

        if (!e->hctx_debugfs_attrs)
                return 0;

        hctx->sched_debugfs_dir = debugfs_create_dir("sched",
                                                     hctx->debugfs_dir);
        if (!hctx->sched_debugfs_dir)
                return -ENOMEM;

        if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
                                  e->hctx_debugfs_attrs))
                return -ENOMEM;

        return 0;
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
        debugfs_remove_recursive(hctx->sched_debugfs_dir);
        hctx->sched_debugfs_dir = NULL;
}