/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INT_BLK_MQ_H
#define INT_BLK_MQ_H

#include "blk-stat.h"

struct blk_mq_tag_set;

struct blk_mq_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	rq_list;
	} ____cacheline_aligned_in_smp;

	unsigned int		cpu;
	unsigned int		index_hw;

	/* incremented at dispatch time */
	unsigned long		rq_dispatched[2];
	unsigned long		rq_merged;

	/* incremented at completion time */
	unsigned long		____cacheline_aligned_in_smp rq_completed[2];

	struct request_queue	*queue;
	struct kobject		kobj;
} ____cacheline_aligned_in_smp;

void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait);

/*
 * Internal helpers for allocating/freeing the request map
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);
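
/*
 * Illustrative sketch, not part of this header: the helpers above are
 * expected to pair up, with blk_mq_alloc_rq_map() creating the tag map
 * and blk_mq_alloc_rqs() backing it with requests, torn down in reverse
 * order on failure. The function name and the use of set->queue_depth
 * and set->reserved_tags are assumptions made for the example.
 */
static inline struct blk_mq_tags *example_alloc_map(struct blk_mq_tag_set *set,
						    unsigned int hctx_idx)
{
	struct blk_mq_tags *tags;

	tags = blk_mq_alloc_rq_map(set, hctx_idx, set->queue_depth,
				   set->reserved_tags);
	if (!tags)
		return NULL;

	if (blk_mq_alloc_rqs(set, tags, hctx_idx, set->queue_depth)) {
		/* undo in reverse order on failure */
		blk_mq_free_rq_map(tags);
		return NULL;
	}

	return tags;
}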

/*
 * Internal helpers for request insertion into sw queues
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head);
void blk_mq_request_bypass_insert(struct request *rq);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list);
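
/*
 * Illustrative sketch, not part of this header, assuming (as the callers
 * in blk-mq.c do) that ctx->lock protects the sw queue __blk_mq_insert_request()
 * touches, and that a kick of the hardware queue typically follows:
 */
static inline void example_insert_and_run(struct blk_mq_hw_ctx *hctx,
					  struct request *rq, bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;

	spin_lock(&ctx->lock);
	__blk_mq_insert_request(hctx, rq, at_head);
	spin_unlock(&ctx->lock);

	/* run asynchronously so we don't dispatch from the caller's context */
	blk_mq_run_hw_queue(hctx, true);
}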

/*
 * CPU -> queue mappings
 */
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);

static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
						     int cpu)
{
	return q->queue_hw_ctx[q->mq_map[cpu]];
}
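
/*
 * Illustrative sketch, not part of this header: a request is routed to
 * whichever hardware context q->mq_map assigns to the CPU its software
 * queue lives on.
 */
static inline struct blk_mq_hw_ctx *example_hctx_for_rq(struct request *rq)
{
	return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
}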

/*
 * sysfs helpers
 */
extern void blk_mq_sysfs_init(struct request_queue *q);
extern void blk_mq_sysfs_deinit(struct request_queue *q);
extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q);
extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);

extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);

static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
						  unsigned int cpu)
{
	return per_cpu_ptr(q->queue_ctx, cpu);
}

/*
 * This assumes the software queues are per-cpu; they could just as well
 * be per-node, for instance. For now this is hardcoded. Note that we
 * don't care about preemption, since we know the ctxs are persistent.
 * This does mean that we can't rely on the ctx always matching the CPU
 * we are currently running on.
 */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}
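
/*
 * Illustrative sketch, not part of this header: blk_mq_get_ctx() pins the
 * caller to a CPU via get_cpu(), so every successful get must be paired
 * with blk_mq_put_ctx() once the per-cpu ctx is no longer needed.
 */
static inline void example_account_merge(struct request_queue *q)
{
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);

	ctx->rq_merged++;	/* safe: preemption is disabled while pinned */
	blk_mq_put_ctx(ctx);
}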

struct blk_mq_alloc_data {
	/* input parameters */
	struct request_queue	*q;
	unsigned int		flags;
	unsigned int		shallow_depth;

	/* input & output parameters */
	struct blk_mq_ctx	*ctx;
	struct blk_mq_hw_ctx	*hctx;
};

static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_INTERNAL)
		return data->hctx->sched_tags;

	return data->hctx->tags;
}
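
/*
 * Illustrative sketch, not part of this header: BLK_MQ_REQ_INTERNAL routes
 * a tag allocation to the scheduler-owned sched_tags rather than the
 * driver tags, so the same allocation context resolves to a different tag
 * set depending on that flag. The helper name is hypothetical.
 */
static inline struct blk_mq_tags *example_pick_tags(struct request_queue *q,
						    struct blk_mq_hw_ctx *hctx,
						    bool internal)
{
	struct blk_mq_alloc_data data = {
		.q	= q,
		.flags	= internal ? BLK_MQ_REQ_INTERNAL : 0,
		.hctx	= hctx,
	};

	return blk_mq_tags_from_data(&data);
}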

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}

static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
{
	return hctx->nr_ctx && hctx->tags;
}

void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
		      unsigned int inflight[2]);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
			 unsigned int inflight[2]);

#endif