/*
 * Functions related to interrupt-poll handling in the block layer. This
 * is similar to NAPI for network devices.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/blk-iopoll.h>
#include <linux/delay.h>

#include "blk.h"

int blk_iopoll_enabled = 1;
EXPORT_SYMBOL(blk_iopoll_enabled);

static unsigned int blk_iopoll_budget __read_mostly = 256;

static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);

/**
 * blk_iopoll_sched - Schedule a run of the iopoll handler
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Add this blk_iopoll structure to the pending poll list and trigger the
 *     raise of the blk iopoll softirq. The driver must already have gotten a
 *     successful return from blk_iopoll_sched_prep() before calling this.
 **/
void blk_iopoll_sched(struct blk_iopoll *iop)
{
	unsigned long flags;

	local_irq_save(flags);
	list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll));
	__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
	local_irq_restore(flags);
	preempt_check_resched_rt();
}
EXPORT_SYMBOL(blk_iopoll_sched);

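/*
 * Example (an illustrative sketch, not part of this file): a hypothetical
 * driver's hard interrupt handler hands completion processing off to
 * iopoll by pairing blk_iopoll_sched_prep() with blk_iopoll_sched().
 * blk_iopoll_sched_prep() returns 0 when the caller successfully claims
 * the iopoll for scheduling. foo_dev, foo_isr() and
 * foo_disable_completion_irq() are made-up names; only the prep/sched
 * pairing is required by this API.
 *
 *	static irqreturn_t foo_isr(int irq, void *data)
 *	{
 *		struct foo_dev *fd = data;
 *
 *		if (!blk_iopoll_sched_prep(&fd->iopoll)) {
 *			foo_disable_completion_irq(fd);
 *			blk_iopoll_sched(&fd->iopoll);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */
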
/**
 * __blk_iopoll_complete - Mark this @iop as un-polled again
 * @iop: The parent iopoll structure
 *
 * Description:
 *     See blk_iopoll_complete(). This function must be called with interrupts
 *     disabled.
 **/
void __blk_iopoll_complete(struct blk_iopoll *iop)
{
	list_del(&iop->list);
	smp_mb__before_clear_bit();
	clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(__blk_iopoll_complete);

/**
 * blk_iopoll_complete - Mark this @iop as un-polled again
 * @iop: The parent iopoll structure
 *
 * Description:
 *     If a driver consumes less than the assigned budget in its run of the
 *     iopoll handler, it'll end the polled mode by calling this function. The
 *     iopoll handler will not be invoked again before blk_iopoll_sched_prep()
 *     is called.
 **/
void blk_iopoll_complete(struct blk_iopoll *iop)
{
	unsigned long flags;

	local_irq_save(flags);
	__blk_iopoll_complete(iop);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_iopoll_complete);

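/*
 * Example (an illustrative sketch, not part of this file): a hypothetical
 * driver ->poll() callback. foo_dev, foo_reap_completions() and
 * foo_enable_completion_irq() are made-up names. The contract shown --
 * return how many completions were processed, and only leave polled mode
 * via blk_iopoll_complete() when fewer than @budget were found -- is
 * what this API expects.
 *
 *	static int foo_iopoll(struct blk_iopoll *iop, int budget)
 *	{
 *		struct foo_dev *fd = container_of(iop, struct foo_dev, iopoll);
 *		int work;
 *
 *		work = foo_reap_completions(fd, budget);
 *		if (work < budget) {
 *			blk_iopoll_complete(iop);
 *			foo_enable_completion_irq(fd);
 *		}
 *		return work;
 *	}
 */
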
static void blk_iopoll_softirq(struct softirq_action *h)
{
	struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
	int rearm = 0, budget = blk_iopoll_budget;
	unsigned long start_time = jiffies;

	local_irq_disable();

	while (!list_empty(list)) {
		struct blk_iopoll *iop;
		int work, weight;

		/*
		 * If softirq window is exhausted then punt.
		 */
		if (budget <= 0 || time_after(jiffies, start_time)) {
			rearm = 1;
			break;
		}

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		iop = list_entry(list->next, struct blk_iopoll, list);

		weight = iop->weight;
		work = 0;
		if (test_bit(IOPOLL_F_SCHED, &iop->state))
			work = iop->poll(iop, weight);

		budget -= work;

		local_irq_disable();

		/*
		 * Drivers must not modify the iopoll state if they
		 * consume their assigned weight (or more; some drivers can't
		 * easily just stop processing, they have to complete an
		 * entire mask of commands). In such cases this code
		 * still "owns" the iopoll instance and therefore can
		 * move the instance around on the list at will.
		 */
		if (work >= weight) {
			if (blk_iopoll_disable_pending(iop))
				__blk_iopoll_complete(iop);
			else
				list_move_tail(&iop->list, list);
		}
	}

	if (rearm)
		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);

	local_irq_enable();
	preempt_check_resched_rt();
}

/**
 * blk_iopoll_disable - Disable iopoll on this @iop
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Disable io polling and wait for any pending callbacks to have completed.
 **/
void blk_iopoll_disable(struct blk_iopoll *iop)
{
	set_bit(IOPOLL_F_DISABLE, &iop->state);
	while (test_and_set_bit(IOPOLL_F_SCHED, &iop->state))
		msleep(1);
	clear_bit(IOPOLL_F_DISABLE, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_disable);

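/*
 * Added commentary: blk_iopoll_disable() loops on msleep() until it can
 * claim IOPOLL_F_SCHED, so it may sleep and must be called from process
 * context -- typically during device teardown, e.g. (reusing the made-up
 * foo_dev from the examples above):
 *
 *	blk_iopoll_disable(&fd->iopoll);
 *	free_irq(fd->irq, fd);
 */
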
/**
 * blk_iopoll_enable - Enable iopoll on this @iop
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Enable iopoll on this @iop. Note that this does not schedule a run
 *     of the handler; it only marks the iopoll as active.
 **/
void blk_iopoll_enable(struct blk_iopoll *iop)
{
	BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));
	smp_mb__before_clear_bit();
	clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_enable);

/**
 * blk_iopoll_init - Initialize this @iop
 * @iop: The parent iopoll structure
 * @weight: The default weight (or command completion budget)
 * @poll_fn: The handler to invoke
 *
 * Description:
 *     Initialize this blk_iopoll structure. Before being actively used, the
 *     driver must call blk_iopoll_enable().
 **/
void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn)
{
	memset(iop, 0, sizeof(*iop));
	INIT_LIST_HEAD(&iop->list);
	iop->weight = weight;
	iop->poll = poll_fn;
	set_bit(IOPOLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(blk_iopoll_init);

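/*
 * Example (an illustrative sketch, not part of this file): bringing up
 * iopoll in a hypothetical driver, reusing the made-up foo_dev and
 * foo_iopoll() from the examples above. FOO_IOPOLL_WEIGHT is likewise a
 * made-up per-driver constant.
 *
 *	blk_iopoll_init(&fd->iopoll, FOO_IOPOLL_WEIGHT, foo_iopoll);
 *	blk_iopoll_enable(&fd->iopoll);
 *
 * After blk_iopoll_enable() the iopoll is merely eligible; nothing is
 * polled until an interrupt handler wins blk_iopoll_sched_prep() and
 * calls blk_iopoll_sched().
 */
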
static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
					   unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;

		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
				 &__get_cpu_var(blk_cpu_iopoll));
		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
		local_irq_enable();
		preempt_check_resched_rt();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
	.notifier_call = blk_iopoll_cpu_notify,
};

static __init int blk_iopoll_setup(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));

	open_softirq(BLOCK_IOPOLL_SOFTIRQ, blk_iopoll_softirq);
	register_hotcpu_notifier(&blk_iopoll_cpu_notifier);
	return 0;
}
subsys_initcall(blk_iopoll_setup);