/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100 / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
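 *
 *   As a concrete instance of the formula above: with the default 100msec
 *   window, reaching scale_step == 3 shrinks the monitoring window to
 *   100 / sqrt(3 + 1) == 50msec.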
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"
#include "blk-rq-qos.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

static inline void wbt_clear_state(struct request *rq)
{
	rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
	return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
	return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
	return rq->wbt_flags & WBT_READ;
}

enum {
	/*
	 * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
	 * from here depending on device stats
	 */
	RWB_DEF_DEPTH	= 16,

	/*
	 * 100msec window
	 */
	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,

	/*
	 * Disregard stats, if we don't meet this minimum
	 */
	RWB_MIN_WRITE_SAMPLES	= 3,

	/*
	 * If we have this number of consecutive windows with not enough
	 * information to scale up or down, scale up.
	 */
	RWB_UNKNOWN_BUMP	= 5,
};

static inline bool rwb_enabled(struct rq_wb *rwb)
{
	return rwb && rwb->wb_normal != 0;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
	if (rwb_enabled(rwb)) {
		const unsigned long cur = jiffies;

		if (cur != *var)
			*var = cur;
	}
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
	struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;

	return time_before(jiffies, wb->dirty_sleep + HZ);
}

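/*
 * Tracked IO is charged to one of three wait queues: writes issued by
 * kswapd, discards, and everything else (normal background writeback),
 * each with its own inflight count.
 */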
static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
					  enum wbt_flags wb_acct)
{
	if (wb_acct & WBT_KSWAPD)
		return &rwb->rq_wait[WBT_RWQ_KSWAPD];
	else if (wb_acct & WBT_DISCARD)
		return &rwb->rq_wait[WBT_RWQ_DISCARD];

	return &rwb->rq_wait[WBT_RWQ_BG];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++) {
		struct rq_wait *rqw = &rwb->rq_wait[i];

		if (wq_has_sleeper(&rqw->wait))
			wake_up_all(&rqw->wait);
	}
}

static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
			 enum wbt_flags wb_acct)
{
	int inflight, limit;

	inflight = atomic_dec_return(&rqw->inflight);

	/*
	 * wbt got disabled with IO in flight. Wake up any potential
	 * waiters, we don't have to do more than that.
	 */
	if (unlikely(!rwb_enabled(rwb))) {
		rwb_wake_all(rwb);
		return;
	}

	/*
	 * For discards, our limit is always the background. For writes, if
	 * the device does write back caching, drop further down before we
	 * wake people up.
	 */
	if (wb_acct & WBT_DISCARD)
		limit = rwb->wb_background;
	else if (rwb->wc && !wb_recent_wait(rwb))
		limit = 0;
	else
		limit = rwb->wb_normal;

	/*
	 * Don't wake anyone up if we are above the normal limit.
	 */
	if (inflight && inflight >= limit)
		return;

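	/*
	 * Only wake waiters if we have either drained completely, or the
	 * headroom to the limit is at least half of the background depth.
	 */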
	if (wq_has_sleeper(&rqw->wait)) {
		int diff = limit - inflight;

		if (!inflight || diff >= rwb->wb_background / 2)
			wake_up_all(&rqw->wait);
	}
}

static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct rq_wait *rqw;

	if (!(wb_acct & WBT_TRACKED))
		return;

	rqw = get_rq_wait(rwb, wb_acct);
	wbt_rqw_done(rwb, rqw, wb_acct);
}

/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, when the request gets freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!wbt_is_tracked(rq)) {
		if (rwb->sync_cookie == rq) {
			rwb->sync_issue = 0;
			rwb->sync_cookie = NULL;
		}

		if (wbt_is_read(rq))
			wb_timestamp(rwb, &rwb->last_comp);
	} else {
		WARN_ON_ONCE(rq == rwb->sync_cookie);
		__wbt_done(rqos, wbt_flags(rq));
	}
	wbt_clear_state(rq);
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
	/*
	 * We need at least one read sample, and a minimum of
	 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
	 * that it's writes impacting us, and not just some sole read on
	 * a device that is in a lower power state.
	 */
	return (stat[READ].nr_samples >= 1 &&
		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}

static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
	u64 now, issue = READ_ONCE(rwb->sync_issue);

	if (!issue || !rwb->sync_cookie)
		return 0;

	now = ktime_to_ns(ktime_get());
	return now - issue;
}

enum {
	LAT_OK = 1,
	LAT_UNKNOWN,
	LAT_UNKNOWN_WRITES,
	LAT_EXCEEDED,
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;
	u64 thislat;

	/*
	 * If our stored sync issue exceeds the window size, or it
	 * exceeds our min target AND we haven't logged any entries,
	 * flag the latency as exceeded. wbt works off completion latencies,
	 * but for a flooded device, a single sync IO can take a long time
	 * to complete after being issued. If this time exceeds our
	 * monitoring window AND we didn't see any other completions in that
	 * window, then count that sync IO as a violation of the latency.
	 */
	thislat = rwb_sync_issue_lat(rwb);
	if (thislat > rwb->cur_win_nsec ||
	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
		trace_wbt_lat(bdi, thislat);
		return LAT_EXCEEDED;
	}

	/*
	 * No read/write mix, if stat isn't valid
	 */
	if (!stat_sample_valid(stat)) {
		/*
		 * If we had writes in this stat window and the window is
		 * current, we're only doing writes. If a task recently
		 * waited or still has writes in flight, consider us doing
		 * just writes as well.
		 */
		if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
		    wbt_inflight(rwb))
			return LAT_UNKNOWN_WRITES;
		return LAT_UNKNOWN;
	}

	/*
	 * If the 'min' latency exceeds our target, step down.
	 */
	if (stat[READ].min > rwb->min_lat_nsec) {
		trace_wbt_lat(bdi, stat[READ].min);
		trace_wbt_stat(bdi, stat);
		return LAT_EXCEEDED;
	}

	if (rqd->scale_step)
		trace_wbt_stat(bdi, stat);

	return LAT_OK;
}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;

	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

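/*
 * Derive the two throttling limits from the current max queue depth:
 * wb_normal is roughly half of max_depth and wb_background roughly a
 * quarter, e.g. max_depth == 16 gives wb_normal == 8 and
 * wb_background == 4. Very small depths and a zero latency target are
 * special cased below.
 */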
static void calc_wb_limits(struct rq_wb *rwb)
{
	if (rwb->min_lat_nsec == 0) {
		rwb->wb_normal = rwb->wb_background = 0;
	} else if (rwb->rq_depth.max_depth <= 2) {
		rwb->wb_normal = rwb->rq_depth.max_depth;
		rwb->wb_background = 1;
	} else {
		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
	}
}

static void scale_up(struct rq_wb *rwb)
{
	if (!rq_depth_scale_up(&rwb->rq_depth))
		return;
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_wake_all(rwb);
	rwb_trace_step(rwb, "scale up");
}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
	if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
		return;
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_trace_step(rwb, "scale down");
}

static void rwb_arm_timer(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	if (rqd->scale_step > 0) {
		/*
		 * We should speed this up, using some variant of a fast
		 * integer inverse square root calculation. Since we only do
		 * this for every window expiration, it's not a huge deal,
		 * though.
		 */
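		/*
		 * This computes cur_win_nsec = win_nsec / sqrt(scale_step + 1)
		 * in fixed point: int_sqrt((step + 1) << 8) is roughly
		 * 16 * sqrt(step + 1), which the << 4 on win_nsec cancels out.
		 */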
		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
					int_sqrt((rqd->scale_step + 1) << 8));
	} else {
		/*
		 * For step < 0, we don't want to increase/decrease the
		 * window size.
		 */
		rwb->cur_win_nsec = rwb->win_nsec;
	}

	blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}

static void wb_timer_fn(struct blk_stat_callback *cb)
{
	struct rq_wb *rwb = cb->data;
	struct rq_depth *rqd = &rwb->rq_depth;
	unsigned int inflight = wbt_inflight(rwb);
	int status;

	status = latency_exceeded(rwb, cb->stat);

	trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
			inflight);

	/*
	 * If we exceeded the latency target, step down. If we did not,
	 * step one level up. If we don't know enough to say either exceeded
	 * or ok, then don't do anything.
	 */
	switch (status) {
	case LAT_EXCEEDED:
		scale_down(rwb, true);
		break;
	case LAT_OK:
		scale_up(rwb);
		break;
	case LAT_UNKNOWN_WRITES:
		/*
		 * We started at the center step, and while we don't have a
		 * valid read/write sample, we do have writes going on.
		 * Allow step to go negative, to increase write perf.
		 */
		scale_up(rwb);
		break;
	case LAT_UNKNOWN:
		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
			break;
		/*
		 * We get here when a previous scaling step reduced the depth,
		 * and we currently don't have a valid read/write sample. For
		 * that case, slowly return to center state (step == 0).
		 */
		if (rqd->scale_step > 0)
			scale_up(rwb);
		else if (rqd->scale_step < 0)
			scale_down(rwb, false);
		break;
	default:
		break;
	}

	/*
	 * Re-arm timer, if we have IO in flight
	 */
	if (rqd->scale_step || inflight)
		rwb_arm_timer(rwb);
}

static void __wbt_update_limits(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	rqd->scale_step = 0;
	rqd->scaled_max = false;

	rq_depth_calc_max_depth(rqd);
	calc_wb_limits(rwb);

	rwb_wake_all(rwb);
}

void wbt_update_limits(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return;
	__wbt_update_limits(RQWB(rqos));
}

u64 wbt_get_min_lat(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return 0;
	return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return;
	RQWB(rqos)->min_lat_nsec = val;
	RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
	__wbt_update_limits(RQWB(rqos));
}


static bool close_io(struct rq_wb *rwb)
{
	const unsigned long now = jiffies;

	return time_before(now, rwb->last_issue + HZ / 10) ||
		time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)

static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
	unsigned int limit;

	/*
	 * If we got disabled, just return UINT_MAX. This ensures that
	 * we'll properly inc a new IO, and dec+wakeup at the end.
	 */
	if (!rwb_enabled(rwb))
		return UINT_MAX;

	if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
		return rwb->wb_background;

	/*
	 * At this point we know it's a buffered write. If this is
	 * kswapd trying to free memory, or REQ_SYNC is set, then
	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
	 * that. If the write is marked as a background write, then use
	 * the idle limit, or go to normal if we haven't had competing
	 * IO for a bit.
	 */
	if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
		limit = rwb->rq_depth.max_depth;
	else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
		/*
		 * If less than 100ms since we completed unrelated IO,
		 * limit us to half the depth for background writeback.
		 */
		limit = rwb->wb_background;
	} else
		limit = rwb->wb_normal;

	return limit;
}

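/*
 * Waiters queue up exclusively below. When a token becomes available,
 * wbt_wake_function() claims it on behalf of the sleeping task (setting
 * got_token) before waking it, so a freshly woken waiter cannot lose the
 * token to a newly submitting task.
 */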
struct wbt_wait_data {
	struct wait_queue_entry wq;
	struct task_struct *task;
	struct rq_wb *rwb;
	struct rq_wait *rqw;
	unsigned long rw;
	bool got_token;
};

static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
			     int wake_flags, void *key)
{
	struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data,
						  wq);

	/*
	 * If we fail to get a budget, return -1 to interrupt the wake up
	 * loop in __wake_up_common.
	 */
	if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw)))
		return -1;

	data->got_token = true;
	list_del_init(&curr->entry);
	wake_up_process(data->task);
	return 1;
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
		       unsigned long rw, spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
	struct wbt_wait_data data = {
		.wq = {
			.func	= wbt_wake_function,
			.entry	= LIST_HEAD_INIT(data.wq.entry),
		},
		.task = current,
		.rwb = rwb,
		.rqw = rqw,
		.rw = rw,
	};
	bool has_sleeper;

	has_sleeper = wq_has_sleeper(&rqw->wait);
	if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
		return;

	prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
	do {
		if (data.got_token)
			break;

		if (!has_sleeper &&
		    rq_wait_inc_below(rqw, get_limit(rwb, rw))) {
			finish_wait(&rqw->wait, &data.wq);

			/*
			 * We raced with wbt_wake_function() getting a token,
			 * which means we now have two. Put our local token
			 * and wake anyone else potentially waiting for one.
			 */
			if (data.got_token)
				wbt_rqw_done(rwb, rqw, wb_acct);
			break;
		}

		if (lock) {
			spin_unlock_irq(lock);
			io_schedule();
			spin_lock_irq(lock);
		} else
			io_schedule();

		has_sleeper = false;
	} while (1);

	finish_wait(&rqw->wait, &data.wq);
}

static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_WRITE:
		/*
		 * Don't throttle WRITE_ODIRECT
		 */
		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
		    (REQ_SYNC | REQ_IDLE))
			return false;
		/* fallthrough */
	case REQ_OP_DISCARD:
		return true;
	default:
		return false;
	}
}

static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
	enum wbt_flags flags = 0;

	if (!rwb_enabled(rwb))
		return 0;

	if (bio_op(bio) == REQ_OP_READ) {
		flags = WBT_READ;
	} else if (wbt_should_throttle(rwb, bio)) {
		if (current_is_kswapd())
			flags |= WBT_KSWAPD;
		if (bio_op(bio) == REQ_OP_DISCARD)
			flags |= WBT_DISCARD;
		flags |= WBT_TRACKED;
	}
	return flags;
}

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
	__wbt_done(rqos, flags);
}

/*
 * May sleep, if we have exceeded the writeback limits. Caller can pass
 * in an irq held spinlock, if it holds one when calling this function.
 * If we do sleep, we'll release and re-grab it.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags;

	flags = bio_to_wbt_flags(rwb, bio);
	if (!(flags & WBT_TRACKED)) {
		if (flags & WBT_READ)
			wb_timestamp(rwb, &rwb->last_issue);
		return;
	}

	__wbt_wait(rwb, flags, bio->bi_opf, lock);

	if (!blk_stat_is_active(rwb->cb))
		rwb_arm_timer(rwb);
}

static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;

	/*
	 * Track the sync issue, in case it takes a long time to complete;
	 * it allows us to react more quickly if that happens. Note that
	 * this is just a hint. The request can go away when it completes,
	 * so it's important we never dereference it. We only use the address
	 * to compare with, which is why we store the sync_issue time locally.
	 */
	if (wbt_is_read(rq) && !rwb->sync_issue) {
		rwb->sync_cookie = rq;
		rwb->sync_issue = rq->io_start_time_ns;
	}
}

void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);
	if (!rwb_enabled(rwb))
		return;
	if (rq == rwb->sync_cookie) {
		rwb->sync_issue = 0;
		rwb->sync_cookie = NULL;
	}
}

void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (rqos) {
		RQWB(rqos)->rq_depth.queue_depth = depth;
		__wbt_update_limits(RQWB(rqos));
	}
}

void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (rqos)
		RQWB(rqos)->wc = write_cache_on;
}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	/* Throttling already enabled? */
	if (rqos)
		return;

	/* Queue not registered? Maybe shutting down... */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return;

	if ((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) ||
	    (q->request_fn && IS_ENABLED(CONFIG_BLK_WBT_SQ)))
		wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
	/*
	 * We default to 2msec for non-rotational storage, and 75msec
	 * for rotational storage.
	 */
	if (blk_queue_nonrot(q))
		return 2000000ULL;
	else
		return 75000000ULL;
}

static int wbt_data_dir(const struct request *rq)
{
	const int op = req_op(rq);

	if (op == REQ_OP_READ)
		return READ;
	else if (op_is_write(op))
		return WRITE;

	/* don't account */
	return -1;
}

static void wbt_exit(struct rq_qos *rqos)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct request_queue *q = rqos->q;

	blk_stat_remove_callback(q, rwb->cb);
	blk_stat_free_callback(rwb->cb);
	kfree(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	struct rq_wb *rwb;
	if (!rqos)
		return;
	rwb = RQWB(rqos);
	if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
		blk_stat_deactivate(rwb->cb);
		rwb->wb_normal = 0;
	}
}
EXPORT_SYMBOL_GPL(wbt_disable_default);


static struct rq_qos_ops wbt_rqos_ops = {
	.throttle = wbt_wait,
	.issue = wbt_issue,
	.track = wbt_track,
	.requeue = wbt_requeue,
	.done = wbt_done,
	.cleanup = wbt_cleanup,
	.exit = wbt_exit,
};

int wbt_init(struct request_queue *q)
{
	struct rq_wb *rwb;
	int i;

	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
	if (!rwb)
		return -ENOMEM;

	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
	if (!rwb->cb) {
		kfree(rwb);
		return -ENOMEM;
	}

	for (i = 0; i < WBT_NUM_RWQ; i++)
		rq_wait_init(&rwb->rq_wait[i]);

	rwb->rqos.id = RQ_QOS_WBT;
	rwb->rqos.ops = &wbt_rqos_ops;
	rwb->rqos.q = q;
	rwb->last_comp = rwb->last_issue = jiffies;
	rwb->win_nsec = RWB_WINDOW_NSEC;
	rwb->enable_state = WBT_STATE_ON_DEFAULT;
	rwb->wc = 1;
	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
	__wbt_update_limits(rwb);

	/*
	 * Assign rwb and add the stats callback.
	 */
	rq_qos_add(q, &rwb->rqos);
	blk_stat_add_callback(q, rwb->cb);

	rwb->min_lat_nsec = wbt_default_latency_nsec(q);

	wbt_set_queue_depth(q, blk_queue_depth(q));
	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

	return 0;
}