/*
 * Budget Fair Queueing (BFQ) I/O scheduler.
 *
 * Based on ideas and code from CFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini@google.com>
 *
 * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2 of the
 *  License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 * BFQ is a proportional-share I/O scheduler, with some extra
 * low-latency capabilities. BFQ also supports full hierarchical
 * scheduling through cgroups. The next paragraphs provide an
 * introduction to BFQ's inner workings. Details on BFQ benefits,
 * usage and limitations can be found in
 * Documentation/block/bfq-iosched.txt.
 *
 * BFQ is a proportional-share storage-I/O scheduling algorithm based
 * on the slice-by-slice service scheme of CFQ. But BFQ assigns
 * budgets, measured in number of sectors, to processes instead of
 * time slices. The device is not granted to the in-service process
 * for a given time slice, but until it has exhausted its assigned
 * budget. This change from the time to the service domain enables BFQ
 * to distribute the device throughput among processes as desired,
 * without any distortion due to throughput fluctuations, or to device
 * internal queueing. BFQ uses an ad hoc internal scheduler, called
 * B-WF2Q+, to schedule processes according to their budgets. More
 * precisely, BFQ schedules queues associated with processes. Each
 * process/queue is assigned a user-configurable weight, and B-WF2Q+
 * guarantees that each queue receives a fraction of the throughput
 * proportional to its weight. Thanks to the accurate policy of
 * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound
 * processes issuing sequential requests (to boost the throughput),
 * and yet guarantee a low latency to interactive and soft real-time
 * applications.
 *
 * In particular, to provide these low-latency guarantees, BFQ
 * explicitly privileges the I/O of two classes of time-sensitive
 * applications: interactive and soft real-time. In more detail, BFQ
 * behaves this way if the low_latency parameter is set (default
 * configuration). This feature enables BFQ to provide applications in
 * these classes with a very low latency.
 *
 * To implement this feature, BFQ constantly tries to detect whether
 * the I/O requests in a bfq_queue come from an interactive or a soft
 * real-time application. For brevity, in these cases, the queue is
 * said to be interactive or soft real-time. In both cases, BFQ
 * privileges the service of the queue, over that of non-interactive
 * and non-soft-real-time queues. This privileging is performed,
 * mainly, by raising the weight of the queue. So, for brevity, we
 * simply call weight-raising periods the time periods during which a
 * queue is privileged because it is deemed interactive or soft
 * real-time.
 *
 * The detection of soft real-time queues/applications is described in
 * detail in the comments on the function
 * bfq_bfqq_softrt_next_start. On the other hand, the detection of an
 * interactive queue works as follows: a queue is deemed interactive
 * if it is constantly non-empty only for a limited time interval,
 * after which it does become empty. The queue may be deemed
 * interactive again (for a limited time), if it restarts being
 * constantly non-empty, provided that this happens only after the
 * queue has remained empty for a given minimum idle time.
 *
 * By default, BFQ computes automatically the above maximum time
 * interval, i.e., the time interval after which a constantly
 * non-empty queue stops being deemed interactive. Since a queue is
 * weight-raised while it is deemed interactive, this maximum time
 * interval happens to coincide with the (maximum) duration of the
 * weight-raising for interactive queues.
 *
 * Finally, BFQ also features additional heuristics for preserving
 * both a low latency and a high throughput on NCQ-capable, rotational
 * or flash-based devices, and for getting the job done quickly for
 * applications consisting of many I/O-bound processes.
 *
 * NOTE: if the main or only goal, with a given device, is to achieve
 * the maximum-possible throughput at all times, then do switch off
 * all low-latency heuristics for that device, by setting low_latency
 * to 0.
 *
 * BFQ is described in [1], where also a reference to the initial,
 * more theoretical paper on BFQ can be found. The interested reader
 * can find in the latter paper full details on the main algorithm, as
 * well as formulas of the guarantees and formal proofs of all the
 * properties.  With respect to the version of BFQ presented in these
 * papers, this implementation adds a few more heuristics, such as the
 * ones that guarantee a low latency to interactive and soft real-time
 * applications, and a hierarchical extension based on H-WF2Q+.
 *
 * B-WF2Q+ is based on WF2Q+, which is described in [2], together with
 * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+
 * with O(log N) complexity derives from the one introduced with EEVDF
 * in [3].
 *
 * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O
 *     Scheduler", Proceedings of the First Workshop on Mobile System
 *     Technologies (MST-2015), May 2015.
 *     http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf
 *
 * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing
 *     Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689,
 *     Oct 1997.
 *
 *     http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
 *
 * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline
 *     First: A Flexible and Accurate Mechanism for Proportional Share
 *     Resource Allocation", technical report.
 *
 *     http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
#include "bfq-iosched.h"
#include "blk-wbt.h"

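/*
 * Minimal sketch (for illustration only, not used by the scheduler) of
 * the B-WF2Q+ timestamping idea summarized in the header comment above.
 * The function name and the fixed-point shift below are illustrative
 * assumptions; the actual timestamping code lives in bfq-wf2q.c. An
 * entity backlogged at virtual time vstart, with a given budget (in
 * sectors) and weight, gets the virtual finish time computed below;
 * among eligible entities, the one with the smallest virtual finish
 * time is served first, which is what makes each queue's share of the
 * throughput proportional to its weight.
 */
static inline u64 bfq_virtual_finish_sketch(u64 vstart, unsigned long budget,
					    unsigned int weight)
{
	/* finish = start + budget / weight, in fixed-point arithmetic */
	return vstart + (((u64)budget << 22) / weight);
}
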
/*
 * Generate, for each queue flag BFQQF_name, the three helpers
 * bfq_mark_bfqq_name(), bfq_clear_bfqq_name() and bfq_bfqq_name(),
 * which respectively set, clear and test that flag.
 */
#define BFQ_BFQQ_FNS(name)						\
void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__set_bit(BFQQF_##name, &(bfqq)->flags);			\
}									\
void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)			\
{									\
	__clear_bit(BFQQF_##name, &(bfqq)->flags);			\
}									\
int bfq_bfqq_##name(const struct bfq_queue *bfqq)			\
{									\
	return test_bit(BFQQF_##name, &(bfqq)->flags);			\
}

BFQ_BFQQ_FNS(just_created);
BFQ_BFQQ_FNS(busy);
BFQ_BFQQ_FNS(wait_request);
BFQ_BFQQ_FNS(non_blocking_wait_rq);
BFQ_BFQQ_FNS(fifo_expire);
BFQ_BFQQ_FNS(has_short_ttime);
BFQ_BFQQ_FNS(sync);
BFQ_BFQQ_FNS(IO_bound);
BFQ_BFQQ_FNS(in_large_burst);
BFQ_BFQQ_FNS(coop);
BFQ_BFQQ_FNS(split_coop);
BFQ_BFQQ_FNS(softrt_update);
#undef BFQ_BFQQ_FNS

/* Expiration time of sync (0) and async (1) requests, in ns. */
static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };

/* Maximum backwards seek (magic number lifted from CFQ), in KiB. */
static const int bfq_back_max = 16 * 1024;

/* Penalty factor applied to the distance of a backwards seek. */
static const int bfq_back_penalty = 2;

/* Idling period duration, in ns. */
static u64 bfq_slice_idle = NSEC_PER_SEC / 125;

/* Minimum number of assigned budgets for which stats are safe to compute. */
static const int bfq_stats_min_budgets = 194;

/* Default maximum budget value, in sectors. */
static const int bfq_default_max_budget = 16 * 1024;

/*
 * When a sync request is dispatched, the queue that contains that
 * request, and all the ancestor entities of that queue, are charged
 * with the number of sectors of the request. In contrast, if the
 * request is async, then the queue and its ancestor entities are
 * charged with the number of sectors of the request, multiplied by
 * the factor below. This throttles the bandwidth for async I/O,
 * w.r.t. sync I/O, and it is done to counter the tendency of async
 * writes to steal I/O throughput from reads.
 *
 * The current value of this parameter is the result of a tuning with
 * several hardware and software configurations. We tried to find the
 * lowest value for which writes do not cause noticeable problems to
 * reads. In fact, the lower this parameter, the more stable the I/O
 * control, in the following respect.  The lower this parameter is,
 * the less the bandwidth enjoyed by a group decreases
 * - when the group does writes, w.r.t. when it does reads;
 * - when other groups do reads, w.r.t. when they do writes.
 */
static const int bfq_async_charge_factor = 3;

/* Default timeout value, in jiffies, approximating CFQ defaults. */
const int bfq_timeout = HZ / 8;

/*
 * Time limit for merging (see comments in bfq_setup_cooperator). Set
 * to the slowest value that, in our tests, proved to be effective in
 * removing false positives, while not causing true positives to miss
 * queue merging.
 *
 * As can be deduced from the low time limit below, queue merging, if
 * successful, happens at the very beginning of the I/O of the involved
 * cooperating processes, as a consequence of the arrival of the very
 * first requests from each cooperator.  After that, there is very
 * little chance to find cooperators.
 */
static const unsigned long bfq_merge_time_limit = HZ/10;

static struct kmem_cache *bfq_pool;

/* Below this threshold (in ns), we consider thinktime immediate. */
#define BFQ_MIN_TT		(2 * NSEC_PER_MSEC)

/* hw_tag detection: parallel requests threshold and min samples needed. */
#define BFQ_HW_QUEUE_THRESHOLD	4
#define BFQ_HW_QUEUE_SAMPLES	32

#define BFQQ_SEEK_THR		(sector_t)(8 * 100)
#define BFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define BFQQ_CLOSE_THR		(sector_t)(8 * 1024)
/* A queue is seeky if more than 19 of its last 32 requests were far apart. */
#define BFQQ_SEEKY(bfqq)	(hweight32(bfqq->seek_history) > 19)

/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES	32
/* Min observation time interval required to perform a peak-rate update (ns) */
#define BFQ_RATE_MIN_INTERVAL	(300*NSEC_PER_MSEC)
/* Target observation time interval for a peak-rate update (ns) */
#define BFQ_RATE_REF_INTERVAL	NSEC_PER_SEC

/*
 * Shift used for peak-rate fixed precision calculations.
 * With
 * - the current shift: 16 positions
 * - the current type used to store rate: u32
 * - the current unit of measure for rate: [sectors/usec], or, more precisely,
 *   [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift,
 * the range of rates that can be stored is
 * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec =
 * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec =
 * [15, 65G] sectors/sec
 * which, assuming a sector size of 512 B, corresponds to a range of
 * [7.5K, 33T] B/sec
 */
#define BFQ_RATE_SHIFT		16
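
/*
 * Illustrative sketch of the fixed-point convention above (not used by
 * the scheduler; the helper name is an assumption made for this
 * example): a stored value R represents R / 2^BFQ_RATE_SHIFT
 * sectors/usec, so converting it to bytes per second, with 512-byte
 * sectors, amounts to the computation below. For instance, R = 2^16
 * represents 1 sector/usec, i.e., 512 MB/s.
 */
static inline u64 bfq_rate_to_bps_sketch(u32 rate)
{
	return (((u64)rate * USEC_PER_SEC) >> BFQ_RATE_SHIFT) * 512;
}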

/*
 * When configured for computing the duration of the weight-raising
 * for interactive queues automatically (see the comments at the
 * beginning of this file), BFQ does it using the following formula:
 * duration = (ref_rate / r) * ref_wr_duration,
 * where r is the peak rate of the device, and ref_rate and
 * ref_wr_duration are two reference parameters.  In particular,
 * ref_rate is the peak rate of the reference storage device (see
 * below), and ref_wr_duration is about the maximum time needed, with
 * BFQ and while reading two files in parallel, to load typical large
 * applications on the reference device (see the comments on
 * max_service_from_wr below, for more details on how ref_wr_duration
 * is obtained).  In practice, the slower/faster the device at hand
 * is, the more/less it takes to load applications with respect to the
 * reference device.  Accordingly, the longer/shorter BFQ grants
 * weight raising to interactive applications.
 *
 * BFQ uses two different reference pairs (ref_rate, ref_wr_duration),
 * depending on whether the device is rotational or non-rotational.
 *
 * In the following definitions, ref_rate[0] and ref_wr_duration[0]
 * are the reference values for a rotational device, whereas
 * ref_rate[1] and ref_wr_duration[1] are the reference values for a
 * non-rotational device. The reference rates are not the actual peak
 * rates of the devices used as a reference, but slightly lower
 * values. The reason for using slightly lower values is that the
 * peak-rate estimator tends to yield slightly lower values than the
 * actual peak rate (it can yield the actual peak rate only if there
 * is only one process doing I/O, and the process does sequential
 * I/O).
 *
 * The reference peak rates are measured in sectors/usec, left-shifted
 * by BFQ_RATE_SHIFT.
 */
static int ref_rate[2] = {14000, 33000};
/*
 * To improve readability, a conversion function is used to initialize
 * the following array, which entails that the array can be
 * initialized only in a function.
 */
static int ref_wr_duration[2];
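
/*
 * Minimal sketch (for illustration only; the function name is an
 * assumption, and the actual computation is performed by
 * bfq_wr_duration() below, using a precomputed product) of the formula
 * duration = (ref_rate / r) * ref_wr_duration given above. Both
 * ref_rate and the peak rate r are expressed in the same fixed-point
 * unit, so the unit cancels out and the result is in the same unit as
 * ref_wr_duration.
 */
static inline u64 bfq_wr_duration_sketch(bool nonrot, u32 peak_rate)
{
	u64 dur = (u64)ref_rate[nonrot] * ref_wr_duration[nonrot];

	do_div(dur, peak_rate);
	return dur;
}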

/*
 * BFQ uses the above-detailed, time-based weight-raising mechanism to
 * privilege interactive tasks. This mechanism is vulnerable to the
 * following false positives: I/O-bound applications that will go on
 * doing I/O for much longer than the duration of weight
 * raising. These applications have basically no benefit from being
 * weight-raised at the beginning of their I/O. On the opposite end,
 * while being weight-raised, these applications
 * a) unjustly steal throughput from applications that may actually need
 * low latency;
 * b) make BFQ uselessly perform device idling; device idling results
 * in loss of device throughput with most flash-based storage, and may
 * increase latencies when used purposelessly.
 *
 * BFQ tries to reduce these problems, by adopting the following
 * countermeasure. To introduce this countermeasure, we need first to
 * finish explaining how the duration of weight-raising for
 * interactive tasks is computed.
 *
 * For a bfq_queue deemed as interactive, the duration of weight
 * raising is dynamically adjusted, as a function of the estimated
 * peak rate of the device, so as to be equal to the time needed to
 * execute the 'largest' interactive task we benchmarked so far. By
 * largest task, we mean the task for which each involved process has
 * to do more I/O than for any of the other tasks we benchmarked. This
 * reference interactive task is the start-up of LibreOffice Writer,
 * and in this task each process/bfq_queue needs to have at most ~110K
 * sectors transferred.
 *
 * This last piece of information enables BFQ to reduce the actual
 * duration of weight-raising for at least one class of I/O-bound
 * applications: those doing sequential or quasi-sequential I/O. An
 * example is file copy. In fact, once started, the main I/O-bound
 * processes of these applications usually consume the above 110K
 * sectors in much less time than the processes of an application that
 * is starting, because these I/O-bound processes will greedily devote
 * almost all their CPU cycles only to their target,
 * throughput-friendly I/O operations. This is even more true if BFQ
 * happens to be underestimating the device peak rate, and thus
 * overestimating the duration of weight raising. But, according to
 * our measurements, once they have transferred 110K sectors, these
 * processes have no right to be weight-raised any longer.
 *
 * Based on this last consideration, BFQ ends weight-raising for a
 * bfq_queue if the latter happens to have received an amount of
 * service at least equal to the following constant. The constant is
 * set to slightly more than 110K, to have a minimum safety margin.
 *
 * This early ending of weight-raising reduces the amount of time
 * during which interactive false positives cause the two problems
 * described at the beginning of these comments.
 */
static const unsigned long max_service_from_wr = 120000;
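
/*
 * Minimal sketch of how the early end of weight-raising described
 * above can be expressed (for illustration only; the helper name is an
 * assumption, the actual check is performed in the weight-update path,
 * and we assume the queue tracks the service received while
 * weight-raised in service_from_wr): once a weight-raised queue has
 * received more than max_service_from_wr sectors of service, its
 * weight-raising period is terminated early.
 */
static inline bool bfq_wr_service_exhausted_sketch(struct bfq_queue *bfqq)
{
	return bfqq->service_from_wr > max_service_from_wr;
}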

#define RQ_BIC(rq)		icq_to_bic((rq)->elv.priv[0])
#define RQ_BFQQ(rq)		((rq)->elv.priv[1])

struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
{
	return bic->bfqq[is_sync];
}

void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
{
	bic->bfqq[is_sync] = bfqq;
}

struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
{
	return bic->icq.q->elevator->elevator_data;
}

/**
 * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
 * @icq: the iocontext queue.
 */
static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
{
	/* bic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct bfq_io_cq, icq);
}

/**
 * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
 * @bfqd: the lookup key.
 * @ioc: the io_context of the process doing I/O.
 * @q: the request queue.
 */
static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
					struct io_context *ioc,
					struct request_queue *q)
{
	if (ioc) {
		unsigned long flags;
		struct bfq_io_cq *icq;

		spin_lock_irqsave(q->queue_lock, flags);
		icq = icq_to_bic(ioc_lookup_icq(ioc, q));
		spin_unlock_irqrestore(q->queue_lock, flags);

		return icq;
	}

	return NULL;
}

/*
 * Schedule a run of the queue, if there are requests pending and no
 * request in the driver will restart queueing.
 */
void bfq_schedule_dispatch(struct bfq_data *bfqd)
{
	if (bfqd->queued != 0) {
		bfq_log(bfqd, "schedule dispatch");
		blk_mq_run_hw_queues(bfqd->queue, true);
	}
}

#define bfq_class_idle(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define bfq_class_rt(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define bfq_sample_valid(samples)	((samples) > 80)

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now.  Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *bfq_choose_req(struct bfq_data *bfqd,
				      struct request *rq1,
				      struct request *rq2,
				      sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define BFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define BFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned int wrap = 0; /* bit mask: requests behind the disk head? */

	if (!rq1 || rq1 == rq2)
		return rq2;
	if (!rq2)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
		return rq1;
	else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
		return rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * By definition, 1KiB is 2 sectors.
	 */
	back_max = bfqd->bfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * bfqd->bfq_back_penalty;
	else
		wrap |= BFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;

		if (s1 >= s2)
			return rq1;
		else
			return rq2;

	case BFQ_RQ2_WRAP:
		return rq1;
	case BFQ_RQ1_WRAP:
		return rq2;
	case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
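
/*
 * Worked example for the choice logic above, with illustrative numbers:
 * suppose the head is at sector 1000, bfq_back_max is large enough to
 * cover both requests, and bfq_back_penalty is 2. A request at sector
 * 1200 gets d = 1200 - 1000 = 200, while a request at sector 900 (a
 * short backward seek) gets d = (1000 - 900) * 2 = 200. The distances
 * tie, so the request at the larger sector, i.e., the forward one, is
 * chosen.
 */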

/*
 * Async I/O can easily starve sync I/O (both sync reads and sync
 * writes), by consuming all tags. Similarly, storms of sync writes,
 * such as those that sync(2) may trigger, can starve sync reads.
 * Limit depths of async I/O and sync writes so as to counter both
 * problems.
 */
static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	struct bfq_data *bfqd = data->q->elevator->elevator_data;

	if (op_is_sync(op) && !op_is_write(op))
		return;

	/*
	 * The allowed depth depends on whether some queue is currently
	 * weight-raised and on whether the operation is sync.
	 */
	data->shallow_depth =
		bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];

	bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
			__func__, bfqd->wr_busy_queues, op_is_sync(op),
			data->shallow_depth);
}

static struct bfq_queue *
bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
		       sector_t sector, struct rb_node **ret_parent,
		       struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct bfq_queue *bfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		bfqq = rb_entry(parent, struct bfq_queue, pos_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		bfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;

	bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
		(unsigned long long)sector,
		bfqq ? bfqq->pid : 0);

	return bfqq;
}

/*
 * Tell whether bfqq's first I/O happened long enough ago that bfqq is
 * no longer a candidate for queue merging (see bfq_merge_time_limit).
 */
static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
{
	return bfqq->service_from_backlogged > 0 &&
		time_is_before_jiffies(bfqq->first_IO_time +
				       bfq_merge_time_limit);
}

void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct rb_node **p, *parent;
	struct bfq_queue *__bfqq;

	if (bfqq->pos_root) {
		rb_erase(&bfqq->pos_node, bfqq->pos_root);
		bfqq->pos_root = NULL;
	}

	/*
	 * bfqq cannot be merged any longer (see comments in
	 * bfq_setup_cooperator): no point in adding bfqq into the
	 * position tree.
	 */
	if (bfq_too_late_for_merging(bfqq))
		return;

	if (bfq_class_idle(bfqq))
		return;
	if (!bfqq->next_rq)
		return;

	bfqq->pos_root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree;
	__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
			blk_rq_pos(bfqq->next_rq), &parent, &p);
	if (!__bfqq) {
		rb_link_node(&bfqq->pos_node, parent, p);
		rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
	} else
		bfqq->pos_root = NULL;
}

/*
 * Tell whether there are active queues or groups with differentiated weights.
 */
static bool bfq_differentiated_weights(struct bfq_data *bfqd)
{
	/*
	 * For weights to differ, at least one of the trees must contain
	 * at least two nodes.
	 */
	return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
		(bfqd->queue_weights_tree.rb_node->rb_left ||
		 bfqd->queue_weights_tree.rb_node->rb_right)
#ifdef CONFIG_BFQ_GROUP_IOSCHED
	       ) ||
	       (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
		(bfqd->group_weights_tree.rb_node->rb_left ||
		 bfqd->group_weights_tree.rb_node->rb_right)
#endif
	       );
}
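
/*
 * Worked example for the check above (illustrative): each node of a
 * weight-counter tree represents one distinct weight currently in use
 * by an active queue or group (see bfq_weights_tree_add below), so a
 * tree whose root has at least one child necessarily contains two or
 * more distinct weights, i.e., the weights are differentiated.
 */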

/*
 * The following function returns true if every queue must receive the
 * same share of the throughput (this condition is used when deciding
 * whether idling may be disabled, see the comments in the function
 * bfq_better_to_idle()).
 *
 * Such a scenario occurs when:
 * 1) all active queues have the same weight,
 * 2) all active groups at the same level in the groups tree have the same
 *    weight,
 * 3) all active groups at the same level in the groups tree have the same
 *    number of children.
 *
 * Unfortunately, keeping the necessary state for evaluating exactly the
 * above symmetry conditions would be quite complex and time-consuming.
 * Therefore this function evaluates, instead, the following stronger
 * sub-conditions, for which it is much easier to maintain the needed
 * state:
 * 1) all active queues have the same weight,
 * 2) all active groups have the same weight,
 * 3) all active groups have at most one active child each.
 * In particular, the last two conditions are always true if hierarchical
 * support and the cgroups interface are not enabled, thus no state needs
 * to be maintained in this case.
 */
static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
{
	return !bfq_differentiated_weights(bfqd);
}

/*
 * If the weight-counter tree passed as input contains no counter for
 * the weight of the input entity, then add that counter; otherwise just
 * increment the existing counter.
 *
 * Note that weight-counter trees contain few nodes in mostly symmetric
 * scenarios. For example, if all queues have the same weight, then the
 * weight-counter tree for the queues may contain at most one node.
 * This holds even if low_latency is on, because weight-raised queues
 * are not inserted in the tree.
 * In most scenarios, the rate at which nodes are created/destroyed
 * should be low too.
 */
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
			  struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/*
	 * Do not insert if the entity is already associated with a
	 * counter, which happens if:
	 *   1) the entity is associated with a queue,
	 *   2) a request arrival has caused the queue to become both
	 *      non-weight-raised, and hence change its weight, and
	 *      backlogged; in this respect, each of the two events
	 *      causes an invocation of this function,
	 *   3) this is the invocation of this function caused by the
	 *      second event. This second invocation is actually useless,
	 *      and we handle this fact by exiting immediately. More
	 *      efficient or clearer solutions might possibly be adopted.
	 */
	if (entity->weight_counter)
		return;

	while (*new) {
		struct bfq_weight_counter *__counter = container_of(*new,
						struct bfq_weight_counter,
						weights_node);
		parent = *new;

		if (entity->weight == __counter->weight) {
			entity->weight_counter = __counter;
			goto inc_counter;
		}
		if (entity->weight < __counter->weight)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
					 GFP_ATOMIC);

	/*
	 * In the unlucky event of an allocation failure, we just
	 * exit. This will cause the weight of entity to not be
	 * considered in bfq_differentiated_weights, which, in its
	 * turn, causes the scenario to be deemed wrongly symmetric in
	 * case entity's weight would have been the only weight making
	 * the scenario asymmetric. On the bright side, however, no
	 * imbalance will occur when entity becomes inactive again (the
	 * invocation of this function is triggered by an activation
	 * of entity). In fact, bfq_weights_tree_remove does nothing
	 * if !entity->weight_counter.
	 */
	if (unlikely(!entity->weight_counter))
		return;

	entity->weight_counter->weight = entity->weight;
	rb_link_node(&entity->weight_counter->weights_node, parent, new);
	rb_insert_color(&entity->weight_counter->weights_node, root);

inc_counter:
	entity->weight_counter->num_active++;
}

/*
 * Decrement the weight counter associated with the entity, and, if the
 * counter reaches 0, remove the counter from the tree.
 * See the comments to the function bfq_weights_tree_add() for considerations
 * about overhead.
 */
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
			       struct bfq_entity *entity,
			       struct rb_root *root)
{
	if (!entity->weight_counter)
		return;

	entity->weight_counter->num_active--;
	if (entity->weight_counter->num_active > 0)
		goto reset_entity_pointer;

	rb_erase(&entity->weight_counter->weights_node, root);
	kfree(entity->weight_counter);

reset_entity_pointer:
	entity->weight_counter = NULL;
}

/*
 * Invoke __bfq_weights_tree_remove on bfqq and all its inactive
 * parent entities.
 */
void bfq_weights_tree_remove(struct bfq_data *bfqd,
			     struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = bfqq->entity.parent;

	__bfq_weights_tree_remove(bfqd, &bfqq->entity,
				  &bfqd->queue_weights_tree);

	for_each_entity(entity) {
		struct bfq_sched_data *sd = entity->my_sched_data;

		if (sd->next_in_service || sd->in_service_entity) {
			/*
			 * entity is still active, because either
			 * next_in_service or in_service_entity is not
			 * NULL (see the comments on the definition of
			 * next_in_service for details on why
			 * in_service_entity must be checked too).
			 *
			 * As a consequence, the weight of entity is
			 * not to be removed. In addition, if entity
			 * is active, then its parent entities are
			 * active as well, and thus their weights are
			 * not to be removed either. In the end, this
			 * loop must stop here.
			 */
			break;
		}
		__bfq_weights_tree_remove(bfqd, entity,
					  &bfqd->group_weights_tree);
	}
}

/*
 * Return expired entry, or NULL to just start from scratch in rbtree.
 */
static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
				      struct request *last)
{
	struct request *rq;

	if (bfq_bfqq_fifo_expire(bfqq))
		return NULL;

	bfq_mark_bfqq_fifo_expire(bfqq);

	rq = rq_entry_fifo(bfqq->fifo.next);

	if (rq == last || ktime_get_ns() < rq->fifo_time)
		return NULL;

	bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
	return rq;
}

static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
					struct bfq_queue *bfqq,
					struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next, *prev = NULL;

	/* Follow expired path, else get first next available. */
	next = bfq_check_fifo(bfqq, last);
	if (next)
		return next;

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&bfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
}

/* see the definition of bfq_async_charge_factor for details */
static unsigned long bfq_serv_to_charge(struct request *rq,
					struct bfq_queue *bfqq)
{
	if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
		return blk_rq_sectors(rq);

	return blk_rq_sectors(rq) * bfq_async_charge_factor;
}
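
/*
 * Worked example for the charging rule above, with illustrative
 * numbers: a 256-sector request from a sync or weight-raised queue is
 * charged 256 sectors of budget, whereas the same request from a plain
 * async queue is charged 256 * bfq_async_charge_factor = 768 sectors,
 * i.e., async I/O consumes its budget three times faster.
 */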

/**
 * bfq_updated_next_req - update the queue after a new next_rq selection.
 * @bfqd: the device data the queue belongs to.
 * @bfqq: the queue to update.
 *
 * If the first request of a queue changes we make sure that the queue
 * has enough budget to serve at least its first request (if the
 * request has grown).  We do this because if the queue does not have
 * enough budget for its first request, it has to go through two
 * dispatch rounds to actually get it dispatched.
 */
static void bfq_updated_next_req(struct bfq_data *bfqd,
				 struct bfq_queue *bfqq)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct request *next_rq = bfqq->next_rq;
	unsigned long new_budget;

	if (!next_rq)
		return;

	if (bfqq == bfqd->in_service_queue)
		/*
		 * In order not to break guarantees, budgets cannot be
		 * changed after an entity has been selected.
		 */
		return;

	new_budget = max_t(unsigned long, bfqq->max_budget,
			   bfq_serv_to_charge(next_rq, bfqq));
	if (entity->budget != new_budget) {
		entity->budget = new_budget;
		bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
					 new_budget);
		bfq_requeue_bfqq(bfqd, bfqq, false);
	}
}

static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
{
	u64 dur;

	if (bfqd->bfq_wr_max_time > 0)
		return bfqd->bfq_wr_max_time;

	dur = bfqd->rate_dur_prod;
	do_div(dur, bfqd->peak_rate);

	/*
	 * Limit duration between 3 and 25 seconds. The upper limit
	 * has been conservatively set after the following worst case:
	 * on a QEMU/KVM virtual machine
	 * - running in a slow PC
	 * - with a virtual disk stacked on a slow low-end 5400rpm HDD
	 * - serving a heavy I/O workload, such as the sequential reading
	 *   of several files
	 * mplayer took 23 seconds to start, if constantly weight-raised.
	 *
	 * As for higher values than that accommodating the above bad
	 * scenario, tests show that higher values would often yield
	 * the opposite of the desired result, i.e., would worsen
	 * responsiveness by allowing non-interactive applications to
	 * preserve weight raising for too long.
	 *
	 * On the other end, lower values than 3 seconds make it
	 * difficult for most interactive tasks to complete their jobs
	 * before weight-raising finishes.
	 */
	return clamp_val(dur, msecs_to_jiffies(3000), msecs_to_jiffies(25000));
}

/* switch back from soft real-time to interactive weight raising */
static void switch_back_to_interactive_wr(struct bfq_queue *bfqq,
					  struct bfq_data *bfqd)
{
	bfqq->wr_coeff = bfqd->bfq_wr_coeff;
	bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
	bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt;
}

static void
bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
		      struct bfq_io_cq *bic, bool bfq_already_existing)
{
	unsigned int old_wr_coeff = bfqq->wr_coeff;
	bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq);

	if (bic->saved_has_short_ttime)
		bfq_mark_bfqq_has_short_ttime(bfqq);
	else
		bfq_clear_bfqq_has_short_ttime(bfqq);

	if (bic->saved_IO_bound)
		bfq_mark_bfqq_IO_bound(bfqq);
	else
		bfq_clear_bfqq_IO_bound(bfqq);

	bfqq->ttime = bic->saved_ttime;
	bfqq->wr_coeff = bic->saved_wr_coeff;
	bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
	bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish;
	bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time;

	if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) ||
	    time_is_before_jiffies(bfqq->last_wr_start_finish +
				   bfqq->wr_cur_max_time))) {
		if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
		    !bfq_bfqq_in_large_burst(bfqq) &&
		    time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt +
					     bfq_wr_duration(bfqd))) {
			switch_back_to_interactive_wr(bfqq, bfqd);
		} else {
			bfqq->wr_coeff = 1;
			bfq_log_bfqq(bfqq->bfqd, bfqq,
				     "resume state: switching off wr");
		}
	}

	/* make sure weight will be updated, however we got here */
	bfqq->entity.prio_changed = 1;

	if (likely(!busy))
		return;

	if (old_wr_coeff == 1 && bfqq->wr_coeff > 1)
		bfqd->wr_busy_queues++;
	else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1)
		bfqd->wr_busy_queues--;
}

/*
 * Return the number of process references held on bfqq, i.e., bfqq->ref
 * minus the references held by currently allocated requests and the one
 * held while bfqq's entity is on a service tree.
 */
static int bfqq_process_refs(struct bfq_queue *bfqq)
{
	return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
}

/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	struct bfq_queue *item;
	struct hlist_node *n;

	hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
		hlist_del_init(&item->burst_list_node);
	hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
	bfqd->burst_size = 1;
	bfqd->burst_parent_entity = bfqq->entity.parent;
}

/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
	/* Increment burst size to take into account also bfqq */
	bfqd->burst_size++;

	if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
		struct bfq_queue *pos, *bfqq_item;
		struct hlist_node *n;

		/*
		 * Enough queues have been activated shortly after each
		 * other to consider this burst as large.
		 */
		bfqd->large_burst = true;

		/*
		 * We can now mark all queues in the burst list as
		 * belonging to a large burst.
		 */
		hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
				     burst_list_node)
			bfq_mark_bfqq_in_large_burst(bfqq_item);
		bfq_mark_bfqq_in_large_burst(bfqq);

		/*
		 * From now on, and until the current burst finishes, any
		 * new queue being activated shortly after the last queue
		 * was inserted in the burst can be immediately marked as
		 * belonging to a large burst. So the burst list is not
		 * needed any more. Remove it.
		 */
		hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
					  burst_list_node)
			hlist_del_init(&pos->burst_list_node);
	} else /*
		* Burst not yet large: add bfqq to the burst list. Do
		* not increment the ref counter for bfqq, because bfqq
		* is removed from the burst list before freeing bfqq
		* in put_queue.
		*/
		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
}

/*
 * If many queues belonging to the same group happen to be created
 * shortly after each other, then the processes associated with these
 * queues have typically a common goal. In particular, bursts of queue
 * creations are usually caused by services or applications that spawn
 * many parallel threads/processes. Examples are systemd during boot,
 * or git grep. To help these processes get their job done as soon as
 * possible, it is usually better to not grant either weight-raising
 * or device idling to their queues.
 *
 * In this comment we describe, firstly, the reasons why this fact
 * holds, and, secondly, the next function, which implements the main
 * steps needed to properly mark these queues so that they can then be
 * treated in a different way.
 *
 * The above services or applications benefit mostly from a high
 * throughput: the quicker the requests of the activated queues are
 * cumulatively served, the sooner the target job of these queues gets
 * completed. As a consequence, weight-raising any of these queues,
 * which also implies idling the device for it, is almost always
 * counterproductive. In most cases it just lowers throughput.
 *
 * On the other hand, a burst of queue creations may be caused also by
 * the start of an application that does not consist of a lot of
 * parallel I/O-bound threads. In fact, with a complex application,
 * several short processes may need to be executed to start up the
 * application. In this respect, to start an application as quickly as
 * possible, the best thing to do is in any case to privilege the I/O
 * related to the application with respect to all other
 * I/O. Therefore, the best strategy to start as quickly as possible
 * an application that causes a burst of queue creations is to
 * weight-raise all the queues created during the burst. This is the
 * exact opposite of the best strategy for the other type of bursts.
 *
 * In the end, to take the best action for each of the two cases, the
 * two types of bursts need to be distinguished. Fortunately, this
 * seems relatively easy, by looking at the sizes of the bursts. In
 * particular, we found a threshold such that only bursts with a
 * larger size than that threshold are apparently caused by
 * services or commands such as systemd or git grep. For brevity,
 * hereafter we call just 'large' these bursts. BFQ *does not*
 * weight-raise queues whose creation occurs in a large burst. In
 * addition, for each of these queues BFQ performs or does not perform
 * idling depending on which choice boosts the throughput more. The
 * exact choice depends on the device and request pattern at
 * hand.
 *
 * Unfortunately, false positives may occur while an interactive task
 * is starting (e.g., an application is being started). The
 * consequence is that the queues associated with the task do not
 * enjoy weight raising as expected. Fortunately these false positives
 * are very rare. They typically occur if some service happens to
 * start doing I/O exactly when the interactive task starts.
 *
 * Turning back to the next function, it implements all the steps
 * needed to detect the occurrence of a large burst and to properly
 * mark all the queues belonging to it (so that they can then be
 * treated in a different way). This goal is achieved by maintaining a
 * "burst list" that holds, temporarily, the queues that belong to the
 * burst in progress. The list is then used to mark these queues as
 * belonging to a large burst if the burst does become large. The main
 * steps are the following.
 *
 * . when the very first queue is created, the queue is inserted into the
 *   list (as it could be the first queue in a possible burst)
 *
 * . if the current burst has not yet become large, and a queue Q that does
 *   not yet belong to the burst is activated shortly after the last time
 *   at which a new queue entered the burst list, then the function appends
 *   Q to the burst list
 *
 * . if, as a consequence of the previous step, the burst size reaches
 *   the large-burst threshold, then
 *
 *     . all the queues in the burst list are marked as belonging to a
 *       large burst
 *
 *     . the burst list is deleted; in fact, the burst list already served
 *       its purpose (keeping temporarily track of the queues in a burst,
 *       so as to be able to mark them as belonging to a large burst in the
 *       previous sub-step), and now is not needed any more
 *
 *     . the device enters a large-burst mode
 *
 * . if a queue Q that does not belong to the burst is created while
 *   the device is in large-burst mode and shortly after the last time
 *   at which a queue either entered the burst list or was marked as
 *   belonging to the current large burst, then Q is immediately marked
 *   as belonging to a large burst.
 *
 * . if a queue Q that does not belong to the burst is created a while
 *   later, i.e., not shortly after, the last time at which a queue
 *   either entered the burst list or was marked as belonging to the
 *   current large burst, then the current burst is deemed as finished and:
 *
 *        . the large-burst mode is reset if set
 *
 *        . the burst list is emptied
 *
 *        . Q is inserted in the burst list, as Q may be the first queue
 *          in a possible new burst (then the burst list contains just Q
 *          after this step).
 */
|  | 1168 | static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) | 
|  | 1169 | { | 
|  | 1170 | /* | 
|  | 1171 | * If bfqq is already in the burst list or is part of a large | 
|  | 1172 | * burst, or finally has just been split, then there is | 
|  | 1173 | * nothing else to do. | 
|  | 1174 | */ | 
|  | 1175 | if (!hlist_unhashed(&bfqq->burst_list_node) || | 
|  | 1176 | bfq_bfqq_in_large_burst(bfqq) || | 
|  | 1177 | time_is_after_eq_jiffies(bfqq->split_time + | 
|  | 1178 | msecs_to_jiffies(10))) | 
|  | 1179 | return; | 
|  | 1180 |  | 
|  | 1181 | /* | 
|  | 1182 | * If bfqq's creation happens late enough, or bfqq belongs to | 
|  | 1183 | * a different group than the burst group, then the current | 
|  | 1184 | * burst is finished, and related data structures must be | 
|  | 1185 | * reset. | 
|  | 1186 | * | 
|  | 1187 | * In this respect, consider the special case where bfqq is | 
|  | 1188 | * the very first queue created after BFQ is selected for this | 
|  | 1189 | * device. In this case, last_ins_in_burst and | 
|  | 1190 | * burst_parent_entity are not yet significant when we get | 
|  | 1191 | * here. But it is easy to verify that, whether or not the | 
|  | 1192 | * following condition is true, bfqq will end up being | 
|  | 1193 | * inserted into the burst list. In particular the list will | 
|  | 1194 | * happen to contain only bfqq. And this is exactly what has | 
|  | 1195 | * to happen, as bfqq may be the first queue of the first | 
|  | 1196 | * burst. | 
|  | 1197 | */ | 
|  | 1198 | if (time_is_before_jiffies(bfqd->last_ins_in_burst + | 
|  | 1199 | bfqd->bfq_burst_interval) || | 
|  | 1200 | bfqq->entity.parent != bfqd->burst_parent_entity) { | 
|  | 1201 | bfqd->large_burst = false; | 
|  | 1202 | bfq_reset_burst_list(bfqd, bfqq); | 
|  | 1203 | goto end; | 
|  | 1204 | } | 
|  | 1205 |  | 
|  | 1206 | /* | 
|  | 1207 | * If we get here, then bfqq is being activated shortly after the | 
|  | 1208 | * last queue. So, if the current burst is also large, we can mark | 
|  | 1209 | * bfqq as belonging to this large burst immediately. | 
|  | 1210 | */ | 
|  | 1211 | if (bfqd->large_burst) { | 
|  | 1212 | bfq_mark_bfqq_in_large_burst(bfqq); | 
|  | 1213 | goto end; | 
|  | 1214 | } | 
|  | 1215 |  | 
|  | 1216 | /* | 
|  | 1217 | * If we get here, then a large-burst state has not yet been | 
|  | 1218 | * reached, but bfqq is being activated shortly after the last | 
|  | 1219 | * queue. Then we add bfqq to the burst. | 
|  | 1220 | */ | 
|  | 1221 | bfq_add_to_burst(bfqd, bfqq); | 
|  | 1222 | end: | 
|  | 1223 | /* | 
|  | 1224 | * At this point, bfqq either has been added to the current | 
|  | 1225 | * burst or has caused the current burst to terminate and a | 
|  | 1226 | * possible new burst to start. In particular, in the second | 
|  | 1227 | * case, bfqq has become the first queue in the possible new | 
|  | 1228 | * burst.  In both cases last_ins_in_burst needs to be moved | 
|  | 1229 | * forward. | 
|  | 1230 | */ | 
|  | 1231 | bfqd->last_ins_in_burst = jiffies; | 
|  | 1232 | } | 
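/*
 * Editor's illustrative sketch, not part of the kernel source: a minimal
 * user-space model of the burst-detection steps implemented by
 * bfq_handle_burst() above. All names and constants below (struct
 * toy_queue, BURST_INTERVAL_MS, LARGE_BURST_THRESH) are made up for
 * illustration; in BFQ the corresponding tunables live in bfq_data
 * (e.g., bfq_burst_interval) and queues are tracked through their
 * burst_list_node.
 */
#include <stdbool.h>
#include <stdio.h>

#define BURST_INTERVAL_MS  180	/* "shortly after" window (assumed value) */
#define LARGE_BURST_THRESH   3	/* burst size that makes a burst "large"  */

struct toy_queue {
	int  id;
	bool in_burst_list;
	bool in_large_burst;
};

static struct toy_queue *burst_list[16];
static int  burst_size;
static bool large_burst;
static long last_ins_ms;

static void handle_creation(struct toy_queue *q, long now_ms)
{
	if (now_ms - last_ins_ms > BURST_INTERVAL_MS) {
		/* Too late: the current burst is over; q may start a new one. */
		for (int i = 0; i < burst_size; i++)
			burst_list[i]->in_burst_list = false;
		burst_size = 0;
		large_burst = false;
		burst_list[burst_size++] = q;
		q->in_burst_list = true;
	} else if (large_burst) {
		/* The burst is already large: mark q directly. */
		q->in_large_burst = true;
	} else {
		/* Append q; if the threshold is reached, mark the whole list. */
		burst_list[burst_size++] = q;
		q->in_burst_list = true;
		if (burst_size >= LARGE_BURST_THRESH) {
			for (int i = 0; i < burst_size; i++) {
				burst_list[i]->in_large_burst = true;
				burst_list[i]->in_burst_list = false;
			}
			burst_size = 0;		/* the list served its purpose */
			large_burst = true;
		}
	}
	last_ins_ms = now_ms;
}

int main(void)
{
	struct toy_queue q[5] = { {1}, {2}, {3}, {4}, {5} };

	handle_creation(&q[0], 0);	/* first queue: starts the list       */
	handle_creation(&q[1], 50);	/* shortly after: appended            */
	handle_creation(&q[2], 120);	/* threshold reached: burst is large  */
	handle_creation(&q[3], 200);	/* large-burst mode: marked directly  */
	handle_creation(&q[4], 900);	/* much later: a new burst begins     */

	for (int i = 0; i < 5; i++)
		printf("queue %d: in_large_burst=%d\n",
		       q[i].id, q[i].in_large_burst);
	return 0;
}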
|  | 1233 |  | 
|  | 1234 | static int bfq_bfqq_budget_left(struct bfq_queue *bfqq) | 
|  | 1235 | { | 
|  | 1236 | struct bfq_entity *entity = &bfqq->entity; | 
|  | 1237 |  | 
|  | 1238 | return entity->budget - entity->service; | 
|  | 1239 | } | 
|  | 1240 |  | 
|  | 1241 | /* | 
|  | 1242 | * If enough samples have been computed, return the current max budget | 
|  | 1243 | * stored in bfqd, which is dynamically updated according to the | 
|  | 1244 | * estimated disk peak rate; otherwise return the default max budget | 
|  | 1245 | */ | 
|  | 1246 | static int bfq_max_budget(struct bfq_data *bfqd) | 
|  | 1247 | { | 
|  | 1248 | if (bfqd->budgets_assigned < bfq_stats_min_budgets) | 
|  | 1249 | return bfq_default_max_budget; | 
|  | 1250 | else | 
|  | 1251 | return bfqd->bfq_max_budget; | 
|  | 1252 | } | 
|  | 1253 |  | 
|  | 1254 | /* | 
|  | 1255 | * Return min budget, which is a fraction of the current or default | 
|  | 1256 | * max budget (trying with 1/32) | 
|  | 1257 | */ | 
|  | 1258 | static int bfq_min_budget(struct bfq_data *bfqd) | 
|  | 1259 | { | 
|  | 1260 | if (bfqd->budgets_assigned < bfq_stats_min_budgets) | 
|  | 1261 | return bfq_default_max_budget / 32; | 
|  | 1262 | else | 
|  | 1263 | return bfqd->bfq_max_budget / 32; | 
|  | 1264 | } | 
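/*
 * Editor's hedged numeric sketch (user-space, made-up figures) of the
 * budget helpers above: bfq_bfqq_budget_left() is simply budget minus
 * service, and bfq_min_budget() is 1/32 of whichever max budget is in
 * force (default or rate-estimated).
 */
#include <stdio.h>

int main(void)
{
	int budget = 8192, service = 3072;	/* assumed entity state       */
	int max_budget = 16384;			/* assumed current max budget */

	int budget_left = budget - service;	/* as in bfq_bfqq_budget_left */
	int min_budget  = max_budget / 32;	/* as in bfq_min_budget       */

	printf("left = %d, min = %d sectors\n", budget_left, min_budget);
	return 0;
}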
|  | 1265 |  | 
|  | 1266 | /* | 
|  | 1267 | * The next function, invoked after the input queue bfqq switches from | 
|  | 1268 | * idle to busy, updates the budget of bfqq. The function also tells | 
|  | 1269 | * whether the in-service queue should be expired, by returning | 
|  | 1270 | * true. The purpose of expiring the in-service queue is to give bfqq | 
|  | 1271 | * the chance to possibly preempt the in-service queue, and the reason | 
|  | 1272 | * for preempting the in-service queue is to achieve one of the two | 
|  | 1273 | * goals below. | 
|  | 1274 | * | 
|  | 1275 | * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has | 
|  | 1276 | * expired because it has remained idle. In particular, bfqq may have | 
|  | 1277 | * expired for one of the following two reasons: | 
|  | 1278 | * | 
|  | 1279 | * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling | 
|  | 1280 | *   and did not manage to issue a new request before its last | 
|  | 1281 | *   request was served; | 
|  | 1282 | * | 
|  | 1283 | * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue | 
|  | 1284 | *   a new request before the expiration of the idling-time. | 
|  | 1285 | * | 
|  | 1286 | * Even if bfqq has expired for one of the above reasons, the process | 
|  | 1287 | * associated with the queue may however be issuing requests greedily, | 
|  | 1288 | * and thus be sensitive to the bandwidth it receives (bfqq may have | 
|  | 1289 | * remained idle for other reasons: CPU high load, bfqq not enjoying | 
|  | 1290 | * idling, I/O throttling somewhere in the path from the process to | 
|  | 1291 | * the I/O scheduler, ...). But if, after every expiration for one of | 
|  | 1292 | * the above two reasons, bfqq has to wait for the service of at least | 
|  | 1293 | * one full budget of another queue before being served again, then | 
|  | 1294 | * bfqq is likely to get a much lower bandwidth or resource time than | 
|  | 1295 | * its reserved ones. To address this issue, two countermeasures need | 
|  | 1296 | * to be taken. | 
|  | 1297 | * | 
|  | 1298 | * First, the budget and the timestamps of bfqq need to be updated in | 
|  | 1299 | * a special way on bfqq reactivation: they need to be updated as if | 
|  | 1300 | * bfqq did not remain idle and did not expire. In fact, if they are | 
|  | 1301 | * computed as if bfqq expired and remained idle until reactivation, | 
|  | 1302 | * then the process associated with bfqq is treated as if, instead of | 
|  | 1303 | * being greedy, it stopped issuing requests when bfqq remained idle, | 
|  | 1304 | * and restarts issuing requests only on this reactivation. In other | 
|  | 1305 | * words, the scheduler does not help the process recover the "service | 
|  | 1306 | * hole" between bfqq expiration and reactivation. As a consequence, | 
|  | 1307 | * the process receives a lower bandwidth than its reserved one. In | 
|  | 1308 | * contrast, to recover this hole, the budget must be updated as if | 
|  | 1309 | * bfqq was not expired at all before this reactivation, i.e., it must | 
|  | 1310 | * be set to the value of the remaining budget when bfqq was | 
|  | 1311 | * expired. Along the same line, timestamps need to be assigned the | 
|  | 1312 | * value they had the last time bfqq was selected for service, i.e., | 
|  | 1313 | * before last expiration. Thus timestamps need to be back-shifted | 
|  | 1314 | * with respect to their normal computation (see [1] for more details | 
|  | 1315 | * on this tricky aspect). | 
|  | 1316 | * | 
|  | 1317 | * Secondly, to allow the process to recover the hole, the in-service | 
|  | 1318 | * queue must be expired too, to give bfqq the chance to preempt it | 
|  | 1319 | * immediately. In fact, if bfqq has to wait for a full budget of the | 
|  | 1320 | * in-service queue to be completed, then it may become impossible to | 
|  | 1321 | * let the process recover the hole, even if the back-shifted | 
|  | 1322 | * timestamps of bfqq are lower than those of the in-service queue. If | 
|  | 1323 | * this happens for most or all of the holes, then the process may not | 
|  | 1324 | * receive its reserved bandwidth. In this respect, it is worth noting | 
|  | 1325 | * that, since the service of outstanding requests is not preemptible, a | 
|  | 1326 | * small fraction of the holes may however be unrecoverable, thereby | 
|  | 1327 | * causing a small loss of bandwidth. | 
|  | 1328 | * | 
|  | 1329 | * The last important point is detecting whether bfqq does need this | 
|  | 1330 | * bandwidth recovery. In this respect, the next function deems the | 
|  | 1331 | * process associated with bfqq greedy, and thus allows it to recover | 
|  | 1332 | * the hole, if: 1) the process is waiting for the arrival of a new | 
|  | 1333 | * request (which implies that bfqq expired for one of the above two | 
|  | 1334 | * reasons), and 2) such a request has arrived soon enough. The first | 
|  | 1335 | * condition is controlled through the flag non_blocking_wait_rq, | 
|  | 1336 | * while the second through the flag arrived_in_time. If both | 
|  | 1337 | * conditions hold, then the function computes the budget in the | 
|  | 1338 | * above-described special way, and signals that the in-service queue | 
|  | 1339 | * should be expired. Timestamp back-shifting is done later in | 
|  | 1340 | * __bfq_activate_entity. | 
|  | 1341 | * | 
|  | 1342 | * 2. Reduce latency. Even if timestamps are not backshifted to let | 
|  | 1343 | * the process associated with bfqq recover a service hole, bfqq may | 
|  | 1344 | * however happen to have, after being (re)activated, a lower finish | 
|  | 1345 | * timestamp than the in-service queue.	 That is, the next budget of | 
|  | 1346 | * bfqq may have to be completed before the one of the in-service | 
|  | 1347 | * queue. If this is the case, then preempting the in-service queue | 
|  | 1348 | * allows this goal to be achieved, apart from the unpreemptible, | 
|  | 1349 | * outstanding requests mentioned above. | 
|  | 1350 | * | 
|  | 1351 | * Unfortunately, regardless of which of the above two goals one wants | 
|  | 1352 | * to achieve, service trees need first to be updated to know whether | 
|  | 1353 | * the in-service queue must be preempted. To have service trees | 
|  | 1354 | * correctly updated, the in-service queue must be expired and | 
|  | 1355 | * rescheduled, and bfqq must be scheduled too. This is one of the | 
|  | 1356 | * most costly operations (in future versions, the scheduling | 
|  | 1357 | * mechanism may be re-designed in such a way to make it possible to | 
|  | 1358 | * know whether preemption is needed without needing to update service | 
|  | 1359 | * trees). In addition, queue preemptions almost always cause random | 
|  | 1360 | * I/O, and thus loss of throughput. Because of these facts, the next | 
|  | 1361 | * function adopts the following simple scheme to avoid both costly | 
|  | 1362 | * operations and too frequent preemptions: it requests the expiration | 
|  | 1363 | * of the in-service queue (unconditionally) only for queues that need | 
|  | 1364 | * to recover a hole, or that either are weight-raised or deserve to | 
|  | 1365 | * be weight-raised. | 
|  | 1366 | */ | 
|  | 1367 | static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd, | 
|  | 1368 | struct bfq_queue *bfqq, | 
|  | 1369 | bool arrived_in_time, | 
|  | 1370 | bool wr_or_deserves_wr) | 
|  | 1371 | { | 
|  | 1372 | struct bfq_entity *entity = &bfqq->entity; | 
|  | 1373 |  | 
|  | 1374 | if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) { | 
|  | 1375 | /* | 
|  | 1376 | * We do not clear the flag non_blocking_wait_rq here, as | 
|  | 1377 | * the latter is used in bfq_activate_bfqq to signal | 
|  | 1378 | * that timestamps need to be back-shifted (and is | 
|  | 1379 | * cleared right after). | 
|  | 1380 | */ | 
|  | 1381 |  | 
|  | 1382 | /* | 
|  | 1383 | * In the next assignment we rely on the fact that neither | 
|  | 1384 | * entity->service nor entity->budget is updated | 
|  | 1385 | * on expiration if bfqq is empty (see | 
|  | 1386 | * __bfq_bfqq_recalc_budget). Thus both quantities | 
|  | 1387 | * remain unchanged after such an expiration, and the | 
|  | 1388 | * following statement therefore assigns to | 
|  | 1389 | * entity->budget the remaining budget on such an | 
|  | 1390 | * expiration. | 
|  | 1391 | */ | 
|  | 1392 | entity->budget = min_t(unsigned long, | 
|  | 1393 | bfq_bfqq_budget_left(bfqq), | 
|  | 1394 | bfqq->max_budget); | 
|  | 1395 |  | 
|  | 1396 | /* | 
|  | 1397 | * At this point, we have used entity->service to get | 
|  | 1398 | * the budget left (needed for updating | 
|  | 1399 | * entity->budget). Thus we finally can, and have to, | 
|  | 1400 | * reset entity->service. The latter must be reset | 
|  | 1401 | * because bfqq would otherwise be charged again for | 
|  | 1402 | * the service it has received during its previous | 
|  | 1403 | * service slot(s). | 
|  | 1404 | */ | 
|  | 1405 | entity->service = 0; | 
|  | 1406 |  | 
|  | 1407 | return true; | 
|  | 1408 | } | 
|  | 1409 |  | 
|  | 1410 | /* | 
|  | 1411 | * We can finally complete expiration, by setting service to 0. | 
|  | 1412 | */ | 
|  | 1413 | entity->service = 0; | 
|  | 1414 | entity->budget = max_t(unsigned long, bfqq->max_budget, | 
|  | 1415 | bfq_serv_to_charge(bfqq->next_rq, bfqq)); | 
|  | 1416 | bfq_clear_bfqq_non_blocking_wait_rq(bfqq); | 
|  | 1417 | return wr_or_deserves_wr; | 
|  | 1418 | } | 
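/*
 * Editor's illustrative sketch (user-space, made-up numbers) of the two
 * budget updates performed by bfq_bfqq_update_budg_for_activation() above.
 * If the queue was waiting for a request and the request arrived in time,
 * the budget is set to what was left at the last expiration (capped by
 * max_budget), so that the "service hole" can be recovered; otherwise the
 * budget is simply made large enough for the next request.
 */
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	int budget = 8192, service = 5120;	/* state at the last expiration */
	int max_budget = 16384;
	int next_rq_charge = 1024;		/* assumed service to charge    */

	/* Path 1: greedy queue that expired while waiting for a request. */
	int recovered = min(budget - service, max_budget);

	/* Path 2: ordinary (re)activation. */
	int fresh = max(max_budget, next_rq_charge);

	printf("recovered budget = %d, fresh budget = %d\n", recovered, fresh);
	return 0;
}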
|  | 1419 |  | 
|  | 1420 | /* | 
|  | 1421 | * Return the farthest past time instant according to jiffies | 
|  | 1422 | * macros. | 
|  | 1423 | */ | 
|  | 1424 | static unsigned long bfq_smallest_from_now(void) | 
|  | 1425 | { | 
|  | 1426 | return jiffies - MAX_JIFFY_OFFSET; | 
|  | 1427 | } | 
|  | 1428 |  | 
|  | 1429 | static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, | 
|  | 1430 | struct bfq_queue *bfqq, | 
|  | 1431 | unsigned int old_wr_coeff, | 
|  | 1432 | bool wr_or_deserves_wr, | 
|  | 1433 | bool interactive, | 
|  | 1434 | bool in_burst, | 
|  | 1435 | bool soft_rt) | 
|  | 1436 | { | 
|  | 1437 | if (old_wr_coeff == 1 && wr_or_deserves_wr) { | 
|  | 1438 | /* start a weight-raising period */ | 
|  | 1439 | if (interactive) { | 
|  | 1440 | bfqq->service_from_wr = 0; | 
|  | 1441 | bfqq->wr_coeff = bfqd->bfq_wr_coeff; | 
|  | 1442 | bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); | 
|  | 1443 | } else { | 
|  | 1444 | /* | 
|  | 1445 | * No interactive weight raising in progress | 
|  | 1446 | * here: assign minus infinity to | 
|  | 1447 | * wr_start_at_switch_to_srt, to make sure | 
|  | 1448 | * that, at the end of the soft-real-time | 
|  | 1449 | * weight-raising period that is starting | 
|  | 1450 | * now, no interactive weight-raising period | 
|  | 1451 | * may be wrongly considered as still in | 
|  | 1452 | * progress (and thus actually started by | 
|  | 1453 | * mistake). | 
|  | 1454 | */ | 
|  | 1455 | bfqq->wr_start_at_switch_to_srt = | 
|  | 1456 | bfq_smallest_from_now(); | 
|  | 1457 | bfqq->wr_coeff = bfqd->bfq_wr_coeff * | 
|  | 1458 | BFQ_SOFTRT_WEIGHT_FACTOR; | 
|  | 1459 | bfqq->wr_cur_max_time = | 
|  | 1460 | bfqd->bfq_wr_rt_max_time; | 
|  | 1461 | } | 
|  | 1462 |  | 
|  | 1463 | /* | 
|  | 1464 | * If needed, further reduce budget to make sure it is | 
|  | 1465 | * close to bfqq's backlog, so as to reduce the | 
|  | 1466 | * scheduling-error component due to a too large | 
|  | 1467 | * budget. Do not care about throughput consequences, | 
|  | 1468 | * but only about latency. Finally, do not assign a | 
|  | 1469 | * too small budget either, to avoid increasing | 
|  | 1470 | * latency by causing too frequent expirations. | 
|  | 1471 | */ | 
|  | 1472 | bfqq->entity.budget = min_t(unsigned long, | 
|  | 1473 | bfqq->entity.budget, | 
|  | 1474 | 2 * bfq_min_budget(bfqd)); | 
|  | 1475 | } else if (old_wr_coeff > 1) { | 
|  | 1476 | if (interactive) { /* update wr coeff and duration */ | 
|  | 1477 | bfqq->wr_coeff = bfqd->bfq_wr_coeff; | 
|  | 1478 | bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); | 
|  | 1479 | } else if (in_burst) | 
|  | 1480 | bfqq->wr_coeff = 1; | 
|  | 1481 | else if (soft_rt) { | 
|  | 1482 | /* | 
|  | 1483 | * The application is now or still meeting the | 
|  | 1484 | * requirements for being deemed soft rt.  We | 
|  | 1485 | * can then correctly and safely (re)charge | 
|  | 1486 | * the weight-raising duration for the | 
|  | 1487 | * application with the weight-raising | 
|  | 1488 | * duration for soft rt applications. | 
|  | 1489 | * | 
|  | 1490 | * In particular, doing this recharge now, i.e., | 
|  | 1491 | * before the weight-raising period for the | 
|  | 1492 | * application finishes, reduces the probability | 
|  | 1493 | * of the following negative scenario: | 
|  | 1494 | * 1) the weight of a soft rt application is | 
|  | 1495 | *    raised at startup (as for any newly | 
|  | 1496 | *    created application), | 
|  | 1497 | * 2) since the application is not interactive, | 
|  | 1498 | *    at a certain time weight-raising is | 
|  | 1499 | *    stopped for the application, | 
|  | 1500 | * 3) at that time the application happens to | 
|  | 1501 | *    still have pending requests, and hence | 
|  | 1502 | *    is destined to not have a chance to be | 
|  | 1503 | *    deemed soft rt before these requests are | 
|  | 1504 | *    completed (see the comments to the | 
|  | 1505 | *    function bfq_bfqq_softrt_next_start() | 
|  | 1506 | *    for details on soft rt detection), | 
|  | 1507 | * 4) these pending requests experience a high | 
|  | 1508 | *    latency because the application is not | 
|  | 1509 | *    weight-raised while they are pending. | 
|  | 1510 | */ | 
|  | 1511 | if (bfqq->wr_cur_max_time != | 
|  | 1512 | bfqd->bfq_wr_rt_max_time) { | 
|  | 1513 | bfqq->wr_start_at_switch_to_srt = | 
|  | 1514 | bfqq->last_wr_start_finish; | 
|  | 1515 |  | 
|  | 1516 | bfqq->wr_cur_max_time = | 
|  | 1517 | bfqd->bfq_wr_rt_max_time; | 
|  | 1518 | bfqq->wr_coeff = bfqd->bfq_wr_coeff * | 
|  | 1519 | BFQ_SOFTRT_WEIGHT_FACTOR; | 
|  | 1520 | } | 
|  | 1521 | bfqq->last_wr_start_finish = jiffies; | 
|  | 1522 | } | 
|  | 1523 | } | 
|  | 1524 | } | 
|  | 1525 |  | 
|  | 1526 | static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd, | 
|  | 1527 | struct bfq_queue *bfqq) | 
|  | 1528 | { | 
|  | 1529 | return bfqq->dispatched == 0 && | 
|  | 1530 | time_is_before_jiffies( | 
|  | 1531 | bfqq->budget_timeout + | 
|  | 1532 | bfqd->bfq_wr_min_idle_time); | 
|  | 1533 | } | 
|  | 1534 |  | 
|  | 1535 | static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, | 
|  | 1536 | struct bfq_queue *bfqq, | 
|  | 1537 | int old_wr_coeff, | 
|  | 1538 | struct request *rq, | 
|  | 1539 | bool *interactive) | 
|  | 1540 | { | 
|  | 1541 | bool soft_rt, in_burst,	wr_or_deserves_wr, | 
|  | 1542 | bfqq_wants_to_preempt, | 
|  | 1543 | idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq), | 
|  | 1544 | /* | 
|  | 1545 | * See the comments on | 
|  | 1546 | * bfq_bfqq_update_budg_for_activation for | 
|  | 1547 | * details on the usage of the next variable. | 
|  | 1548 | */ | 
|  | 1549 | arrived_in_time =  ktime_get_ns() <= | 
|  | 1550 | bfqq->ttime.last_end_request + | 
|  | 1551 | bfqd->bfq_slice_idle * 3; | 
|  | 1552 |  | 
|  | 1553 |  | 
|  | 1554 | /* | 
|  | 1555 | * bfqq deserves to be weight-raised if: | 
|  | 1556 | * - it is sync, | 
|  | 1557 | * - it does not belong to a large burst, | 
|  | 1558 | * - it has been idle for enough time or is soft real-time, | 
|  | 1559 | * - it is linked to a bfq_io_cq (it is not shared in any sense). | 
|  | 1560 | */ | 
|  | 1561 | in_burst = bfq_bfqq_in_large_burst(bfqq); | 
|  | 1562 | soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 && | 
|  | 1563 | !in_burst && | 
|  | 1564 | time_is_before_jiffies(bfqq->soft_rt_next_start) && | 
|  | 1565 | bfqq->dispatched == 0; | 
|  | 1566 | *interactive = !in_burst && idle_for_long_time; | 
|  | 1567 | wr_or_deserves_wr = bfqd->low_latency && | 
|  | 1568 | (bfqq->wr_coeff > 1 || | 
|  | 1569 | (bfq_bfqq_sync(bfqq) && | 
|  | 1570 | bfqq->bic && (*interactive || soft_rt))); | 
|  | 1571 |  | 
|  | 1572 | /* | 
|  | 1573 | * Using the last flag, update budget and check whether bfqq | 
|  | 1574 | * may want to preempt the in-service queue. | 
|  | 1575 | */ | 
|  | 1576 | bfqq_wants_to_preempt = | 
|  | 1577 | bfq_bfqq_update_budg_for_activation(bfqd, bfqq, | 
|  | 1578 | arrived_in_time, | 
|  | 1579 | wr_or_deserves_wr); | 
|  | 1580 |  | 
|  | 1581 | /* | 
|  | 1582 | * If bfqq happened to be activated in a burst, but has been | 
|  | 1583 | * idle for much longer than an interactive queue would be, then we | 
|  | 1584 | * assume that, in the overall I/O initiated in the burst, the | 
|  | 1585 | * I/O associated with bfqq is finished. So bfqq does not need | 
|  | 1586 | * to be treated as a queue belonging to a burst | 
|  | 1587 | * anymore. Accordingly, we reset bfqq's in_large_burst flag | 
|  | 1588 | * if set, and remove bfqq from the burst list if it's | 
|  | 1589 | * there. We do not decrement burst_size, because the fact | 
|  | 1590 | * that bfqq does not need to belong to the burst list any | 
|  | 1591 | * more does not invalidate the fact that bfqq was created in | 
|  | 1592 | * a burst. | 
|  | 1593 | */ | 
|  | 1594 | if (likely(!bfq_bfqq_just_created(bfqq)) && | 
|  | 1595 | idle_for_long_time && | 
|  | 1596 | time_is_before_jiffies( | 
|  | 1597 | bfqq->budget_timeout + | 
|  | 1598 | msecs_to_jiffies(10000))) { | 
|  | 1599 | hlist_del_init(&bfqq->burst_list_node); | 
|  | 1600 | bfq_clear_bfqq_in_large_burst(bfqq); | 
|  | 1601 | } | 
|  | 1602 |  | 
|  | 1603 | bfq_clear_bfqq_just_created(bfqq); | 
|  | 1604 |  | 
|  | 1605 |  | 
|  | 1606 | if (!bfq_bfqq_IO_bound(bfqq)) { | 
|  | 1607 | if (arrived_in_time) { | 
|  | 1608 | bfqq->requests_within_timer++; | 
|  | 1609 | if (bfqq->requests_within_timer >= | 
|  | 1610 | bfqd->bfq_requests_within_timer) | 
|  | 1611 | bfq_mark_bfqq_IO_bound(bfqq); | 
|  | 1612 | } else | 
|  | 1613 | bfqq->requests_within_timer = 0; | 
|  | 1614 | } | 
|  | 1615 |  | 
|  | 1616 | if (bfqd->low_latency) { | 
|  | 1617 | if (unlikely(time_is_after_jiffies(bfqq->split_time))) | 
|  | 1618 | /* wraparound */ | 
|  | 1619 | bfqq->split_time = | 
|  | 1620 | jiffies - bfqd->bfq_wr_min_idle_time - 1; | 
|  | 1621 |  | 
|  | 1622 | if (time_is_before_jiffies(bfqq->split_time + | 
|  | 1623 | bfqd->bfq_wr_min_idle_time)) { | 
|  | 1624 | bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq, | 
|  | 1625 | old_wr_coeff, | 
|  | 1626 | wr_or_deserves_wr, | 
|  | 1627 | *interactive, | 
|  | 1628 | in_burst, | 
|  | 1629 | soft_rt); | 
|  | 1630 |  | 
|  | 1631 | if (old_wr_coeff != bfqq->wr_coeff) | 
|  | 1632 | bfqq->entity.prio_changed = 1; | 
|  | 1633 | } | 
|  | 1634 | } | 
|  | 1635 |  | 
|  | 1636 | bfqq->last_idle_bklogged = jiffies; | 
|  | 1637 | bfqq->service_from_backlogged = 0; | 
|  | 1638 | bfq_clear_bfqq_softrt_update(bfqq); | 
|  | 1639 |  | 
|  | 1640 | bfq_add_bfqq_busy(bfqd, bfqq); | 
|  | 1641 |  | 
|  | 1642 | /* | 
|  | 1643 | * Expire in-service queue only if preemption may be needed | 
|  | 1644 | * for guarantees. In this respect, the function | 
|  | 1645 | * next_queue_may_preempt just checks a simple, necessary | 
|  | 1646 | * condition, and not a sufficient condition based on | 
|  | 1647 | * timestamps. In fact, for the latter condition to be | 
|  | 1648 | * evaluated, timestamps would need first to be updated, and | 
|  | 1649 | * this operation is quite costly (see the comments on the | 
|  | 1650 | * function bfq_bfqq_update_budg_for_activation). | 
|  | 1651 | */ | 
|  | 1652 | if (bfqd->in_service_queue && bfqq_wants_to_preempt && | 
|  | 1653 | bfqd->in_service_queue->wr_coeff < bfqq->wr_coeff && | 
|  | 1654 | next_queue_may_preempt(bfqd)) | 
|  | 1655 | bfq_bfqq_expire(bfqd, bfqd->in_service_queue, | 
|  | 1656 | false, BFQQE_PREEMPTED); | 
|  | 1657 | } | 
|  | 1658 |  | 
|  | 1659 | static void bfq_add_request(struct request *rq) | 
|  | 1660 | { | 
|  | 1661 | struct bfq_queue *bfqq = RQ_BFQQ(rq); | 
|  | 1662 | struct bfq_data *bfqd = bfqq->bfqd; | 
|  | 1663 | struct request *next_rq, *prev; | 
|  | 1664 | unsigned int old_wr_coeff = bfqq->wr_coeff; | 
|  | 1665 | bool interactive = false; | 
|  | 1666 |  | 
|  | 1667 | bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq)); | 
|  | 1668 | bfqq->queued[rq_is_sync(rq)]++; | 
|  | 1669 | bfqd->queued++; | 
|  | 1670 |  | 
|  | 1671 | elv_rb_add(&bfqq->sort_list, rq); | 
|  | 1672 |  | 
|  | 1673 | /* | 
|  | 1674 | * Check if this request is a better next-serve candidate. | 
|  | 1675 | */ | 
|  | 1676 | prev = bfqq->next_rq; | 
|  | 1677 | next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); | 
|  | 1678 | bfqq->next_rq = next_rq; | 
|  | 1679 |  | 
|  | 1680 | /* | 
|  | 1681 | * Adjust priority tree position, if next_rq changes. | 
|  | 1682 | */ | 
|  | 1683 | if (prev != bfqq->next_rq) | 
|  | 1684 | bfq_pos_tree_add_move(bfqd, bfqq); | 
|  | 1685 |  | 
|  | 1686 | if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */ | 
|  | 1687 | bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff, | 
|  | 1688 | rq, &interactive); | 
|  | 1689 | else { | 
|  | 1690 | if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && | 
|  | 1691 | time_is_before_jiffies( | 
|  | 1692 | bfqq->last_wr_start_finish + | 
|  | 1693 | bfqd->bfq_wr_min_inter_arr_async)) { | 
|  | 1694 | bfqq->wr_coeff = bfqd->bfq_wr_coeff; | 
|  | 1695 | bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); | 
|  | 1696 |  | 
|  | 1697 | bfqd->wr_busy_queues++; | 
|  | 1698 | bfqq->entity.prio_changed = 1; | 
|  | 1699 | } | 
|  | 1700 | if (prev != bfqq->next_rq) | 
|  | 1701 | bfq_updated_next_req(bfqd, bfqq); | 
|  | 1702 | } | 
|  | 1703 |  | 
|  | 1704 | /* | 
|  | 1705 | * Assign jiffies to last_wr_start_finish in the following | 
|  | 1706 | * cases: | 
|  | 1707 | * | 
|  | 1708 | * . if bfqq is not going to be weight-raised, because, for | 
|  | 1709 | *   non weight-raised queues, last_wr_start_finish stores the | 
|  | 1710 | *   arrival time of the last request; as of now, this piece | 
|  | 1711 | *   of information is used only for deciding whether to | 
|  | 1712 | *   weight-raise async queues | 
|  | 1713 | * | 
|  | 1714 | * . if bfqq is not weight-raised, because, if bfqq is now | 
|  | 1715 | *   switching to weight-raised, then last_wr_start_finish | 
|  | 1716 | *   stores the time when weight-raising starts | 
|  | 1717 | * | 
|  | 1718 | * . if bfqq is interactive, because, regardless of whether | 
|  | 1719 | *   bfqq is currently weight-raised, the weight-raising | 
|  | 1720 | *   period must start or restart (this case is considered | 
|  | 1721 | *   separately because it is not detected by the above | 
|  | 1722 | *   conditions, if bfqq is already weight-raised) | 
|  | 1723 | * | 
|  | 1724 | * last_wr_start_finish has to be updated also if bfqq is soft | 
|  | 1725 | * real-time, because the weight-raising period is constantly | 
|  | 1726 | * restarted on idle-to-busy transitions for these queues, but | 
|  | 1727 | * this is already done in bfq_bfqq_handle_idle_busy_switch if | 
|  | 1728 | * needed. | 
|  | 1729 | */ | 
|  | 1730 | if (bfqd->low_latency && | 
|  | 1731 | (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive)) | 
|  | 1732 | bfqq->last_wr_start_finish = jiffies; | 
|  | 1733 | } | 
|  | 1734 |  | 
|  | 1735 | static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, | 
|  | 1736 | struct bio *bio, | 
|  | 1737 | struct request_queue *q) | 
|  | 1738 | { | 
|  | 1739 | struct bfq_queue *bfqq = bfqd->bio_bfqq; | 
|  | 1740 |  | 
|  | 1741 |  | 
|  | 1742 | if (bfqq) | 
|  | 1743 | return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); | 
|  | 1744 |  | 
|  | 1745 | return NULL; | 
|  | 1746 | } | 
|  | 1747 |  | 
|  | 1748 | static sector_t get_sdist(sector_t last_pos, struct request *rq) | 
|  | 1749 | { | 
|  | 1750 | if (last_pos) | 
|  | 1751 | return abs(blk_rq_pos(rq) - last_pos); | 
|  | 1752 |  | 
|  | 1753 | return 0; | 
|  | 1754 | } | 
|  | 1755 |  | 
|  | 1756 | #if 0 /* Still not clear if we can do without next two functions */ | 
|  | 1757 | static void bfq_activate_request(struct request_queue *q, struct request *rq) | 
|  | 1758 | { | 
|  | 1759 | struct bfq_data *bfqd = q->elevator->elevator_data; | 
|  | 1760 |  | 
|  | 1761 | bfqd->rq_in_driver++; | 
|  | 1762 | } | 
|  | 1763 |  | 
|  | 1764 | static void bfq_deactivate_request(struct request_queue *q, struct request *rq) | 
|  | 1765 | { | 
|  | 1766 | struct bfq_data *bfqd = q->elevator->elevator_data; | 
|  | 1767 |  | 
|  | 1768 | bfqd->rq_in_driver--; | 
|  | 1769 | } | 
|  | 1770 | #endif | 
|  | 1771 |  | 
|  | 1772 | static void bfq_remove_request(struct request_queue *q, | 
|  | 1773 | struct request *rq) | 
|  | 1774 | { | 
|  | 1775 | struct bfq_queue *bfqq = RQ_BFQQ(rq); | 
|  | 1776 | struct bfq_data *bfqd = bfqq->bfqd; | 
|  | 1777 | const int sync = rq_is_sync(rq); | 
|  | 1778 |  | 
|  | 1779 | if (bfqq->next_rq == rq) { | 
|  | 1780 | bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); | 
|  | 1781 | bfq_updated_next_req(bfqd, bfqq); | 
|  | 1782 | } | 
|  | 1783 |  | 
|  | 1784 | if (rq->queuelist.prev != &rq->queuelist) | 
|  | 1785 | list_del_init(&rq->queuelist); | 
|  | 1786 | bfqq->queued[sync]--; | 
|  | 1787 | bfqd->queued--; | 
|  | 1788 | elv_rb_del(&bfqq->sort_list, rq); | 
|  | 1789 |  | 
|  | 1790 | elv_rqhash_del(q, rq); | 
|  | 1791 | if (q->last_merge == rq) | 
|  | 1792 | q->last_merge = NULL; | 
|  | 1793 |  | 
|  | 1794 | if (RB_EMPTY_ROOT(&bfqq->sort_list)) { | 
|  | 1795 | bfqq->next_rq = NULL; | 
|  | 1796 |  | 
|  | 1797 | if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) { | 
|  | 1798 | bfq_del_bfqq_busy(bfqd, bfqq, false); | 
|  | 1799 | /* | 
|  | 1800 | * bfqq emptied. In normal operation, when | 
|  | 1801 | * bfqq is empty, bfqq->entity.service and | 
|  | 1802 | * bfqq->entity.budget must contain, | 
|  | 1803 | * respectively, the service received and the | 
|  | 1804 | * budget used last time bfqq emptied. These | 
|  | 1805 | * facts do not hold in this case, as at least | 
|  | 1806 | * this last removal occurred while bfqq was | 
|  | 1807 | * not in service. To avoid inconsistencies, | 
|  | 1808 | * reset both bfqq->entity.service and | 
|  | 1809 | * bfqq->entity.budget, if bfqq has still a | 
|  | 1810 | * process that may issue I/O requests to it. | 
|  | 1811 | */ | 
|  | 1812 | bfqq->entity.budget = bfqq->entity.service = 0; | 
|  | 1813 | } | 
|  | 1814 |  | 
|  | 1815 | /* | 
|  | 1816 | * Remove queue from request-position tree as it is empty. | 
|  | 1817 | */ | 
|  | 1818 | if (bfqq->pos_root) { | 
|  | 1819 | rb_erase(&bfqq->pos_node, bfqq->pos_root); | 
|  | 1820 | bfqq->pos_root = NULL; | 
|  | 1821 | } | 
|  | 1822 | } else { | 
|  | 1823 | bfq_pos_tree_add_move(bfqd, bfqq); | 
|  | 1824 | } | 
|  | 1825 |  | 
|  | 1826 | if (rq->cmd_flags & REQ_META) | 
|  | 1827 | bfqq->meta_pending--; | 
|  | 1828 |  | 
|  | 1829 | } | 
|  | 1830 |  | 
|  | 1831 | static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio) | 
|  | 1832 | { | 
|  | 1833 | struct request_queue *q = hctx->queue; | 
|  | 1834 | struct bfq_data *bfqd = q->elevator->elevator_data; | 
|  | 1835 | struct request *free = NULL; | 
|  | 1836 | /* | 
|  | 1837 | * bfq_bic_lookup grabs the queue_lock: invoke it now and | 
|  | 1838 | * store its return value for later use, to avoid nesting | 
|  | 1839 | * queue_lock inside the bfqd->lock. We assume that the bic | 
|  | 1840 | * returned by bfq_bic_lookup does not go away before | 
|  | 1841 | * bfqd->lock is taken. | 
|  | 1842 | */ | 
|  | 1843 | struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q); | 
|  | 1844 | bool ret; | 
|  | 1845 |  | 
|  | 1846 | spin_lock_irq(&bfqd->lock); | 
|  | 1847 |  | 
|  | 1848 | if (bic) | 
|  | 1849 | bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); | 
|  | 1850 | else | 
|  | 1851 | bfqd->bio_bfqq = NULL; | 
|  | 1852 | bfqd->bio_bic = bic; | 
|  | 1853 |  | 
|  | 1854 | ret = blk_mq_sched_try_merge(q, bio, &free); | 
|  | 1855 |  | 
|  | 1856 | if (free) | 
|  | 1857 | blk_mq_free_request(free); | 
|  | 1858 | spin_unlock_irq(&bfqd->lock); | 
|  | 1859 |  | 
|  | 1860 | return ret; | 
|  | 1861 | } | 
|  | 1862 |  | 
|  | 1863 | static int bfq_request_merge(struct request_queue *q, struct request **req, | 
|  | 1864 | struct bio *bio) | 
|  | 1865 | { | 
|  | 1866 | struct bfq_data *bfqd = q->elevator->elevator_data; | 
|  | 1867 | struct request *__rq; | 
|  | 1868 |  | 
|  | 1869 | __rq = bfq_find_rq_fmerge(bfqd, bio, q); | 
|  | 1870 | if (__rq && elv_bio_merge_ok(__rq, bio)) { | 
|  | 1871 | *req = __rq; | 
|  | 1872 | return ELEVATOR_FRONT_MERGE; | 
|  | 1873 | } | 
|  | 1874 |  | 
|  | 1875 | return ELEVATOR_NO_MERGE; | 
|  | 1876 | } | 
|  | 1877 |  | 
|  | 1878 | static struct bfq_queue *bfq_init_rq(struct request *rq); | 
|  | 1879 |  | 
|  | 1880 | static void bfq_request_merged(struct request_queue *q, struct request *req, | 
|  | 1881 | enum elv_merge type) | 
|  | 1882 | { | 
|  | 1883 | if (type == ELEVATOR_FRONT_MERGE && | 
|  | 1884 | rb_prev(&req->rb_node) && | 
|  | 1885 | blk_rq_pos(req) < | 
|  | 1886 | blk_rq_pos(container_of(rb_prev(&req->rb_node), | 
|  | 1887 | struct request, rb_node))) { | 
|  | 1888 | struct bfq_queue *bfqq = bfq_init_rq(req); | 
|  | 1889 | struct bfq_data *bfqd; | 
|  | 1890 | struct request *prev, *next_rq; | 
|  | 1891 |  | 
|  | 1892 | if (!bfqq) | 
|  | 1893 | return; | 
|  | 1894 |  | 
|  | 1895 | bfqd = bfqq->bfqd; | 
|  | 1896 |  | 
|  | 1897 | /* Reposition request in its sort_list */ | 
|  | 1898 | elv_rb_del(&bfqq->sort_list, req); | 
|  | 1899 | elv_rb_add(&bfqq->sort_list, req); | 
|  | 1900 |  | 
|  | 1901 | /* Choose next request to be served for bfqq */ | 
|  | 1902 | prev = bfqq->next_rq; | 
|  | 1903 | next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req, | 
|  | 1904 | bfqd->last_position); | 
|  | 1905 | bfqq->next_rq = next_rq; | 
|  | 1906 | /* | 
|  | 1907 | * If next_rq changes, update both the queue's budget to | 
|  | 1908 | * fit the new request and the queue's position in its | 
|  | 1909 | * rq_pos_tree. | 
|  | 1910 | */ | 
|  | 1911 | if (prev != bfqq->next_rq) { | 
|  | 1912 | bfq_updated_next_req(bfqd, bfqq); | 
|  | 1913 | bfq_pos_tree_add_move(bfqd, bfqq); | 
|  | 1914 | } | 
|  | 1915 | } | 
|  | 1916 | } | 
|  | 1917 |  | 
|  | 1918 | /* | 
|  | 1919 | * This function is called to notify the scheduler that the requests | 
|  | 1920 | * rq and 'next' have been merged, with 'next' going away.  BFQ | 
|  | 1921 | * exploits this hook to address the following issue: if 'next' has a | 
|  | 1922 | * fifo_time lower that rq, then the fifo_time of rq must be set to | 
|  | 1923 | * the value of 'next', to not forget the greater age of 'next'. | 
|  | 1924 | * | 
|  | 1925 | * NOTE: in this function we assume that rq is in a bfq_queue, based | 
|  | 1926 | * on the fact that rq is picked from the hash table q->elevator->hash, which, | 
|  | 1927 | * in its turn, is filled only with I/O requests present in | 
|  | 1928 | * bfq_queues, while BFQ is in use for the request queue q. In fact, | 
|  | 1929 | * the function that fills this hash table (elv_rqhash_add) is called | 
|  | 1930 | * only by bfq_insert_request. | 
|  | 1931 | */ | 
|  | 1932 | static void bfq_requests_merged(struct request_queue *q, struct request *rq, | 
|  | 1933 | struct request *next) | 
|  | 1934 | { | 
|  | 1935 | struct bfq_queue *bfqq = bfq_init_rq(rq), | 
|  | 1936 | *next_bfqq = bfq_init_rq(next); | 
|  | 1937 |  | 
|  | 1938 | if (!bfqq) | 
|  | 1939 | return; | 
|  | 1940 |  | 
|  | 1941 | /* | 
|  | 1942 | * If next and rq belong to the same bfq_queue and next is older | 
|  | 1943 | * than rq, then reposition rq in the fifo (by substituting next | 
|  | 1944 | * with rq). Otherwise, if next and rq belong to different | 
|  | 1945 | * bfq_queues, never reposition rq: in fact, we would have to | 
|  | 1946 | * reposition it with respect to next's position in its own fifo, | 
|  | 1947 | * which would most certainly be too expensive with respect to | 
|  | 1948 | * the benefits. | 
|  | 1949 | */ | 
|  | 1950 | if (bfqq == next_bfqq && | 
|  | 1951 | !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && | 
|  | 1952 | next->fifo_time < rq->fifo_time) { | 
|  | 1953 | list_del_init(&rq->queuelist); | 
|  | 1954 | list_replace_init(&next->queuelist, &rq->queuelist); | 
|  | 1955 | rq->fifo_time = next->fifo_time; | 
|  | 1956 | } | 
|  | 1957 |  | 
|  | 1958 | if (bfqq->next_rq == next) | 
|  | 1959 | bfqq->next_rq = rq; | 
|  | 1960 |  | 
|  | 1961 | bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); | 
|  | 1962 | } | 
|  | 1963 |  | 
|  | 1964 | /* Must be called with bfqq != NULL */ | 
|  | 1965 | static void bfq_bfqq_end_wr(struct bfq_queue *bfqq) | 
|  | 1966 | { | 
|  | 1967 | if (bfq_bfqq_busy(bfqq)) | 
|  | 1968 | bfqq->bfqd->wr_busy_queues--; | 
|  | 1969 | bfqq->wr_coeff = 1; | 
|  | 1970 | bfqq->wr_cur_max_time = 0; | 
|  | 1971 | bfqq->last_wr_start_finish = jiffies; | 
|  | 1972 | /* | 
|  | 1973 | * Trigger a weight change on the next invocation of | 
|  | 1974 | * __bfq_entity_update_weight_prio. | 
|  | 1975 | */ | 
|  | 1976 | bfqq->entity.prio_changed = 1; | 
|  | 1977 | } | 
|  | 1978 |  | 
|  | 1979 | void bfq_end_wr_async_queues(struct bfq_data *bfqd, | 
|  | 1980 | struct bfq_group *bfqg) | 
|  | 1981 | { | 
|  | 1982 | int i, j; | 
|  | 1983 |  | 
|  | 1984 | for (i = 0; i < 2; i++) | 
|  | 1985 | for (j = 0; j < IOPRIO_BE_NR; j++) | 
|  | 1986 | if (bfqg->async_bfqq[i][j]) | 
|  | 1987 | bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]); | 
|  | 1988 | if (bfqg->async_idle_bfqq) | 
|  | 1989 | bfq_bfqq_end_wr(bfqg->async_idle_bfqq); | 
|  | 1990 | } | 
|  | 1991 |  | 
|  | 1992 | static void bfq_end_wr(struct bfq_data *bfqd) | 
|  | 1993 | { | 
|  | 1994 | struct bfq_queue *bfqq; | 
|  | 1995 |  | 
|  | 1996 | spin_lock_irq(&bfqd->lock); | 
|  | 1997 |  | 
|  | 1998 | list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) | 
|  | 1999 | bfq_bfqq_end_wr(bfqq); | 
|  | 2000 | list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) | 
|  | 2001 | bfq_bfqq_end_wr(bfqq); | 
|  | 2002 | bfq_end_wr_async(bfqd); | 
|  | 2003 |  | 
|  | 2004 | spin_unlock_irq(&bfqd->lock); | 
|  | 2005 | } | 
|  | 2006 |  | 
|  | 2007 | static sector_t bfq_io_struct_pos(void *io_struct, bool request) | 
|  | 2008 | { | 
|  | 2009 | if (request) | 
|  | 2010 | return blk_rq_pos(io_struct); | 
|  | 2011 | else | 
|  | 2012 | return ((struct bio *)io_struct)->bi_iter.bi_sector; | 
|  | 2013 | } | 
|  | 2014 |  | 
|  | 2015 | static int bfq_rq_close_to_sector(void *io_struct, bool request, | 
|  | 2016 | sector_t sector) | 
|  | 2017 | { | 
|  | 2018 | return abs(bfq_io_struct_pos(io_struct, request) - sector) <= | 
|  | 2019 | BFQQ_CLOSE_THR; | 
|  | 2020 | } | 
|  | 2021 |  | 
|  | 2022 | static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd, | 
|  | 2023 | struct bfq_queue *bfqq, | 
|  | 2024 | sector_t sector) | 
|  | 2025 | { | 
|  | 2026 | struct rb_root *root = &bfq_bfqq_to_bfqg(bfqq)->rq_pos_tree; | 
|  | 2027 | struct rb_node *parent, *node; | 
|  | 2028 | struct bfq_queue *__bfqq; | 
|  | 2029 |  | 
|  | 2030 | if (RB_EMPTY_ROOT(root)) | 
|  | 2031 | return NULL; | 
|  | 2032 |  | 
|  | 2033 | /* | 
|  | 2034 | * First, if we find a request starting at the end of the last | 
|  | 2035 | * request, choose it. | 
|  | 2036 | */ | 
|  | 2037 | __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL); | 
|  | 2038 | if (__bfqq) | 
|  | 2039 | return __bfqq; | 
|  | 2040 |  | 
|  | 2041 | /* | 
|  | 2042 | * If the exact sector wasn't found, the parent of the NULL leaf | 
|  | 2043 | * will contain the closest sector (rq_pos_tree sorted by | 
|  | 2044 | * next_request position). | 
|  | 2045 | */ | 
|  | 2046 | __bfqq = rb_entry(parent, struct bfq_queue, pos_node); | 
|  | 2047 | if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) | 
|  | 2048 | return __bfqq; | 
|  | 2049 |  | 
|  | 2050 | if (blk_rq_pos(__bfqq->next_rq) < sector) | 
|  | 2051 | node = rb_next(&__bfqq->pos_node); | 
|  | 2052 | else | 
|  | 2053 | node = rb_prev(&__bfqq->pos_node); | 
|  | 2054 | if (!node) | 
|  | 2055 | return NULL; | 
|  | 2056 |  | 
|  | 2057 | __bfqq = rb_entry(node, struct bfq_queue, pos_node); | 
|  | 2058 | if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector)) | 
|  | 2059 | return __bfqq; | 
|  | 2060 |  | 
|  | 2061 | return NULL; | 
|  | 2062 | } | 
|  | 2063 |  | 
|  | 2064 | static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd, | 
|  | 2065 | struct bfq_queue *cur_bfqq, | 
|  | 2066 | sector_t sector) | 
|  | 2067 | { | 
|  | 2068 | struct bfq_queue *bfqq; | 
|  | 2069 |  | 
|  | 2070 | /* | 
|  | 2071 | * We shall notice if some of the queues are cooperating, | 
|  | 2072 | * e.g., working closely on the same area of the device. In | 
|  | 2073 | * that case, we can group them together and: 1) don't waste | 
|  | 2074 | * time idling, and 2) serve the union of their requests in | 
|  | 2075 | * the best possible order for throughput. | 
|  | 2076 | */ | 
|  | 2077 | bfqq = bfqq_find_close(bfqd, cur_bfqq, sector); | 
|  | 2078 | if (!bfqq || bfqq == cur_bfqq) | 
|  | 2079 | return NULL; | 
|  | 2080 |  | 
|  | 2081 | return bfqq; | 
|  | 2082 | } | 
|  | 2083 |  | 
|  | 2084 | static struct bfq_queue * | 
|  | 2085 | bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) | 
|  | 2086 | { | 
|  | 2087 | int process_refs, new_process_refs; | 
|  | 2088 | struct bfq_queue *__bfqq; | 
|  | 2089 |  | 
|  | 2090 | /* | 
|  | 2091 | * If there are no process references on the new_bfqq, then it is | 
|  | 2092 | * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain | 
|  | 2093 | * may have dropped their last reference (not just their last process | 
|  | 2094 | * reference). | 
|  | 2095 | */ | 
|  | 2096 | if (!bfqq_process_refs(new_bfqq)) | 
|  | 2097 | return NULL; | 
|  | 2098 |  | 
|  | 2099 | /* Avoid a circular list and skip interim queue merges. */ | 
|  | 2100 | while ((__bfqq = new_bfqq->new_bfqq)) { | 
|  | 2101 | if (__bfqq == bfqq) | 
|  | 2102 | return NULL; | 
|  | 2103 | new_bfqq = __bfqq; | 
|  | 2104 | } | 
|  | 2105 |  | 
|  | 2106 | process_refs = bfqq_process_refs(bfqq); | 
|  | 2107 | new_process_refs = bfqq_process_refs(new_bfqq); | 
|  | 2108 | /* | 
|  | 2109 | * If the process owning either queue has gone away, there is no | 
|  | 2110 | * point in merging the queues. | 
|  | 2111 | */ | 
|  | 2112 | if (process_refs == 0 || new_process_refs == 0) | 
|  | 2113 | return NULL; | 
|  | 2114 |  | 
|  | 2115 | bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d", | 
|  | 2116 | new_bfqq->pid); | 
|  | 2117 |  | 
|  | 2118 | /* | 
|  | 2119 | * Merging is just a redirection: the requests of the process | 
|  | 2120 | * owning one of the two queues are redirected to the other queue. | 
|  | 2121 | * The latter queue, in its turn, is set as shared if this is the | 
|  | 2122 | * first time that the requests of some process are redirected to | 
|  | 2123 | * it. | 
|  | 2124 | * | 
|  | 2125 | * We redirect bfqq to new_bfqq and not the opposite, because | 
|  | 2126 | * we are in the context of the process owning bfqq, thus we | 
|  | 2127 | * have the io_cq of this process. So we can immediately | 
|  | 2128 | * configure this io_cq to redirect the requests of the | 
|  | 2129 | * process to new_bfqq. In contrast, the io_cq of new_bfqq is | 
|  | 2130 | * not available any more (new_bfqq->bic == NULL). | 
|  | 2131 | * | 
|  | 2132 | * Anyway, even in case new_bfqq coincides with the in-service | 
|  | 2133 | * queue, redirecting requests to the in-service queue is the | 
|  | 2134 | * best option, as we feed the in-service queue with new | 
|  | 2135 | * requests close to the last request served and, by doing so, | 
|  | 2136 | * are likely to increase the throughput. | 
|  | 2137 | */ | 
|  | 2138 | bfqq->new_bfqq = new_bfqq; | 
|  | 2139 | new_bfqq->ref += process_refs; | 
|  | 2140 | return new_bfqq; | 
|  | 2141 | } | 
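/*
 * Editor's illustrative sketch (user-space) of how the ->new_bfqq chain is
 * followed in bfq_setup_merge() above: merges are transitive, so a queue
 * scheduled to merge into B, with B in turn scheduled to merge into C,
 * ends up targeting C, while a circular chain makes the merge be refused.
 * struct toy_bfqq and the names below are made up for illustration.
 */
#include <stdio.h>
#include <stddef.h>

struct toy_bfqq {
	const char *name;
	struct toy_bfqq *new_bfqq;	/* queue this one will be merged into */
};

static struct toy_bfqq *follow_merge_chain(struct toy_bfqq *bfqq,
					   struct toy_bfqq *new_bfqq)
{
	struct toy_bfqq *next;

	/* Walk to the end of the chain, refusing circular merges. */
	while ((next = new_bfqq->new_bfqq)) {
		if (next == bfqq)
			return NULL;
		new_bfqq = next;
	}
	return new_bfqq;
}

int main(void)
{
	struct toy_bfqq c = { "C", NULL };
	struct toy_bfqq b = { "B", &c };
	struct toy_bfqq a = { "A", NULL };

	struct toy_bfqq *target = follow_merge_chain(&a, &b);
	printf("A will be merged into %s\n", target ? target->name : "(refused)");

	c.new_bfqq = &a;		/* introduce a cycle: C -> A */
	target = follow_merge_chain(&a, &b);
	printf("with a cycle: %s\n", target ? target->name : "(refused)");
	return 0;
}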
|  | 2142 |  | 
|  | 2143 | static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, | 
|  | 2144 | struct bfq_queue *new_bfqq) | 
|  | 2145 | { | 
|  | 2146 | if (bfq_too_late_for_merging(new_bfqq)) | 
|  | 2147 | return false; | 
|  | 2148 |  | 
|  | 2149 | if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) || | 
|  | 2150 | (bfqq->ioprio_class != new_bfqq->ioprio_class)) | 
|  | 2151 | return false; | 
|  | 2152 |  | 
|  | 2153 | /* | 
|  | 2154 | * If either of the queues has already been detected as seeky, | 
|  | 2155 | * then merging it with the other queue is unlikely to lead to | 
|  | 2156 | * sequential I/O. | 
|  | 2157 | */ | 
|  | 2158 | if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq)) | 
|  | 2159 | return false; | 
|  | 2160 |  | 
|  | 2161 | /* | 
|  | 2162 | * Interleaved I/O is known to be done by (some) applications | 
|  | 2163 | * only for reads, so it does not make sense to merge async | 
|  | 2164 | * queues. | 
|  | 2165 | */ | 
|  | 2166 | if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq)) | 
|  | 2167 | return false; | 
|  | 2168 |  | 
|  | 2169 | return true; | 
|  | 2170 | } | 
|  | 2171 |  | 
|  | 2172 | /* | 
|  | 2173 | * Attempt to schedule a merge of bfqq with the currently in-service | 
|  | 2174 | * queue or with a close queue among the scheduled queues.  Return | 
|  | 2175 | * NULL if no merge was scheduled, a pointer to the shared bfq_queue | 
|  | 2176 | * structure otherwise. | 
|  | 2177 | * | 
|  | 2178 | * The OOM queue is not allowed to participate in cooperation: in fact, since | 
|  | 2179 | * the requests temporarily redirected to the OOM queue could be redirected | 
|  | 2180 | * again to dedicated queues at any time, the state needed to correctly | 
|  | 2181 | * handle merging with the OOM queue would be quite complex and expensive | 
|  | 2182 | * to maintain. Besides, in a condition as critical as an out-of-memory one, | 
|  | 2183 | * the benefits of queue merging may be of little relevance, or even negligible. | 
|  | 2184 | * | 
|  | 2185 | * WARNING: queue merging may impair fairness among non-weight raised | 
|  | 2186 | * queues, for at least two reasons: 1) the original weight of a | 
|  | 2187 | * merged queue may change during the merged state, 2) even if the | 
|  | 2188 | * weight stays the same, a merged queue may be bloated with many more | 
|  | 2189 | * requests than the ones produced by its originally-associated | 
|  | 2190 | * process. | 
|  | 2191 | */ | 
|  | 2192 | static struct bfq_queue * | 
|  | 2193 | bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, | 
|  | 2194 | void *io_struct, bool request) | 
|  | 2195 | { | 
|  | 2196 | struct bfq_queue *in_service_bfqq, *new_bfqq; | 
|  | 2197 |  | 
|  | 2198 | /* | 
|  | 2199 | * Prevent bfqq from being merged if it has been created too | 
|  | 2200 | * long ago. The idea is that true cooperating processes, and | 
|  | 2201 | * thus their associated bfq_queues, are supposed to be | 
|  | 2202 | * created shortly after each other. This is the case, e.g., | 
|  | 2203 | * for KVM/QEMU and dump I/O threads. Based on this | 
|  | 2204 | * assumption, the following filtering greatly reduces the | 
|  | 2205 | * probability that two non-cooperating processes, which just | 
|  | 2206 | * happen to do close I/O for some short time interval, have | 
|  | 2207 | * their queues merged by mistake. | 
|  | 2208 | */ | 
|  | 2209 | if (bfq_too_late_for_merging(bfqq)) | 
|  | 2210 | return NULL; | 
|  | 2211 |  | 
|  | 2212 | if (bfqq->new_bfqq) | 
|  | 2213 | return bfqq->new_bfqq; | 
|  | 2214 |  | 
|  | 2215 | if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) | 
|  | 2216 | return NULL; | 
|  | 2217 |  | 
|  | 2218 | /* If there is only one backlogged queue, don't search. */ | 
|  | 2219 | if (bfqd->busy_queues == 1) | 
|  | 2220 | return NULL; | 
|  | 2221 |  | 
|  | 2222 | in_service_bfqq = bfqd->in_service_queue; | 
|  | 2223 |  | 
|  | 2224 | if (in_service_bfqq && in_service_bfqq != bfqq && | 
|  | 2225 | likely(in_service_bfqq != &bfqd->oom_bfqq) && | 
|  | 2226 | bfq_rq_close_to_sector(io_struct, request, | 
|  | 2227 | bfqd->in_serv_last_pos) && | 
|  | 2228 | bfqq->entity.parent == in_service_bfqq->entity.parent && | 
|  | 2229 | bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) { | 
|  | 2230 | new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq); | 
|  | 2231 | if (new_bfqq) | 
|  | 2232 | return new_bfqq; | 
|  | 2233 | } | 
|  | 2234 | /* | 
|  | 2235 | * Check whether there is a cooperator among currently scheduled | 
|  | 2236 | * queues. The only thing we need is that the bio/request is not | 
|  | 2237 | * NULL, as we need it to establish whether a cooperator exists. | 
|  | 2238 | */ | 
|  | 2239 | new_bfqq = bfq_find_close_cooperator(bfqd, bfqq, | 
|  | 2240 | bfq_io_struct_pos(io_struct, request)); | 
|  | 2241 |  | 
|  | 2242 | if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) && | 
|  | 2243 | bfq_may_be_close_cooperator(bfqq, new_bfqq)) | 
|  | 2244 | return bfq_setup_merge(bfqq, new_bfqq); | 
|  | 2245 |  | 
|  | 2246 | return NULL; | 
|  | 2247 | } | 
|  | 2248 |  | 
|  | 2249 | static void bfq_bfqq_save_state(struct bfq_queue *bfqq) | 
|  | 2250 | { | 
|  | 2251 | struct bfq_io_cq *bic = bfqq->bic; | 
|  | 2252 |  | 
|  | 2253 | /* | 
|  | 2254 | * If !bfqq->bic, the queue is already shared or its requests | 
|  | 2255 | * have already been redirected to a shared queue; both idle window | 
|  | 2256 | * and weight raising state have already been saved. Do nothing. | 
|  | 2257 | */ | 
|  | 2258 | if (!bic) | 
|  | 2259 | return; | 
|  | 2260 |  | 
|  | 2261 | bic->saved_ttime = bfqq->ttime; | 
|  | 2262 | bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq); | 
|  | 2263 | bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); | 
|  | 2264 | bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); | 
|  | 2265 | bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); | 
|  | 2266 | if (unlikely(bfq_bfqq_just_created(bfqq) && | 
|  | 2267 | !bfq_bfqq_in_large_burst(bfqq) && | 
|  | 2268 | bfqq->bfqd->low_latency)) { | 
|  | 2269 | /* | 
|  | 2270 | * bfqq being merged right after being created: bfqq | 
|  | 2271 | * would have deserved interactive weight raising, but | 
|  | 2272 | * did not make it to be set in a weight-raised state, | 
|  | 2273 | * because of this early merge.	Store directly the | 
|  | 2274 | * weight-raising state that would have been assigned | 
|  | 2275 | * to bfqq, so that bfqq does not unjustly fail | 
|  | 2276 | * to enjoy weight raising if split soon. | 
|  | 2277 | */ | 
|  | 2278 | bic->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff; | 
|  | 2279 | bic->saved_wr_cur_max_time = bfq_wr_duration(bfqq->bfqd); | 
|  | 2280 | bic->saved_last_wr_start_finish = jiffies; | 
|  | 2281 | } else { | 
|  | 2282 | bic->saved_wr_coeff = bfqq->wr_coeff; | 
|  | 2283 | bic->saved_wr_start_at_switch_to_srt = | 
|  | 2284 | bfqq->wr_start_at_switch_to_srt; | 
|  | 2285 | bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; | 
|  | 2286 | bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; | 
|  | 2287 | } | 
|  | 2288 | } | 
|  | 2289 |  | 
|  | 2290 | static void | 
|  | 2291 | bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, | 
|  | 2292 | struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) | 
|  | 2293 | { | 
|  | 2294 | bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", | 
|  | 2295 | (unsigned long)new_bfqq->pid); | 
|  | 2296 | /* Save weight raising and idle window of the merged queues */ | 
|  | 2297 | bfq_bfqq_save_state(bfqq); | 
|  | 2298 | bfq_bfqq_save_state(new_bfqq); | 
|  | 2299 | if (bfq_bfqq_IO_bound(bfqq)) | 
|  | 2300 | bfq_mark_bfqq_IO_bound(new_bfqq); | 
|  | 2301 | bfq_clear_bfqq_IO_bound(bfqq); | 
|  | 2302 |  | 
|  | 2303 | /* | 
|  | 2304 | * If bfqq is weight-raised, then let new_bfqq inherit | 
|  | 2305 | * weight-raising. To reduce false positives, neglect the case | 
|  | 2306 | * where bfqq has just been created, but has not yet made it | 
|  | 2307 | * to be weight-raised (which may happen because EQM may merge | 
|  | 2308 | * bfqq even before bfq_add_request is executed for the first | 
|  | 2309 | * time for bfqq). Handling this case would however be very | 
|  | 2310 | * easy, thanks to the flag just_created. | 
|  | 2311 | */ | 
|  | 2312 | if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) { | 
|  | 2313 | new_bfqq->wr_coeff = bfqq->wr_coeff; | 
|  | 2314 | new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time; | 
|  | 2315 | new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish; | 
|  | 2316 | new_bfqq->wr_start_at_switch_to_srt = | 
|  | 2317 | bfqq->wr_start_at_switch_to_srt; | 
|  | 2318 | if (bfq_bfqq_busy(new_bfqq)) | 
|  | 2319 | bfqd->wr_busy_queues++; | 
|  | 2320 | new_bfqq->entity.prio_changed = 1; | 
|  | 2321 | } | 
|  | 2322 |  | 
|  | 2323 | if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */ | 
|  | 2324 | bfqq->wr_coeff = 1; | 
|  | 2325 | bfqq->entity.prio_changed = 1; | 
|  | 2326 | if (bfq_bfqq_busy(bfqq)) | 
|  | 2327 | bfqd->wr_busy_queues--; | 
|  | 2328 | } | 
|  | 2329 |  | 
|  | 2330 | bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d", | 
|  | 2331 | bfqd->wr_busy_queues); | 
|  | 2332 |  | 
|  | 2333 | /* | 
|  | 2334 | * Merge queues (that is, let bic redirect its requests to new_bfqq) | 
|  | 2335 | */ | 
|  | 2336 | bic_set_bfqq(bic, new_bfqq, 1); | 
|  | 2337 | bfq_mark_bfqq_coop(new_bfqq); | 
|  | 2338 | /* | 
|  | 2339 | * new_bfqq now belongs to at least two bics (it is a shared queue): | 
|  | 2340 | * set new_bfqq->bic to NULL. bfqq either: | 
|  | 2341 | * - does not belong to any bic any more, and hence bfqq->bic must | 
|  | 2342 | *   be set to NULL, or | 
|  | 2343 | * - is a queue whose owning bics have already been redirected to a | 
|  | 2344 | *   different queue, hence the queue is destined to not belong to | 
|  | 2345 | *   any bic soon and bfqq->bic is already NULL (therefore the next | 
|  | 2346 | *   assignment causes no harm). | 
|  | 2347 | */ | 
|  | 2348 | new_bfqq->bic = NULL; | 
|  | 2349 | bfqq->bic = NULL; | 
|  | 2350 | /* release process reference to bfqq */ | 
|  | 2351 | bfq_put_queue(bfqq); | 
|  | 2352 | } | 
|  | 2353 |  | 
|  | 2354 | static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, | 
|  | 2355 | struct bio *bio) | 
|  | 2356 | { | 
|  | 2357 | struct bfq_data *bfqd = q->elevator->elevator_data; | 
|  | 2358 | bool is_sync = op_is_sync(bio->bi_opf); | 
|  | 2359 | struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq; | 
|  | 2360 |  | 
|  | 2361 | /* | 
|  | 2362 | * Disallow merge of a sync bio into an async request. | 
|  | 2363 | */ | 
|  | 2364 | if (is_sync && !rq_is_sync(rq)) | 
|  | 2365 | return false; | 
|  | 2366 |  | 
|  | 2367 | /* | 
|  | 2368 | * Lookup the bfqq that this bio will be queued with. Allow | 
|  | 2369 | * merge only if rq is queued there. | 
|  | 2370 | */ | 
|  | 2371 | if (!bfqq) | 
|  | 2372 | return false; | 
|  | 2373 |  | 
|  | 2374 | /* | 
|  | 2375 | * We take advantage of this function to perform an early merge | 
|  | 2376 | * of the queues of possible cooperating processes. | 
|  | 2377 | */ | 
|  | 2378 | new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false); | 
|  | 2379 | if (new_bfqq) { | 
|  | 2380 | /* | 
|  | 2381 | * bic still points to bfqq, so it has not yet been | 
|  | 2382 | * redirected to some other bfq_queue, and a queue | 
|  | 2383 | * merge between bfqq and new_bfqq can be safely | 
|  | 2384 | * fulfilled, i.e., bic can be redirected to new_bfqq | 
|  | 2385 | * and bfqq can be put. | 
|  | 2386 | */ | 
|  | 2387 | bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq, | 
|  | 2388 | new_bfqq); | 
|  | 2389 | /* | 
|  | 2390 | * If we get here, bio will be queued into new_bfqq, | 
|  | 2391 | * so use new_bfqq to decide whether bio and rq can be | 
|  | 2392 | * merged. | 
|  | 2393 | */ | 
|  | 2394 | bfqq = new_bfqq; | 
|  | 2395 |  | 
|  | 2396 | /* | 
|  | 2397 | * Change also bfqd->bio_bfqq, as | 
|  | 2398 | * bfqd->bio_bic now points to new_bfqq, and | 
|  | 2399 | * this function may be invoked again (and then may | 
|  | 2400 | * use again bfqd->bio_bfqq). | 
|  | 2401 | */ | 
|  | 2402 | bfqd->bio_bfqq = bfqq; | 
|  | 2403 | } | 
|  | 2404 |  | 
|  | 2405 | return bfqq == RQ_BFQQ(rq); | 
|  | 2406 | } | 
|  | 2407 |  | 
|  | 2408 | /* | 
|  | 2409 | * Set the maximum time for the in-service queue to consume its | 
|  | 2410 | * budget. This prevents seeky processes from lowering the throughput. | 
|  | 2411 | * In practice, a time-slice service scheme is used with seeky | 
|  | 2412 | * processes. | 
|  | 2413 | */ | 
|  | 2414 | static void bfq_set_budget_timeout(struct bfq_data *bfqd, | 
|  | 2415 | struct bfq_queue *bfqq) | 
|  | 2416 | { | 
|  | 2417 | unsigned int timeout_coeff; | 
|  | 2418 |  | 
|  | 2419 | if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) | 
|  | 2420 | timeout_coeff = 1; | 
|  | 2421 | else | 
|  | 2422 | timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; | 
|  | 2423 |  | 
|  | 2424 | bfqd->last_budget_start = ktime_get(); | 
|  | 2425 |  | 
|  | 2426 | bfqq->budget_timeout = jiffies + | 
|  | 2427 | bfqd->bfq_timeout * timeout_coeff; | 
|  | 2428 | } | 
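|  |  |  | 
|  |  | /* | 
|  |  |  * Illustrative sketch, not part of the original code: how the | 
|  |  |  * coefficient above stretches the budget timeout. For a | 
|  |  |  * weight-raised queue outside the soft real-time period, | 
|  |  |  * entity.weight is typically orig_weight * wr_coeff, so | 
|  |  |  * timeout_coeff equals wr_coeff. Assuming wr_coeff = 30 and a base | 
|  |  |  * bfq_timeout of about 125 ms, such a queue may hold the device | 
|  |  |  * for up to ~3.75 s before expiring for BFQQE_BUDGET_TIMEOUT, | 
|  |  |  * whereas a soft real-time queue (wr_cur_max_time == | 
|  |  |  * bfq_wr_rt_max_time) keeps timeout_coeff = 1 and is not | 
|  |  |  * stretched at all. | 
|  |  |  */ | 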
|  | 2429 |  | 
|  | 2430 | static void __bfq_set_in_service_queue(struct bfq_data *bfqd, | 
|  | 2431 | struct bfq_queue *bfqq) | 
|  | 2432 | { | 
|  | 2433 | if (bfqq) { | 
|  | 2434 | bfq_clear_bfqq_fifo_expire(bfqq); | 
|  | 2435 |  | 
|  | 2436 | bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8; | 
|  | 2437 |  | 
|  | 2438 | if (time_is_before_jiffies(bfqq->last_wr_start_finish) && | 
|  | 2439 | bfqq->wr_coeff > 1 && | 
|  | 2440 | bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && | 
|  | 2441 | time_is_before_jiffies(bfqq->budget_timeout)) { | 
|  | 2442 | /* | 
|  | 2443 | * For soft real-time queues, move the start | 
|  | 2444 | * of the weight-raising period forward by the | 
|  | 2445 | * time the queue has not received any | 
|  | 2446 | * service. Otherwise, a relatively long | 
|  | 2447 | * service delay is likely to cause the | 
|  | 2448 | * weight-raising period of the queue to end, | 
|  | 2449 | * because of the short duration of the | 
|  | 2450 | * weight-raising period of a soft real-time | 
|  | 2451 | * queue.  It is worth noting that this move | 
|  | 2452 | * is not so dangerous for the other queues, | 
|  | 2453 | * because soft real-time queues are not | 
|  | 2454 | * greedy. | 
|  | 2455 | * | 
|  | 2456 | * To not add a further variable, we use the | 
|  | 2457 | * overloaded field budget_timeout to | 
|  | 2458 | * determine for how long the queue has not | 
|  | 2459 | * received service, i.e., how much time has | 
|  | 2460 | * elapsed since the queue expired. However, | 
|  | 2461 | * this is a little imprecise, because | 
|  | 2462 | * budget_timeout is set to jiffies if bfqq | 
|  | 2463 | * not only expires, but also remains with no | 
|  | 2464 | * request. | 
|  | 2465 | */ | 
|  | 2466 | if (time_after(bfqq->budget_timeout, | 
|  | 2467 | bfqq->last_wr_start_finish)) | 
|  | 2468 | bfqq->last_wr_start_finish += | 
|  | 2469 | jiffies - bfqq->budget_timeout; | 
|  | 2470 | else | 
|  | 2471 | bfqq->last_wr_start_finish = jiffies; | 
|  | 2472 | } | 
|  | 2473 |  | 
|  | 2474 | bfq_set_budget_timeout(bfqd, bfqq); | 
|  | 2475 | bfq_log_bfqq(bfqd, bfqq, | 
|  | 2476 | "set_in_service_queue, cur-budget = %d", | 
|  | 2477 | bfqq->entity.budget); | 
|  | 2478 | } | 
|  | 2479 |  | 
|  | 2480 | bfqd->in_service_queue = bfqq; | 
|  | 2481 | } | 
|  | 2482 |  | 
|  | 2483 | /* | 
|  | 2484 | * Get and set a new queue for service. | 
|  | 2485 | */ | 
|  | 2486 | static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) | 
|  | 2487 | { | 
|  | 2488 | struct bfq_queue *bfqq = bfq_get_next_queue(bfqd); | 
|  | 2489 |  | 
|  | 2490 | __bfq_set_in_service_queue(bfqd, bfqq); | 
|  | 2491 | return bfqq; | 
|  | 2492 | } | 
|  | 2493 |  | 
|  | 2494 | static void bfq_arm_slice_timer(struct bfq_data *bfqd) | 
|  | 2495 | { | 
|  | 2496 | struct bfq_queue *bfqq = bfqd->in_service_queue; | 
|  | 2497 | u32 sl; | 
|  | 2498 |  | 
|  | 2499 | bfq_mark_bfqq_wait_request(bfqq); | 
|  | 2500 |  | 
|  | 2501 | /* | 
|  | 2502 | * We don't want to idle for seeks, but we do want to allow | 
|  | 2503 | * fair distribution of slice time for a process doing back-to-back | 
|  | 2504 | * seeks. So allow a little bit of time for it to submit a new rq. | 
|  | 2505 | */ | 
|  | 2506 | sl = bfqd->bfq_slice_idle; | 
|  | 2507 | /* | 
|  | 2508 | * Unless the queue is being weight-raised or the scenario is | 
|  | 2509 | * asymmetric, grant only minimum idle time if the queue | 
|  | 2510 | * is seeky. A long idling is preserved for a weight-raised | 
|  | 2511 | * queue, or, more in general, in an asymmetric scenario, | 
|  | 2512 | * because a long idling is needed for guaranteeing to a queue | 
|  | 2513 | * its reserved share of the throughput (in particular, it is | 
|  | 2514 | * needed if the queue has a higher weight than some other | 
|  | 2515 | * queue). | 
|  | 2516 | */ | 
|  | 2517 | if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && | 
|  | 2518 | bfq_symmetric_scenario(bfqd)) | 
|  | 2519 | sl = min_t(u64, sl, BFQ_MIN_TT); | 
|  | 2520 | else if (bfqq->wr_coeff > 1) | 
|  | 2521 | sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC); | 
|  | 2522 |  | 
|  | 2523 | bfqd->last_idling_start = ktime_get(); | 
|  | 2524 | hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), | 
|  | 2525 | HRTIMER_MODE_REL); | 
|  | 2526 | bfqg_stats_set_start_idle_time(bfqq_group(bfqq)); | 
|  | 2527 | } | 
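|  |  |  | 
|  |  | /* | 
|  |  |  * Rough numerical reading of the idle-slice choice above (values | 
|  |  |  * are assumptions, not taken from this file): with a slice_idle of | 
|  |  |  * about 8 ms, a seeky, non-weight-raised queue in a symmetric | 
|  |  |  * scenario idles only for min(8 ms, BFQ_MIN_TT), i.e. a couple of | 
|  |  |  * milliseconds, while a weight-raised queue idles for | 
|  |  |  * max(8 ms, 20 ms) = 20 ms, to better protect its reserved share | 
|  |  |  * of the throughput. | 
|  |  |  */ | 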
|  | 2528 |  | 
|  | 2529 | /* | 
|  | 2530 | * In autotuning mode, max_budget is dynamically recomputed as the | 
|  | 2531 | * amount of sectors transferred in timeout at the estimated peak | 
|  | 2532 | * rate. This enables BFQ to utilize a full timeslice with a full | 
|  | 2533 | * budget, even if the in-service queue is served at peak rate. And | 
|  | 2534 | * this maximises throughput with sequential workloads. | 
|  | 2535 | */ | 
|  | 2536 | static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd) | 
|  | 2537 | { | 
|  | 2538 | return (u64)bfqd->peak_rate * USEC_PER_MSEC * | 
|  | 2539 | jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT; | 
|  | 2540 | } | 
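|  |  |  | 
|  |  | /* | 
|  |  |  * Worked example for the formula above (illustrative numbers only). | 
|  |  |  * peak_rate is kept in (sectors/usec) << BFQ_RATE_SHIFT, so | 
|  |  |  * | 
|  |  |  *   max_budget = peak_rate * USEC_PER_MSEC * | 
|  |  |  *                jiffies_to_msecs(bfq_timeout) >> BFQ_RATE_SHIFT | 
|  |  |  * | 
|  |  |  * boils down to "sectors per millisecond * timeout in | 
|  |  |  * milliseconds". Assuming a device measured at ~400 MB/s, i.e. | 
|  |  |  * roughly 800 sectors/ms, and a timeout of ~125 ms, the autotuned | 
|  |  |  * maximum budget would be about 100000 sectors (~50 MB). | 
|  |  |  */ | 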
|  | 2541 |  | 
|  | 2542 | /* | 
|  | 2543 | * Update parameters related to throughput and responsiveness, as a | 
|  | 2544 | * function of the estimated peak rate. See comments on | 
|  | 2545 | * bfq_calc_max_budget(), and on the ref_wr_duration array. | 
|  | 2546 | */ | 
|  | 2547 | static void update_thr_responsiveness_params(struct bfq_data *bfqd) | 
|  | 2548 | { | 
|  | 2549 | if (bfqd->bfq_user_max_budget == 0) { | 
|  | 2550 | bfqd->bfq_max_budget = | 
|  | 2551 | bfq_calc_max_budget(bfqd); | 
|  | 2552 | bfq_log(bfqd, "new max_budget = %d", bfqd->bfq_max_budget); | 
|  | 2553 | } | 
|  | 2554 | } | 
|  | 2555 |  | 
|  | 2556 | static void bfq_reset_rate_computation(struct bfq_data *bfqd, | 
|  | 2557 | struct request *rq) | 
|  | 2558 | { | 
|  | 2559 | if (rq != NULL) { /* new rq dispatch now, reset accordingly */ | 
|  | 2560 | bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns(); | 
|  | 2561 | bfqd->peak_rate_samples = 1; | 
|  | 2562 | bfqd->sequential_samples = 0; | 
|  | 2563 | bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size = | 
|  | 2564 | blk_rq_sectors(rq); | 
|  | 2565 | } else /* no new rq dispatched, just reset the number of samples */ | 
|  | 2566 | bfqd->peak_rate_samples = 0; /* full re-init on next disp. */ | 
|  | 2567 |  | 
|  | 2568 | bfq_log(bfqd, | 
|  | 2569 | "reset_rate_computation at end, sample %u/%u tot_sects %llu", | 
|  | 2570 | bfqd->peak_rate_samples, bfqd->sequential_samples, | 
|  | 2571 | bfqd->tot_sectors_dispatched); | 
|  | 2572 | } | 
|  | 2573 |  | 
|  | 2574 | static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) | 
|  | 2575 | { | 
|  | 2576 | u32 rate, weight, divisor; | 
|  | 2577 |  | 
|  | 2578 | /* | 
|  | 2579 | * For the convergence property to hold (see comments on | 
|  | 2580 | * bfq_update_peak_rate()) and for the assessment to be | 
|  | 2581 | * reliable, a minimum number of samples must be present, and | 
|  | 2582 | * a minimum amount of time must have elapsed. If not so, do | 
|  | 2583 | * not compute new rate. Just reset parameters, to get ready | 
|  | 2584 | * for a new evaluation attempt. | 
|  | 2585 | */ | 
|  | 2586 | if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || | 
|  | 2587 | bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) | 
|  | 2588 | goto reset_computation; | 
|  | 2589 |  | 
|  | 2590 | /* | 
|  | 2591 | * If a new request completion has occurred after last | 
|  | 2592 | * dispatch, then, to approximate the rate at which requests | 
|  | 2593 | * have been served by the device, it is more precise to | 
|  | 2594 | * extend the observation interval to the last completion. | 
|  | 2595 | */ | 
|  | 2596 | bfqd->delta_from_first = | 
|  | 2597 | max_t(u64, bfqd->delta_from_first, | 
|  | 2598 | bfqd->last_completion - bfqd->first_dispatch); | 
|  | 2599 |  | 
|  | 2600 | /* | 
|  | 2601 | * Rate computed in sects/usec, and not sects/nsec, for | 
|  | 2602 | * precision issues. | 
|  | 2603 | */ | 
|  | 2604 | rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT, | 
|  | 2605 | div_u64(bfqd->delta_from_first, NSEC_PER_USEC)); | 
|  | 2606 |  | 
|  | 2607 | /* | 
|  | 2608 | * Peak rate not updated if: | 
|  | 2609 | * - the percentage of sequential dispatches is below 3/4 of the | 
|  | 2610 | *   total, and rate is below the current estimated peak rate | 
|  | 2611 | * - rate is unreasonably high (> 20M sectors/sec) | 
|  | 2612 | */ | 
|  | 2613 | if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 && | 
|  | 2614 | rate <= bfqd->peak_rate) || | 
|  | 2615 | rate > 20<<BFQ_RATE_SHIFT) | 
|  | 2616 | goto reset_computation; | 
|  | 2617 |  | 
|  | 2618 | /* | 
|  | 2619 | * We have to update the peak rate, at last! To this purpose, | 
|  | 2620 | * we use a low-pass filter. We compute the smoothing constant | 
|  | 2621 | * of the filter as a function of the 'weight' of the new | 
|  | 2622 | * measured rate. | 
|  | 2623 | * | 
|  | 2624 | * As can be seen in next formulas, we define this weight as a | 
|  | 2625 | * quantity proportional to how sequential the workload is, | 
|  | 2626 | * and to how long the observation time interval is. | 
|  | 2627 | * | 
|  | 2628 | * The weight runs from 0 to 8. The maximum value of the | 
|  | 2629 | * weight, 8, yields the minimum value for the smoothing | 
|  | 2630 | * constant. At this minimum value for the smoothing constant, | 
|  | 2631 | * the measured rate contributes for half of the next value of | 
|  | 2632 | * the estimated peak rate. | 
|  | 2633 | * | 
|  | 2634 | * So, the first step is to compute the weight as a function | 
|  | 2635 | * of how sequential the workload is. Note that the weight | 
|  | 2636 | * cannot reach 9, because bfqd->sequential_samples cannot | 
|  | 2637 | * become equal to bfqd->peak_rate_samples, which, in its | 
|  | 2638 | * turn, holds true because bfqd->sequential_samples is not | 
|  | 2639 | * incremented for the first sample. | 
|  | 2640 | */ | 
|  | 2641 | weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples; | 
|  | 2642 |  | 
|  | 2643 | /* | 
|  | 2644 | * Second step: further refine the weight as a function of the | 
|  | 2645 | * duration of the observation interval. | 
|  | 2646 | */ | 
|  | 2647 | weight = min_t(u32, 8, | 
|  | 2648 | div_u64(weight * bfqd->delta_from_first, | 
|  | 2649 | BFQ_RATE_REF_INTERVAL)); | 
|  | 2650 |  | 
|  | 2651 | /* | 
|  | 2652 | * Divisor ranging from 10, for minimum weight, to 2, for | 
|  | 2653 | * maximum weight. | 
|  | 2654 | */ | 
|  | 2655 | divisor = 10 - weight; | 
|  | 2656 |  | 
|  | 2657 | /* | 
|  | 2658 | * Finally, update peak rate: | 
|  | 2659 | * | 
|  | 2660 | * peak_rate = peak_rate * (divisor-1) / divisor  +  rate / divisor | 
|  | 2661 | */ | 
|  | 2662 | bfqd->peak_rate *= divisor-1; | 
|  | 2663 | bfqd->peak_rate /= divisor; | 
|  | 2664 | rate /= divisor; /* smoothing constant alpha = 1/divisor */ | 
|  | 2665 |  | 
|  | 2666 | bfqd->peak_rate += rate; | 
|  | 2667 |  | 
|  | 2668 | /* | 
|  | 2669 | * For a very slow device, bfqd->peak_rate can reach 0 (see | 
|  | 2670 | * the minimum representable values reported in the comments | 
|  | 2671 | * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid | 
|  | 2672 | * divisions by zero where bfqd->peak_rate is used as a | 
|  | 2673 | * divisor. | 
|  | 2674 | */ | 
|  | 2675 | bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate); | 
|  | 2676 |  | 
|  | 2677 | update_thr_responsiveness_params(bfqd); | 
|  | 2678 |  | 
|  | 2679 | reset_computation: | 
|  | 2680 | bfq_reset_rate_computation(bfqd, rq); | 
|  | 2681 | } | 
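|  |  |  | 
|  |  | /* | 
|  |  |  * Numerical sketch of the low-pass filter above (figures are | 
|  |  |  * illustrative assumptions): suppose 8 out of 10 samples in the | 
|  |  |  * observation interval were sequential, and the interval lasted one | 
|  |  |  * full BFQ_RATE_REF_INTERVAL, so that the second refinement leaves | 
|  |  |  * the weight unchanged. Then | 
|  |  |  * | 
|  |  |  *   weight  = (9 * 8) / 10 = 7 | 
|  |  |  *   divisor = 10 - 7 = 3 | 
|  |  |  *   peak_rate_next = peak_rate * 2/3 + rate * 1/3 | 
|  |  |  * | 
|  |  |  * i.e. the new measurement contributes one third of the updated | 
|  |  |  * estimate. With an almost fully sequential interval (weight = 8, | 
|  |  |  * divisor = 2) the contribution grows to one half, the maximum | 
|  |  |  * allowed by the filter. | 
|  |  |  */ | 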
|  | 2682 |  | 
|  | 2683 | /* | 
|  | 2684 | * Update the read/write peak rate (the main quantity used for | 
|  | 2685 | * auto-tuning, see update_thr_responsiveness_params()). | 
|  | 2686 | * | 
|  | 2687 | * It is not trivial to estimate the peak rate (correctly): because of | 
|  | 2688 | * the presence of sw and hw queues between the scheduler and the | 
|  | 2689 | * device components that finally serve I/O requests, it is hard to | 
|  | 2690 | * say exactly when a given dispatched request is served inside the | 
|  | 2691 | * device, and for how long. As a consequence, it is hard to know | 
|  | 2692 | * precisely at what rate a given set of requests is actually served | 
|  | 2693 | * by the device. | 
|  | 2694 | * | 
|  | 2695 | * On the opposite end, the dispatch time of any request is trivially | 
|  | 2696 | * available, and, from this piece of information, the "dispatch rate" | 
|  | 2697 | * of requests can be immediately computed. So, the idea in the next | 
|  | 2698 | * function is to use what is known, namely request dispatch times | 
|  | 2699 | * (plus, when useful, request completion times), to estimate what is | 
|  | 2700 | * unknown, namely in-device request service rate. | 
|  | 2701 | * | 
|  | 2702 | * The main issue is that, because of the above facts, the rate at | 
|  | 2703 | * which a certain set of requests is dispatched over a certain time | 
|  | 2704 | * interval can vary greatly with respect to the rate at which the | 
|  | 2705 | * same requests are then served. But, since the size of any | 
|  | 2706 | * intermediate queue is limited, and the service scheme is lossless | 
|  | 2707 | * (no request is silently dropped), the following obvious convergence | 
|  | 2708 | * property holds: the number of requests dispatched MUST become | 
|  | 2709 | * closer and closer to the number of requests completed as the | 
|  | 2710 | * observation interval grows. This is the key property used in | 
|  | 2711 | * the next function to estimate the peak service rate as a function | 
|  | 2712 | * of the observed dispatch rate. The function is expected to be invoked | 
|  | 2713 | * on every request dispatch. | 
|  | 2714 | */ | 
|  | 2715 | static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) | 
|  | 2716 | { | 
|  | 2717 | u64 now_ns = ktime_get_ns(); | 
|  | 2718 |  | 
|  | 2719 | if (bfqd->peak_rate_samples == 0) { /* first dispatch */ | 
|  | 2720 | bfq_log(bfqd, "update_peak_rate: goto reset, samples %d", | 
|  | 2721 | bfqd->peak_rate_samples); | 
|  | 2722 | bfq_reset_rate_computation(bfqd, rq); | 
|  | 2723 | goto update_last_values; /* will add one sample */ | 
|  | 2724 | } | 
|  | 2725 |  | 
|  | 2726 | /* | 
|  | 2727 | * Device idle for very long: the observation interval lasting | 
|  | 2728 | * up to this dispatch cannot be a valid observation interval | 
|  | 2729 | * for computing a new peak rate (similarly to the late- | 
|  | 2730 | * completion event in bfq_completed_request()). Go to | 
|  | 2731 | * update_rate_and_reset to have the following three steps | 
|  | 2732 | * taken: | 
|  | 2733 | * - close the observation interval at the last (previous) | 
|  | 2734 | *   request dispatch or completion | 
|  | 2735 | * - compute rate, if possible, for that observation interval | 
|  | 2736 | * - start a new observation interval with this dispatch | 
|  | 2737 | */ | 
|  | 2738 | if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC && | 
|  | 2739 | bfqd->rq_in_driver == 0) | 
|  | 2740 | goto update_rate_and_reset; | 
|  | 2741 |  | 
|  | 2742 | /* Update sampling information */ | 
|  | 2743 | bfqd->peak_rate_samples++; | 
|  | 2744 |  | 
|  | 2745 | if ((bfqd->rq_in_driver > 0 || | 
|  | 2746 | now_ns - bfqd->last_completion < BFQ_MIN_TT) | 
|  | 2747 | && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR) | 
|  | 2748 | bfqd->sequential_samples++; | 
|  | 2749 |  | 
|  | 2750 | bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); | 
|  | 2751 |  | 
|  | 2752 | /* Reset max observed rq size every 32 dispatches */ | 
|  | 2753 | if (likely(bfqd->peak_rate_samples % 32)) | 
|  | 2754 | bfqd->last_rq_max_size = | 
|  | 2755 | max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size); | 
|  | 2756 | else | 
|  | 2757 | bfqd->last_rq_max_size = blk_rq_sectors(rq); | 
|  | 2758 |  | 
|  | 2759 | bfqd->delta_from_first = now_ns - bfqd->first_dispatch; | 
|  | 2760 |  | 
|  | 2761 | /* Target observation interval not yet reached, go on sampling */ | 
|  | 2762 | if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL) | 
|  | 2763 | goto update_last_values; | 
|  | 2764 |  | 
|  | 2765 | update_rate_and_reset: | 
|  | 2766 | bfq_update_rate_reset(bfqd, rq); | 
|  | 2767 | update_last_values: | 
|  | 2768 | bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); | 
|  | 2769 | if (RQ_BFQQ(rq) == bfqd->in_service_queue) | 
|  | 2770 | bfqd->in_serv_last_pos = bfqd->last_position; | 
|  | 2771 | bfqd->last_dispatch = now_ns; | 
|  | 2772 | } | 
|  | 2773 |  | 
|  | 2774 | /* | 
|  | 2775 | * Remove request from internal lists. | 
|  | 2776 | */ | 
|  | 2777 | static void bfq_dispatch_remove(struct request_queue *q, struct request *rq) | 
|  | 2778 | { | 
|  | 2779 | struct bfq_queue *bfqq = RQ_BFQQ(rq); | 
|  | 2780 |  | 
|  | 2781 | /* | 
|  | 2782 | * For consistency, the next instruction should have been | 
|  | 2783 | * executed after removing the request from the queue and | 
|  | 2784 | * dispatching it.  We execute instead this instruction before | 
|  | 2785 | * bfq_remove_request() (and hence introduce a temporary | 
|  | 2786 | * inconsistency), for efficiency.  In fact, should this | 
|  | 2787 | * dispatch occur for a non-in-service bfqq, this anticipated | 
|  | 2788 | * increment prevents two counters related to bfqq->dispatched | 
|  | 2789 | * from being, first, uselessly decremented, and then | 
|  | 2790 | * incremented again when the (new) value of bfqq->dispatched | 
|  | 2791 | * happens to be taken into account. | 
|  | 2792 | */ | 
|  | 2793 | bfqq->dispatched++; | 
|  | 2794 | bfq_update_peak_rate(q->elevator->elevator_data, rq); | 
|  | 2795 |  | 
|  | 2796 | bfq_remove_request(q, rq); | 
|  | 2797 | } | 
|  | 2798 |  | 
|  | 2799 | static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) | 
|  | 2800 | { | 
|  | 2801 | /* | 
|  | 2802 | * If this bfqq is shared between multiple processes, check | 
|  | 2803 | * to make sure that those processes are still issuing I/Os | 
|  | 2804 | * within the mean seek distance. If not, it may be time to | 
|  | 2805 | * break the queues apart again. | 
|  | 2806 | */ | 
|  | 2807 | if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq)) | 
|  | 2808 | bfq_mark_bfqq_split_coop(bfqq); | 
|  | 2809 |  | 
|  | 2810 | if (RB_EMPTY_ROOT(&bfqq->sort_list)) { | 
|  | 2811 | if (bfqq->dispatched == 0) | 
|  | 2812 | /* | 
|  | 2813 | * Overloading budget_timeout field to store | 
|  | 2814 | * the time at which the queue remains with no | 
|  | 2815 | * backlog and no outstanding request; used by | 
|  | 2816 | * the weight-raising mechanism. | 
|  | 2817 | */ | 
|  | 2818 | bfqq->budget_timeout = jiffies; | 
|  | 2819 |  | 
|  | 2820 | bfq_del_bfqq_busy(bfqd, bfqq, true); | 
|  | 2821 | } else { | 
|  | 2822 | bfq_requeue_bfqq(bfqd, bfqq, true); | 
|  | 2823 | /* | 
|  | 2824 | * Resort priority tree of potential close cooperators. | 
|  | 2825 | */ | 
|  | 2826 | bfq_pos_tree_add_move(bfqd, bfqq); | 
|  | 2827 | } | 
|  | 2828 |  | 
|  | 2829 | /* | 
|  | 2830 | * All in-service entities must have been properly deactivated | 
|  | 2831 | * or requeued before executing the next function, which | 
|  | 2832 | * resets all in-service entities as no longer in service. | 
|  | 2833 | */ | 
|  | 2834 | __bfq_bfqd_reset_in_service(bfqd); | 
|  | 2835 | } | 
|  | 2836 |  | 
|  | 2837 | /** | 
|  | 2838 | * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior. | 
|  | 2839 | * @bfqd: device data. | 
|  | 2840 | * @bfqq: queue to update. | 
|  | 2841 | * @reason: reason for expiration. | 
|  | 2842 | * | 
|  | 2843 | * Handle the feedback on @bfqq budget at queue expiration. | 
|  | 2844 | * See the body for detailed comments. | 
|  | 2845 | */ | 
|  | 2846 | static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, | 
|  | 2847 | struct bfq_queue *bfqq, | 
|  | 2848 | enum bfqq_expiration reason) | 
|  | 2849 | { | 
|  | 2850 | struct request *next_rq; | 
|  | 2851 | int budget, min_budget; | 
|  | 2852 |  | 
|  | 2853 | min_budget = bfq_min_budget(bfqd); | 
|  | 2854 |  | 
|  | 2855 | if (bfqq->wr_coeff == 1) | 
|  | 2856 | budget = bfqq->max_budget; | 
|  | 2857 | else /* | 
|  | 2858 | * Use a constant, low budget for weight-raised queues, | 
|  | 2859 | * to help achieve a low latency. Keep it slightly higher | 
|  | 2860 | * than the minimum possible budget, to cause a little | 
|  | 2861 | * bit fewer expirations. | 
|  | 2862 | */ | 
|  | 2863 | budget = 2 * min_budget; | 
|  | 2864 |  | 
|  | 2865 | bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d", | 
|  | 2866 | bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); | 
|  | 2867 | bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d", | 
|  | 2868 | budget, bfq_min_budget(bfqd)); | 
|  | 2869 | bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", | 
|  | 2870 | bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); | 
|  | 2871 |  | 
|  | 2872 | if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) { | 
|  | 2873 | switch (reason) { | 
|  | 2874 | /* | 
|  | 2875 | * Caveat: in all the following cases we trade latency | 
|  | 2876 | * for throughput. | 
|  | 2877 | */ | 
|  | 2878 | case BFQQE_TOO_IDLE: | 
|  | 2879 | /* | 
|  | 2880 | * This is the only case where we may reduce | 
|  | 2881 | * the budget: if there is no request of the | 
|  | 2882 | * process still waiting for completion, then | 
|  | 2883 | * we assume (tentatively) that the timer has | 
|  | 2884 | * expired because the batch of requests of | 
|  | 2885 | * the process could have been served with a | 
|  | 2886 | * smaller budget.  Hence, betting that the | 
|  | 2887 | * process will behave in the same way when it | 
|  | 2888 | * becomes backlogged again, we reduce its | 
|  | 2889 | * next budget.  As long as we guess right, | 
|  | 2890 | * this budget cut reduces the latency | 
|  | 2891 | * experienced by the process. | 
|  | 2892 | * | 
|  | 2893 | * However, if there are still outstanding | 
|  | 2894 | * requests, then the process may have not yet | 
|  | 2895 | * issued its next request just because it is | 
|  | 2896 | * still waiting for the completion of some of | 
|  | 2897 | * the still outstanding ones.  So in this | 
|  | 2898 | * subcase we do not reduce its budget, on the | 
|  | 2899 | * contrary we increase it to possibly boost | 
|  | 2900 | * the throughput, as discussed in the | 
|  | 2901 | * comments to the BUDGET_TIMEOUT case. | 
|  | 2902 | */ | 
|  | 2903 | if (bfqq->dispatched > 0) /* still outstanding reqs */ | 
|  | 2904 | budget = min(budget * 2, bfqd->bfq_max_budget); | 
|  | 2905 | else { | 
|  | 2906 | if (budget > 5 * min_budget) | 
|  | 2907 | budget -= 4 * min_budget; | 
|  | 2908 | else | 
|  | 2909 | budget = min_budget; | 
|  | 2910 | } | 
|  | 2911 | break; | 
|  | 2912 | case BFQQE_BUDGET_TIMEOUT: | 
|  | 2913 | /* | 
|  | 2914 | * We double the budget here because it gives | 
|  | 2915 | * the chance to boost the throughput if this | 
|  | 2916 | * is not a seeky process (and has bumped into | 
|  | 2917 | * this timeout because of, e.g., ZBR). | 
|  | 2918 | */ | 
|  | 2919 | budget = min(budget * 2, bfqd->bfq_max_budget); | 
|  | 2920 | break; | 
|  | 2921 | case BFQQE_BUDGET_EXHAUSTED: | 
|  | 2922 | /* | 
|  | 2923 | * The process still has backlog, and did not | 
|  | 2924 | * let either the budget timeout or the disk | 
|  | 2925 | * idling timeout expire. Hence it is not | 
|  | 2926 | * seeky, has a short thinktime and may be | 
|  | 2927 | * happy with a higher budget too. So | 
|  | 2928 | * definitely increase the budget of this good | 
|  | 2929 | * candidate to boost the disk throughput. | 
|  | 2930 | */ | 
|  | 2931 | budget = min(budget * 4, bfqd->bfq_max_budget); | 
|  | 2932 | break; | 
|  | 2933 | case BFQQE_NO_MORE_REQUESTS: | 
|  | 2934 | /* | 
|  | 2935 | * For queues that expire for this reason, it | 
|  | 2936 | * is particularly important to keep the | 
|  | 2937 | * budget close to the actual service they | 
|  | 2938 | * need. Doing so reduces the timestamp | 
|  | 2939 | * misalignment problem described in the | 
|  | 2940 | * comments in the body of | 
|  | 2941 | * __bfq_activate_entity. In fact, suppose | 
|  | 2942 | * that a queue systematically expires for | 
|  | 2943 | * BFQQE_NO_MORE_REQUESTS and presents a | 
|  | 2944 | * new request in time to enjoy timestamp | 
|  | 2945 | * back-shifting. The larger the budget of the | 
|  | 2946 | * queue is with respect to the service the | 
|  | 2947 | * queue actually requests in each service | 
|  | 2948 | * slot, the more times the queue can be | 
|  | 2949 | * reactivated with the same virtual finish | 
|  | 2950 | * time. It follows that, even if this finish | 
|  | 2951 | * time is pushed to the system virtual time | 
|  | 2952 | * to reduce the consequent timestamp | 
|  | 2953 | * misalignment, the queue unjustly enjoys for | 
|  | 2954 | * many re-activations a lower finish time | 
|  | 2955 | * than all newly activated queues. | 
|  | 2956 | * | 
|  | 2957 | * The service needed by bfqq is measured | 
|  | 2958 | * quite precisely by bfqq->entity.service. | 
|  | 2959 | * Since bfqq does not enjoy device idling, | 
|  | 2960 | * bfqq->entity.service is equal to the number | 
|  | 2961 | * of sectors that the process associated with | 
|  | 2962 | * bfqq requested to read/write before waiting | 
|  | 2963 | * for request completions, or blocking for | 
|  | 2964 | * other reasons. | 
|  | 2965 | */ | 
|  | 2966 | budget = max_t(int, bfqq->entity.service, min_budget); | 
|  | 2967 | break; | 
|  | 2968 | default: | 
|  | 2969 | return; | 
|  | 2970 | } | 
|  | 2971 | } else if (!bfq_bfqq_sync(bfqq)) { | 
|  | 2972 | /* | 
|  | 2973 | * Async queues always get the maximum possible | 
|  | 2974 | * budget, as for them we do not care about latency | 
|  | 2975 | * (in addition, their ability to dispatch is limited | 
|  | 2976 | * by the charging factor). | 
|  | 2977 | */ | 
|  | 2978 | budget = bfqd->bfq_max_budget; | 
|  | 2979 | } | 
|  | 2980 |  | 
|  | 2981 | bfqq->max_budget = budget; | 
|  | 2982 |  | 
|  | 2983 | if (bfqd->budgets_assigned >= bfq_stats_min_budgets && | 
|  | 2984 | !bfqd->bfq_user_max_budget) | 
|  | 2985 | bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); | 
|  | 2986 |  | 
|  | 2987 | /* | 
|  | 2988 | * If there is still backlog, then assign a new budget, making | 
|  | 2989 | * sure that it is large enough for the next request.  Since | 
|  | 2990 | * the finish time of bfqq must be kept in sync with the | 
|  | 2991 | * budget, be sure to call __bfq_bfqq_expire() *after* this | 
|  | 2992 | * update. | 
|  | 2993 | * | 
|  | 2994 | * If there is no backlog, then no need to update the budget; | 
|  | 2995 | * it will be updated on the arrival of a new request. | 
|  | 2996 | */ | 
|  | 2997 | next_rq = bfqq->next_rq; | 
|  | 2998 | if (next_rq) | 
|  | 2999 | bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget, | 
|  | 3000 | bfq_serv_to_charge(next_rq, bfqq)); | 
|  | 3001 |  | 
|  | 3002 | bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d", | 
|  | 3003 | next_rq ? blk_rq_sectors(next_rq) : 0, | 
|  | 3004 | bfqq->entity.budget); | 
|  | 3005 | } | 
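|  |  |  | 
|  |  | /* | 
|  |  |  * Feedback example with made-up numbers (illustrative only): take | 
|  |  |  * min_budget = 512 sectors and a sync, non-weight-raised queue | 
|  |  |  * whose current max_budget is 4096 sectors. | 
|  |  |  * - BFQQE_TOO_IDLE with no outstanding requests: since | 
|  |  |  *   4096 > 5 * 512, the budget shrinks to 4096 - 4 * 512 = 2048. | 
|  |  |  * - BFQQE_BUDGET_TIMEOUT: the budget doubles to 8192, capped at | 
|  |  |  *   bfqd->bfq_max_budget. | 
|  |  |  * - BFQQE_BUDGET_EXHAUSTED: the budget is quadrupled to 16384, | 
|  |  |  *   again capped at bfqd->bfq_max_budget. | 
|  |  |  * A weight-raised queue skips this feedback entirely and keeps the | 
|  |  |  * small, constant budget of 2 * min_budget set above. | 
|  |  |  */ | 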
|  | 3006 |  | 
|  | 3007 | /* | 
|  | 3008 | * Return true if the process associated with bfqq is "slow". The slow | 
|  | 3009 | * flag is used, in addition to the budget timeout, to reduce the | 
|  | 3010 | * amount of service provided to seeky processes, and thus reduce | 
|  | 3011 | * their chances to lower the throughput. More details in the comments | 
|  | 3012 | * on the function bfq_bfqq_expire(). | 
|  | 3013 | * | 
|  | 3014 | * An important observation is in order: as discussed in the comments | 
|  | 3015 | * on the function bfq_update_peak_rate(), with devices with internal | 
|  | 3016 | * queues, it is hard if ever possible to know when and for how long | 
|  | 3017 | * an I/O request is processed by the device (apart from the trivial | 
|  | 3018 | * I/O pattern where a new request is dispatched only after the | 
|  | 3019 | * previous one has been completed). This makes it hard to evaluate | 
|  | 3020 | * the real rate at which the I/O requests of each bfq_queue are | 
|  | 3021 | * served.  In fact, for an I/O scheduler like BFQ, serving a | 
|  | 3022 | * bfq_queue means just dispatching its requests during its service | 
|  | 3023 | * slot (i.e., until the budget of the queue is exhausted, or the | 
|  | 3024 | * queue remains idle, or, finally, a timeout fires). But, during the | 
|  | 3025 | * service slot of a bfq_queue, around 100 ms at most, the device may | 
|  | 3026 | * be even still processing requests of bfq_queues served in previous | 
|  | 3027 | * service slots. On the opposite end, the requests of the in-service | 
|  | 3028 | * bfq_queue may be completed after the service slot of the queue | 
|  | 3029 | * finishes. | 
|  | 3030 | * | 
|  | 3031 | * Anyway, unless more sophisticated solutions are used | 
|  | 3032 | * (where possible), the sum of the sizes of the requests dispatched | 
|  | 3033 | * during the service slot of a bfq_queue is probably the only | 
|  | 3034 | * approximation available for the service received by the bfq_queue | 
|  | 3035 | * during its service slot. And this sum is the quantity used in this | 
|  | 3036 | * function to evaluate the I/O speed of a process. | 
|  | 3037 | */ | 
|  | 3038 | static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, | 
|  | 3039 | bool compensate, enum bfqq_expiration reason, | 
|  | 3040 | unsigned long *delta_ms) | 
|  | 3041 | { | 
|  | 3042 | ktime_t delta_ktime; | 
|  | 3043 | u32 delta_usecs; | 
|  | 3044 | bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekiness */ | 
|  | 3045 |  | 
|  | 3046 | if (!bfq_bfqq_sync(bfqq)) | 
|  | 3047 | return false; | 
|  | 3048 |  | 
|  | 3049 | if (compensate) | 
|  | 3050 | delta_ktime = bfqd->last_idling_start; | 
|  | 3051 | else | 
|  | 3052 | delta_ktime = ktime_get(); | 
|  | 3053 | delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); | 
|  | 3054 | delta_usecs = ktime_to_us(delta_ktime); | 
|  | 3055 |  | 
|  | 3056 | /* don't use too short time intervals */ | 
|  | 3057 | if (delta_usecs < 1000) { | 
|  | 3058 | if (blk_queue_nonrot(bfqd->queue)) | 
|  | 3059 | /* | 
|  | 3060 | * give same worst-case guarantees as idling | 
|  | 3061 | * for seeky | 
|  | 3062 | */ | 
|  | 3063 | *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC; | 
|  | 3064 | else /* charge at least one seek */ | 
|  | 3065 | *delta_ms = bfq_slice_idle / NSEC_PER_MSEC; | 
|  | 3066 |  | 
|  | 3067 | return slow; | 
|  | 3068 | } | 
|  | 3069 |  | 
|  | 3070 | *delta_ms = delta_usecs / USEC_PER_MSEC; | 
|  | 3071 |  | 
|  | 3072 | /* | 
|  | 3073 | * Use only long (> 20ms) intervals to filter out excessive | 
|  | 3074 | * spikes in service rate estimation. | 
|  | 3075 | */ | 
|  | 3076 | if (delta_usecs > 20000) { | 
|  | 3077 | /* | 
|  | 3078 | * Caveat for rotational devices: processes doing I/O | 
|  | 3079 | * in the slower disk zones tend to be slow(er) even | 
|  | 3080 | * if not seeky. In this respect, the estimated peak | 
|  | 3081 | * rate is likely to be an average over the disk | 
|  | 3082 | * surface. Accordingly, to not be too harsh with | 
|  | 3083 | * unlucky processes, a process is deemed slow only if | 
|  | 3084 | * its rate has been lower than half of the estimated | 
|  | 3085 | * peak rate. | 
|  | 3086 | */ | 
|  | 3087 | slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; | 
|  | 3088 | } | 
|  | 3089 |  | 
|  | 3090 | bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow); | 
|  | 3091 |  | 
|  | 3092 | return slow; | 
|  | 3093 | } | 
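|  |  |  | 
|  |  | /* | 
|  |  |  * Illustrative reading of the slowness test above (numbers are | 
|  |  |  * assumptions): the service-based check is applied only to | 
|  |  |  * intervals longer than 20 ms. If, say, bfq_max_budget is 16384 | 
|  |  |  * sectors and the queue obtained only 6000 sectors of service in | 
|  |  |  * its slot, then 6000 < 16384 / 2 and the queue is flagged as | 
|  |  |  * slow, which later leads bfq_bfqq_expire() to charge it with time | 
|  |  |  * rather than with the service it received. For shorter intervals | 
|  |  |  * the seekiness of the queue is used as a proxy instead. | 
|  |  |  */ | 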
|  | 3094 |  | 
|  | 3095 | /* | 
|  | 3096 | * To be deemed as soft real-time, an application must meet two | 
|  | 3097 | * requirements. First, the application must not require an average | 
|  | 3098 | * bandwidth higher than the approximate bandwidth required to play back or | 
|  | 3099 | * record a compressed high-definition video. | 
|  | 3100 | * The next function is invoked on the completion of the last request of a | 
|  | 3101 | * batch, to compute the next-start time instant, soft_rt_next_start, such | 
|  | 3102 | * that, if the next request of the application does not arrive before | 
|  | 3103 | * soft_rt_next_start, then the above requirement on the bandwidth is met. | 
|  | 3104 | * | 
|  | 3105 | * The second requirement is that the request pattern of the application is | 
|  | 3106 | * isochronous, i.e., that, after issuing a request or a batch of requests, | 
|  | 3107 | * the application stops issuing new requests until all its pending requests | 
|  | 3108 | * have been completed. After that, the application may issue a new batch, | 
|  | 3109 | * and so on. | 
|  | 3110 | * For this reason the next function is invoked to compute | 
|  | 3111 | * soft_rt_next_start only for applications that meet this requirement, | 
|  | 3112 | * whereas soft_rt_next_start is set to infinity for applications that do | 
|  | 3113 | * not. | 
|  | 3114 | * | 
|  | 3115 | * Unfortunately, even a greedy (i.e., I/O-bound) application may | 
|  | 3116 | * happen to meet, occasionally or systematically, both the above | 
|  | 3117 | * bandwidth and isochrony requirements. This may happen at least in | 
|  | 3118 | * the following circumstances. First, if the CPU load is high. The | 
|  | 3119 | * application may stop issuing requests while the CPUs are busy | 
|  | 3120 | * serving other processes, then restart, then stop again for a while, | 
|  | 3121 | * and so on. The other circumstances are related to the storage | 
|  | 3122 | * device: the storage device is highly loaded or reaches a low-enough | 
|  | 3123 | * throughput with the I/O of the application (e.g., because the I/O | 
|  | 3124 | * is random and/or the device is slow). In all these cases, the | 
|  | 3125 | * I/O of the application may be simply slowed down enough to meet | 
|  | 3126 | * the bandwidth and isochrony requirements. To reduce the probability | 
|  | 3127 | * that greedy applications are deemed as soft real-time in these | 
|  | 3128 | * corner cases, a further rule is used in the computation of | 
|  | 3129 | * soft_rt_next_start: the return value of this function is forced to | 
|  | 3130 | * be higher than the maximum between the following two quantities. | 
|  | 3131 | * | 
|  | 3132 | * (a) Current time plus: (1) the maximum time for which the arrival | 
|  | 3133 | *     of a request is waited for when a sync queue becomes idle, | 
|  | 3134 | *     namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We | 
|  | 3135 | *     postpone for a moment the reason for adding a few extra | 
|  | 3136 | *     jiffies; we get back to it after next item (b).  Lower-bounding | 
|  | 3137 | *     the return value of this function with the current time plus | 
|  | 3138 | *     bfqd->bfq_slice_idle tends to filter out greedy applications, | 
|  | 3139 | *     because the latter issue their next request as soon as possible | 
|  | 3140 | *     after the last one has been completed. In contrast, a soft | 
|  | 3141 | *     real-time application spends some time processing data, after a | 
|  | 3142 | *     batch of its requests has been completed. | 
|  | 3143 | * | 
|  | 3144 | * (b) Current value of bfqq->soft_rt_next_start. As pointed out | 
|  | 3145 | *     above, greedy applications may happen to meet both the | 
|  | 3146 | *     bandwidth and isochrony requirements under heavy CPU or | 
|  | 3147 | *     storage-device load. In more detail, in these scenarios, these | 
|  | 3148 | *     applications happen, only for limited time periods, to do I/O | 
|  | 3149 | *     slowly enough to meet all the requirements described so far, | 
|  | 3150 | *     including the filtering in above item (a). These slow-speed | 
|  | 3151 | *     time intervals are usually interspersed between other time | 
|  | 3152 | *     intervals during which these applications do I/O at a very high | 
|  | 3153 | *     speed. Fortunately, exactly because of the high speed of the | 
|  | 3154 | *     I/O in the high-speed intervals, the values returned by this | 
|  | 3155 | *     function happen to be so high, near the end of any such | 
|  | 3156 | *     high-speed interval, that they are likely to fall *after* the end of | 
|  | 3157 | *     the low-speed time interval that follows. These high values are | 
|  | 3158 | *     stored in bfqq->soft_rt_next_start after each invocation of | 
|  | 3159 | *     this function. As a consequence, if the last value of | 
|  | 3160 | *     bfqq->soft_rt_next_start is constantly used to lower-bound the | 
|  | 3161 | *     next value that this function may return, then, from the very | 
|  | 3162 | *     beginning of a low-speed interval, bfqq->soft_rt_next_start is | 
|  | 3163 | *     likely to be constantly kept so high that any I/O request | 
|  | 3164 | *     issued during the low-speed interval is considered as arriving | 
|  | 3165 | *     too soon for the application to be deemed as soft | 
|  | 3166 | *     real-time. Then, in the high-speed interval that follows, the | 
|  | 3167 | *     application will not be deemed as soft real-time, just because | 
|  | 3168 | *     it will do I/O at a high speed. And so on. | 
|  | 3169 | * | 
|  | 3170 | * Getting back to the filtering in item (a), in the following two | 
|  | 3171 | * cases this filtering might be easily passed by a greedy | 
|  | 3172 | * application, if the reference quantity was just | 
|  | 3173 | * bfqd->bfq_slice_idle: | 
|  | 3174 | * 1) HZ is so low that the duration of a jiffy is comparable to or | 
|  | 3175 | *    higher than bfqd->bfq_slice_idle. This happens, e.g., on slow | 
|  | 3176 | *    devices with HZ=100. The time granularity may be so coarse | 
|  | 3177 | *    that the approximation, in jiffies, of bfqd->bfq_slice_idle | 
|  | 3178 | *    is rather lower than the exact value. | 
|  | 3179 | * 2) jiffies, instead of increasing at a constant rate, may stop increasing | 
|  | 3180 | *    for a while, then suddenly 'jump' by several units to recover the lost | 
|  | 3181 | *    increments. This seems to happen, e.g., inside virtual machines. | 
|  | 3182 | * To address this issue, in the filtering in (a) we do not use as a | 
|  | 3183 | * reference time interval just bfqd->bfq_slice_idle, but | 
|  | 3184 | * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the | 
|  | 3185 | * minimum number of jiffies for which the filter seems to be quite | 
|  | 3186 | * precise also in embedded systems and KVM/QEMU virtual machines. | 
|  | 3187 | */ | 
|  | 3188 | static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, | 
|  | 3189 | struct bfq_queue *bfqq) | 
|  | 3190 | { | 
|  | 3191 | return max3(bfqq->soft_rt_next_start, | 
|  | 3192 | bfqq->last_idle_bklogged + | 
|  | 3193 | HZ * bfqq->service_from_backlogged / | 
|  | 3194 | bfqd->bfq_wr_max_softrt_rate, | 
|  | 3195 | jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); | 
|  | 3196 | } | 
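|  |  |  | 
|  |  | /* | 
|  |  |  * Worked example for the bandwidth bound above (values are | 
|  |  |  * assumptions): the middle term, | 
|  |  |  * last_idle_bklogged + HZ * service_from_backlogged / | 
|  |  |  * bfq_wr_max_softrt_rate, is the instant at which the service | 
|  |  |  * received since the queue last became both idle and backlogged | 
|  |  |  * would match the target soft real-time bandwidth. Assuming | 
|  |  |  * bfq_wr_max_softrt_rate = 7000 sectors/sec and a batch of 700 | 
|  |  |  * sectors, the next request must not arrive earlier than 100 ms | 
|  |  |  * after last_idle_bklogged for the queue to stay within that rate; | 
|  |  |  * the other two terms (the current soft_rt_next_start, and jiffies | 
|  |  |  * plus slice_idle plus 4 jiffies) can only push this bound further | 
|  |  |  * into the future, as explained in the long comment above. | 
|  |  |  */ | 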
|  | 3197 |  | 
|  | 3198 | static bool bfq_bfqq_injectable(struct bfq_queue *bfqq) | 
|  | 3199 | { | 
|  | 3200 | return BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && | 
|  | 3201 | blk_queue_nonrot(bfqq->bfqd->queue) && | 
|  | 3202 | bfqq->bfqd->hw_tag; | 
|  | 3203 | } | 
|  | 3204 |  | 
|  | 3205 | /** | 
|  | 3206 | * bfq_bfqq_expire - expire a queue. | 
|  | 3207 | * @bfqd: device owning the queue. | 
|  | 3208 | * @bfqq: the queue to expire. | 
|  | 3209 | * @compensate: if true, compensate for the time spent idling. | 
|  | 3210 | * @reason: the reason causing the expiration. | 
|  | 3211 | * | 
|  | 3212 | * If the process associated with bfqq does slow I/O (e.g., because it | 
|  | 3213 | * issues random requests), we charge bfqq with the time it has been | 
|  | 3214 | * in service instead of the service it has received (see | 
|  | 3215 | * bfq_bfqq_charge_time for details on how this goal is achieved). As | 
|  | 3216 | * a consequence, bfqq will typically get higher timestamps upon | 
|  | 3217 | * reactivation, and hence it will be rescheduled as if it had | 
|  | 3218 | * received more service than what it has actually received. In the | 
|  | 3219 | * end, bfqq receives less service in proportion to how slowly its | 
|  | 3220 | * associated process consumes its budgets (and hence how seriously it | 
|  | 3221 | * tends to lower the throughput). In addition, this time-charging | 
|  | 3222 | * strategy guarantees time fairness among slow processes. In | 
|  | 3223 | * contrast, if the process associated with bfqq is not slow, we | 
|  | 3224 | * charge bfqq exactly with the service it has received. | 
|  | 3225 | * | 
|  | 3226 | * Charging time to the first type of queues and the exact service to | 
|  | 3227 | * the other has the effect of using the WF2Q+ policy to schedule the | 
|  | 3228 | * former on a timeslice basis, without violating service domain | 
|  | 3229 | * guarantees among the latter. | 
|  | 3230 | */ | 
|  | 3231 | void bfq_bfqq_expire(struct bfq_data *bfqd, | 
|  | 3232 | struct bfq_queue *bfqq, | 
|  | 3233 | bool compensate, | 
|  | 3234 | enum bfqq_expiration reason) | 
|  | 3235 | { | 
|  | 3236 | bool slow; | 
|  | 3237 | unsigned long delta = 0; | 
|  | 3238 | struct bfq_entity *entity = &bfqq->entity; | 
|  | 3239 | int ref; | 
|  | 3240 |  | 
|  | 3241 | /* | 
|  | 3242 | * Check whether the process is slow (see bfq_bfqq_is_slow). | 
|  | 3243 | */ | 
|  | 3244 | slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta); | 
|  | 3245 |  | 
|  | 3246 | /* | 
|  | 3247 | * As above explained, charge slow (typically seeky) and | 
|  | 3248 | * timed-out queues with the time and not the service | 
|  | 3249 | * received, to favor sequential workloads. | 
|  | 3250 | * | 
|  | 3251 | * Processes doing I/O in the slower disk zones will tend to | 
|  | 3252 | * be slow(er) even if not seeky. Therefore, since the | 
|  | 3253 | * estimated peak rate is actually an average over the disk | 
|  | 3254 | * surface, these processes may timeout just for bad luck. To | 
|  | 3255 | * avoid punishing them, do not charge time to processes that | 
|  | 3256 | * succeeded in consuming at least 2/3 of their budget. This | 
|  | 3257 | * allows BFQ to preserve enough elasticity to still perform | 
|  | 3258 | * bandwidth, and not time, distribution with little unlucky | 
|  | 3259 | * or quasi-sequential processes. | 
|  | 3260 | */ | 
|  | 3261 | if (bfqq->wr_coeff == 1 && | 
|  | 3262 | (slow || | 
|  | 3263 | (reason == BFQQE_BUDGET_TIMEOUT && | 
|  | 3264 | bfq_bfqq_budget_left(bfqq) >=  entity->budget / 3))) | 
|  | 3265 | bfq_bfqq_charge_time(bfqd, bfqq, delta); | 
|  | 3266 |  | 
|  | 3267 | if (reason == BFQQE_TOO_IDLE && | 
|  | 3268 | entity->service <= 2 * entity->budget / 10) | 
|  | 3269 | bfq_clear_bfqq_IO_bound(bfqq); | 
|  | 3270 |  | 
|  | 3271 | if (bfqd->low_latency && bfqq->wr_coeff == 1) | 
|  | 3272 | bfqq->last_wr_start_finish = jiffies; | 
|  | 3273 |  | 
|  | 3274 | if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && | 
|  | 3275 | RB_EMPTY_ROOT(&bfqq->sort_list)) { | 
|  | 3276 | /* | 
|  | 3277 | * If we get here, and there are no outstanding | 
|  | 3278 | * requests, then the request pattern is isochronous | 
|  | 3279 | * (see the comments on the function | 
|  | 3280 | * bfq_bfqq_softrt_next_start()). Thus we can compute | 
|  | 3281 | * soft_rt_next_start. If, instead, the queue still | 
|  | 3282 | * has outstanding requests, then we have to wait for | 
|  | 3283 | * the completion of all the outstanding requests to | 
|  | 3284 | * discover whether the request pattern is actually | 
|  | 3285 | * isochronous. | 
|  | 3286 | */ | 
|  | 3287 | if (bfqq->dispatched == 0) | 
|  | 3288 | bfqq->soft_rt_next_start = | 
|  | 3289 | bfq_bfqq_softrt_next_start(bfqd, bfqq); | 
|  | 3290 | else { | 
|  | 3291 | /* | 
|  | 3292 | * Schedule an update of soft_rt_next_start to when | 
|  | 3293 | * the task may be discovered to be isochronous. | 
|  | 3294 | */ | 
|  | 3295 | bfq_mark_bfqq_softrt_update(bfqq); | 
|  | 3296 | } | 
|  | 3297 | } | 
|  | 3298 |  | 
|  | 3299 | bfq_log_bfqq(bfqd, bfqq, | 
|  | 3300 | "expire (%d, slow %d, num_disp %d, short_ttime %d)", reason, | 
|  | 3301 | slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq)); | 
|  | 3302 |  | 
|  | 3303 | /* | 
|  | 3304 | * Increase, decrease or leave budget unchanged according to | 
|  | 3305 | * reason. | 
|  | 3306 | */ | 
|  | 3307 | __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); | 
|  | 3308 | ref = bfqq->ref; | 
|  | 3309 | __bfq_bfqq_expire(bfqd, bfqq); | 
|  | 3310 |  | 
|  | 3311 | if (ref == 1) /* bfqq is gone, no more actions on it */ | 
|  | 3312 | return; | 
|  | 3313 |  | 
|  | 3314 | bfqq->injected_service = 0; | 
|  | 3315 |  | 
|  | 3316 | /* mark bfqq as waiting for a request only if a bic still points to it */ | 
|  | 3317 | if (!bfq_bfqq_busy(bfqq) && | 
|  | 3318 | reason != BFQQE_BUDGET_TIMEOUT && | 
|  | 3319 | reason != BFQQE_BUDGET_EXHAUSTED) { | 
|  | 3320 | bfq_mark_bfqq_non_blocking_wait_rq(bfqq); | 
|  | 3321 | /* | 
|  | 3322 | * Not setting service to 0, because, if the next rq | 
|  | 3323 | * arrives in time, the queue will go on receiving | 
|  | 3324 | * service with this same budget (as if it never expired) | 
|  | 3325 | */ | 
|  | 3326 | } else | 
|  | 3327 | entity->service = 0; | 
|  | 3328 |  | 
|  | 3329 | /* | 
|  | 3330 | * Reset the received-service counter for every parent entity. | 
|  | 3331 | * Differently from what happens with bfqq->entity.service, | 
|  | 3332 | * the resetting of this counter never needs to be postponed | 
|  | 3333 | * for parent entities. In fact, in case bfqq may have a | 
|  | 3334 | * chance to go on being served using the last, partially | 
|  | 3335 | * consumed budget, bfqq->entity.service needs to be kept, | 
|  | 3336 | * because if bfqq then actually goes on being served using | 
|  | 3337 | * the same budget, the last value of bfqq->entity.service is | 
|  | 3338 | * needed to properly decrement bfqq->entity.budget by the | 
|  | 3339 | * portion already consumed. In contrast, it is not necessary | 
|  | 3340 | * to keep entity->service for parent entities too, because | 
|  | 3341 | * the bubble up of the new value of bfqq->entity.budget will | 
|  | 3342 | * make sure that the budgets of parent entities are correct, | 
|  | 3343 | * even in case bfqq and thus parent entities go on receiving | 
|  | 3344 | * service with the same budget. | 
|  | 3345 | */ | 
|  | 3346 | entity = entity->parent; | 
|  | 3347 | for_each_entity(entity) | 
|  | 3348 | entity->service = 0; | 
|  | 3349 | } | 
|  | 3350 |  | 
|  | 3351 | /* | 
|  | 3352 | * Budget timeout is not implemented through a dedicated timer, but | 
|  | 3353 | * just checked on request arrivals and completions, as well as on | 
|  | 3354 | * idle timer expirations. | 
|  | 3355 | */ | 
|  | 3356 | static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) | 
|  | 3357 | { | 
|  | 3358 | return time_is_before_eq_jiffies(bfqq->budget_timeout); | 
|  | 3359 | } | 
|  | 3360 |  | 
|  | 3361 | /* | 
|  | 3362 | * If we expire a queue that is actively waiting (i.e., with the | 
|  | 3363 | * device idled) for the arrival of a new request, then we may incur | 
|  | 3364 | * the timestamp misalignment problem described in the body of the | 
|  | 3365 | * function __bfq_activate_entity. Hence we return true only if this | 
|  | 3366 | * condition does not hold, or if the queue is slow enough to deserve | 
|  | 3367 | * only to be kicked off for preserving a high throughput. | 
|  | 3368 | */ | 
|  | 3369 | static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) | 
|  | 3370 | { | 
|  | 3371 | bfq_log_bfqq(bfqq->bfqd, bfqq, | 
|  | 3372 | "may_budget_timeout: wait_request %d left %d timeout %d", | 
|  | 3373 | bfq_bfqq_wait_request(bfqq), | 
|  | 3374 | bfq_bfqq_budget_left(bfqq) >=  bfqq->entity.budget / 3, | 
|  | 3375 | bfq_bfqq_budget_timeout(bfqq)); | 
|  | 3376 |  | 
|  | 3377 | return (!bfq_bfqq_wait_request(bfqq) || | 
|  | 3378 | bfq_bfqq_budget_left(bfqq) >=  bfqq->entity.budget / 3) | 
|  | 3379 | && | 
|  | 3380 | bfq_bfqq_budget_timeout(bfqq); | 
|  | 3381 | } | 
|  | 3382 |  | 
|  | 3383 | /* | 
|  | 3384 | * For a queue that becomes empty, device idling is allowed only if | 
|  | 3385 | * this function returns true for the queue. As a consequence, since | 
|  | 3386 | * device idling plays a critical role in both throughput boosting and | 
|  | 3387 | * service guarantees, the return value of this function plays a | 
|  | 3388 | * critical role in both these aspects as well. | 
|  | 3389 | * | 
|  | 3390 | * In a nutshell, this function returns true only if idling is | 
|  | 3391 | * beneficial for throughput or, even if detrimental for throughput, | 
|  | 3392 | * idling is however necessary to preserve service guarantees (low | 
|  | 3393 | * latency, desired throughput distribution, ...). In particular, on | 
|  | 3394 | * NCQ-capable devices, this function tries to return false, so as to | 
|  | 3395 | * help keep the drives' internal queues full, whenever this helps the | 
|  | 3396 | * device boost the throughput without causing any service-guarantee | 
|  | 3397 | * issue. | 
|  | 3398 | * | 
|  | 3399 | * In more detail, the return value of this function is obtained by, | 
|  | 3400 | * first, computing a number of boolean variables that take into | 
|  | 3401 | * account throughput and service-guarantee issues, and, then, | 
|  | 3402 | * combining these variables in a logical expression. Most of the | 
|  | 3403 | * issues taken into account are not trivial. We discuss these issues | 
|  | 3404 | * individually while introducing the variables. | 
|  | 3405 | */ | 
|  | 3406 | static bool bfq_better_to_idle(struct bfq_queue *bfqq) | 
|  | 3407 | { | 
|  | 3408 | struct bfq_data *bfqd = bfqq->bfqd; | 
|  | 3409 | bool rot_without_queueing = | 
|  | 3410 | !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, | 
|  | 3411 | bfqq_sequential_and_IO_bound, | 
|  | 3412 | idling_boosts_thr, idling_boosts_thr_without_issues, | 
|  | 3413 | idling_needed_for_service_guarantees, | 
|  | 3414 | asymmetric_scenario; | 
|  | 3415 |  | 
|  | 3416 | if (bfqd->strict_guarantees) | 
|  | 3417 | return true; | 
|  | 3418 |  | 
|  | 3419 | /* | 
|  | 3420 | * Idling is performed only if slice_idle > 0. In addition, we | 
|  | 3421 | * do not idle if | 
|  | 3422 | * (a) bfqq is async | 
|  | 3423 | * (b) bfqq is in the idle io prio class: in this case we do | 
|  | 3424 | * not idle because we want to minimize the bandwidth that | 
|  | 3425 | * queues in this class can steal from higher-priority queues | 
|  | 3426 | */ | 
|  | 3427 | if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || | 
|  | 3428 | bfq_class_idle(bfqq)) | 
|  | 3429 | return false; | 
|  | 3430 |  | 
|  | 3431 | bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) && | 
|  | 3432 | bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq); | 
|  | 3433 |  | 
|  | 3434 | /* | 
|  | 3435 | * The next variable takes into account the cases where idling | 
|  | 3436 | * boosts the throughput. | 
|  | 3437 | * | 
|  | 3438 | * The value of the variable is computed considering, first, that | 
|  | 3439 | * idling is virtually always beneficial for the throughput if: | 
|  | 3440 | * (a) the device is not NCQ-capable and rotational, or | 
|  | 3441 | * (b) regardless of the presence of NCQ, the device is rotational and | 
|  | 3442 | *     the request pattern for bfqq is I/O-bound and sequential, or | 
|  | 3443 | * (c) regardless of whether it is rotational, the device is | 
|  | 3444 | *     not NCQ-capable and the request pattern for bfqq is | 
|  | 3445 | *     I/O-bound and sequential. | 
|  | 3446 | * | 
|  | 3447 | * Secondly, and in contrast to the above item (b), idling an | 
|  | 3448 | * NCQ-capable flash-based device would not boost the | 
|  | 3449 | * throughput even with sequential I/O; rather it would lower | 
|  | 3450 | * the throughput in proportion to how fast the device | 
|  | 3451 | * is. Accordingly, the next variable is true if any of the | 
|  | 3452 | * above conditions (a), (b) or (c) is true, and, in | 
|  | 3453 | * particular, happens to be false if bfqd is an NCQ-capable | 
|  | 3454 | * flash-based device. | 
|  | 3455 | */ | 
|  | 3456 | idling_boosts_thr = rot_without_queueing || | 
|  | 3457 | ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && | 
|  | 3458 | bfqq_sequential_and_IO_bound); | 
|  | 3459 |  | 
|  | 3460 | /* | 
|  | 3461 | * The value of the next variable, | 
|  | 3462 | * idling_boosts_thr_without_issues, is equal to that of | 
|  | 3463 | * idling_boosts_thr, unless a special case holds. In this | 
|  | 3464 | * special case, described below, idling may cause problems to | 
|  | 3465 | * weight-raised queues. | 
|  | 3466 | * | 
|  | 3467 | * When the request pool is saturated (e.g., in the presence | 
|  | 3468 | * of write hogs), if the processes associated with | 
|  | 3469 | * non-weight-raised queues ask for requests at a lower rate, | 
|  | 3470 | * then processes associated with weight-raised queues have a | 
|  | 3471 | * higher probability to get a request from the pool | 
|  | 3472 | * immediately (or at least soon) when they need one. Thus | 
|  | 3473 | * they have a higher probability to actually get a fraction | 
|  | 3474 | * of the device throughput proportional to their high | 
|  | 3475 | * weight. This is especially true with NCQ-capable drives, | 
|  | 3476 | * which enqueue several requests in advance, and further | 
|  | 3477 | * reorder internally-queued requests. | 
|  | 3478 | * | 
|  | 3479 | * For this reason, we force to false the value of | 
|  | 3480 | * idling_boosts_thr_without_issues if there are weight-raised | 
|  | 3481 | * busy queues. In this case, and if bfqq is not weight-raised, | 
|  | 3482 | * this guarantees that the device is not idled for bfqq (if, | 
|  | 3483 | * instead, bfqq is weight-raised, then idling will be | 
|  | 3484 | * guaranteed by another variable, see below). Combined with | 
|  | 3485 | * the timestamping rules of BFQ (see [1] for details), this | 
|  | 3486 | * behavior causes bfqq, and hence any sync non-weight-raised | 
|  | 3487 | * queue, to get a lower number of requests served, and thus | 
|  | 3488 | * to ask for a lower number of requests from the request | 
|  | 3489 | * pool, before the busy weight-raised queues get served | 
|  | 3490 | * again. This often mitigates starvation problems in the | 
|  | 3491 | * presence of heavy write workloads and NCQ, thereby | 
|  | 3492 | * guaranteeing a higher application and system responsiveness | 
|  | 3493 | * in these hostile scenarios. | 
|  | 3494 | */ | 
|  | 3495 | idling_boosts_thr_without_issues = idling_boosts_thr && | 
|  | 3496 | bfqd->wr_busy_queues == 0; | 
|  | 3497 |  | 
|  | 3498 | /* | 
|  | 3499 | * There is then a case where idling must be performed not | 
|  | 3500 | * for throughput concerns, but to preserve service | 
|  | 3501 | * guarantees. | 
|  | 3502 | * | 
|  | 3503 | * To introduce this case, we can note that allowing the drive | 
|  | 3504 | * to enqueue more than one request at a time, and hence | 
|  | 3505 | * delegating de facto final scheduling decisions to the | 
|  | 3506 | * drive's internal scheduler, entails loss of control over the | 
|  | 3507 | * actual request service order. In particular, the critical | 
|  | 3508 | * situation is when requests from different processes happen | 
|  | 3509 | * to be present, at the same time, in the internal queue(s) | 
|  | 3510 | * of the drive. In such a situation, the drive, by deciding | 
|  | 3511 | * the service order of the internally-queued requests, does | 
|  | 3512 | * determine also the actual throughput distribution among | 
|  | 3513 | * these processes. But the drive typically has no notion or | 
|  | 3514 | * concern about per-process throughput distribution, and | 
|  | 3515 | * makes its decisions only on a per-request basis. Therefore, | 
|  | 3516 | * the service distribution enforced by the drive's internal | 
|  | 3517 | * scheduler is likely to coincide with the desired | 
|  | 3518 | * device-throughput distribution only in a completely | 
|  | 3519 | * symmetric scenario where: | 
|  | 3520 | * (i)  each of these processes must get the same throughput as | 
|  | 3521 | *      the others; | 
|  | 3522 | * (ii) all these processes have the same I/O pattern | 
|  | 3523 | *      (either sequential or random). | 
|  | 3524 | * In fact, in such a scenario, the drive will tend to treat | 
|  | 3525 | * the requests of each of these processes in about the same | 
|  | 3526 | * way as the requests of the others, and thus to provide | 
|  | 3527 | * each of these processes with about the same throughput | 
|  | 3528 | * (which is exactly the desired throughput distribution). In | 
|  | 3529 | * contrast, in any asymmetric scenario, device idling is | 
|  | 3530 | * certainly needed to guarantee that bfqq receives its | 
|  | 3531 | * assigned fraction of the device throughput (see [1] for | 
|  | 3532 | * details). | 
|  | 3533 | * | 
|  | 3534 | * We address this issue by actually controlling only the | 
|  | 3535 | * symmetry sub-condition (i): provided that sub-condition | 
|  | 3536 | * (i) holds, idling is not performed, regardless of whether | 
|  | 3537 | * sub-condition (ii) holds. In other words, idling is | 
|  | 3538 | * allowed only if sub-condition (i) does not hold; in that | 
|  | 3539 | * case the device tends to be prevented from queueing | 
|  | 3540 | * many requests, possibly of several processes. The reason | 
|  | 3541 | * for not also controlling sub-condition (ii) is that we | 
|  | 3542 | * exploit preemption to preserve guarantees in case of | 
|  | 3543 | * symmetric scenarios, even if (ii) does not hold, as | 
|  | 3544 | * explained in the next two paragraphs. | 
|  | 3545 | * | 
|  | 3546 | * Even if a queue, say Q, is expired when it remains idle, Q | 
|  | 3547 | * can still preempt the new in-service queue if the next | 
|  | 3548 | * request of Q arrives soon (see the comments on | 
|  | 3549 | * bfq_bfqq_update_budg_for_activation). If all queues and | 
|  | 3550 | * groups have the same weight, this form of preemption, | 
|  | 3551 | * combined with the hole-recovery heuristic described in the | 
|  | 3552 | * comments on function bfq_bfqq_update_budg_for_activation, | 
|  | 3553 | * is enough to preserve a correct bandwidth distribution in | 
|  | 3554 | * the mid term, even without idling. In fact, even if not | 
|  | 3555 | * idling allows the internal queues of the device to contain | 
|  | 3556 | * many requests, and thus to reorder requests, we can rather | 
|  | 3557 | * safely assume that the internal scheduler still preserves a | 
|  | 3558 | * minimum of mid-term fairness. The motivation for using | 
|  | 3559 | * preemption instead of idling is that, by not idling, | 
|  | 3560 | * service guarantees are preserved without sacrificing any | 
|  | 3561 | * throughput. In other words, both a high | 
|  | 3562 | * throughput and its desired distribution are obtained. | 
|  | 3563 | * | 
|  | 3564 | * More precisely, this preemption-based, idleless approach | 
|  | 3565 | * provides fairness in terms of IOPS, and not sectors per | 
|  | 3566 | * second. This can be seen with a simple example. Suppose | 
|  | 3567 | * that there are two queues with the same weight, but that | 
|  | 3568 | * the first queue receives requests of 8 sectors, while the | 
|  | 3569 | * second queue receives requests of 1024 sectors. In | 
|  | 3570 | * addition, suppose that each of the two queues contains at | 
|  | 3571 | * most one request at a time, which implies that each queue | 
|  | 3572 | * always remains idle after it is served. Finally, after | 
|  | 3573 | * remaining idle, each queue receives very quickly a new | 
|  | 3574 | * request. It follows that the two queues are served | 
|  | 3575 | * alternatively, preempting each other if needed. This | 
|  | 3576 | * implies that, although both queues have the same weight, | 
|  | 3577 | * the queue with large requests receives a service that is | 
|  | 3578 | * 1024/8 times as high as the service received by the other | 
|  | 3579 | * queue. | 
|  | 3580 | * | 
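|  |  | * To put rough numbers on the example above (illustrative only): | 
|  |  | * in each round both queues complete exactly one request, so they | 
|  |  | * get the same IOPS; in the sector domain, however, the split is | 
|  |  | * 1024/(1024+8) ~= 99.2% versus 8/(1024+8) ~= 0.8%, i.e., the | 
|  |  | * 1024/8 = 128x imbalance mentioned above, despite equal weights. | 
|  |  | * | 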
|  | 3581 | * On the other hand, device idling is performed, and thus | 
|  | 3582 | * pure sector-domain guarantees are provided, for the | 
|  | 3583 | * following queues, which are likely to need stronger | 
|  | 3584 | * throughput guarantees: weight-raised queues, and queues | 
|  | 3585 | * with a higher weight than other queues. When such queues | 
|  | 3586 | * are active, sub-condition (i) is false, which triggers | 
|  | 3587 | * device idling. | 
|  | 3588 | * | 
|  | 3589 | * According to the above considerations, the next variable is | 
|  | 3590 | * true (only) if sub-condition (i) does not hold. To compute the | 
|  | 3591 | * value of this variable, we not only use the return value of | 
|  | 3592 | * the function bfq_symmetric_scenario(), but also check | 
|  | 3593 | * whether bfqq is being weight-raised, because | 
|  | 3594 | * bfq_symmetric_scenario() does not also take into account | 
|  | 3595 | * weight-raised queues (see comments on | 
|  | 3596 | * bfq_weights_tree_add()). In particular, if bfqq is being | 
|  | 3597 | * weight-raised, it is important to idle only if there are | 
|  | 3598 | * other, non-weight-raised queues that may steal throughput | 
|  | 3599 | * from bfqq. Actually, we should be even more precise, and | 
|  | 3600 | * differentiate between interactive weight raising and | 
|  | 3601 | * soft real-time weight raising. | 
|  | 3602 | * | 
|  | 3603 | * As a side note, it is worth considering that the above | 
|  | 3604 | * device-idling countermeasures may however fail in the | 
|  | 3605 | * following unlucky scenario: if idling is (correctly) | 
|  | 3606 | * disabled in a time period during which all symmetry | 
|  | 3607 | * sub-conditions hold, and hence the device is allowed to | 
|  | 3608 | * enqueue many requests, but at some later point in time some | 
|  | 3609 | * sub-condition ceases to hold, then it may become impossible | 
|  | 3610 | * to let requests be served in the desired order until all | 
|  | 3611 | * the requests already queued in the device have been served. | 
|  | 3612 | */ | 
|  | 3613 | asymmetric_scenario = (bfqq->wr_coeff > 1 && | 
|  | 3614 | bfqd->wr_busy_queues < bfqd->busy_queues) || | 
|  | 3615 | !bfq_symmetric_scenario(bfqd); | 
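|  |  |  | 
|  |  | /* | 
|  |  | * Hypothetical example of the first term above: if bfqq is being | 
|  |  | * weight-raised (wr_coeff > 1, typically 30 for interactive weight | 
|  |  | * raising in the default configuration) while at least one | 
|  |  | * non-weight-raised queue is also busy, then wr_busy_queues < | 
|  |  | * busy_queues and the scenario is treated as asymmetric, so idling | 
|  |  | * is deemed necessary to protect bfqq's privileged share. | 
|  |  | */ | 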
|  | 3616 |  | 
|  | 3617 | /* | 
|  | 3618 | * Finally, there is a case where maximizing throughput is the | 
|  | 3619 | * best choice even if it may cause unfairness toward | 
|  | 3620 | * bfqq. Such a case is when bfqq became active in a burst of | 
|  | 3621 | * queue activations. Queues that became active during a large | 
|  | 3622 | * burst benefit only from throughput, as discussed in the | 
|  | 3623 | * comments on bfq_handle_burst. Thus, if bfqq became active | 
|  | 3624 | * in a burst and not idling the device maximizes throughput, | 
|  | 3625 | * then the device must not be idled, because not idling the | 
|  | 3626 | * device provides bfqq and all other queues in the burst with | 
|  | 3627 | * maximum benefit. Combining this and the above case, we can | 
|  | 3628 | * now establish when idling is actually needed to preserve | 
|  | 3629 | * service guarantees. | 
|  | 3630 | */ | 
|  | 3631 | idling_needed_for_service_guarantees = | 
|  | 3632 | asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq); | 
|  | 3633 |  | 
|  | 3634 | /* | 
|  | 3635 | * We have now all the components we need to compute the | 
|  | 3636 | * return value of the function, which is true only if idling | 
|  | 3637 | * either boosts the throughput (without issues), or is | 
|  | 3638 | * necessary to preserve service guarantees. | 
|  | 3639 | */ | 
|  | 3640 | return idling_boosts_thr_without_issues || | 
|  | 3641 | idling_needed_for_service_guarantees; | 
|  | 3642 | } | 
|  | 3643 |  | 
|  | 3644 | /* | 
|  | 3645 | * If the in-service queue is empty but the function bfq_better_to_idle | 
|  | 3646 | * returns true, then: | 
|  | 3647 | * 1) the queue must remain in service and cannot be expired, and | 
|  | 3648 | * 2) the device must be idled to wait for the possible arrival of a new | 
|  | 3649 | *    request for the queue. | 
|  | 3650 | * See the comments on the function bfq_better_to_idle for the reasons | 
|  | 3651 | * why performing device idling is the best choice to boost the throughput | 
|  | 3652 | * and preserve service guarantees when bfq_better_to_idle itself | 
|  | 3653 | * returns true. | 
|  | 3654 | */ | 
|  | 3655 | static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) | 
|  | 3656 | { | 
|  | 3657 | return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq); | 
|  | 3658 | } | 
|  | 3659 |  | 
|  | 3660 | static struct bfq_queue *bfq_choose_bfqq_for_injection(struct bfq_data *bfqd) | 
|  | 3661 | { | 
|  | 3662 | struct bfq_queue *bfqq; | 
|  | 3663 |  | 
|  | 3664 | /* | 
|  | 3665 | * A linear search; but, with a high probability, very few | 
|  | 3666 | * steps are needed to find a candidate queue, i.e., a queue | 
|  | 3667 | * with enough budget left for its next request. In fact: | 
|  | 3668 | * - BFQ dynamically updates the budget of every queue so as | 
|  | 3669 | *   to accommodate the expected backlog of the queue; | 
|  | 3670 | * - if a queue gets all its requests dispatched as injected | 
|  | 3671 | *   service, then the queue is removed from the active list | 
|  | 3672 | *   (and re-added only if it gets new requests, but with | 
|  | 3673 | *   enough budget for its new backlog). | 
|  | 3674 | */ | 
|  | 3675 | list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) | 
|  | 3676 | if (!RB_EMPTY_ROOT(&bfqq->sort_list) && | 
|  | 3677 | bfq_serv_to_charge(bfqq->next_rq, bfqq) <= | 
|  | 3678 | bfq_bfqq_budget_left(bfqq)) | 
|  | 3679 | return bfqq; | 
|  | 3680 |  | 
|  | 3681 | return NULL; | 
|  | 3682 | } | 
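|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative numbers for the check above (hypothetical values): a | 
|  |  | * queue whose next request would be charged, say, 8 sectors by | 
|  |  | * bfq_serv_to_charge() while bfq_bfqq_budget_left() still reports | 
|  |  | * 4096 sectors passes the test and is returned as the injection | 
|  |  | * candidate; a queue whose remaining budget would not cover the | 
|  |  | * charge is skipped. | 
|  |  | */ | 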
|  | 3683 |  | 
|  | 3684 | /* | 
|  | 3685 | * Select a queue for service.  If we have a current queue in service, | 
|  | 3686 | * check whether to continue servicing it, or retrieve and set a new one. | 
|  | 3687 | */ | 
|  | 3688 | static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) | 
|  | 3689 | { | 
|  | 3690 | struct bfq_queue *bfqq; | 
|  | 3691 | struct request *next_rq; | 
|  | 3692 | enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT; | 
|  | 3693 |  | 
|  | 3694 | bfqq = bfqd->in_service_queue; | 
|  | 3695 | if (!bfqq) | 
|  | 3696 | goto new_queue; | 
|  | 3697 |  | 
|  | 3698 | bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); | 
|  | 3699 |  | 
|  | 3700 | /* | 
|  | 3701 | * Do not expire bfqq for budget timeout if bfqq may be about | 
|  | 3702 | * to enjoy device idling. The reason why, in this case, we | 
|  | 3703 | * prevent bfqq from expiring is the same as in the comments | 
|  | 3704 | * on the case where bfq_bfqq_must_idle() returns true, in | 
|  | 3705 | * bfq_completed_request(). | 
|  | 3706 | */ | 
|  | 3707 | if (bfq_may_expire_for_budg_timeout(bfqq) && | 
|  | 3708 | !bfq_bfqq_must_idle(bfqq)) | 
|  | 3709 | goto expire; | 
|  | 3710 |  | 
|  | 3711 | check_queue: | 
|  | 3712 | /* | 
|  | 3713 | * This loop is rarely executed more than once. Even when it | 
|  | 3714 | * happens, it is much more convenient to re-execute this loop | 
|  | 3715 | * than to return NULL and trigger a new dispatch to get a | 
|  | 3716 | * request served. | 
|  | 3717 | */ | 
|  | 3718 | next_rq = bfqq->next_rq; | 
|  | 3719 | /* | 
|  | 3720 | * If bfqq has requests queued and it has enough budget left to | 
|  | 3721 | * serve them, keep the queue, otherwise expire it. | 
|  | 3722 | */ | 
|  | 3723 | if (next_rq) { | 
|  | 3724 | if (bfq_serv_to_charge(next_rq, bfqq) > | 
|  | 3725 | bfq_bfqq_budget_left(bfqq)) { | 
|  | 3726 | /* | 
|  | 3727 | * Expire the queue for budget exhaustion, | 
|  | 3728 | * which makes sure that the next budget is | 
|  | 3729 | * enough to serve the next request, even if | 
|  | 3730 | * it comes from the fifo expired path. | 
|  | 3731 | */ | 
|  | 3732 | reason = BFQQE_BUDGET_EXHAUSTED; | 
|  | 3733 | goto expire; | 
|  | 3734 | } else { | 
|  | 3735 | /* | 
|  | 3736 | * The idle timer may be pending because we may | 
|  | 3737 | * not disable disk idling even when a new request | 
|  | 3738 | * arrives. | 
|  | 3739 | */ | 
|  | 3740 | if (bfq_bfqq_wait_request(bfqq)) { | 
|  | 3741 | /* | 
|  | 3742 | * If we get here: 1) at least one new request | 
|  | 3743 | * has arrived but we have not disabled the | 
|  | 3744 | * timer because the request was too small, | 
|  | 3745 | * and 2) the block layer has unplugged | 
|  | 3746 | * the device, causing the dispatch to be | 
|  | 3747 | * invoked. | 
|  | 3748 | * | 
|  | 3749 | * Since the device is unplugged, now the | 
|  | 3750 | * requests are probably large enough to | 
|  | 3751 | * provide a reasonable throughput. | 
|  | 3752 | * So we disable idling. | 
|  | 3753 | */ | 
|  | 3754 | bfq_clear_bfqq_wait_request(bfqq); | 
|  | 3755 | hrtimer_try_to_cancel(&bfqd->idle_slice_timer); | 
|  | 3756 | } | 
|  | 3757 | goto keep_queue; | 
|  | 3758 | } | 
|  | 3759 | } | 
|  | 3760 |  | 
|  | 3761 | /* | 
|  | 3762 | * No requests pending. However, if the in-service queue is idling | 
|  | 3763 | * for a new request, or has requests waiting for a completion and | 
|  | 3764 | * may idle after their completion, then keep it anyway. | 
|  | 3765 | * | 
|  | 3766 | * Yet, to boost throughput, inject service from other queues if | 
|  | 3767 | * possible. | 
|  | 3768 | */ | 
|  | 3769 | if (bfq_bfqq_wait_request(bfqq) || | 
|  | 3770 | (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) { | 
|  | 3771 | if (bfq_bfqq_injectable(bfqq) && | 
|  | 3772 | bfqq->injected_service * bfqq->inject_coeff < | 
|  | 3773 | bfqq->entity.service * 10) | 
|  | 3774 | bfqq = bfq_choose_bfqq_for_injection(bfqd); | 
|  | 3775 | else | 
|  | 3776 | bfqq = NULL; | 
|  | 3777 |  | 
|  | 3778 | goto keep_queue; | 
|  | 3779 | } | 
|  | 3780 |  | 
|  | 3781 | reason = BFQQE_NO_MORE_REQUESTS; | 
|  | 3782 | expire: | 
|  | 3783 | bfq_bfqq_expire(bfqd, bfqq, false, reason); | 
|  | 3784 | new_queue: | 
|  | 3785 | bfqq = bfq_set_in_service_queue(bfqd); | 
|  | 3786 | if (bfqq) { | 
|  | 3787 | bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue"); | 
|  | 3788 | goto check_queue; | 
|  | 3789 | } | 
|  | 3790 | keep_queue: | 
|  | 3791 | if (bfqq) | 
|  | 3792 | bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue"); | 
|  | 3793 | else | 
|  | 3794 | bfq_log(bfqd, "select_queue: no queue returned"); | 
|  | 3795 |  | 
|  | 3796 | return bfqq; | 
|  | 3797 | } | 
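|  |  |  | 
|  |  | /* | 
|  |  | * Rough sketch of the decision flow in bfq_select_queue() (a reading | 
|  |  | * aid, not additional logic): | 
|  |  | * | 
|  |  | *   no in-service queue                   -> pick a new one (new_queue) | 
|  |  | *   budget timeout and no idling needed   -> expire, then pick a new one | 
|  |  | *   next_rq present, charge > budget left -> expire (BUDGET_EXHAUSTED) | 
|  |  | *   next_rq present, enough budget        -> keep the queue | 
|  |  | *   no next_rq, but waiting for a request | 
|  |  | *   or idling worthwhile                  -> keep it, possibly injecting | 
|  |  | *                                            service from another queue | 
|  |  | *   otherwise                             -> expire (NO_MORE_REQUESTS) | 
|  |  | */ | 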
|  | 3798 |  | 
|  | 3799 | static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) | 
|  | 3800 | { | 
|  | 3801 | struct bfq_entity *entity = &bfqq->entity; | 
|  | 3802 |  | 
|  | 3803 | if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ | 
|  | 3804 | bfq_log_bfqq(bfqd, bfqq, | 
|  | 3805 | "raising period dur %u/%u msec, old coeff %u, w %d(%d)", | 
|  | 3806 | jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), | 
|  | 3807 | jiffies_to_msecs(bfqq->wr_cur_max_time), | 
|  | 3808 | bfqq->wr_coeff, | 
|  | 3809 | bfqq->entity.weight, bfqq->entity.orig_weight); | 
|  | 3810 |  | 
|  | 3811 | if (entity->prio_changed) | 
|  | 3812 | bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change"); | 
|  | 3813 |  | 
|  | 3814 | /* | 
|  | 3815 | * If the queue was activated in a burst, or too much | 
|  | 3816 | * time has elapsed from the beginning of this | 
|  | 3817 | * weight-raising period, then end weight raising. | 
|  | 3818 | */ | 
|  | 3819 | if (bfq_bfqq_in_large_burst(bfqq)) | 
|  | 3820 | bfq_bfqq_end_wr(bfqq); | 
|  | 3821 | else if (time_is_before_jiffies(bfqq->last_wr_start_finish + | 
|  | 3822 | bfqq->wr_cur_max_time)) { | 
|  | 3823 | if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time || | 
|  | 3824 | time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt + | 
|  | 3825 | bfq_wr_duration(bfqd))) | 
|  | 3826 | bfq_bfqq_end_wr(bfqq); | 
|  | 3827 | else { | 
|  | 3828 | switch_back_to_interactive_wr(bfqq, bfqd); | 
|  | 3829 | bfqq->entity.prio_changed = 1; | 
|  | 3830 | } | 
|  | 3831 | } | 
|  | 3832 | if (bfqq->wr_coeff > 1 && | 
|  | 3833 | bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time && | 
|  | 3834 | bfqq->service_from_wr > max_service_from_wr) { | 
|  | 3835 | /* see comments on max_service_from_wr */ | 
|  | 3836 | bfq_bfqq_end_wr(bfqq); | 
|  | 3837 | } | 
|  | 3838 | } | 
|  | 3839 | /* | 
|  | 3840 | * To improve latency (for this or other queues), immediately | 
|  | 3841 | * update weight both if it must be raised and if it must be | 
|  | 3842 | * lowered. Since entity may be on some active tree here, and | 
|  | 3843 | * might have a pending change of its ioprio class, invoke the | 
|  | 3844 | * next function with the last parameter unset (see the | 
|  | 3845 | * comments on the function). | 
|  | 3846 | */ | 
|  | 3847 | if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1)) | 
|  | 3848 | __bfq_entity_update_weight_prio(bfq_entity_service_tree(entity), | 
|  | 3849 | entity, false); | 
|  | 3850 | } | 
|  | 3851 |  | 
|  | 3852 | /* | 
|  | 3853 | * Dispatch next request from bfqq. | 
|  | 3854 | */ | 
|  | 3855 | static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd, | 
|  | 3856 | struct bfq_queue *bfqq) | 
|  | 3857 | { | 
|  | 3858 | struct request *rq = bfqq->next_rq; | 
|  | 3859 | unsigned long service_to_charge; | 
|  | 3860 |  | 
|  | 3861 | service_to_charge = bfq_serv_to_charge(rq, bfqq); | 
|  | 3862 |  | 
|  | 3863 | bfq_bfqq_served(bfqq, service_to_charge); | 
|  | 3864 |  | 
|  | 3865 | bfq_dispatch_remove(bfqd->queue, rq); | 
|  | 3866 |  | 
|  | 3867 | if (bfqq != bfqd->in_service_queue) { | 
|  | 3868 | if (likely(bfqd->in_service_queue)) | 
|  | 3869 | bfqd->in_service_queue->injected_service += | 
|  | 3870 | bfq_serv_to_charge(rq, bfqq); | 
|  | 3871 |  | 
|  | 3872 | goto return_rq; | 
|  | 3873 | } | 
|  | 3874 |  | 
|  | 3875 | /* | 
|  | 3876 | * If weight raising has to terminate for bfqq, then the next | 
|  | 3877 | * function causes an immediate update of bfqq's weight, | 
|  | 3878 | * without waiting for the next activation. As a consequence, on | 
|  | 3879 | * expiration, bfqq will be timestamped as if it had never been | 
|  | 3880 | * weight-raised during this service slot, even if it has | 
|  | 3881 | * received part or even most of the service as a | 
|  | 3882 | * weight-raised queue. This inflates bfqq's timestamps, which | 
|  | 3883 | * is beneficial, as bfqq is then more willing to leave the | 
|  | 3884 | * device immediately to possible other weight-raised queues. | 
|  | 3885 | */ | 
|  | 3886 | bfq_update_wr_data(bfqd, bfqq); | 
|  | 3887 |  | 
|  | 3888 | /* | 
|  | 3889 | * Expire bfqq, pretending that its budget expired, if bfqq | 
|  | 3890 | * belongs to CLASS_IDLE and other queues are waiting for | 
|  | 3891 | * service. | 
|  | 3892 | */ | 
|  | 3893 | if (!(bfqd->busy_queues > 1 && bfq_class_idle(bfqq))) | 
|  | 3894 | goto return_rq; | 
|  | 3895 |  | 
|  | 3896 | bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED); | 
|  | 3897 |  | 
|  | 3898 | return_rq: | 
|  | 3899 | return rq; | 
|  | 3900 | } | 
|  | 3901 |  | 
|  | 3902 | static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) | 
|  | 3903 | { | 
|  | 3904 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; | 
|  | 3905 |  | 
|  | 3906 | /* | 
|  | 3907 | * Avoiding lock: a race on bfqd->busy_queues should cause at | 
|  | 3908 | * most a call to dispatch for nothing | 
|  | 3909 | */ | 
|  | 3910 | return !list_empty_careful(&bfqd->dispatch) || | 
|  | 3911 | bfqd->busy_queues > 0; | 
|  | 3912 | } | 
|  | 3913 |  | 
|  | 3914 | static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) | 
|  | 3915 | { | 
|  | 3916 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; | 
|  | 3917 | struct request *rq = NULL; | 
|  | 3918 | struct bfq_queue *bfqq = NULL; | 
|  | 3919 |  | 
|  | 3920 | if (!list_empty(&bfqd->dispatch)) { | 
|  | 3921 | rq = list_first_entry(&bfqd->dispatch, struct request, | 
|  | 3922 | queuelist); | 
|  | 3923 | list_del_init(&rq->queuelist); | 
|  | 3924 |  | 
|  | 3925 | bfqq = RQ_BFQQ(rq); | 
|  | 3926 |  | 
|  | 3927 | if (bfqq) { | 
|  | 3928 | /* | 
|  | 3929 | * Increment counters here, because this | 
|  | 3930 | * dispatch does not follow the standard | 
|  | 3931 | * dispatch flow (where counters are | 
|  | 3932 | * incremented) | 
|  | 3933 | */ | 
|  | 3934 | bfqq->dispatched++; | 
|  | 3935 |  | 
|  | 3936 | goto inc_in_driver_start_rq; | 
|  | 3937 | } | 
|  | 3938 |  | 
|  | 3939 | /* | 
|  | 3940 | * We exploit the bfq_finish_requeue_request hook to | 
|  | 3941 | * decrement rq_in_driver, but | 
|  | 3942 | * bfq_finish_requeue_request will not be invoked on | 
|  | 3943 | * this request. So, to avoid unbalance, just start | 
|  | 3944 | * this request, without incrementing rq_in_driver. As | 
|  | 3945 | * a negative consequence, rq_in_driver is deceptively | 
|  | 3946 | * lower than it should be while this request is in | 
|  | 3947 | * service. This may cause bfq_schedule_dispatch to be | 
|  | 3948 | * invoked uselessly. | 
|  | 3949 | * | 
|  | 3950 | * As for implementing an exact solution, the | 
|  | 3951 | * bfq_finish_requeue_request hook, if defined, is | 
|  | 3952 | * probably invoked also on this request. So, by | 
|  | 3953 | * exploiting this hook, we could 1) increment | 
|  | 3954 | * rq_in_driver here, and 2) decrement it in | 
|  | 3955 | * bfq_finish_requeue_request. Such a solution would | 
|  | 3956 | * let the value of the counter be always accurate, | 
|  | 3957 | * but it would entail using an extra interface | 
|  | 3958 | * function. This cost seems higher than the benefit, | 
|  | 3959 | * given that the frequency of non-elevator-private | 
|  | 3960 | * requests is very low. | 
|  | 3961 | */ | 
|  | 3962 | goto start_rq; | 
|  | 3963 | } | 
|  | 3964 |  | 
|  | 3965 | bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); | 
|  | 3966 |  | 
|  | 3967 | if (bfqd->busy_queues == 0) | 
|  | 3968 | goto exit; | 
|  | 3969 |  | 
|  | 3970 | /* | 
|  | 3971 | * Force device to serve one request at a time if | 
|  | 3972 | * strict_guarantees is true. Forcing this service scheme is | 
|  | 3973 | * currently the ONLY way to guarantee that the request | 
|  | 3974 | * service order enforced by the scheduler is respected by a | 
|  | 3975 | * queueing device. Otherwise the device is free even to make | 
|  | 3976 | * some unlucky request wait for as long as the device | 
|  | 3977 | * wishes. | 
|  | 3978 | * | 
|  | 3979 | * Of course, serving one request at a time may cause loss of | 
|  | 3980 | * throughput. | 
|  | 3981 | */ | 
|  | 3982 | if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0) | 
|  | 3983 | goto exit; | 
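|  |  |  | 
|  |  | /* | 
|  |  | * Usage note (a sketch, assuming the standard BFQ sysfs layout): the | 
|  |  | * strict_guarantees flag checked above is normally toggled from user | 
|  |  | * space, e.g. with | 
|  |  | * | 
|  |  | *   echo 1 > /sys/block/<disk>/queue/iosched/strict_guarantees | 
|  |  | * | 
|  |  | * accepting the throughput loss described in the comment above. | 
|  |  | */ | 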
|  | 3984 |  | 
|  | 3985 | bfqq = bfq_select_queue(bfqd); | 
|  | 3986 | if (!bfqq) | 
|  | 3987 | goto exit; | 
|  | 3988 |  | 
|  | 3989 | rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq); | 
|  | 3990 |  | 
|  | 3991 | if (rq) { | 
|  | 3992 | inc_in_driver_start_rq: | 
|  | 3993 | bfqd->rq_in_driver++; | 
|  | 3994 | start_rq: | 
|  | 3995 | rq->rq_flags |= RQF_STARTED; | 
|  | 3996 | } | 
|  | 3997 | exit: | 
|  | 3998 | return rq; | 
|  | 3999 | } | 
|  | 4000 |  | 
|  | 4001 | #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP) | 
|  | 4002 | static void bfq_update_dispatch_stats(struct request_queue *q, | 
|  | 4003 | struct request *rq, | 
|  | 4004 | struct bfq_queue *in_serv_queue, | 
|  | 4005 | bool idle_timer_disabled) | 
|  | 4006 | { | 
|  | 4007 | struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL; | 
|  | 4008 |  | 
|  | 4009 | if (!idle_timer_disabled && !bfqq) | 
|  | 4010 | return; | 
|  | 4011 |  | 
|  | 4012 | /* | 
|  | 4013 | * rq and bfqq are guaranteed to exist until this function | 
|  | 4014 | * ends, for the following reasons. First, rq can be | 
|  | 4015 | * dispatched to the device, and then can be completed and | 
|  | 4016 | * freed, only after this function ends. Second, rq cannot be | 
|  | 4017 | * merged (and thus freed because of a merge) any longer, | 
|  | 4018 | * because it has already started. Thus rq cannot be freed | 
|  | 4019 | * before this function ends, and, since rq has a reference to | 
|  | 4020 | * bfqq, the same guarantee holds for bfqq too. | 
|  | 4021 | * | 
|  | 4022 | * In addition, the following queue lock guarantees that | 
|  | 4023 | * bfqq_group(bfqq) exists as well. | 
|  | 4024 | */ | 
|  | 4025 | spin_lock_irq(q->queue_lock); | 
|  | 4026 | if (idle_timer_disabled) | 
|  | 4027 | /* | 
|  | 4028 | * Since the idle timer has been disabled, | 
|  | 4029 | * in_serv_queue contained some request when | 
|  | 4030 | * __bfq_dispatch_request was invoked above, which | 
|  | 4031 | * implies that rq was picked exactly from | 
|  | 4032 | * in_serv_queue. Thus in_serv_queue == bfqq, and is | 
|  | 4033 | * therefore guaranteed to exist because of the above | 
|  | 4034 | * arguments. | 
|  | 4035 | */ | 
|  | 4036 | bfqg_stats_update_idle_time(bfqq_group(in_serv_queue)); | 
|  | 4037 | if (bfqq) { | 
|  | 4038 | struct bfq_group *bfqg = bfqq_group(bfqq); | 
|  | 4039 |  | 
|  | 4040 | bfqg_stats_update_avg_queue_size(bfqg); | 
|  | 4041 | bfqg_stats_set_start_empty_time(bfqg); | 
|  | 4042 | bfqg_stats_update_io_remove(bfqg, rq->cmd_flags); | 
|  | 4043 | } | 
|  | 4044 | spin_unlock_irq(q->queue_lock); | 
|  | 4045 | } | 
|  | 4046 | #else | 
|  | 4047 | static inline void bfq_update_dispatch_stats(struct request_queue *q, | 
|  | 4048 | struct request *rq, | 
|  | 4049 | struct bfq_queue *in_serv_queue, | 
|  | 4050 | bool idle_timer_disabled) {} | 
|  | 4051 | #endif | 
|  | 4052 |  | 
|  | 4053 | static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) | 
|  | 4054 | { | 
|  | 4055 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; | 
|  | 4056 | struct request *rq; | 
|  | 4057 | struct bfq_queue *in_serv_queue; | 
|  | 4058 | bool waiting_rq, idle_timer_disabled; | 
|  | 4059 |  | 
|  | 4060 | spin_lock_irq(&bfqd->lock); | 
|  | 4061 |  | 
|  | 4062 | in_serv_queue = bfqd->in_service_queue; | 
|  | 4063 | waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue); | 
|  | 4064 |  | 
|  | 4065 | rq = __bfq_dispatch_request(hctx); | 
|  | 4066 |  | 
|  | 4067 | idle_timer_disabled = | 
|  | 4068 | waiting_rq && !bfq_bfqq_wait_request(in_serv_queue); | 
|  | 4069 |  | 
|  | 4070 | spin_unlock_irq(&bfqd->lock); | 
|  | 4071 |  | 
|  | 4072 | bfq_update_dispatch_stats(hctx->queue, rq, in_serv_queue, | 
|  | 4073 | idle_timer_disabled); | 
|  | 4074 |  | 
|  | 4075 | return rq; | 
|  | 4076 | } | 
|  | 4077 |  | 
|  | 4078 | /* | 
|  | 4079 | * Task holds one reference to the queue, dropped when task exits.  Each rq | 
|  | 4080 | * in-flight on this queue also holds a reference, dropped when rq is freed. | 
|  | 4081 | * | 
|  | 4082 | * Scheduler lock must be held here. Recall not to use bfqq after calling | 
|  | 4083 | * this function on it. | 
|  | 4084 | */ | 
|  | 4085 | void bfq_put_queue(struct bfq_queue *bfqq) | 
|  | 4086 | { | 
|  | 4087 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | 
|  | 4088 | struct bfq_group *bfqg = bfqq_group(bfqq); | 
|  | 4089 | #endif | 
|  | 4090 |  | 
|  | 4091 | if (bfqq->bfqd) | 
|  | 4092 | bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", | 
|  | 4093 | bfqq, bfqq->ref); | 
|  | 4094 |  | 
|  | 4095 | bfqq->ref--; | 
|  | 4096 | if (bfqq->ref) | 
|  | 4097 | return; | 
|  | 4098 |  | 
|  | 4099 | if (!hlist_unhashed(&bfqq->burst_list_node)) { | 
|  | 4100 | hlist_del_init(&bfqq->burst_list_node); | 
|  | 4101 | /* | 
|  | 4102 | * Also decrement the burst size after the removal, if the | 
|  | 4103 | * process associated with bfqq is exiting, and thus | 
|  | 4104 | * does not contribute to the burst any longer. This | 
|  | 4105 | * decrement helps filter out false positives of large | 
|  | 4106 | * bursts, when some short-lived process (often due to | 
|  | 4107 | * the execution of commands by some service) happens | 
|  | 4108 | * to start and exit while a complex application is | 
|  | 4109 | * starting, and thus spawning several processes that | 
|  | 4110 | * do I/O (and that *must not* be treated as a large | 
|  | 4111 | * burst, see comments on bfq_handle_burst). | 
|  | 4112 | * | 
|  | 4113 | * In particular, the decrement is performed only if: | 
|  | 4114 | * 1) bfqq is not a merged queue, because, if it is, | 
|  | 4115 | * then this free of bfqq is not triggered by the exit | 
|  | 4116 | * of the process bfqq is associated with, but exactly | 
|  | 4117 | * by the fact that bfqq has just been merged. | 
|  | 4118 | * 2) burst_size is greater than 0, to handle | 
|  | 4119 | * unbalanced decrements. Unbalanced decrements may | 
|  | 4120 | * happen in the following case: bfqq is inserted into | 
|  | 4121 | * the current burst list--without incrementing | 
|  | 4122 | * burst_size--because of a split, but the current | 
|  | 4123 | * burst list is not the burst list bfqq belonged to | 
|  | 4124 | * (see comments on the case of a split in | 
|  | 4125 | * bfq_set_request). | 
|  | 4126 | */ | 
|  | 4127 | if (bfqq->bic && bfqq->bfqd->burst_size > 0) | 
|  | 4128 | bfqq->bfqd->burst_size--; | 
|  | 4129 | } | 
|  | 4130 |  | 
|  | 4131 | kmem_cache_free(bfq_pool, bfqq); | 
|  | 4132 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | 
|  | 4133 | bfqg_and_blkg_put(bfqg); | 
|  | 4134 | #endif | 
|  | 4135 | } | 
|  | 4136 |  | 
|  | 4137 | static void bfq_put_cooperator(struct bfq_queue *bfqq) | 
|  | 4138 | { | 
|  | 4139 | struct bfq_queue *__bfqq, *next; | 
|  | 4140 |  | 
|  | 4141 | /* | 
|  | 4142 | * If this queue was scheduled to merge with another queue, be | 
|  | 4143 | * sure to drop the reference taken on that queue (and others in | 
|  | 4144 | * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs. | 
|  | 4145 | */ | 
|  | 4146 | __bfqq = bfqq->new_bfqq; | 
|  | 4147 | while (__bfqq) { | 
|  | 4148 | if (__bfqq == bfqq) | 
|  | 4149 | break; | 
|  | 4150 | next = __bfqq->new_bfqq; | 
|  | 4151 | bfq_put_queue(__bfqq); | 
|  | 4152 | __bfqq = next; | 
|  | 4153 | } | 
|  | 4154 | } | 
|  | 4155 |  | 
|  | 4156 | static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) | 
|  | 4157 | { | 
|  | 4158 | if (bfqq == bfqd->in_service_queue) { | 
|  | 4159 | __bfq_bfqq_expire(bfqd, bfqq); | 
|  | 4160 | bfq_schedule_dispatch(bfqd); | 
|  | 4161 | } | 
|  | 4162 |  | 
|  | 4163 | bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); | 
|  | 4164 |  | 
|  | 4165 | bfq_put_cooperator(bfqq); | 
|  | 4166 |  | 
|  | 4167 | bfq_put_queue(bfqq); /* release process reference */ | 
|  | 4168 | } | 
|  | 4169 |  | 
|  | 4170 | static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) | 
|  | 4171 | { | 
|  | 4172 | struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync); | 
|  | 4173 | struct bfq_data *bfqd; | 
|  | 4174 |  | 
|  | 4175 | if (bfqq) | 
|  | 4176 | bfqd = bfqq->bfqd; /* NULL if scheduler already exited */ | 
|  | 4177 |  | 
|  | 4178 | if (bfqq && bfqd) { | 
|  | 4179 | unsigned long flags; | 
|  | 4180 |  | 
|  | 4181 | spin_lock_irqsave(&bfqd->lock, flags); | 
|  | 4182 | bfqq->bic = NULL; | 
|  | 4183 | bfq_exit_bfqq(bfqd, bfqq); | 
|  | 4184 | bic_set_bfqq(bic, NULL, is_sync); | 
|  | 4185 | spin_unlock_irqrestore(&bfqd->lock, flags); | 
|  | 4186 | } | 
|  | 4187 | } | 
|  | 4188 |  | 
|  | 4189 | static void bfq_exit_icq(struct io_cq *icq) | 
|  | 4190 | { | 
|  | 4191 | struct bfq_io_cq *bic = icq_to_bic(icq); | 
|  | 4192 |  | 
|  | 4193 | bfq_exit_icq_bfqq(bic, true); | 
|  | 4194 | bfq_exit_icq_bfqq(bic, false); | 
|  | 4195 | } | 
|  | 4196 |  | 
|  | 4197 | /* | 
|  | 4198 | * Update the entity prio values; note that the new values will not | 
|  | 4199 | * be used until the next (re)activation. | 
|  | 4200 | */ | 
|  | 4201 | static void | 
|  | 4202 | bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) | 
|  | 4203 | { | 
|  | 4204 | struct task_struct *tsk = current; | 
|  | 4205 | int ioprio_class; | 
|  | 4206 | struct bfq_data *bfqd = bfqq->bfqd; | 
|  | 4207 |  | 
|  | 4208 | if (!bfqd) | 
|  | 4209 | return; | 
|  | 4210 |  | 
|  | 4211 | ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); | 
|  | 4212 | switch (ioprio_class) { | 
|  | 4213 | default: | 
|  | 4214 | dev_err(bfqq->bfqd->queue->backing_dev_info->dev, | 
|  | 4215 | "bfq: bad prio class %d\n", ioprio_class); | 
|  | 4216 | /* fall through */ | 
|  | 4217 | case IOPRIO_CLASS_NONE: | 
|  | 4218 | /* | 
|  | 4219 | * No prio set, inherit CPU scheduling settings. | 
|  | 4220 | */ | 
|  | 4221 | bfqq->new_ioprio = task_nice_ioprio(tsk); | 
|  | 4222 | bfqq->new_ioprio_class = task_nice_ioclass(tsk); | 
|  | 4223 | break; | 
|  | 4224 | case IOPRIO_CLASS_RT: | 
|  | 4225 | bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); | 
|  | 4226 | bfqq->new_ioprio_class = IOPRIO_CLASS_RT; | 
|  | 4227 | break; | 
|  | 4228 | case IOPRIO_CLASS_BE: | 
|  | 4229 | bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio); | 
|  | 4230 | bfqq->new_ioprio_class = IOPRIO_CLASS_BE; | 
|  | 4231 | break; | 
|  | 4232 | case IOPRIO_CLASS_IDLE: | 
|  | 4233 | bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE; | 
|  | 4234 | bfqq->new_ioprio = 7; | 
|  | 4235 | break; | 
|  | 4236 | } | 
|  | 4237 |  | 
|  | 4238 | if (bfqq->new_ioprio >= IOPRIO_BE_NR) { | 
|  | 4239 | pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n", | 
|  | 4240 | bfqq->new_ioprio); | 
|  | 4241 | bfqq->new_ioprio = IOPRIO_BE_NR; | 
|  | 4242 | } | 
|  | 4243 |  | 
|  | 4244 | bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); | 
|  | 4245 | bfqq->entity.prio_changed = 1; | 
|  | 4246 | } | 
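|  |  |  | 
|  |  | /* | 
|  |  | * For reference, a sketch of the conversion performed by | 
|  |  | * bfq_ioprio_to_weight() (defined elsewhere; the exact coefficient is | 
|  |  | * not repeated here): the weight grows linearly as the ioprio level | 
|  |  | * decreases, roughly weight = (IOPRIO_BE_NR - ioprio) * coefficient, | 
|  |  | * so ioprio 0 maps to the largest weight and ioprio 7 to the | 
|  |  | * smallest. | 
|  |  | */ | 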
|  | 4247 |  | 
|  | 4248 | static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, | 
|  | 4249 | struct bio *bio, bool is_sync, | 
|  | 4250 | struct bfq_io_cq *bic); | 
|  | 4251 |  | 
|  | 4252 | static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) | 
|  | 4253 | { | 
|  | 4254 | struct bfq_data *bfqd = bic_to_bfqd(bic); | 
|  | 4255 | struct bfq_queue *bfqq; | 
|  | 4256 | int ioprio = bic->icq.ioc->ioprio; | 
|  | 4257 |  | 
|  | 4258 | /* | 
|  | 4259 | * This condition may trigger on a newly created bic; be sure to | 
|  | 4260 | * drop the lock before returning. | 
|  | 4261 | */ | 
|  | 4262 | if (unlikely(!bfqd) || likely(bic->ioprio == ioprio)) | 
|  | 4263 | return; | 
|  | 4264 |  | 
|  | 4265 | bic->ioprio = ioprio; | 
|  | 4266 |  | 
|  | 4267 | bfqq = bic_to_bfqq(bic, false); | 
|  | 4268 | if (bfqq) { | 
|  | 4269 | /* release process reference on this queue */ | 
|  | 4270 | bfq_put_queue(bfqq); | 
|  | 4271 | bfqq = bfq_get_queue(bfqd, bio, BLK_RW_ASYNC, bic); | 
|  | 4272 | bic_set_bfqq(bic, bfqq, false); | 
|  | 4273 | } | 
|  | 4274 |  | 
|  | 4275 | bfqq = bic_to_bfqq(bic, true); | 
|  | 4276 | if (bfqq) | 
|  | 4277 | bfq_set_next_ioprio_data(bfqq, bic); | 
|  | 4278 | } | 
|  | 4279 |  | 
|  | 4280 | static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, | 
|  | 4281 | struct bfq_io_cq *bic, pid_t pid, int is_sync) | 
|  | 4282 | { | 
|  | 4283 | RB_CLEAR_NODE(&bfqq->entity.rb_node); | 
|  | 4284 | INIT_LIST_HEAD(&bfqq->fifo); | 
|  | 4285 | INIT_HLIST_NODE(&bfqq->burst_list_node); | 
|  | 4286 |  | 
|  | 4287 | bfqq->ref = 0; | 
|  | 4288 | bfqq->bfqd = bfqd; | 
|  | 4289 |  | 
|  | 4290 | if (bic) | 
|  | 4291 | bfq_set_next_ioprio_data(bfqq, bic); | 
|  | 4292 |  | 
|  | 4293 | if (is_sync) { | 
|  | 4294 | /* | 
|  | 4295 | * No need to mark as has_short_ttime if in | 
|  | 4296 | * idle_class, because no device idling is performed | 
|  | 4297 | * for queues in idle class | 
|  | 4298 | */ | 
|  | 4299 | if (!bfq_class_idle(bfqq)) | 
|  | 4300 | /* tentatively mark as has_short_ttime */ | 
|  | 4301 | bfq_mark_bfqq_has_short_ttime(bfqq); | 
|  | 4302 | bfq_mark_bfqq_sync(bfqq); | 
|  | 4303 | bfq_mark_bfqq_just_created(bfqq); | 
|  | 4304 | /* | 
|  | 4305 | * Aggressively inject a lot of service: up to 90%. | 
|  | 4306 | * This coefficient remains constant during bfqq life, | 
|  | 4307 | * but this behavior might be changed, after enough | 
|  | 4308 | * testing and tuning. | 
|  | 4309 | */ | 
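|  |  | /* | 
|  |  | * Worked example of the 90% figure above (illustrative): the | 
|  |  | * dispatch path in bfq_select_queue() allows injection while | 
|  |  | * | 
|  |  | *   injected_service * inject_coeff < entity.service * 10, | 
|  |  | * | 
|  |  | * so with inject_coeff == 1 injection may go on until the injected | 
|  |  | * service reaches about ten times the service received by bfqq | 
|  |  | * itself, i.e., roughly 10/11 (~90%) of the overall service. | 
|  |  | */ | 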
|  | 4310 | bfqq->inject_coeff = 1; | 
|  | 4311 | } else | 
|  | 4312 | bfq_clear_bfqq_sync(bfqq); | 
|  | 4313 |  | 
|  | 4314 | /* set end request to minus infinity from now */ | 
|  | 4315 | bfqq->ttime.last_end_request = ktime_get_ns() + 1; | 
|  | 4316 |  | 
|  | 4317 | bfq_mark_bfqq_IO_bound(bfqq); | 
|  | 4318 |  | 
|  | 4319 | bfqq->pid = pid; | 
|  | 4320 |  | 
|  | 4321 | /* Tentative initial value to trade off between thr and lat */ | 
|  | 4322 | bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; | 
|  | 4323 | bfqq->budget_timeout = bfq_smallest_from_now(); | 
|  | 4324 |  | 
|  | 4325 | bfqq->wr_coeff = 1; | 
|  | 4326 | bfqq->last_wr_start_finish = jiffies; | 
|  | 4327 | bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now(); | 
|  | 4328 | bfqq->split_time = bfq_smallest_from_now(); | 
|  | 4329 |  | 
|  | 4330 | /* | 
|  | 4331 | * To not forget the possibly high bandwidth consumed by a | 
|  | 4332 | * process/queue in the recent past, | 
|  | 4333 | * bfq_bfqq_softrt_next_start() returns a value at least equal | 
|  | 4334 | * to the current value of bfqq->soft_rt_next_start (see | 
|  | 4335 | * comments on bfq_bfqq_softrt_next_start).  Set | 
|  | 4336 | * soft_rt_next_start to now, to mean that bfqq has consumed | 
|  | 4337 | * no bandwidth so far. | 
|  | 4338 | */ | 
|  | 4339 | bfqq->soft_rt_next_start = jiffies; | 
|  | 4340 |  | 
|  | 4341 | /* first request is almost certainly seeky */ | 
|  | 4342 | bfqq->seek_history = 1; | 
|  | 4343 | } | 
|  | 4344 |  | 
|  | 4345 | static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, | 
|  | 4346 | struct bfq_group *bfqg, | 
|  | 4347 | int ioprio_class, int ioprio) | 
|  | 4348 | { | 
|  | 4349 | switch (ioprio_class) { | 
|  | 4350 | case IOPRIO_CLASS_RT: | 
|  | 4351 | return &bfqg->async_bfqq[0][ioprio]; | 
|  | 4352 | case IOPRIO_CLASS_NONE: | 
|  | 4353 | ioprio = IOPRIO_NORM; | 
|  | 4354 | /* fall through */ | 
|  | 4355 | case IOPRIO_CLASS_BE: | 
|  | 4356 | return &bfqg->async_bfqq[1][ioprio]; | 
|  | 4357 | case IOPRIO_CLASS_IDLE: | 
|  | 4358 | return &bfqg->async_idle_bfqq; | 
|  | 4359 | default: | 
|  | 4360 | return NULL; | 
|  | 4361 | } | 
|  | 4362 | } | 
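|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative mapping (hypothetical request): IOPRIO_CLASS_BE at | 
|  |  | * level 4 selects &bfqg->async_bfqq[1][4]; IOPRIO_CLASS_NONE falls | 
|  |  | * back to the BE row at IOPRIO_NORM; all IOPRIO_CLASS_IDLE requests | 
|  |  | * of the group share the single async_idle_bfqq slot. | 
|  |  | */ | 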
|  | 4363 |  | 
|  | 4364 | static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, | 
|  | 4365 | struct bio *bio, bool is_sync, | 
|  | 4366 | struct bfq_io_cq *bic) | 
|  | 4367 | { | 
|  | 4368 | const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio); | 
|  | 4369 | const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); | 
|  | 4370 | struct bfq_queue **async_bfqq = NULL; | 
|  | 4371 | struct bfq_queue *bfqq; | 
|  | 4372 | struct bfq_group *bfqg; | 
|  | 4373 |  | 
|  | 4374 | rcu_read_lock(); | 
|  | 4375 |  | 
|  | 4376 | bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio)); | 
|  | 4377 | if (!bfqg) { | 
|  | 4378 | bfqq = &bfqd->oom_bfqq; | 
|  | 4379 | goto out; | 
|  | 4380 | } | 
|  | 4381 |  | 
|  | 4382 | if (!is_sync) { | 
|  | 4383 | async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, | 
|  | 4384 | ioprio); | 
|  | 4385 | bfqq = *async_bfqq; | 
|  | 4386 | if (bfqq) | 
|  | 4387 | goto out; | 
|  | 4388 | } | 
|  | 4389 |  | 
|  | 4390 | bfqq = kmem_cache_alloc_node(bfq_pool, | 
|  | 4391 | GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN, | 
|  | 4392 | bfqd->queue->node); | 
|  | 4393 |  | 
|  | 4394 | if (bfqq) { | 
|  | 4395 | bfq_init_bfqq(bfqd, bfqq, bic, current->pid, | 
|  | 4396 | is_sync); | 
|  | 4397 | bfq_init_entity(&bfqq->entity, bfqg); | 
|  | 4398 | bfq_log_bfqq(bfqd, bfqq, "allocated"); | 
|  | 4399 | } else { | 
|  | 4400 | bfqq = &bfqd->oom_bfqq; | 
|  | 4401 | bfq_log_bfqq(bfqd, bfqq, "using oom bfqq"); | 
|  | 4402 | goto out; | 
|  | 4403 | } | 
|  | 4404 |  | 
|  | 4405 | /* | 
|  | 4406 | * Pin the queue now that it's allocated, scheduler exit will | 
|  | 4407 | * prune it. | 
|  | 4408 | */ | 
|  | 4409 | if (async_bfqq) { | 
|  | 4410 | bfqq->ref++; /* | 
|  | 4411 | * Extra group reference, w.r.t. sync | 
|  | 4412 | * queue. This extra reference is removed | 
|  | 4413 | * only if bfqq->bfqg disappears, to | 
|  | 4414 | * guarantee that this queue is not freed | 
|  | 4415 | * until its group goes away. | 
|  | 4416 | */ | 
|  | 4417 | bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", | 
|  | 4418 | bfqq, bfqq->ref); | 
|  | 4419 | *async_bfqq = bfqq; | 
|  | 4420 | } | 
|  | 4421 |  | 
|  | 4422 | out: | 
|  | 4423 | bfqq->ref++; /* get a process reference to this queue */ | 
|  | 4424 | bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref); | 
|  | 4425 | rcu_read_unlock(); | 
|  | 4426 | return bfqq; | 
|  | 4427 | } | 
|  | 4428 |  | 
|  | 4429 | static void bfq_update_io_thinktime(struct bfq_data *bfqd, | 
|  | 4430 | struct bfq_queue *bfqq) | 
|  | 4431 | { | 
|  | 4432 | struct bfq_ttime *ttime = &bfqq->ttime; | 
|  | 4433 | u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request; | 
|  | 4434 |  | 
|  | 4435 | elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle); | 
|  | 4436 |  | 
|  | 4437 | ttime->ttime_samples = (7*bfqq->ttime.ttime_samples + 256) / 8; | 
|  | 4438 | ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed,  8); | 
|  | 4439 | ttime->ttime_mean = div64_ul(ttime->ttime_total + 128, | 
|  | 4440 | ttime->ttime_samples); | 
|  | 4441 | } | 
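|  |  |  | 
|  |  | /* | 
|  |  | * Sketch of the math above (no additional logic): the updates | 
|  |  | * implement a fixed-point exponential moving average with decay 7/8 | 
|  |  | * and scale 256: ttime_samples converges towards 256, ttime_total | 
|  |  | * towards 256 times the decayed average of the elapsed inter-request | 
|  |  | * times, so ttime_mean approximates that decayed average. Capping | 
|  |  | * elapsed at 2 * bfq_slice_idle keeps one long idle period from | 
|  |  | * dominating the estimate. | 
|  |  | */ | 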
|  | 4442 |  | 
|  | 4443 | static void | 
|  | 4444 | bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, | 
|  | 4445 | struct request *rq) | 
|  | 4446 | { | 
|  | 4447 | bfqq->seek_history <<= 1; | 
|  | 4448 | bfqq->seek_history |= | 
|  | 4449 | get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR && | 
|  | 4450 | (!blk_queue_nonrot(bfqd->queue) || | 
|  | 4451 | blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT); | 
|  | 4452 | } | 
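|  |  |  | 
|  |  | /* | 
|  |  | * seek_history thus acts as a shift register of per-request | 
|  |  | * "seekiness" bits: a 1 is recorded when the distance from the | 
|  |  | * previous request exceeds BFQQ_SEEK_THR and, on non-rotational | 
|  |  | * devices, only if the request is also smaller than | 
|  |  | * BFQQ_SECT_THR_NONROT. BFQQ_SEEKY(), used below, classifies the | 
|  |  | * queue as seeky when enough of these bits are set (the exact | 
|  |  | * threshold is given by the macro's definition, not repeated here). | 
|  |  | */ | 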
|  | 4453 |  | 
|  | 4454 | static void bfq_update_has_short_ttime(struct bfq_data *bfqd, | 
|  | 4455 | struct bfq_queue *bfqq, | 
|  | 4456 | struct bfq_io_cq *bic) | 
|  | 4457 | { | 
|  | 4458 | bool has_short_ttime = true; | 
|  | 4459 |  | 
|  | 4460 | /* | 
|  | 4461 | * No need to update has_short_ttime if bfqq is async or in | 
|  | 4462 | * idle io prio class, or if bfq_slice_idle is zero, because | 
|  | 4463 | * no device idling is performed for bfqq in this case. | 
|  | 4464 | */ | 
|  | 4465 | if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) || | 
|  | 4466 | bfqd->bfq_slice_idle == 0) | 
|  | 4467 | return; | 
|  | 4468 |  | 
|  | 4469 | /* Idle window just restored, statistics are meaningless. */ | 
|  | 4470 | if (time_is_after_eq_jiffies(bfqq->split_time + | 
|  | 4471 | bfqd->bfq_wr_min_idle_time)) | 
|  | 4472 | return; | 
|  | 4473 |  | 
|  | 4474 | /* Think time is infinite if no process is linked to | 
|  | 4475 | * bfqq. Otherwise check average think time to | 
|  | 4476 | * decide whether to mark as has_short_ttime | 
|  | 4477 | */ | 
|  | 4478 | if (atomic_read(&bic->icq.ioc->active_ref) == 0 || | 
|  | 4479 | (bfq_sample_valid(bfqq->ttime.ttime_samples) && | 
|  | 4480 | bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle)) | 
|  | 4481 | has_short_ttime = false; | 
|  | 4482 |  | 
|  | 4483 | bfq_log_bfqq(bfqd, bfqq, "update_has_short_ttime: has_short_ttime %d", | 
|  | 4484 | has_short_ttime); | 
|  | 4485 |  | 
|  | 4486 | if (has_short_ttime) | 
|  | 4487 | bfq_mark_bfqq_has_short_ttime(bfqq); | 
|  | 4488 | else | 
|  | 4489 | bfq_clear_bfqq_has_short_ttime(bfqq); | 
|  | 4490 | } | 
|  | 4491 |  | 
|  | 4492 | /* | 
|  | 4493 | * Called when a new fs request (rq) is added to bfqq.  Check if there's | 
|  | 4494 | * something we should do about it. | 
|  | 4495 | */ | 
|  | 4496 | static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, | 
|  | 4497 | struct request *rq) | 
|  | 4498 | { | 
|  | 4499 | struct bfq_io_cq *bic = RQ_BIC(rq); | 
|  | 4500 |  | 
|  | 4501 | if (rq->cmd_flags & REQ_META) | 
|  | 4502 | bfqq->meta_pending++; | 
|  | 4503 |  | 
|  | 4504 | bfq_update_io_thinktime(bfqd, bfqq); | 
|  | 4505 | bfq_update_has_short_ttime(bfqd, bfqq, bic); | 
|  | 4506 | bfq_update_io_seektime(bfqd, bfqq, rq); | 
|  | 4507 |  | 
|  | 4508 | bfq_log_bfqq(bfqd, bfqq, | 
|  | 4509 | "rq_enqueued: has_short_ttime=%d (seeky %d)", | 
|  | 4510 | bfq_bfqq_has_short_ttime(bfqq), BFQQ_SEEKY(bfqq)); | 
|  | 4511 |  | 
|  | 4512 | bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); | 
|  | 4513 |  | 
|  | 4514 | if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { | 
|  | 4515 | bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 && | 
|  | 4516 | blk_rq_sectors(rq) < 32; | 
|  | 4517 | bool budget_timeout = bfq_bfqq_budget_timeout(bfqq); | 
|  | 4518 |  | 
|  | 4519 | /* | 
|  | 4520 | * There is just this request queued: if the request | 
|  | 4521 | * is small and the queue is not to be expired, then | 
|  | 4522 | * just exit. | 
|  | 4523 | * | 
|  | 4524 | * In this way, if the device is being idled to wait | 
|  | 4525 | * for a new request from the in-service queue, we | 
|  | 4526 | * avoid unplugging the device and committing the | 
|  | 4527 | * device to serve just a small request. On the | 
|  | 4528 | * contrary, we wait for the block layer to decide | 
|  | 4529 | * when to unplug the device: hopefully, new requests | 
|  | 4530 | * will be merged to this one quickly, then the device | 
|  | 4531 | * will be unplugged and larger requests will be | 
|  | 4532 | * dispatched. | 
|  | 4533 | */ | 
|  | 4534 | if (small_req && !budget_timeout) | 
|  | 4535 | return; | 
|  | 4536 |  | 
|  | 4537 | /* | 
|  | 4538 | * A large enough request arrived, or the queue is to | 
|  | 4539 | * be expired: in both cases disk idling is to be | 
|  | 4540 | * stopped, so clear wait_request flag and reset | 
|  | 4541 | * timer. | 
|  | 4542 | */ | 
|  | 4543 | bfq_clear_bfqq_wait_request(bfqq); | 
|  | 4544 | hrtimer_try_to_cancel(&bfqd->idle_slice_timer); | 
|  | 4545 |  | 
|  | 4546 | /* | 
|  | 4547 | * The queue is not empty, because a new request just | 
|  | 4548 | * arrived. Hence we can safely expire the queue, in | 
|  | 4549 | * case of budget timeout, without risking that the | 
|  | 4550 | * timestamps of the queue are not updated correctly. | 
|  | 4551 | * See [1] for more details. | 
|  | 4552 | */ | 
|  | 4553 | if (budget_timeout) | 
|  | 4554 | bfq_bfqq_expire(bfqd, bfqq, false, | 
|  | 4555 | BFQQE_BUDGET_TIMEOUT); | 
|  | 4556 | } | 
|  | 4557 | } | 
|  | 4558 |  | 
|  | 4559 | /* returns true if it causes the idle timer to be disabled */ | 
|  | 4560 | static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) | 
|  | 4561 | { | 
|  | 4562 | struct bfq_queue *bfqq = RQ_BFQQ(rq), | 
|  | 4563 | *new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true); | 
|  | 4564 | bool waiting, idle_timer_disabled = false; | 
|  | 4565 |  | 
|  | 4566 | if (new_bfqq) { | 
|  | 4567 | if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq) | 
|  | 4568 | new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1); | 
|  | 4569 | /* | 
|  | 4570 | * Release the request's reference to the old bfqq | 
|  | 4571 | * and make sure one is taken to the shared queue. | 
|  | 4572 | */ | 
|  | 4573 | new_bfqq->allocated++; | 
|  | 4574 | bfqq->allocated--; | 
|  | 4575 | new_bfqq->ref++; | 
|  | 4576 | /* | 
|  | 4577 | * If the bic associated with the process | 
|  | 4578 | * issuing this request still points to bfqq | 
|  | 4579 | * (and thus has not been already redirected | 
|  | 4580 | * to new_bfqq or even some other bfq_queue), | 
|  | 4581 | * then complete the merge and redirect it to | 
|  | 4582 | * new_bfqq. | 
|  | 4583 | */ | 
|  | 4584 | if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) | 
|  | 4585 | bfq_merge_bfqqs(bfqd, RQ_BIC(rq), | 
|  | 4586 | bfqq, new_bfqq); | 
|  | 4587 |  | 
|  | 4588 | bfq_clear_bfqq_just_created(bfqq); | 
|  | 4589 | /* | 
|  | 4590 | * rq is about to be enqueued into new_bfqq, | 
|  | 4591 | * release rq reference on bfqq | 
|  | 4592 | */ | 
|  | 4593 | bfq_put_queue(bfqq); | 
|  | 4594 | rq->elv.priv[1] = new_bfqq; | 
|  | 4595 | bfqq = new_bfqq; | 
|  | 4596 | } | 
|  | 4597 |  | 
|  | 4598 | waiting = bfqq && bfq_bfqq_wait_request(bfqq); | 
|  | 4599 | bfq_add_request(rq); | 
|  | 4600 | idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq); | 
|  | 4601 |  | 
|  | 4602 | rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; | 
|  | 4603 | list_add_tail(&rq->queuelist, &bfqq->fifo); | 
|  | 4604 |  | 
|  | 4605 | bfq_rq_enqueued(bfqd, bfqq, rq); | 
|  | 4606 |  | 
|  | 4607 | return idle_timer_disabled; | 
|  | 4608 | } | 
|  | 4609 |  | 
|  | 4610 | #if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP) | 
|  | 4611 | static void bfq_update_insert_stats(struct request_queue *q, | 
|  | 4612 | struct bfq_queue *bfqq, | 
|  | 4613 | bool idle_timer_disabled, | 
|  | 4614 | unsigned int cmd_flags) | 
|  | 4615 | { | 
|  | 4616 | if (!bfqq) | 
|  | 4617 | return; | 
|  | 4618 |  | 
|  | 4619 | /* | 
|  | 4620 | * bfqq still exists, because it can disappear only after | 
|  | 4621 | * either it is merged with another queue, or the process it | 
|  | 4622 | * is associated with exits. But both actions must be taken by | 
|  | 4623 | * the same process currently executing this flow of | 
|  | 4624 | * instructions. | 
|  | 4625 | * | 
|  | 4626 | * In addition, the following queue lock guarantees that | 
|  | 4627 | * bfqq_group(bfqq) exists as well. | 
|  | 4628 | */ | 
|  | 4629 | spin_lock_irq(q->queue_lock); | 
|  | 4630 | bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags); | 
|  | 4631 | if (idle_timer_disabled) | 
|  | 4632 | bfqg_stats_update_idle_time(bfqq_group(bfqq)); | 
|  | 4633 | spin_unlock_irq(q->queue_lock); | 
|  | 4634 | } | 
|  | 4635 | #else | 
|  | 4636 | static inline void bfq_update_insert_stats(struct request_queue *q, | 
|  | 4637 | struct bfq_queue *bfqq, | 
|  | 4638 | bool idle_timer_disabled, | 
|  | 4639 | unsigned int cmd_flags) {} | 
|  | 4640 | #endif | 
|  | 4641 |  | 
|  | 4642 | static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, | 
|  | 4643 | bool at_head) | 
|  | 4644 | { | 
|  | 4645 | struct request_queue *q = hctx->queue; | 
|  | 4646 | struct bfq_data *bfqd = q->elevator->elevator_data; | 
|  | 4647 | struct bfq_queue *bfqq; | 
|  | 4648 | bool idle_timer_disabled = false; | 
|  | 4649 | unsigned int cmd_flags; | 
|  | 4650 |  | 
|  | 4651 | spin_lock_irq(&bfqd->lock); | 
|  | 4652 | if (blk_mq_sched_try_insert_merge(q, rq)) { | 
|  | 4653 | spin_unlock_irq(&bfqd->lock); | 
|  | 4654 | return; | 
|  | 4655 | } | 
|  | 4656 |  | 
|  | 4657 | spin_unlock_irq(&bfqd->lock); | 
|  | 4658 |  | 
|  | 4659 | blk_mq_sched_request_inserted(rq); | 
|  | 4660 |  | 
|  | 4661 | spin_lock_irq(&bfqd->lock); | 
|  | 4662 | bfqq = bfq_init_rq(rq); | 
|  | 4663 | if (!bfqq || at_head || blk_rq_is_passthrough(rq)) { | 
|  | 4664 | if (at_head) | 
|  | 4665 | list_add(&rq->queuelist, &bfqd->dispatch); | 
|  | 4666 | else | 
|  | 4667 | list_add_tail(&rq->queuelist, &bfqd->dispatch); | 
|  | 4668 | } else { | 
|  | 4669 | idle_timer_disabled = __bfq_insert_request(bfqd, rq); | 
|  | 4670 | /* | 
|  | 4671 | * Update bfqq, because, if a queue merge has occurred | 
|  | 4672 | * in __bfq_insert_request, then rq has been | 
|  | 4673 | * redirected into a new queue. | 
|  | 4674 | */ | 
|  | 4675 | bfqq = RQ_BFQQ(rq); | 
|  | 4676 |  | 
|  | 4677 | if (rq_mergeable(rq)) { | 
|  | 4678 | elv_rqhash_add(q, rq); | 
|  | 4679 | if (!q->last_merge) | 
|  | 4680 | q->last_merge = rq; | 
|  | 4681 | } | 
|  | 4682 | } | 
|  | 4683 |  | 
|  | 4684 | /* | 
|  | 4685 | * Cache cmd_flags before releasing scheduler lock, because rq | 
|  | 4686 | * may disappear afterwards (for example, because of a request | 
|  | 4687 | * merge). | 
|  | 4688 | */ | 
|  | 4689 | cmd_flags = rq->cmd_flags; | 
|  | 4690 |  | 
|  | 4691 | spin_unlock_irq(&bfqd->lock); | 
|  | 4692 |  | 
|  | 4693 | bfq_update_insert_stats(q, bfqq, idle_timer_disabled, | 
|  | 4694 | cmd_flags); | 
|  | 4695 | } | 
|  | 4696 |  | 
|  | 4697 | static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, | 
|  | 4698 | struct list_head *list, bool at_head) | 
|  | 4699 | { | 
|  | 4700 | while (!list_empty(list)) { | 
|  | 4701 | struct request *rq; | 
|  | 4702 |  | 
|  | 4703 | rq = list_first_entry(list, struct request, queuelist); | 
|  | 4704 | list_del_init(&rq->queuelist); | 
|  | 4705 | bfq_insert_request(hctx, rq, at_head); | 
|  | 4706 | } | 
|  | 4707 | } | 
|  | 4708 |  | 
|  | 4709 | static void bfq_update_hw_tag(struct bfq_data *bfqd) | 
|  | 4710 | { | 
|  | 4711 | bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver, | 
|  | 4712 | bfqd->rq_in_driver); | 
|  | 4713 |  | 
|  | 4714 | if (bfqd->hw_tag == 1) | 
|  | 4715 | return; | 
|  | 4716 |  | 
|  | 4717 | /* | 
|  | 4718 | * This sample is valid if the number of outstanding requests | 
|  | 4719 | * is large enough to allow a queueing behavior.  Note that the | 
|  | 4720 | * sum is not exact, as it's not taking into account deactivated | 
|  | 4721 | * requests. | 
|  | 4722 | */ | 
|  | 4723 | if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD) | 
|  | 4724 | return; | 
|  | 4725 |  | 
|  | 4726 | if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) | 
|  | 4727 | return; | 
|  | 4728 |  | 
|  | 4729 | bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; | 
|  | 4730 | bfqd->max_rq_in_driver = 0; | 
|  | 4731 | bfqd->hw_tag_samples = 0; | 
|  | 4732 | } | 
|  | 4733 |  | 
|  | 4734 | static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) | 
|  | 4735 | { | 
|  | 4736 | u64 now_ns; | 
|  | 4737 | u32 delta_us; | 
|  | 4738 |  | 
|  | 4739 | bfq_update_hw_tag(bfqd); | 
|  | 4740 |  | 
|  | 4741 | bfqd->rq_in_driver--; | 
|  | 4742 | bfqq->dispatched--; | 
|  | 4743 |  | 
|  | 4744 | if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { | 
|  | 4745 | /* | 
|  | 4746 | * Set budget_timeout (which we overload to store the | 
|  | 4747 | * time at which the queue remains with no backlog and | 
|  | 4748 | * no outstanding request; used by the weight-raising | 
|  | 4749 | * mechanism). | 
|  | 4750 | */ | 
|  | 4751 | bfqq->budget_timeout = jiffies; | 
|  | 4752 |  | 
|  | 4753 | bfq_weights_tree_remove(bfqd, bfqq); | 
|  | 4754 | } | 
|  | 4755 |  | 
|  | 4756 | now_ns = ktime_get_ns(); | 
|  | 4757 |  | 
|  | 4758 | bfqq->ttime.last_end_request = now_ns; | 
|  | 4759 |  | 
|  | 4760 | /* | 
|  | 4761 | * Using us instead of ns, to get a reasonable precision in | 
|  | 4762 | * computing rate in next check. | 
|  | 4763 | */ | 
|  | 4764 | delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); | 
|  | 4765 |  | 
|  | 4766 | /* | 
|  | 4767 | * If the request took rather long to complete, and, according | 
|  | 4768 | * to the maximum request size recorded, this completion latency | 
|  | 4769 | * implies that the request was certainly served at a very low | 
|  | 4770 | * rate (less than 1M sectors/sec), then the whole observation | 
|  | 4771 | * interval that lasts up to this time instant cannot be a | 
|  | 4772 | * valid time interval for computing a new peak rate.  Invoke | 
|  | 4773 | * bfq_update_rate_reset to have the following three steps | 
|  | 4774 | * taken: | 
|  | 4775 | * - close the observation interval at the last (previous) | 
|  | 4776 | *   request dispatch or completion | 
|  | 4777 | * - compute rate, if possible, for that observation interval | 
|  | 4778 | * - reset to zero samples, which will trigger a proper | 
|  | 4779 | *   re-initialization of the observation interval on next | 
|  | 4780 | *   dispatch | 
|  | 4781 | */ | 
|  | 4782 | if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC && | 
|  | 4783 | (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us < | 
|  | 4784 | 1UL<<(BFQ_RATE_SHIFT - 10)) | 
|  | 4785 | bfq_update_rate_reset(bfqd, NULL); | 
|  | 4786 | bfqd->last_completion = now_ns; | 
|  | 4787 |  | 
|  | 4788 | /* | 
|  | 4789 | * If we are waiting to discover whether the request pattern | 
|  | 4790 | * of the task associated with the queue is actually | 
|  | 4791 | * isochronous, and both requisites for this condition to hold | 
|  | 4792 | * are now satisfied, then compute soft_rt_next_start (see the | 
|  | 4793 | * comments on the function bfq_bfqq_softrt_next_start()). We | 
|  | 4794 | * schedule this delayed check when bfqq expires, if it still | 
|  | 4795 | * has in-flight requests. | 
|  | 4796 | */ | 
|  | 4797 | if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 && | 
|  | 4798 | RB_EMPTY_ROOT(&bfqq->sort_list)) | 
|  | 4799 | bfqq->soft_rt_next_start = | 
|  | 4800 | bfq_bfqq_softrt_next_start(bfqd, bfqq); | 
|  | 4801 |  | 
|  | 4802 | /* | 
|  | 4803 | * If this is the in-service queue, check if it needs to be expired, | 
|  | 4804 | * or if we want to idle in case it has no pending requests. | 
|  | 4805 | */ | 
|  | 4806 | if (bfqd->in_service_queue == bfqq) { | 
|  | 4807 | if (bfq_bfqq_must_idle(bfqq)) { | 
|  | 4808 | if (bfqq->dispatched == 0) | 
|  | 4809 | bfq_arm_slice_timer(bfqd); | 
|  | 4810 | /* | 
|  | 4811 | * If we get here, we do not expire bfqq, even | 
|  | 4812 | * if bfqq was in budget timeout or had no | 
|  | 4813 | * more requests (as controlled in the next | 
|  | 4814 | * conditional instructions). The reason for | 
|  | 4815 | * not expiring bfqq is as follows. | 
|  | 4816 | * | 
|  | 4817 | * Here bfqq->dispatched > 0 holds, but | 
|  | 4818 | * bfq_bfqq_must_idle() returned true. This | 
|  | 4819 | * implies that, even if no request arrives | 
|  | 4820 | * for bfqq before bfqq->dispatched reaches 0, | 
|  | 4821 | * bfqq will, however, not be expired on the | 
|  | 4822 | * completion event that causes bfqq->dispatched | 
|  | 4823 | * to reach zero. In contrast, on this event, | 
|  | 4824 | * bfqq will start enjoying device idling | 
|  | 4825 | * (I/O-dispatch plugging). | 
|  | 4826 | * | 
|  | 4827 | * But, if we expired bfqq here, bfqq would | 
|  | 4828 | * not have the chance to enjoy device idling | 
|  | 4829 | * when bfqq->dispatched finally reaches | 
|  | 4830 | * zero. This would expose bfqq to violation | 
|  | 4831 | * of its reserved service guarantees. | 
|  | 4832 | */ | 
|  | 4833 | return; | 
|  | 4834 | } else if (bfq_may_expire_for_budg_timeout(bfqq)) | 
|  | 4835 | bfq_bfqq_expire(bfqd, bfqq, false, | 
|  | 4836 | BFQQE_BUDGET_TIMEOUT); | 
|  | 4837 | else if (RB_EMPTY_ROOT(&bfqq->sort_list) && | 
|  | 4838 | (bfqq->dispatched == 0 || | 
|  | 4839 | !bfq_better_to_idle(bfqq))) | 
|  | 4840 | bfq_bfqq_expire(bfqd, bfqq, false, | 
|  | 4841 | BFQQE_NO_MORE_REQUESTS); | 
|  | 4842 | } | 
|  | 4843 |  | 
|  | 4844 | if (!bfqd->rq_in_driver) | 
|  | 4845 | bfq_schedule_dispatch(bfqd); | 
|  | 4846 | } | 
|  | 4847 |  | 
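|  |  | /* | 
|  |  | * Release the scheduler-side resources held on behalf of a finished or | 
|  |  | * requeued request: one slot in bfqq->allocated and one reference to | 
|  |  | * bfqq. | 
|  |  | */ | 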
|  | 4848 | static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq) | 
|  | 4849 | { | 
|  | 4850 | bfqq->allocated--; | 
|  | 4851 |  | 
|  | 4852 | bfq_put_queue(bfqq); | 
|  | 4853 | } | 
|  | 4854 |  | 
|  | 4855 | /* | 
|  | 4856 | * Handle either a requeue or a finish for rq. The things to do are | 
|  | 4857 | * the same in both cases: all references to rq are to be dropped. In | 
|  | 4858 | * particular, rq is considered completed from the point of view of | 
|  | 4859 | * the scheduler. | 
|  | 4860 | */ | 
|  | 4861 | static void bfq_finish_requeue_request(struct request *rq) | 
|  | 4862 | { | 
|  | 4863 | struct bfq_queue *bfqq = RQ_BFQQ(rq); | 
|  | 4864 | struct bfq_data *bfqd; | 
|  | 4865 |  | 
|  | 4866 | /* | 
|  | 4867 | * Requeue and finish hooks are invoked in blk-mq without | 
|  | 4868 | * checking whether the involved request is actually still | 
|  | 4869 | * referenced in the scheduler. To handle this fact, the | 
|  | 4870 | * following two checks make this function exit in case of | 
|  | 4871 | * spurious invocations, for which there is nothing to do. | 
|  | 4872 | * | 
|  | 4873 | * First, check whether rq has nothing to do with an elevator. | 
|  | 4874 | */ | 
|  | 4875 | if (unlikely(!(rq->rq_flags & RQF_ELVPRIV))) | 
|  | 4876 | return; | 
|  | 4877 |  | 
|  | 4878 | /* | 
|  | 4879 | * rq either is not associated with any icq, or is an already | 
|  | 4880 | * requeued request that has not (yet) been re-inserted into | 
|  | 4881 | * a bfq_queue. | 
|  | 4882 | */ | 
|  | 4883 | if (!rq->elv.icq || !bfqq) | 
|  | 4884 | return; | 
|  | 4885 |  | 
|  | 4886 | bfqd = bfqq->bfqd; | 
|  | 4887 |  | 
|  | 4888 | if (rq->rq_flags & RQF_STARTED) | 
|  | 4889 | bfqg_stats_update_completion(bfqq_group(bfqq), | 
|  | 4890 | rq->start_time_ns, | 
|  | 4891 | rq->io_start_time_ns, | 
|  | 4892 | rq->cmd_flags); | 
|  | 4893 |  | 
|  | 4894 | if (likely(rq->rq_flags & RQF_STARTED)) { | 
|  | 4895 | unsigned long flags; | 
|  | 4896 |  | 
|  | 4897 | spin_lock_irqsave(&bfqd->lock, flags); | 
|  | 4898 |  | 
|  | 4899 | bfq_completed_request(bfqq, bfqd); | 
|  | 4900 | bfq_finish_requeue_request_body(bfqq); | 
|  | 4901 |  | 
|  | 4902 | spin_unlock_irqrestore(&bfqd->lock, flags); | 
|  | 4903 | } else { | 
|  | 4904 | /* | 
|  | 4905 | * Request rq may still/already be in the scheduler, | 
|  | 4906 | * in which case we need to remove it (this should | 
|  | 4907 | * never happen in case of requeue). And we cannot | 
|  | 4908 | * defer such a check and removal, to avoid | 
|  | 4909 | * inconsistencies in the time interval from the end | 
|  | 4910 | * of this function to the start of the deferred work. | 
|  | 4911 | * This situation seems to occur only in process | 
|  | 4912 | * context, as a consequence of a merge. In the | 
|  | 4913 | * current version of the code, this implies that the | 
|  | 4914 | * lock is held. | 
|  | 4915 | */ | 
|  | 4916 |  | 
|  | 4917 | if (!RB_EMPTY_NODE(&rq->rb_node)) { | 
|  | 4918 | bfq_remove_request(rq->q, rq); | 
|  | 4919 | bfqg_stats_update_io_remove(bfqq_group(bfqq), | 
|  | 4920 | rq->cmd_flags); | 
|  | 4921 | } | 
|  | 4922 | bfq_finish_requeue_request_body(bfqq); | 
|  | 4923 | } | 
|  | 4924 |  | 
|  | 4925 | /* | 
|  | 4926 | * Reset private fields. In case of a requeue, this allows | 
|  | 4927 | * this function to correctly do nothing if it is spuriously | 
|  | 4928 | * invoked again on this same request (see the check at the | 
|  | 4929 | * beginning of the function). Probably, a better general | 
|  | 4930 | * design would be to prevent blk-mq from invoking the requeue | 
|  | 4931 | * or finish hooks of an elevator, for a request that is not | 
|  | 4932 | * referred by that elevator. | 
|  | 4933 | * | 
|  | 4934 | * Resetting the following fields would break the | 
|  | 4935 | * request-insertion logic if rq is re-inserted into a bfq | 
|  | 4936 | * internal queue, without a re-preparation. Here we assume | 
|  | 4937 | * that re-insertions of requeued requests, without | 
|  | 4938 | * re-preparation, can happen only for pass_through or at_head | 
|  | 4939 | * requests (which are not re-inserted into bfq internal | 
|  | 4940 | * queues). | 
|  | 4941 | */ | 
|  | 4942 | rq->elv.priv[0] = NULL; | 
|  | 4943 | rq->elv.priv[1] = NULL; | 
|  | 4944 | } | 
|  | 4945 |  | 
|  | 4946 | /* | 
|  | 4947 | * Returns NULL if a new bfqq should be allocated, or the old bfqq if this | 
|  | 4948 | * was the last process referring to that bfqq. | 
|  | 4949 | */ | 
|  | 4950 | static struct bfq_queue * | 
|  | 4951 | bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) | 
|  | 4952 | { | 
|  | 4953 | bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue"); | 
|  | 4954 |  | 
|  | 4955 | if (bfqq_process_refs(bfqq) == 1) { | 
|  | 4956 | bfqq->pid = current->pid; | 
|  | 4957 | bfq_clear_bfqq_coop(bfqq); | 
|  | 4958 | bfq_clear_bfqq_split_coop(bfqq); | 
|  | 4959 | return bfqq; | 
|  | 4960 | } | 
|  | 4961 |  | 
|  | 4962 | bic_set_bfqq(bic, NULL, 1); | 
|  | 4963 |  | 
|  | 4964 | bfq_put_cooperator(bfqq); | 
|  | 4965 |  | 
|  | 4966 | bfq_put_queue(bfqq); | 
|  | 4967 | return NULL; | 
|  | 4968 | } | 
|  | 4969 |  | 
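|  |  | /* | 
|  |  | * Return the bfq_queue to use for bic in the given direction, | 
|  |  | * allocating a new one if none is attached or only the oom fallback | 
|  |  | * queue is. If the allocation follows a queue split (split && is_sync), | 
|  |  | * also restore the burst-list state saved in the bic and record the | 
|  |  | * split time. *new_queue is set when a new queue is allocated. | 
|  |  | */ | 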
|  | 4970 | static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, | 
|  | 4971 | struct bfq_io_cq *bic, | 
|  | 4972 | struct bio *bio, | 
|  | 4973 | bool split, bool is_sync, | 
|  | 4974 | bool *new_queue) | 
|  | 4975 | { | 
|  | 4976 | struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync); | 
|  | 4977 |  | 
|  | 4978 | if (likely(bfqq && bfqq != &bfqd->oom_bfqq)) | 
|  | 4979 | return bfqq; | 
|  | 4980 |  | 
|  | 4981 | if (new_queue) | 
|  | 4982 | *new_queue = true; | 
|  | 4983 |  | 
|  | 4984 | if (bfqq) | 
|  | 4985 | bfq_put_queue(bfqq); | 
|  | 4986 | bfqq = bfq_get_queue(bfqd, bio, is_sync, bic); | 
|  | 4987 |  | 
|  | 4988 | bic_set_bfqq(bic, bfqq, is_sync); | 
|  | 4989 | if (split && is_sync) { | 
|  | 4990 | if ((bic->was_in_burst_list && bfqd->large_burst) || | 
|  | 4991 | bic->saved_in_large_burst) | 
|  | 4992 | bfq_mark_bfqq_in_large_burst(bfqq); | 
|  | 4993 | else { | 
|  | 4994 | bfq_clear_bfqq_in_large_burst(bfqq); | 
|  | 4995 | if (bic->was_in_burst_list) | 
|  | 4996 | /* | 
|  | 4997 | * If bfqq was in the current | 
|  | 4998 | * burst list before being | 
|  | 4999 | * merged, then we have to add | 
|  | 5000 | * it back. And we do not need | 
|  | 5001 | * to increase burst_size, as | 
|  | 5002 | * we did not decrement | 
|  | 5003 | * burst_size when we removed | 
|  | 5004 | * bfqq from the burst list as | 
|  | 5005 | * a consequence of a merge | 
|  | 5006 | * (see comments in | 
|  | 5007 | * bfq_put_queue). In this | 
|  | 5008 | * respect, it would be rather | 
|  | 5009 | * costly to know whether the | 
|  | 5010 | * current burst list is still | 
|  | 5011 | * the same burst list from | 
|  | 5012 | * which bfqq was removed on | 
|  | 5013 | * the merge. To avoid this | 
|  | 5014 | * cost, if bfqq was in a | 
|  | 5015 | * burst list, then we add | 
|  | 5016 | * bfqq to the current burst | 
|  | 5017 | * list without any further | 
|  | 5018 | * check. This can cause | 
|  | 5019 | * inappropriate insertions, | 
|  | 5020 | * but rarely enough to not | 
|  | 5021 | * harm the detection of large | 
|  | 5022 | * bursts significantly. | 
|  | 5023 | */ | 
|  | 5024 | hlist_add_head(&bfqq->burst_list_node, | 
|  | 5025 | &bfqd->burst_list); | 
|  | 5026 | } | 
|  | 5027 | bfqq->split_time = jiffies; | 
|  | 5028 | } | 
|  | 5029 |  | 
|  | 5030 | return bfqq; | 
|  | 5031 | } | 
|  | 5032 |  | 
|  | 5033 | /* | 
|  | 5034 | * Only reset private fields. The actual request preparation will be | 
|  | 5035 | * performed by bfq_init_rq, when rq is either inserted or merged. See | 
|  | 5036 | * comments on bfq_init_rq for the reason behind this delayed | 
|  | 5037 | * preparation. | 
|  | 5038 | */ | 
|  | 5039 | static void bfq_prepare_request(struct request *rq, struct bio *bio) | 
|  | 5040 | { | 
|  | 5041 | /* | 
|  | 5042 | * Regardless of whether we have an icq attached, we have to | 
|  | 5043 | * clear the scheduler pointers, as they might point to | 
|  | 5044 | * previously allocated bic/bfqq structs. | 
|  | 5045 | */ | 
|  | 5046 | rq->elv.priv[0] = rq->elv.priv[1] = NULL; | 
|  | 5047 | } | 
|  | 5048 |  | 
|  | 5049 | /* | 
|  | 5050 | * If needed, init rq, allocate bfq data structures associated with | 
|  | 5051 | * rq, and increment reference counters in the destination bfq_queue | 
|  | 5052 | * for rq. Return the destination bfq_queue for rq, or NULL if rq is | 
|  | 5053 | * not associated with any bfq_queue. | 
|  | 5054 | * | 
|  | 5055 | * This function is invoked by the functions that perform rq insertion | 
|  | 5056 | * or merging. One may have expected the above preparation operations | 
|  | 5057 | * to be performed in bfq_prepare_request, and not delayed to when rq | 
|  | 5058 | * is inserted or merged. The rationale behind this delayed | 
|  | 5059 | * preparation is that, after the prepare_request hook is invoked for | 
|  | 5060 | * rq, rq may still be transformed into a request with no icq, i.e., a | 
|  | 5061 | * request not associated with any queue. No bfq hook is invoked to | 
|  | 5062 | * signal this transformation. As a consequence, should these | 
|  | 5063 | * preparation operations be performed when the prepare_request hook | 
|  | 5064 | * is invoked, and should rq be transformed one moment later, bfq | 
|  | 5065 | * would end up in an inconsistent state, because it would have | 
|  | 5066 | * incremented some queue counters for an rq destined to | 
|  | 5067 | * transformation, without any chance to correctly lower these | 
|  | 5068 | * counters back. In contrast, no transformation can still happen for | 
|  | 5069 | * rq after rq has been inserted or merged. So, it is safe to execute | 
|  | 5070 | * these preparation operations when rq is finally inserted or merged. | 
|  | 5071 | */ | 
|  | 5072 | static struct bfq_queue *bfq_init_rq(struct request *rq) | 
|  | 5073 | { | 
|  | 5074 | struct request_queue *q = rq->q; | 
|  | 5075 | struct bio *bio = rq->bio; | 
|  | 5076 | struct bfq_data *bfqd = q->elevator->elevator_data; | 
|  | 5077 | struct bfq_io_cq *bic; | 
|  | 5078 | const int is_sync = rq_is_sync(rq); | 
|  | 5079 | struct bfq_queue *bfqq; | 
|  | 5080 | bool new_queue = false; | 
|  | 5081 | bool bfqq_already_existing = false, split = false; | 
|  | 5082 |  | 
|  | 5083 | if (unlikely(!rq->elv.icq)) | 
|  | 5084 | return NULL; | 
|  | 5085 |  | 
|  | 5086 | /* | 
|  | 5087 | * Assuming that elv.priv[1] is set only if everything is set | 
|  | 5088 | * for this rq. This holds true, because this function is | 
|  | 5089 | * invoked only for insertion or merging, and, after such | 
|  | 5090 | * events, a request cannot be manipulated any longer before | 
|  | 5091 | * being removed from bfq. | 
|  | 5092 | */ | 
|  | 5093 | if (rq->elv.priv[1]) | 
|  | 5094 | return rq->elv.priv[1]; | 
|  | 5095 |  | 
|  | 5096 | bic = icq_to_bic(rq->elv.icq); | 
|  | 5097 |  | 
|  | 5098 | bfq_check_ioprio_change(bic, bio); | 
|  | 5099 |  | 
|  | 5100 | bfq_bic_update_cgroup(bic, bio); | 
|  | 5101 |  | 
|  | 5102 | bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync, | 
|  | 5103 | &new_queue); | 
|  | 5104 |  | 
|  | 5105 | if (likely(!new_queue)) { | 
|  | 5106 | /* If the queue was seeky for too long, break it apart. */ | 
|  | 5107 | if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) { | 
|  | 5108 | bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq"); | 
|  | 5109 |  | 
|  | 5110 | /* Update bic before losing reference to bfqq */ | 
|  | 5111 | if (bfq_bfqq_in_large_burst(bfqq)) | 
|  | 5112 | bic->saved_in_large_burst = true; | 
|  | 5113 |  | 
|  | 5114 | bfqq = bfq_split_bfqq(bic, bfqq); | 
|  | 5115 | split = true; | 
|  | 5116 |  | 
|  | 5117 | if (!bfqq) | 
|  | 5118 | bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, | 
|  | 5119 | true, is_sync, | 
|  | 5120 | NULL); | 
|  | 5121 | else | 
|  | 5122 | bfqq_already_existing = true; | 
|  | 5123 | } | 
|  | 5124 | } | 
|  | 5125 |  | 
|  | 5126 | bfqq->allocated++; | 
|  | 5127 | bfqq->ref++; | 
|  | 5128 | bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d", | 
|  | 5129 | rq, bfqq, bfqq->ref); | 
|  | 5130 |  | 
|  | 5131 | rq->elv.priv[0] = bic; | 
|  | 5132 | rq->elv.priv[1] = bfqq; | 
|  | 5133 |  | 
|  | 5134 | /* | 
|  | 5135 | * If a bfq_queue has only one process reference, it is owned | 
|  | 5136 | * by only this bic: we can then set bfqq->bic = bic. In | 
|  | 5137 | * addition, if the queue has also just been split, we have to | 
|  | 5138 | * resume its state. | 
|  | 5139 | */ | 
|  | 5140 | if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { | 
|  | 5141 | bfqq->bic = bic; | 
|  | 5142 | if (split) { | 
|  | 5143 | /* | 
|  | 5144 | * The queue has just been split from a shared | 
|  | 5145 | * queue: restore the idle window and the | 
|  | 5146 | * possible weight raising period. | 
|  | 5147 | */ | 
|  | 5148 | bfq_bfqq_resume_state(bfqq, bfqd, bic, | 
|  | 5149 | bfqq_already_existing); | 
|  | 5150 | } | 
|  | 5151 | } | 
|  | 5152 |  | 
|  | 5153 | if (unlikely(bfq_bfqq_just_created(bfqq))) | 
|  | 5154 | bfq_handle_burst(bfqd, bfqq); | 
|  | 5155 |  | 
|  | 5156 | return bfqq; | 
|  | 5157 | } | 
|  | 5158 |  | 
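|  |  | /* | 
|  |  | * Body of the idle-slice timer: take bfqd->lock and, if bfqq is still | 
|  |  | * the in-service queue, possibly expire it (for budget timeout, or for | 
|  |  | * being too idle if it has no queued requests) and schedule a new | 
|  |  | * dispatch round. | 
|  |  | */ | 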
|  | 5159 | static void bfq_idle_slice_timer_body(struct bfq_queue *bfqq) | 
|  | 5160 | { | 
|  | 5161 | struct bfq_data *bfqd = bfqq->bfqd; | 
|  | 5162 | enum bfqq_expiration reason; | 
|  | 5163 | unsigned long flags; | 
|  | 5164 |  | 
|  | 5165 | spin_lock_irqsave(&bfqd->lock, flags); | 
|  | 5166 | bfq_clear_bfqq_wait_request(bfqq); | 
|  | 5167 |  | 
|  | 5168 | if (bfqq != bfqd->in_service_queue) { | 
|  | 5169 | spin_unlock_irqrestore(&bfqd->lock, flags); | 
|  | 5170 | return; | 
|  | 5171 | } | 
|  | 5172 |  | 
|  | 5173 | if (bfq_bfqq_budget_timeout(bfqq)) | 
|  | 5174 | /* | 
|  | 5175 | * Also here the queue can be safely expired | 
|  | 5176 | * for budget timeout without wasting | 
|  | 5177 | * guarantees | 
|  | 5178 | */ | 
|  | 5179 | reason = BFQQE_BUDGET_TIMEOUT; | 
|  | 5180 | else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) | 
|  | 5181 | /* | 
|  | 5182 | * The queue may not be empty upon timer expiration, | 
|  | 5183 | * because we may not disable the timer when the | 
|  | 5184 | * first request of the in-service queue arrives | 
|  | 5185 | * during disk idling. | 
|  | 5186 | */ | 
|  | 5187 | reason = BFQQE_TOO_IDLE; | 
|  | 5188 | else | 
|  | 5189 | goto schedule_dispatch; | 
|  | 5190 |  | 
|  | 5191 | bfq_bfqq_expire(bfqd, bfqq, true, reason); | 
|  | 5192 |  | 
|  | 5193 | schedule_dispatch: | 
|  | 5194 | spin_unlock_irqrestore(&bfqd->lock, flags); | 
|  | 5195 | bfq_schedule_dispatch(bfqd); | 
|  | 5196 | } | 
|  | 5197 |  | 
|  | 5198 | /* | 
|  | 5199 | * Handler of the expiration of the timer running if the in-service queue | 
|  | 5200 | * is idling inside its time slice. | 
|  | 5201 | */ | 
|  | 5202 | static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) | 
|  | 5203 | { | 
|  | 5204 | struct bfq_data *bfqd = container_of(timer, struct bfq_data, | 
|  | 5205 | idle_slice_timer); | 
|  | 5206 | struct bfq_queue *bfqq = bfqd->in_service_queue; | 
|  | 5207 |  | 
|  | 5208 | /* | 
|  | 5209 | * Theoretical race here: the in-service queue can be NULL or | 
|  | 5210 | * different from the queue that was idling if a new request | 
|  | 5211 | * arrives for the current queue and there is a full dispatch | 
|  | 5212 | * cycle that changes the in-service queue.  This can hardly | 
|  | 5213 | * happen, but in the worst case we just expire a queue too | 
|  | 5214 | * early. | 
|  | 5215 | */ | 
|  | 5216 | if (bfqq) | 
|  | 5217 | bfq_idle_slice_timer_body(bfqq); | 
|  | 5218 |  | 
|  | 5219 | return HRTIMER_NORESTART; | 
|  | 5220 | } | 
|  | 5221 |  | 
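|  |  | /* | 
|  |  | * Release one async queue: move it back to the root group, drop the | 
|  |  | * reference held through *bfqq_ptr, and clear the pointer. | 
|  |  | */ | 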
|  | 5222 | static void __bfq_put_async_bfqq(struct bfq_data *bfqd, | 
|  | 5223 | struct bfq_queue **bfqq_ptr) | 
|  | 5224 | { | 
|  | 5225 | struct bfq_queue *bfqq = *bfqq_ptr; | 
|  | 5226 |  | 
|  | 5227 | bfq_log(bfqd, "put_async_bfqq: %p", bfqq); | 
|  | 5228 | if (bfqq) { | 
|  | 5229 | bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); | 
|  | 5230 |  | 
|  | 5231 | bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d", | 
|  | 5232 | bfqq, bfqq->ref); | 
|  | 5233 | bfq_put_queue(bfqq); | 
|  | 5234 | *bfqq_ptr = NULL; | 
|  | 5235 | } | 
|  | 5236 | } | 
|  | 5237 |  | 
|  | 5238 | /* | 
|  | 5239 | * Release all the bfqg references to its async queues.  If we are | 
|  | 5240 | * deallocating the group, these queues may still contain requests, so | 
|  | 5241 | * we reparent them to the root cgroup (i.e., the only one that will | 
|  | 5242 | * exist for sure until all the requests on a device are gone). | 
|  | 5243 | */ | 
|  | 5244 | void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) | 
|  | 5245 | { | 
|  | 5246 | int i, j; | 
|  | 5247 |  | 
|  | 5248 | for (i = 0; i < 2; i++) | 
|  | 5249 | for (j = 0; j < IOPRIO_BE_NR; j++) | 
|  | 5250 | __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]); | 
|  | 5251 |  | 
|  | 5252 | __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq); | 
|  | 5253 | } | 
|  | 5254 |  | 
|  | 5255 | /* | 
|  | 5256 | * See the comments on bfq_limit_depth for the purpose of | 
|  | 5257 | * the depths set in the function. Return minimum shallow depth we'll use. | 
|  | 5258 | */ | 
|  | 5259 | static unsigned int bfq_update_depths(struct bfq_data *bfqd, | 
|  | 5260 | struct sbitmap_queue *bt) | 
|  | 5261 | { | 
|  | 5262 | unsigned int i, j, min_shallow = UINT_MAX; | 
|  | 5263 |  | 
|  | 5264 | /* | 
|  | 5265 | * In-word depths if no bfq_queue is being weight-raised: | 
|  | 5266 | * leaving 25% of tags only for sync reads. | 
|  | 5267 | * | 
|  | 5268 | * In next formulas, right-shift the value | 
|  | 5269 | * (1U<<bt->sb.shift), instead of computing directly | 
|  | 5270 | * (1U<<(bt->sb.shift - something)), to be robust against | 
|  | 5271 | * any possible value of bt->sb.shift, without having to | 
|  | 5272 | * limit 'something'. | 
|  | 5273 | */ | 
|  | 5274 | /* no more than 50% of tags for async I/O */ | 
|  | 5275 | bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U); | 
|  | 5276 | /* | 
|  | 5277 | * no more than 75% of tags for sync writes (25% extra tags | 
|  | 5278 | * w.r.t. async I/O, to prevent async I/O from starving sync | 
|  | 5279 | * writes) | 
|  | 5280 | */ | 
|  | 5281 | bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U); | 
|  | 5282 |  | 
|  | 5283 | /* | 
|  | 5284 | * In-word depths in case some bfq_queue is being weight- | 
|  | 5285 | * raised: leaving ~63% of tags for sync reads. This is the | 
|  | 5286 | * highest percentage for which, in our tests, application | 
|  | 5287 | * start-up times didn't suffer from any regression due to tag | 
|  | 5288 | * shortage. | 
|  | 5289 | */ | 
|  | 5290 | /* no more than ~18% of tags for async I/O */ | 
|  | 5291 | bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U); | 
|  | 5292 | /* no more than ~37% of tags for sync writes (~20% extra tags) */ | 
|  | 5293 | bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U); | 
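|  |  | /* | 
|  |  | * Illustration (not in the original source): with bt->sb.shift == 6, | 
|  |  | * i.e., 64 tags per sbitmap word, the four depths above evaluate to | 
|  |  | * 32, 48, 12 and 24, respectively. | 
|  |  | */ | 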
|  | 5294 |  | 
|  | 5295 | for (i = 0; i < 2; i++) | 
|  | 5296 | for (j = 0; j < 2; j++) | 
|  | 5297 | min_shallow = min(min_shallow, bfqd->word_depths[i][j]); | 
|  | 5298 |  | 
|  | 5299 | return min_shallow; | 
|  | 5300 | } | 
|  | 5301 |  | 
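|  |  | /* | 
|  |  | * Recompute the word depths for the scheduler tags of this hctx and | 
|  |  | * tell the sbitmap code the minimum shallow depth that bfq will pass | 
|  |  | * to it. | 
|  |  | */ | 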
|  | 5302 | static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx) | 
|  | 5303 | { | 
|  | 5304 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; | 
|  | 5305 | struct blk_mq_tags *tags = hctx->sched_tags; | 
|  | 5306 | unsigned int min_shallow; | 
|  | 5307 |  | 
|  | 5308 | min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags); | 
|  | 5309 | sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow); | 
|  | 5310 | } | 
|  | 5311 |  | 
|  | 5312 | static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) | 
|  | 5313 | { | 
|  | 5314 | bfq_depth_updated(hctx); | 
|  | 5315 | return 0; | 
|  | 5316 | } | 
|  | 5317 |  | 
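|  |  | /* | 
|  |  | * Tear down the scheduler instance: cancel the idle-slice timer, | 
|  |  | * deactivate any queues left on the idle list, release the references | 
|  |  | * to the root group (or, without cgroup support, to the async queues | 
|  |  | * and the root group itself), and free the bfq_data. | 
|  |  | */ | 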
|  | 5318 | static void bfq_exit_queue(struct elevator_queue *e) | 
|  | 5319 | { | 
|  | 5320 | struct bfq_data *bfqd = e->elevator_data; | 
|  | 5321 | struct bfq_queue *bfqq, *n; | 
|  | 5322 |  | 
|  | 5323 | hrtimer_cancel(&bfqd->idle_slice_timer); | 
|  | 5324 |  | 
|  | 5325 | spin_lock_irq(&bfqd->lock); | 
|  | 5326 | list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) | 
|  | 5327 | bfq_deactivate_bfqq(bfqd, bfqq, false, false); | 
|  | 5328 | spin_unlock_irq(&bfqd->lock); | 
|  | 5329 |  | 
|  | 5330 | hrtimer_cancel(&bfqd->idle_slice_timer); | 
|  | 5331 |  | 
|  | 5332 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | 
|  | 5333 | /* release oom-queue reference to root group */ | 
|  | 5334 | bfqg_and_blkg_put(bfqd->root_group); | 
|  | 5335 |  | 
|  | 5336 | blkcg_deactivate_policy(bfqd->queue, &blkcg_policy_bfq); | 
|  | 5337 | #else | 
|  | 5338 | spin_lock_irq(&bfqd->lock); | 
|  | 5339 | bfq_put_async_queues(bfqd, bfqd->root_group); | 
|  | 5340 | kfree(bfqd->root_group); | 
|  | 5341 | spin_unlock_irq(&bfqd->lock); | 
|  | 5342 | #endif | 
|  | 5343 |  | 
|  | 5344 | kfree(bfqd); | 
|  | 5345 | } | 
|  | 5346 |  | 
|  | 5347 | static void bfq_init_root_group(struct bfq_group *root_group, | 
|  | 5348 | struct bfq_data *bfqd) | 
|  | 5349 | { | 
|  | 5350 | int i; | 
|  | 5351 |  | 
|  | 5352 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | 
|  | 5353 | root_group->entity.parent = NULL; | 
|  | 5354 | root_group->my_entity = NULL; | 
|  | 5355 | root_group->bfqd = bfqd; | 
|  | 5356 | #endif | 
|  | 5357 | root_group->rq_pos_tree = RB_ROOT; | 
|  | 5358 | for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) | 
|  | 5359 | root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; | 
|  | 5360 | root_group->sched_data.bfq_class_idle_last_service = jiffies; | 
|  | 5361 | } | 
|  | 5362 |  | 
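|  |  | /* | 
|  |  | * Allocate and initialize the per-device scheduler data: the permanent | 
|  |  | * oom fallback queue, the idle-slice timer, the various lists and | 
|  |  | * trees, the tunables set to their defaults, an optimistic initial | 
|  |  | * peak-rate guess, and finally the root group hierarchy. | 
|  |  | */ | 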
|  | 5363 | static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) | 
|  | 5364 | { | 
|  | 5365 | struct bfq_data *bfqd; | 
|  | 5366 | struct elevator_queue *eq; | 
|  | 5367 |  | 
|  | 5368 | eq = elevator_alloc(q, e); | 
|  | 5369 | if (!eq) | 
|  | 5370 | return -ENOMEM; | 
|  | 5371 |  | 
|  | 5372 | bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node); | 
|  | 5373 | if (!bfqd) { | 
|  | 5374 | kobject_put(&eq->kobj); | 
|  | 5375 | return -ENOMEM; | 
|  | 5376 | } | 
|  | 5377 | eq->elevator_data = bfqd; | 
|  | 5378 |  | 
|  | 5379 | spin_lock_irq(q->queue_lock); | 
|  | 5380 | q->elevator = eq; | 
|  | 5381 | spin_unlock_irq(q->queue_lock); | 
|  | 5382 |  | 
|  | 5383 | /* | 
|  | 5384 | * Our fallback bfqq if bfq_get_queue() runs into OOM issues. | 
|  | 5385 | * Grab a permanent reference to it, so that the normal code flow | 
|  | 5386 | * will not attempt to free it. | 
|  | 5387 | */ | 
|  | 5388 | bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0); | 
|  | 5389 | bfqd->oom_bfqq.ref++; | 
|  | 5390 | bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO; | 
|  | 5391 | bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE; | 
|  | 5392 | bfqd->oom_bfqq.entity.new_weight = | 
|  | 5393 | bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio); | 
|  | 5394 |  | 
|  | 5395 | /* oom_bfqq does not participate in bursts */ | 
|  | 5396 | bfq_clear_bfqq_just_created(&bfqd->oom_bfqq); | 
|  | 5397 |  | 
|  | 5398 | /* | 
|  | 5399 | * Trigger weight initialization, according to ioprio, at the | 
|  | 5400 | * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio | 
|  | 5401 | * class won't be changed any more. | 
|  | 5402 | */ | 
|  | 5403 | bfqd->oom_bfqq.entity.prio_changed = 1; | 
|  | 5404 |  | 
|  | 5405 | bfqd->queue = q; | 
|  | 5406 |  | 
|  | 5407 | INIT_LIST_HEAD(&bfqd->dispatch); | 
|  | 5408 |  | 
|  | 5409 | hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC, | 
|  | 5410 | HRTIMER_MODE_REL); | 
|  | 5411 | bfqd->idle_slice_timer.function = bfq_idle_slice_timer; | 
|  | 5412 |  | 
|  | 5413 | bfqd->queue_weights_tree = RB_ROOT; | 
|  | 5414 | bfqd->group_weights_tree = RB_ROOT; | 
|  | 5415 |  | 
|  | 5416 | INIT_LIST_HEAD(&bfqd->active_list); | 
|  | 5417 | INIT_LIST_HEAD(&bfqd->idle_list); | 
|  | 5418 | INIT_HLIST_HEAD(&bfqd->burst_list); | 
|  | 5419 |  | 
|  | 5420 | bfqd->hw_tag = -1; | 
|  | 5421 |  | 
|  | 5422 | bfqd->bfq_max_budget = bfq_default_max_budget; | 
|  | 5423 |  | 
|  | 5424 | bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; | 
|  | 5425 | bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; | 
|  | 5426 | bfqd->bfq_back_max = bfq_back_max; | 
|  | 5427 | bfqd->bfq_back_penalty = bfq_back_penalty; | 
|  | 5428 | bfqd->bfq_slice_idle = bfq_slice_idle; | 
|  | 5429 | bfqd->bfq_timeout = bfq_timeout; | 
|  | 5430 |  | 
|  | 5431 | bfqd->bfq_requests_within_timer = 120; | 
|  | 5432 |  | 
|  | 5433 | bfqd->bfq_large_burst_thresh = 8; | 
|  | 5434 | bfqd->bfq_burst_interval = msecs_to_jiffies(180); | 
|  | 5435 |  | 
|  | 5436 | bfqd->low_latency = true; | 
|  | 5437 |  | 
|  | 5438 | /* | 
|  | 5439 | * Trade-off between responsiveness and fairness. | 
|  | 5440 | */ | 
|  | 5441 | bfqd->bfq_wr_coeff = 30; | 
|  | 5442 | bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300); | 
|  | 5443 | bfqd->bfq_wr_max_time = 0; | 
|  | 5444 | bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000); | 
|  | 5445 | bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500); | 
|  | 5446 | bfqd->bfq_wr_max_softrt_rate = 7000; /* | 
|  | 5447 | * Approximate rate required | 
|  | 5448 | * to play back or record a | 
|  | 5449 | * high-definition compressed | 
|  | 5450 | * video. | 
|  | 5451 | */ | 
|  | 5452 | bfqd->wr_busy_queues = 0; | 
|  | 5453 |  | 
|  | 5454 | /* | 
|  | 5455 | * Begin by assuming, optimistically, that the device peak | 
|  | 5456 | * rate is equal to 2/3 of the highest reference rate. | 
|  | 5457 | */ | 
|  | 5458 | bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] * | 
|  | 5459 | ref_wr_duration[blk_queue_nonrot(bfqd->queue)]; | 
|  | 5460 | bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3; | 
|  | 5461 |  | 
|  | 5462 | spin_lock_init(&bfqd->lock); | 
|  | 5463 |  | 
|  | 5464 | /* | 
|  | 5465 | * The invocation of the next bfq_create_group_hierarchy | 
|  | 5466 | * function is the head of a chain of function calls | 
|  | 5467 | * (bfq_create_group_hierarchy->blkcg_activate_policy-> | 
|  | 5468 | * blk_mq_freeze_queue) that may lead to the invocation of the | 
|  | 5469 | * has_work hook function. For this reason, | 
|  | 5470 | * bfq_create_group_hierarchy is invoked only after all | 
|  | 5471 | * scheduler data has been initialized, apart from the fields | 
|  | 5472 | * that can be initialized only after invoking | 
|  | 5473 | * bfq_create_group_hierarchy. This, in particular, enables | 
|  | 5474 | * has_work to correctly return false. Of course, to avoid | 
|  | 5475 | * other inconsistencies, the blk-mq stack must then refrain | 
|  | 5476 | * from invoking further scheduler hooks before this init | 
|  | 5477 | * function is finished. | 
|  | 5478 | */ | 
|  | 5479 | bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node); | 
|  | 5480 | if (!bfqd->root_group) | 
|  | 5481 | goto out_free; | 
|  | 5482 | bfq_init_root_group(bfqd->root_group, bfqd); | 
|  | 5483 | bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); | 
|  | 5484 |  | 
|  | 5485 | wbt_disable_default(q); | 
|  | 5486 | return 0; | 
|  | 5487 |  | 
|  | 5488 | out_free: | 
|  | 5489 | kfree(bfqd); | 
|  | 5490 | kobject_put(&eq->kobj); | 
|  | 5491 | return -ENOMEM; | 
|  | 5492 | } | 
|  | 5493 |  | 
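|  |  | /* | 
|  |  | * Destroy/create the slab cache (bfq_pool) from which bfq_queue | 
|  |  | * objects are allocated. | 
|  |  | */ | 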
|  | 5494 | static void bfq_slab_kill(void) | 
|  | 5495 | { | 
|  | 5496 | kmem_cache_destroy(bfq_pool); | 
|  | 5497 | } | 
|  | 5498 |  | 
|  | 5499 | static int __init bfq_slab_setup(void) | 
|  | 5500 | { | 
|  | 5501 | bfq_pool = KMEM_CACHE(bfq_queue, 0); | 
|  | 5502 | if (!bfq_pool) | 
|  | 5503 | return -ENOMEM; | 
|  | 5504 | return 0; | 
|  | 5505 | } | 
|  | 5506 |  | 
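|  |  | /* | 
|  |  | * Helpers for the sysfs attributes below: print an unsigned value into | 
|  |  | * the attribute page, and parse a base-10 unsigned long back from it. | 
|  |  | */ | 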
|  | 5507 | static ssize_t bfq_var_show(unsigned int var, char *page) | 
|  | 5508 | { | 
|  | 5509 | return sprintf(page, "%u\n", var); | 
|  | 5510 | } | 
|  | 5511 |  | 
|  | 5512 | static int bfq_var_store(unsigned long *var, const char *page) | 
|  | 5513 | { | 
|  | 5514 | unsigned long new_val; | 
|  | 5515 | int ret = kstrtoul(page, 10, &new_val); | 
|  | 5516 |  | 
|  | 5517 | if (ret) | 
|  | 5518 | return ret; | 
|  | 5519 | *var = new_val; | 
|  | 5520 | return 0; | 
|  | 5521 | } | 
|  | 5522 |  | 
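|  |  | /* | 
|  |  | * In the show/store macros below, __CONV selects the unit conversion: | 
|  |  | * 0 = raw value, 1 = jiffies <-> milliseconds, 2 = nanoseconds <-> | 
|  |  | * milliseconds. | 
|  |  | */ | 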
|  | 5523 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\ | 
|  | 5524 | static ssize_t __FUNC(struct elevator_queue *e, char *page)		\ | 
|  | 5525 | {									\ | 
|  | 5526 | struct bfq_data *bfqd = e->elevator_data;			\ | 
|  | 5527 | u64 __data = __VAR;						\ | 
|  | 5528 | if (__CONV == 1)						\ | 
|  | 5529 | __data = jiffies_to_msecs(__data);			\ | 
|  | 5530 | else if (__CONV == 2)						\ | 
|  | 5531 | __data = div_u64(__data, NSEC_PER_MSEC);		\ | 
|  | 5532 | return bfq_var_show(__data, (page));				\ | 
|  | 5533 | } | 
|  | 5534 | SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2); | 
|  | 5535 | SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2); | 
|  | 5536 | SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0); | 
|  | 5537 | SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0); | 
|  | 5538 | SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2); | 
|  | 5539 | SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0); | 
|  | 5540 | SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1); | 
|  | 5541 | SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0); | 
|  | 5542 | SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0); | 
|  | 5543 | #undef SHOW_FUNCTION | 
|  | 5544 |  | 
|  | 5545 | #define USEC_SHOW_FUNCTION(__FUNC, __VAR)				\ | 
|  | 5546 | static ssize_t __FUNC(struct elevator_queue *e, char *page)		\ | 
|  | 5547 | {									\ | 
|  | 5548 | struct bfq_data *bfqd = e->elevator_data;			\ | 
|  | 5549 | u64 __data = __VAR;						\ | 
|  | 5550 | __data = div_u64(__data, NSEC_PER_USEC);			\ | 
|  | 5551 | return bfq_var_show(__data, (page));				\ | 
|  | 5552 | } | 
|  | 5553 | USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle); | 
|  | 5554 | #undef USEC_SHOW_FUNCTION | 
|  | 5555 |  | 
|  | 5556 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\ | 
|  | 5557 | static ssize_t								\ | 
|  | 5558 | __FUNC(struct elevator_queue *e, const char *page, size_t count)	\ | 
|  | 5559 | {									\ | 
|  | 5560 | struct bfq_data *bfqd = e->elevator_data;			\ | 
|  | 5561 | unsigned long __data, __min = (MIN), __max = (MAX);		\ | 
|  | 5562 | int ret;							\ | 
|  | 5563 | \ | 
|  | 5564 | ret = bfq_var_store(&__data, (page));				\ | 
|  | 5565 | if (ret)							\ | 
|  | 5566 | return ret;						\ | 
|  | 5567 | if (__data < __min)						\ | 
|  | 5568 | __data = __min;						\ | 
|  | 5569 | else if (__data > __max)					\ | 
|  | 5570 | __data = __max;						\ | 
|  | 5571 | if (__CONV == 1)						\ | 
|  | 5572 | *(__PTR) = msecs_to_jiffies(__data);			\ | 
|  | 5573 | else if (__CONV == 2)						\ | 
|  | 5574 | *(__PTR) = (u64)__data * NSEC_PER_MSEC;			\ | 
|  | 5575 | else								\ | 
|  | 5576 | *(__PTR) = __data;					\ | 
|  | 5577 | return count;							\ | 
|  | 5578 | } | 
|  | 5579 | STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, | 
|  | 5580 | INT_MAX, 2); | 
|  | 5581 | STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1, | 
|  | 5582 | INT_MAX, 2); | 
|  | 5583 | STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0); | 
|  | 5584 | STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1, | 
|  | 5585 | INT_MAX, 0); | 
|  | 5586 | STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2); | 
|  | 5587 | #undef STORE_FUNCTION | 
|  | 5588 |  | 
|  | 5589 | #define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)			\ | 
|  | 5590 | static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\ | 
|  | 5591 | {									\ | 
|  | 5592 | struct bfq_data *bfqd = e->elevator_data;			\ | 
|  | 5593 | unsigned long __data, __min = (MIN), __max = (MAX);		\ | 
|  | 5594 | int ret;							\ | 
|  | 5595 | \ | 
|  | 5596 | ret = bfq_var_store(&__data, (page));				\ | 
|  | 5597 | if (ret)							\ | 
|  | 5598 | return ret;						\ | 
|  | 5599 | if (__data < __min)						\ | 
|  | 5600 | __data = __min;						\ | 
|  | 5601 | else if (__data > __max)					\ | 
|  | 5602 | __data = __max;						\ | 
|  | 5603 | *(__PTR) = (u64)__data * NSEC_PER_USEC;				\ | 
|  | 5604 | return count;							\ | 
|  | 5605 | } | 
|  | 5606 | USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0, | 
|  | 5607 | UINT_MAX); | 
|  | 5608 | #undef USEC_STORE_FUNCTION | 
|  | 5609 |  | 
|  | 5610 | static ssize_t bfq_max_budget_store(struct elevator_queue *e, | 
|  | 5611 | const char *page, size_t count) | 
|  | 5612 | { | 
|  | 5613 | struct bfq_data *bfqd = e->elevator_data; | 
|  | 5614 | unsigned long __data; | 
|  | 5615 | int ret; | 
|  | 5616 |  | 
|  | 5617 | ret = bfq_var_store(&__data, (page)); | 
|  | 5618 | if (ret) | 
|  | 5619 | return ret; | 
|  | 5620 |  | 
|  | 5621 | if (__data == 0) | 
|  | 5622 | bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); | 
|  | 5623 | else { | 
|  | 5624 | if (__data > INT_MAX) | 
|  | 5625 | __data = INT_MAX; | 
|  | 5626 | bfqd->bfq_max_budget = __data; | 
|  | 5627 | } | 
|  | 5628 |  | 
|  | 5629 | bfqd->bfq_user_max_budget = __data; | 
|  | 5630 |  | 
|  | 5631 | return count; | 
|  | 5632 | } | 
|  | 5633 |  | 
|  | 5634 | /* | 
|  | 5635 | * Keeping this name to preserve compatibility with cfq's parameter | 
|  | 5636 | * names, but this timeout is used for both sync and async requests. | 
|  | 5637 | */ | 
|  | 5638 | static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, | 
|  | 5639 | const char *page, size_t count) | 
|  | 5640 | { | 
|  | 5641 | struct bfq_data *bfqd = e->elevator_data; | 
|  | 5642 | unsigned long __data; | 
|  | 5643 | int ret; | 
|  | 5644 |  | 
|  | 5645 | ret = bfq_var_store(&__data, (page)); | 
|  | 5646 | if (ret) | 
|  | 5647 | return ret; | 
|  | 5648 |  | 
|  | 5649 | if (__data < 1) | 
|  | 5650 | __data = 1; | 
|  | 5651 | else if (__data > INT_MAX) | 
|  | 5652 | __data = INT_MAX; | 
|  | 5653 |  | 
|  | 5654 | bfqd->bfq_timeout = msecs_to_jiffies(__data); | 
|  | 5655 | if (bfqd->bfq_user_max_budget == 0) | 
|  | 5656 | bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); | 
|  | 5657 |  | 
|  | 5658 | return count; | 
|  | 5659 | } | 
|  | 5660 |  | 
|  | 5661 | static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e, | 
|  | 5662 | const char *page, size_t count) | 
|  | 5663 | { | 
|  | 5664 | struct bfq_data *bfqd = e->elevator_data; | 
|  | 5665 | unsigned long __data; | 
|  | 5666 | int ret; | 
|  | 5667 |  | 
|  | 5668 | ret = bfq_var_store(&__data, (page)); | 
|  | 5669 | if (ret) | 
|  | 5670 | return ret; | 
|  | 5671 |  | 
|  | 5672 | if (__data > 1) | 
|  | 5673 | __data = 1; | 
|  | 5674 | if (!bfqd->strict_guarantees && __data == 1 | 
|  | 5675 | && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC) | 
|  | 5676 | bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC; | 
|  | 5677 |  | 
|  | 5678 | bfqd->strict_guarantees = __data; | 
|  | 5679 |  | 
|  | 5680 | return count; | 
|  | 5681 | } | 
|  | 5682 |  | 
|  | 5683 | static ssize_t bfq_low_latency_store(struct elevator_queue *e, | 
|  | 5684 | const char *page, size_t count) | 
|  | 5685 | { | 
|  | 5686 | struct bfq_data *bfqd = e->elevator_data; | 
|  | 5687 | unsigned long __data; | 
|  | 5688 | int ret; | 
|  | 5689 |  | 
|  | 5690 | ret = bfq_var_store(&__data, (page)); | 
|  | 5691 | if (ret) | 
|  | 5692 | return ret; | 
|  | 5693 |  | 
|  | 5694 | if (__data > 1) | 
|  | 5695 | __data = 1; | 
|  | 5696 | if (__data == 0 && bfqd->low_latency != 0) | 
|  | 5697 | bfq_end_wr(bfqd); | 
|  | 5698 | bfqd->low_latency = __data; | 
|  | 5699 |  | 
|  | 5700 | return count; | 
|  | 5701 | } | 
|  | 5702 |  | 
|  | 5703 | #define BFQ_ATTR(name) \ | 
|  | 5704 | __ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store) | 
|  | 5705 |  | 
|  | 5706 | static struct elv_fs_entry bfq_attrs[] = { | 
|  | 5707 | BFQ_ATTR(fifo_expire_sync), | 
|  | 5708 | BFQ_ATTR(fifo_expire_async), | 
|  | 5709 | BFQ_ATTR(back_seek_max), | 
|  | 5710 | BFQ_ATTR(back_seek_penalty), | 
|  | 5711 | BFQ_ATTR(slice_idle), | 
|  | 5712 | BFQ_ATTR(slice_idle_us), | 
|  | 5713 | BFQ_ATTR(max_budget), | 
|  | 5714 | BFQ_ATTR(timeout_sync), | 
|  | 5715 | BFQ_ATTR(strict_guarantees), | 
|  | 5716 | BFQ_ATTR(low_latency), | 
|  | 5717 | __ATTR_NULL | 
|  | 5718 | }; | 
|  | 5719 |  | 
|  | 5720 | static struct elevator_type iosched_bfq_mq = { | 
|  | 5721 | .ops.mq = { | 
|  | 5722 | .limit_depth		= bfq_limit_depth, | 
|  | 5723 | .prepare_request	= bfq_prepare_request, | 
|  | 5724 | .requeue_request        = bfq_finish_requeue_request, | 
|  | 5725 | .finish_request		= bfq_finish_requeue_request, | 
|  | 5726 | .exit_icq		= bfq_exit_icq, | 
|  | 5727 | .insert_requests	= bfq_insert_requests, | 
|  | 5728 | .dispatch_request	= bfq_dispatch_request, | 
|  | 5729 | .next_request		= elv_rb_latter_request, | 
|  | 5730 | .former_request		= elv_rb_former_request, | 
|  | 5731 | .allow_merge		= bfq_allow_bio_merge, | 
|  | 5732 | .bio_merge		= bfq_bio_merge, | 
|  | 5733 | .request_merge		= bfq_request_merge, | 
|  | 5734 | .requests_merged	= bfq_requests_merged, | 
|  | 5735 | .request_merged		= bfq_request_merged, | 
|  | 5736 | .has_work		= bfq_has_work, | 
|  | 5737 | .depth_updated		= bfq_depth_updated, | 
|  | 5738 | .init_hctx		= bfq_init_hctx, | 
|  | 5739 | .init_sched		= bfq_init_queue, | 
|  | 5740 | .exit_sched		= bfq_exit_queue, | 
|  | 5741 | }, | 
|  | 5742 |  | 
|  | 5743 | .uses_mq =		true, | 
|  | 5744 | .icq_size =		sizeof(struct bfq_io_cq), | 
|  | 5745 | .icq_align =		__alignof__(struct bfq_io_cq), | 
|  | 5746 | .elevator_attrs =	bfq_attrs, | 
|  | 5747 | .elevator_name =	"bfq", | 
|  | 5748 | .elevator_owner =	THIS_MODULE, | 
|  | 5749 | }; | 
|  | 5750 | MODULE_ALIAS("bfq-iosched"); | 
|  | 5751 |  | 
|  | 5752 | static int __init bfq_init(void) | 
|  | 5753 | { | 
|  | 5754 | int ret; | 
|  | 5755 |  | 
|  | 5756 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | 
|  | 5757 | ret = blkcg_policy_register(&blkcg_policy_bfq); | 
|  | 5758 | if (ret) | 
|  | 5759 | return ret; | 
|  | 5760 | #endif | 
|  | 5761 |  | 
|  | 5762 | ret = -ENOMEM; | 
|  | 5763 | if (bfq_slab_setup()) | 
|  | 5764 | goto err_pol_unreg; | 
|  | 5765 |  | 
|  | 5766 | /* | 
|  | 5767 | * Times to load large popular applications for the typical | 
|  | 5768 | * systems installed on the reference devices (see the | 
|  | 5769 | * comments before the definition of the next | 
|  | 5770 | * array). Actually, we use slightly lower values, as the | 
|  | 5771 | * estimated peak rate tends to be smaller than the actual | 
|  | 5772 | * peak rate.  The reason for this last fact is that estimates | 
|  | 5773 | * are computed over much shorter time intervals than the long | 
|  | 5774 | * intervals typically used for benchmarking. Why? First, to | 
|  | 5775 | * adapt more quickly to variations. Second, because an I/O | 
|  | 5776 | * scheduler cannot rely on a peak-rate-evaluation workload to | 
|  | 5777 | * be run for a long time. | 
|  | 5778 | */ | 
|  | 5779 | ref_wr_duration[0] = msecs_to_jiffies(7000); /* actually 8 sec */ | 
|  | 5780 | ref_wr_duration[1] = msecs_to_jiffies(2500); /* actually 3 sec */ | 
|  | 5781 |  | 
|  | 5782 | ret = elv_register(&iosched_bfq_mq); | 
|  | 5783 | if (ret) | 
|  | 5784 | goto slab_kill; | 
|  | 5785 |  | 
|  | 5786 | return 0; | 
|  | 5787 |  | 
|  | 5788 | slab_kill: | 
|  | 5789 | bfq_slab_kill(); | 
|  | 5790 | err_pol_unreg: | 
|  | 5791 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | 
|  | 5792 | blkcg_policy_unregister(&blkcg_policy_bfq); | 
|  | 5793 | #endif | 
|  | 5794 | return ret; | 
|  | 5795 | } | 
|  | 5796 |  | 
|  | 5797 | static void __exit bfq_exit(void) | 
|  | 5798 | { | 
|  | 5799 | elv_unregister(&iosched_bfq_mq); | 
|  | 5800 | #ifdef CONFIG_BFQ_GROUP_IOSCHED | 
|  | 5801 | blkcg_policy_unregister(&blkcg_policy_bfq); | 
|  | 5802 | #endif | 
|  | 5803 | bfq_slab_kill(); | 
|  | 5804 | } | 
|  | 5805 |  | 
|  | 5806 | module_init(bfq_init); | 
|  | 5807 | module_exit(bfq_exit); | 
|  | 5808 |  | 
|  | 5809 | MODULE_AUTHOR("Paolo Valente"); | 
|  | 5810 | MODULE_LICENSE("GPL"); | 
|  | 5811 | MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler"); |