/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is now correct
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
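
/*
 * Illustrative only: for a 4KiB request starting at sector 1000,
 * rq_hash_key() yields 1000 + 8 = 1008 (the first sector *after* the
 * request), so a bio beginning at sector 1008 hashes into the same
 * bucket and can be found for a back merge.  ELV_HASH_BLOCK() drops
 * the low 3 bits before hashing, so keys within the same 8-sector
 * block land in the same bucket.
 */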

/*
 * Query the io scheduler to see if bio, issued by the current process,
 * may be merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_allow_merge_fn)
		return e->type->ops.elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * Can we safely merge with this request?
 */
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		spin_unlock(&elv_list_lock);
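		/*
		 * Not registered yet - try loading the matching module.
		 * Schedulers are conventionally packaged as
		 * "<name>-iosched", e.g. "cfq" loads cfq-iosched.
		 */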
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static int elevator_init_queue(struct request_queue *q,
			       struct elevator_queue *eq)
{
	eq->elevator_data = eq->type->ops.elevator_init_fn(q);
	if (eq->elevator_data)
		return 0;
	return -ENOMEM;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
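
/*
 * Example (kernel command line): booting with "elevator=deadline"
 * makes deadline the default scheduler for every queue that is
 * initialized without an explicit name.
 */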

static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
					     struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
				GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int err;

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. "
				"Using noop.\n");
			e = elevator_get("noop");
			if (unlikely(!e))
				return -ENOENT;
		}
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	err = elevator_init_queue(q, eq);
	if (err) {
		kobject_put(&eq->kobj);
		return err;
	}

	q->elevator = eq;
	return 0;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->type->ops.elevator_exit_fn)
		e->type->ops.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
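
/*
 * Usage sketch (hypothetical scheduler, not part of this file): an
 * elevator that keeps its pending requests position-sorted pairs these
 * helpers in its add/remove paths, e.g.:
 *
 *	static void foo_add_request(struct request_queue *q,
 *				    struct request *rq)
 *	{
 *		struct foo_data *fd = q->elevator->elevator_data;
 *
 *		elv_rb_add(&fd->sort_list, rq);
 *	}
 *
 * with a matching elv_rb_del(&fd->sort_list, rq) on dispatch, while
 * elv_rb_find() answers "is there a pending request starting at
 * sector s" for front-merge lookups.
 */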

/*
 * Insert rq into the dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
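	/*
	 * Scan backwards from the tail for the request that rq should
	 * follow: stop at barriers, started requests and data-direction
	 * changes, and keep requests at or beyond the boundary sector
	 * ahead of those that wrap back below it (one-way elevator order).
	 */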
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
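
/*
 * Usage sketch (hypothetical names): an elevator's dispatch_fn usually
 * picks its best candidate and hands it over in sorted position, along
 * the lines of:
 *
 *	static int foo_dispatch(struct request_queue *q, int force)
 *	{
 *		struct request *rq = foo_select_best(q);
 *
 *		if (!rq)
 *			return 0;
 *		elv_dispatch_sort(q, rq);
 *		return 1;
 *	}
 */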

/*
 * Insert rq into the dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
		ret = blk_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.elevator_merge_fn)
		return e->type->ops.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
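
/*
 * Caller pattern (simplified sketch of the request submission path):
 * the block layer consults elv_merge() before allocating a fresh
 * request, roughly:
 *
 *	el_ret = elv_merge(q, &req, bio);
 *	if (el_ret == ELEVATOR_BACK_MERGE)
 *		... append bio to req ...
 *	else if (el_ret == ELEVATOR_FRONT_MERGE)
 *		... prepend bio to req ...
 *	else
 *		... allocate and initialize a new request for bio ...
 */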

/*
 * Attempt to do an insertion back merge.  Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise.
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
	if (__rq && blk_attempt_req_merge(q, __rq, rq))
		return true;

	return false;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_merged_fn)
		e->type->ops.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->type->ops.elevator_merge_req_fn)
		e->type->ops.elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_bio_merged_fn)
		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * It already went through dequeue; we need to decrement the
	 * in_flight count again.
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	lockdep_assert_held(q->queue_lock);

	while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}

void elv_quiesce_start(struct request_queue *q)
{
	if (!q->elevator)
		return;

	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	blk_drain_queue(q, false);
}

void elv_quiesce_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	rq->q = q;

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundaries, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS ||
		    (rq->cmd_flags & REQ_DISCARD)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
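		/* Merge failed - fall through and insert-sort the request. */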
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
		       !(rq->cmd_flags & REQ_DISCARD));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_latter_req_fn)
		return e->type->ops.elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_former_req_fn)
		return e->type->ops.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_set_req_fn)
		return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_put_req_fn)
		e->type->ops.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_may_queue_fn)
		return e->type->ops.elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * The request has been released by the driver; its io must be done.
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->type->ops.elevator_completed_req_fn)
			e->type->ops.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int __elv_register_queue(struct request_queue *q, struct elevator_queue *e)
{
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}

int elv_register_queue(struct request_queue *q)
{
	return __elv_register_queue(q, q->elevator);
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);

int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
	    (!*chosen_elevator &&
	     !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
		def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
	       def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
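
/*
 * Registration sketch (hypothetical scheduler): a scheduler module
 * fills in an elevator_type and registers it from its init hook, along
 * the lines of:
 *
 *	static struct elevator_type iosched_foo = {
 *		.ops = {
 *			.elevator_merge_fn	= foo_merge,
 *			.elevator_dispatch_fn	= foo_dispatch,
 *			.elevator_add_req_fn	= foo_add_request,
 *			.elevator_init_fn	= foo_init_queue,
 *			.elevator_exit_fn	= foo_exit_queue,
 *		},
 *		.elevator_name	= "foo",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return elv_register(&iosched_foo);
 *	}
 */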

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * Switch to the new_e io scheduler.  Be careful not to introduce
 * deadlocks - we don't free the old io scheduler before we have
 * allocated what we need for the new one.  This way we have a chance
 * of going back to the old one, if the new one fails init for some
 * reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old_elevator, *e;
	int err;

	/* allocate new elevator */
	e = elevator_alloc(q, new_e);
	if (!e)
		return -ENOMEM;

	err = elevator_init_queue(q, e);
	if (err) {
		kobject_put(&e->kobj);
		return err;
	}

	/* turn on BYPASS and drain all requests w/ elevator private data */
	elv_quiesce_start(q);

	/* unregister old queue, register new one and kill old elevator */
	if (q->elevator->registered) {
		elv_unregister_queue(q);
		err = __elv_register_queue(q, e);
		if (err)
			goto fail_register;
	}

	/* done, clear io_cq's, switch elevators and turn off BYPASS */
	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);
	old_elevator = q->elevator;
	q->elevator = e;
	spin_unlock_irq(q->queue_lock);

	elevator_exit(old_elevator);
	elv_quiesce_end(q);

	blk_add_trace_msg(q, "elv switch: %s", e->type->elevator_name);

	return 0;

fail_register:
	/*
	 * Switch failed: exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir).
	 */
	elevator_exit(e);
	elv_register_queue(q);
	elv_quiesce_end(q);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int __elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name));
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}

int elevator_change(struct request_queue *q, const char *name)
{
	int ret;

	/* Protect q->elevator from elevator_init() */
	mutex_lock(&q->sysfs_lock);
	ret = __elevator_change(q, name);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
EXPORT_SYMBOL(elevator_change);
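
/*
 * Typical runtime switch (hypothetical device name): from user space
 * this is reached through sysfs, e.g.
 *
 *	echo deadline > /sys/block/sda/queue/scheduler
 *
 * while in-kernel callers can invoke elevator_change(q, "deadline")
 * directly.
 */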

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = __elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(name+len, "\n");
	return len;
}
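
/*
 * Example output (assuming the usual built-in schedulers, registered
 * in link order, with cfq active):
 *
 *	noop deadline [cfq]
 */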

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);