// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

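/*
 * Split a discard/secure-erase bio that exceeds the queue's discard
 * limits. The front portion is returned as a new bio and the remainder
 * stays in @bio; NULL is returned when no split is needed. *nsegs is set
 * to the number of segments (always 1 for a discard).
 */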
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

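/*
 * As above, but bounded by the queue's max_write_zeroes_sectors limit; a
 * zero limit means WRITE ZEROES is unsupported and no split is attempted.
 */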
static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

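/*
 * Same contract again, bounded by the max_write_same_sectors limit.
 */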
static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

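/*
 * Maximum number of sectors a bio starting at this sector may carry,
 * rounded down so that any split stays aligned to the logical block size.
 */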
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}

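/*
 * Walk the bio's bvecs and accumulate physical segments until one of the
 * queue limits (max sectors, max segments, max segment size, SG gap)
 * would be exceeded, then split there. The merging rules mirror
 * __blk_recalc_rq_segments() below, so the segment count reported via
 * @segs stays consistent with a later recount.
 */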
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	unsigned front_seg_size = bio->bi_seg_front_size;
	bool do_split = true;
	struct bio *new = NULL;
	const unsigned max_sectors = get_max_io_size(q, bio);

	bio_for_each_segment(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < queue_max_segments(q) &&
			    sectors < max_sectors) {
				nsegs++;
				sectors = max_sectors;
			}
			if (sectors)
				goto split;
			/* Make this single bvec the first segment */
		}

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bvprv;
			sectors += bv.bv_len >> 9;

			if (nsegs == 1 && seg_size > front_seg_size)
				front_seg_size = seg_size;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		bvprvp = &bvprv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;

		if (nsegs == 1 && seg_size > front_seg_size)
			front_seg_size = seg_size;
	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	bio->bi_seg_front_size = front_seg_size;
	if (seg_size > bio->bi_seg_back_size)
		bio->bi_seg_back_size = seg_size;

	return do_split ? new : NULL;
}

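/**
 * blk_queue_split - split a bio that exceeds the queue's limits
 * @q:   the request queue the bio is being sent to
 * @bio: in/out: bio to check; on return, the front split that fits @q
 *
 * If *bio is too big, the front split is chained to the remainder, the
 * remainder is resubmitted via generic_make_request(), and *bio is
 * updated to the front split. A typical (illustrative) call site is the
 * top of a driver's make_request_fn:
 *
 *	blk_queue_split(q, &bio);
 *	... bio now fits the queue limits ...
 */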
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	struct bio *split, *res;
	unsigned nsegs;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, q->bio_split, &nsegs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
		break;
	}

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

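/*
 * Count the physical segments in a chain of bios by replaying the same
 * mergeability checks used at split time. With @no_sg_merge set, every
 * bvec counts as its own segment.
 */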
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

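/*
 * Recompute bi_phys_segments for a single bio. The bio is temporarily
 * unlinked from its chain so only its own segments are counted.
 */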
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt = bio_segments(bio);

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

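/*
 * Return 1 if the last bvec of @bio and the first bvec of @nxt can share
 * a single physical segment under the queue's clustering rules.
 */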
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_get_last_bvec(bio, &end_bv);
	bio_get_first_bvec(nxt, &nxt_bv);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

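/*
 * Fold one bvec into the scatterlist: either extend the current sg entry
 * when clustering allows it, or start a new entry.
 */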
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = sglist;
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int cluster = blk_queue_cluster(q), nsegs = 0;

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * Map a request to a scatterlist; return the number of sg entries set up.
 * The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of
	 * segments is bigger than the request's number of physical
	 * segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

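/*
 * Illustrative use of blk_rq_map_sg() from a driver's prep path (names
 * hypothetical):
 *
 *	struct scatterlist sgl[MY_MAX_SEGS];
 *	int nents;
 *
 *	sg_init_table(sgl, MY_MAX_SEGS);
 *	nents = blk_rq_map_sg(q, rq, sgl);
 *
 * The first nents entries of sgl[] are then ready for dma_map_sg().
 */
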
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump the
	 * physical segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(q, req);
	return 0;
}

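/*
 * ll_back_merge_fn()/ll_front_merge_fn(): may @bio be appended to or
 * prepended to @req? Check request/bio gaps, integrity gaps and the size
 * limit, then let ll_new_hw_segment() enforce the segment budget.
 */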
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload; it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

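/*
 * Merge two discard requests by summing their range counts; on a
 * multi-range capable queue the ranges need not be contiguous.
 */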
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued request.
	 * We can't merge them if so.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios. It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(req->q, cpu, part);
		part_dec_in_flight(req->q, part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver treats every bio as a range
 * and sends them to the controller together. The ranges need not be
 * contiguous.
 * Otherwise, the bios/requests are handled the same as others, which
 * must be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

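/*
 * Decide how @next could fold into @req: as an extra discard range, or as
 * a back merge when the two requests are physically contiguous.
 */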
enum elv_merge blk_try_req_merge(struct request *req, struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue-wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!q->mq_ops)
		lockdep_assert_held(q->queue_lock);

	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merging requests with different write hints, or a
	 * hinted request with non-hinted IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	/*
	 * If we are allowed to merge, append the bio list from next to rq
	 * and release next. merge_requests_fn will have updated the
	 * segment counts; update the sector counts here. Handle DISCARDs
	 * separately, as they have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have done either a back or a front merge. Use
	 * the earlier start_time of the two requests as the merged
	 * request's start_time for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct elevator_queue *e = q->elevator;
	struct request *free;

	if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
		if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
			return 0;

	free = attempt_merge(q, rq, next);
	if (free) {
		__blk_put_request(q, free);
		return 1;
	}

	return 0;
}

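/*
 * Check the invariants any bio/request merge must satisfy, before the
 * elevator decides where (front or back) the bio could go.
 */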
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
		return false;

	/* only merge an integrity-protected bio into a likewise-protected rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merging with a different write hint, or a hinted
	 * request with non-hinted IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	return true;
}

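/*
 * Classify how @bio could merge with @rq: as a discard range, a back
 * merge when the bio starts right where @rq ends, or a front merge when
 * it ends right where @rq begins.
 */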
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}