// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request. Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
	struct bio *orig_bio = *bio;

	blk_queue_bounce(rq->q, bio);

	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, *bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, *bio)) {
			if (orig_bio != *bio) {
				bio_put(*bio);
				*bio = orig_bio;
			}
			return -EINVAL;
		}

		rq->biotail->bi_next = *bio;
		rq->biotail = *bio;
		rq->__data_len += (*bio)->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
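
/*
 * Illustrative sketch, not part of this file: how a caller might use the
 * double-pointer contract of blk_rq_append_bio().  The function may swap in
 * a bounce bio, so the caller passes the address of its bio pointer; on
 * failure the original pointer is handed back and the caller still owns it,
 * typically freeing it as blk_rq_map_kern() below does for kernel bios.
 * The function name here is hypothetical.
 */
static int example_append_bio(struct request *rq, struct bio *bio)
{
	int ret;

	/* May replace 'bio' with a bounce bio linked into the request. */
	ret = blk_rq_append_bio(rq, &bio);
	if (ret) {
		/* 'bio' again points at the caller's original bio; drop it. */
		bio_put(bio);
		return ret;
	}

	/* On success the request owns the (possibly bounced) bio chain. */
	return 0;
}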

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	ret = blk_rq_append_bio(rq, &bio);
	if (ret) {
		__blk_rq_unmap_user(orig_bio);
		return ret;
	}
	bio_get(bio);

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
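
/*
 * Illustrative sketch, not part of this file: mapping a user-space iovec
 * array into a passthrough request, roughly as SG_IO-style ioctl paths do.
 * import_iovec() builds the iov_iter and may allocate a larger iovec array,
 * which must be kfree()d afterwards, so this sketch assumes <linux/slab.h>.
 * The function name and parameters are hypothetical.
 */
static int example_map_user_iovec(struct request_queue *q, struct request *rq,
				  const struct iovec __user *uvec,
				  unsigned int nr_segs)
{
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov = fast_iov;
	struct iov_iter iter;
	ssize_t bytes;
	int ret;

	bytes = import_iovec(rq_data_dir(rq), uvec, nr_segs, UIO_FASTIOV,
			     &iov, &iter);
	if (bytes < 0)
		return bytes;

	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);

	/* import_iovec() leaves 'iov' NULL when fast_iov was sufficient. */
	kfree(iov);
	return ret;
}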

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
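
/*
 * Illustrative sketch, not part of this file: the usual pairing of
 * blk_rq_map_user() and blk_rq_unmap_user() in a passthrough submitter.
 * rq->bio must be sampled right after mapping, because I/O completion may
 * have changed it by the time the mapping is torn down.  Request allocation
 * and submission are elided; the function name is hypothetical.
 */
static int example_map_user_buffer(struct request_queue *q, struct request *rq,
				   void __user *ubuf, unsigned long len)
{
	struct bio *orig_bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	/* Remember the head of the mapped bio list before submitting. */
	orig_bio = rq->bio;

	/* ... submit the request and wait for completion here ... */

	/* Still in process context: copy back / release the mapping. */
	return blk_rq_unmap_user(orig_bio);
}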

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio, *orig_bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	orig_bio = bio;
	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(orig_bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
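
/*
 * Illustrative sketch, not part of this file: attaching a kernel buffer to
 * a passthrough request with blk_rq_map_kern().  Misaligned or on-stack
 * buffers are handled transparently with a bounce copy (see do_copy above),
 * but the length must not exceed the queue's max_hw_sectors limit.
 * Allocation of 'rq' and 'buf' is assumed to have happened elsewhere; the
 * function name is hypothetical.
 */
static int example_map_kernel_buffer(struct request_queue *q,
				     struct request *rq,
				     void *buf, unsigned int len)
{
	int ret;

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (ret)
		return ret;

	/*
	 * Unlike user mappings, no blk_rq_unmap_user() call is needed here;
	 * the bio set up by blk_rq_map_kern() is released from its own
	 * completion handler when the request finishes.
	 */

	/* ... submit the request here ... */
	return 0;
}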