// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe I/O command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/module.h>
#include "nvmet.h"

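/*
 * Fill the namespace-specific atomic write and I/O optimization fields of
 * the Identify Namespace data structure from the backing device's queue
 * limits.
 */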
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
{
        const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
        /* Number of logical blocks per physical block. */
        const u32 lpp = ql->physical_block_size / ql->logical_block_size;
        /* Logical blocks per physical block, 0's based. */
        const __le16 lpp0b = to0based(lpp);

        /*
         * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
         * NAWUPF, and NACWU are defined for this namespace and should be
         * used by the host for this namespace instead of the AWUN, AWUPF,
         * and ACWU fields in the Identify Controller data structure. If
         * any of these fields are zero that means that the corresponding
         * field from the identify controller data structure should be used.
         */
        id->nsfeat |= 1 << 1;
        id->nawun = lpp0b;
        id->nawupf = lpp0b;
        id->nacwu = lpp0b;

        /*
         * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
         * NOWS are defined for this namespace and should be used by
         * the host for I/O optimization.
         */
        id->nsfeat |= 1 << 4;
        /* NPWG = Namespace Preferred Write Granularity. 0's based */
        id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev));
        /* NPWA = Namespace Preferred Write Alignment. 0's based */
        id->npwa = id->npwg;
        /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
        id->npdg = to0based(ql->discard_granularity / ql->logical_block_size);
        /* NPDA = Namespace Preferred Deallocate Alignment */
        id->npda = id->npdg;
        /* NOWS = Namespace Optimal Write Size */
        id->nows = to0based(ql->io_opt / ql->logical_block_size);
}

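/*
 * Open the backing block device for a namespace and record its size and
 * logical block size.
 */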
int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
{
        int ret;

        ns->bdev = blkdev_get_by_path(ns->device_path,
                        FMODE_READ | FMODE_WRITE, NULL);
        if (IS_ERR(ns->bdev)) {
                ret = PTR_ERR(ns->bdev);
                if (ret != -ENOTBLK) {
                        pr_err("failed to open block device %s: (%ld)\n",
                                        ns->device_path, PTR_ERR(ns->bdev));
                }
                ns->bdev = NULL;
                return ret;
        }
        ns->size = i_size_read(ns->bdev->bd_inode);
        ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
        return 0;
}

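/* Release the reference to the backing block device, if one is held. */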
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
        if (ns->bdev) {
                blkdev_put(ns->bdev, FMODE_WRITE | FMODE_READ);
                ns->bdev = NULL;
        }
}

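/*
 * Translate a block layer status code into an NVMe status code and record
 * the error location and, for I/O commands, the failing LBA in the request.
 */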
static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
{
        u16 status = NVME_SC_SUCCESS;

        if (likely(blk_sts == BLK_STS_OK))
                return status;
        /*
         * Right now there exists an M : 1 mapping from block layer errors
         * to NVMe status codes (see nvme_error_status()). For consistency,
         * when we reverse map we use the most appropriate NVMe status code
         * from the group of NVMe status codes used in nvme_error_status().
         */
        switch (blk_sts) {
        case BLK_STS_NOSPC:
                status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
                req->error_loc = offsetof(struct nvme_rw_command, length);
                break;
        case BLK_STS_TARGET:
                status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
                req->error_loc = offsetof(struct nvme_rw_command, slba);
                break;
        case BLK_STS_NOTSUPP:
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                switch (req->cmd->common.opcode) {
                case nvme_cmd_dsm:
                case nvme_cmd_write_zeroes:
                        status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
                        break;
                default:
                        status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
                }
                break;
        case BLK_STS_MEDIUM:
                status = NVME_SC_ACCESS_DENIED;
                req->error_loc = offsetof(struct nvme_rw_command, nsid);
                break;
        case BLK_STS_IOERR:
                /* fallthru */
        default:
                status = NVME_SC_INTERNAL | NVME_SC_DNR;
                req->error_loc = offsetof(struct nvme_common_command, opcode);
        }

        switch (req->cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
                req->error_slba = le64_to_cpu(req->cmd->rw.slba);
                break;
        case nvme_cmd_write_zeroes:
                req->error_slba =
                        le64_to_cpu(req->cmd->write_zeroes.slba);
                break;
        default:
                req->error_slba = 0;
        }
        return status;
}

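/*
 * Completion handler for submitted bios: map the bio status to an NVMe
 * status, complete the request, and free the bio unless it is the
 * request's inline bio.
 */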
static void nvmet_bio_done(struct bio *bio)
{
        struct nvmet_req *req = bio->bi_private;

        nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
        if (bio != &req->b.inline_bio)
                bio_put(bio);
}

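/*
 * Execute an NVMe read or write command by mapping the request's
 * scatterlist onto one or more chained bios and submitting them to the
 * backing block device.
 */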
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
        int sg_cnt = req->sg_cnt;
        struct bio *bio;
        struct scatterlist *sg;
        sector_t sector;
        int op, op_flags = 0, i;

        if (!req->sg_cnt) {
                nvmet_req_complete(req, 0);
                return;
        }

        if (req->cmd->rw.opcode == nvme_cmd_write) {
                op = REQ_OP_WRITE;
                op_flags = REQ_SYNC | REQ_IDLE;
                if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
                        op_flags |= REQ_FUA;
        } else {
                op = REQ_OP_READ;
        }

        if (is_pci_p2pdma_page(sg_page(req->sg)))
                op_flags |= REQ_NOMERGE;

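        /* Convert the start LBA from logical blocks to 512-byte sectors. */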
        sector = le64_to_cpu(req->cmd->rw.slba);
        sector <<= (req->ns->blksize_shift - 9);

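        /*
         * Small transfers fit in the request's pre-allocated inline bio;
         * larger transfers need a freshly allocated bio.
         */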
        if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
                bio = &req->b.inline_bio;
                bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
        } else {
                bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
        }
        bio_set_dev(bio, req->ns->bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_private = req;
        bio->bi_end_io = nvmet_bio_done;
        bio_set_op_attrs(bio, op, op_flags);

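        /*
         * Map each scatterlist segment into the bio; whenever a bio fills
         * up, chain a new one to it and submit the full bio.
         */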
        for_each_sg(req->sg, sg, req->sg_cnt, i) {
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        struct bio *prev = bio;

                        bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
                        bio_set_dev(bio, req->ns->bdev);
                        bio->bi_iter.bi_sector = sector;
                        bio_set_op_attrs(bio, op, op_flags);

                        bio_chain(bio, prev);
                        submit_bio(prev);
                }

                sector += sg->length >> 9;
                sg_cnt--;
        }

        submit_bio(bio);
}

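/* Execute an NVMe Flush command as an empty preflush bio. */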
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
{
        struct bio *bio = &req->b.inline_bio;

        bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
        bio_set_dev(bio, req->ns->bdev);
        bio->bi_private = req;
        bio->bi_end_io = nvmet_bio_done;
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

        submit_bio(bio);
}

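/* Synchronously flush the backing device and map the result to an NVMe status. */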
u16 nvmet_bdev_flush(struct nvmet_req *req)
{
        if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL))
                return NVME_SC_INTERNAL | NVME_SC_DNR;
        return 0;
}

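/*
 * Issue a discard for a single DSM range, accumulating the resulting bios
 * in *bio so the caller can submit them as one chain.
 */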
static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
                struct nvme_dsm_range *range, struct bio **bio)
{
        struct nvmet_ns *ns = req->ns;
        int ret;

        ret = __blkdev_issue_discard(ns->bdev,
                        le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
                        le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
                        GFP_KERNEL, 0, bio);
        if (ret && ret != -EOPNOTSUPP) {
                req->error_slba = le64_to_cpu(range->slba);
                return errno_to_nvme_status(req, ret);
        }
        return NVME_SC_SUCCESS;
}

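/*
 * Copy each DSM range from the command data and discard it; submit the
 * accumulated bio chain, or complete the request immediately if nothing
 * was queued.
 */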
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
{
        struct nvme_dsm_range range;
        struct bio *bio = NULL;
        int i;
        u16 status;

        for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
                status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
                                sizeof(range));
                if (status)
                        break;

                status = nvmet_bdev_discard_range(req, &range, &bio);
                if (status)
                        break;
        }

        if (bio) {
                bio->bi_private = req;
                bio->bi_end_io = nvmet_bio_done;
                if (status) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                } else {
                        submit_bio(bio);
                }
        } else {
                nvmet_req_complete(req, status);
        }
}

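/*
 * Dispatch a Dataset Management command: only the Deallocate attribute is
 * implemented; the integral read/write hints are accepted but ignored.
 */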
static void nvmet_bdev_execute_dsm(struct nvmet_req *req)
{
        switch (le32_to_cpu(req->cmd->dsm.attributes)) {
        case NVME_DSMGMT_AD:
                nvmet_bdev_execute_discard(req);
                return;
        case NVME_DSMGMT_IDR:
        case NVME_DSMGMT_IDW:
        default:
                /* Not supported yet */
                nvmet_req_complete(req, 0);
                return;
        }
}

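/*
 * Execute a Write Zeroes command by zeroing out the LBA range on the
 * backing device; the request completes from the bio completion handler,
 * or directly if no bio was needed.
 */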
static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
        struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
        struct bio *bio = NULL;
        sector_t sector;
        sector_t nr_sector;
        int ret;

        sector = le64_to_cpu(write_zeroes->slba) <<
                (req->ns->blksize_shift - 9);
        nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
                (req->ns->blksize_shift - 9));

        ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
                        GFP_KERNEL, &bio, 0);
        if (bio) {
                bio->bi_private = req;
                bio->bi_end_io = nvmet_bio_done;
                submit_bio(bio);
        } else {
                nvmet_req_complete(req, errno_to_nvme_status(req, ret));
        }
}

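/*
 * Set up the execute handler and expected data length for a bdev-backed
 * I/O command, rejecting opcodes this backend does not implement.
 */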
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;

        switch (cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
                req->execute = nvmet_bdev_execute_rw;
                req->data_len = nvmet_rw_len(req);
                return 0;
        case nvme_cmd_flush:
                req->execute = nvmet_bdev_execute_flush;
                req->data_len = 0;
                return 0;
        case nvme_cmd_dsm:
                req->execute = nvmet_bdev_execute_dsm;
                req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
                        sizeof(struct nvme_dsm_range);
                return 0;
        case nvme_cmd_write_zeroes:
                req->execute = nvmet_bdev_execute_write_zeroes;
                req->data_len = 0;
                return 0;
        default:
                pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
                                req->sq->qid);
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
}