// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Over Fabrics Target File I/O commands implementation.
 * Copyright (c) 2017-2018 Western Digital Corporation or its
 * affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
#include <linux/fs.h>
#include "nvmet.h"

#define NVMET_MAX_MPOOL_BVEC	16
#define NVMET_MIN_MPOOL_OBJ	16

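/*
 * Tear down a file-backed namespace: drain any buffered-io work still
 * queued, release the bvec mempool and slab cache, and drop the file
 * reference.  The ns->file check makes this safe to call on a namespace
 * that was never (fully) enabled.
 */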
void nvmet_file_ns_disable(struct nvmet_ns *ns)
{
        if (ns->file) {
                if (ns->buffered_io)
                        flush_workqueue(buffered_io_wq);
                mempool_destroy(ns->bvec_pool);
                ns->bvec_pool = NULL;
                kmem_cache_destroy(ns->bvec_cache);
                ns->bvec_cache = NULL;
                fput(ns->file);
                ns->file = NULL;
        }
}

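/*
 * Open the backing file and size the namespace from its attributes.
 * O_DIRECT is used unless buffered I/O was requested.  The bvec slab
 * cache and mempool created here back the emergency allocation path in
 * nvmet_file_execute_rw().
 */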
int nvmet_file_ns_enable(struct nvmet_ns *ns)
{
        int flags = O_RDWR | O_LARGEFILE;
        struct kstat stat;
        int ret;

        if (!ns->buffered_io)
                flags |= O_DIRECT;

        ns->file = filp_open(ns->device_path, flags, 0);
        if (IS_ERR(ns->file)) {
                ret = PTR_ERR(ns->file);
                pr_err("failed to open file %s: (%d)\n",
                       ns->device_path, ret);
                ns->file = NULL;
                return ret;
        }

        ret = vfs_getattr(&ns->file->f_path,
                          &stat, STATX_SIZE, AT_STATX_FORCE_SYNC);
        if (ret)
                goto err;

        ns->size = stat.size;
        /*
         * i_blkbits can be greater than the universally accepted upper bound,
         * so make sure we export a sane namespace lba_shift.
         */
        ns->blksize_shift = min_t(u8,
                        file_inode(ns->file)->i_blkbits, 12);

        ns->bvec_cache = kmem_cache_create("nvmet-bvec",
                        NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!ns->bvec_cache) {
                ret = -ENOMEM;
                goto err;
        }

        ns->bvec_pool = mempool_create(NVMET_MIN_MPOOL_OBJ, mempool_alloc_slab,
                        mempool_free_slab, ns->bvec_cache);

        if (!ns->bvec_pool) {
                ret = -ENOMEM;
                goto err;
        }

        return ret;
err:
        ns->size = 0;
        ns->blksize_shift = 0;
        nvmet_file_ns_disable(ns);
        return ret;
}

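/* Map a single scatterlist entry onto a bio_vec. */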
static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
{
        bv->bv_page = sg_page(sg);
        bv->bv_offset = sg->offset;
        bv->bv_len = sg->length;
}

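/*
 * Build an iov_iter over the request's bio_vecs and call the backing
 * file's ->read_iter/->write_iter directly.  A FUA write is mapped to
 * IOCB_DSYNC so the data is durable before the command completes.
 */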
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
                unsigned long nr_segs, size_t count, int ki_flags)
{
        struct kiocb *iocb = &req->f.iocb;
        ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
        struct iov_iter iter;
        int rw;

        if (req->cmd->rw.opcode == nvme_cmd_write) {
                if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
                        ki_flags |= IOCB_DSYNC;
                call_iter = req->ns->file->f_op->write_iter;
                rw = WRITE;
        } else {
                call_iter = req->ns->file->f_op->read_iter;
                rw = READ;
        }

        iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);

        iocb->ki_pos = pos;
        iocb->ki_filp = req->ns->file;
        iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);

        return call_iter(iocb, &iter);
}

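/*
 * Completion handler for both the synchronous and AIO paths: free the
 * bvec array unless the inline one was used, then translate a short
 * transfer or negative errno into an NVMe status code.
 */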
static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
        struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
        u16 status = NVME_SC_SUCCESS;

        if (req->f.bvec != req->inline_bvec) {
                if (likely(!req->f.mpool_alloc))
                        kfree(req->f.bvec);
                else
                        mempool_free(req->f.bvec, req->ns->bvec_pool);
        }

        if (unlikely(ret != req->data_len))
                status = errno_to_nvme_status(req, ret);
        nvmet_req_complete(req, status);
}

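/*
 * Issue the I/O.  A mempool-backed bvec array holds at most
 * NVMET_MAX_MPOOL_BVEC entries, so larger requests on that path are
 * submitted synchronously in chunks of that size; everything else goes
 * out as a single, possibly asynchronous, call.  Returns false when the
 * caller should retry without IOCB_NOWAIT.
 */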
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
        ssize_t nr_bvec = req->sg_cnt;
        unsigned long bv_cnt = 0;
        bool is_sync = false;
        size_t len = 0, total_len = 0;
        ssize_t ret = 0;
        loff_t pos;
        int i;
        struct scatterlist *sg;

        if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
                is_sync = true;

        pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
        if (unlikely(pos + req->data_len > req->ns->size)) {
                nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
                return true;
        }

        memset(&req->f.iocb, 0, sizeof(struct kiocb));
        for_each_sg(req->sg, sg, req->sg_cnt, i) {
                nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
                len += req->f.bvec[bv_cnt].bv_len;
                total_len += req->f.bvec[bv_cnt].bv_len;
                bv_cnt++;

                WARN_ON_ONCE((nr_bvec - 1) < 0);

                if (unlikely(is_sync) &&
                    (nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
                        ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
                        if (ret < 0)
                                goto complete;

                        pos += len;
                        bv_cnt = 0;
                        len = 0;
                }
                nr_bvec--;
        }

        if (WARN_ON_ONCE(total_len != req->data_len)) {
                ret = -EIO;
                goto complete;
        }

        if (unlikely(is_sync)) {
                ret = total_len;
                goto complete;
        }

        /*
         * A NULL ki_complete asks for synchronous execution, which we want
         * for the IOCB_NOWAIT case.
         */
        if (!(ki_flags & IOCB_NOWAIT))
                req->f.iocb.ki_complete = nvmet_file_io_done;

        ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);

        switch (ret) {
        case -EIOCBQUEUED:
                return true;
        case -EAGAIN:
                if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
                        goto complete;
                return false;
        case -EOPNOTSUPP:
                /*
                 * For file systems returning error -EOPNOTSUPP, handle
                 * IOCB_NOWAIT error case separately and retry without
                 * IOCB_NOWAIT.
                 */
                if (ki_flags & IOCB_NOWAIT)
                        return false;
                break;
        }

complete:
        nvmet_file_io_done(&req->f.iocb, ret, 0);
        return true;
}

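/*
 * Buffered I/O can sleep on the page cache, so it is pushed to the
 * buffered_io_wq workqueue instead of running in the submission context.
 */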
static void nvmet_file_buffered_io_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

        nvmet_file_execute_io(req, 0);
}

static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
        INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
        queue_work(buffered_io_wq, &req->f.work);
}

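/*
 * Read/write entry point.  The bvec array comes from the inline array,
 * from kmalloc_array(), or, under memory pressure, from the
 * per-namespace mempool.  Buffered namespaces first try IOCB_NOWAIT and
 * fall back to the workqueue if the I/O would block.
 */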
static void nvmet_file_execute_rw(struct nvmet_req *req)
{
        ssize_t nr_bvec = req->sg_cnt;

        if (!req->sg_cnt || !nr_bvec) {
                nvmet_req_complete(req, 0);
                return;
        }

        if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
                req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
                                GFP_KERNEL);
        else
                req->f.bvec = req->inline_bvec;

        if (unlikely(!req->f.bvec)) {
                /* fallback under memory pressure */
                req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
                req->f.mpool_alloc = true;
        } else
                req->f.mpool_alloc = false;

        if (req->ns->buffered_io) {
                if (likely(!req->f.mpool_alloc) &&
                    (req->ns->file->f_mode & FMODE_NOWAIT) &&
                    nvmet_file_execute_io(req, IOCB_NOWAIT))
                        return;
                nvmet_file_submit_buffered_io(req);
        } else
                nvmet_file_execute_io(req, 0);
}

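/* Flush maps to vfs_fsync() with datasync semantics. */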
u16 nvmet_file_flush(struct nvmet_req *req)
{
        return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}

static void nvmet_file_flush_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

        nvmet_req_complete(req, nvmet_file_flush(req));
}

static void nvmet_file_execute_flush(struct nvmet_req *req)
{
        INIT_WORK(&req->f.work, nvmet_file_flush_work);
        schedule_work(&req->f.work);
}

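/*
 * Deallocate maps each DSM range to a hole punch.  -EOPNOTSUPP from the
 * filesystem is deliberately ignored, since deallocate is advisory.
 */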
static void nvmet_file_execute_discard(struct nvmet_req *req)
{
        int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
        struct nvme_dsm_range range;
        loff_t offset, len;
        u16 status = 0;
        int ret;
        int i;

        for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
                status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
                                sizeof(range));
                if (status)
                        break;

                offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
                len = le32_to_cpu(range.nlb);
                len <<= req->ns->blksize_shift;
                if (offset + len > req->ns->size) {
                        req->error_slba = le64_to_cpu(range.slba);
                        status = errno_to_nvme_status(req, -ENOSPC);
                        break;
                }

                ret = vfs_fallocate(req->ns->file, mode, offset, len);
                if (ret && ret != -EOPNOTSUPP) {
                        req->error_slba = le64_to_cpu(range.slba);
                        status = errno_to_nvme_status(req, ret);
                        break;
                }
        }

        nvmet_req_complete(req, status);
}

static void nvmet_file_dsm_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);

        switch (le32_to_cpu(req->cmd->dsm.attributes)) {
        case NVME_DSMGMT_AD:
                nvmet_file_execute_discard(req);
                return;
        case NVME_DSMGMT_IDR:
        case NVME_DSMGMT_IDW:
        default:
                /* Not supported yet */
                nvmet_req_complete(req, 0);
                return;
        }
}

static void nvmet_file_execute_dsm(struct nvmet_req *req)
{
        INIT_WORK(&req->f.work, nvmet_file_dsm_work);
        schedule_work(&req->f.work);
}

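/*
 * Write Zeroes is implemented with FALLOC_FL_ZERO_RANGE.  The NLB field
 * is 0's based, hence the "+ 1" when computing the length.
 */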
static void nvmet_file_write_zeroes_work(struct work_struct *w)
{
        struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
        struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
        int mode = FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE;
        loff_t offset;
        loff_t len;
        int ret;

        offset = le64_to_cpu(write_zeroes->slba) << req->ns->blksize_shift;
        len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
                        req->ns->blksize_shift);

        if (unlikely(offset + len > req->ns->size)) {
                nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
                return;
        }

        ret = vfs_fallocate(req->ns->file, mode, offset, len);
        nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}

static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
{
        INIT_WORK(&req->f.work, nvmet_file_write_zeroes_work);
        schedule_work(&req->f.work);
}

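/*
 * I/O command dispatch: set ->execute and the expected data length for
 * each supported opcode; anything else fails with Invalid Opcode.
 */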
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
{
        struct nvme_command *cmd = req->cmd;

        switch (cmd->common.opcode) {
        case nvme_cmd_read:
        case nvme_cmd_write:
                req->execute = nvmet_file_execute_rw;
                req->data_len = nvmet_rw_len(req);
                return 0;
        case nvme_cmd_flush:
                req->execute = nvmet_file_execute_flush;
                req->data_len = 0;
                return 0;
        case nvme_cmd_dsm:
                req->execute = nvmet_file_execute_dsm;
                req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
                                sizeof(struct nvme_dsm_range);
                return 0;
        case nvme_cmd_write_zeroes:
                req->execute = nvmet_file_execute_write_zeroes;
                req->data_len = 0;
                return 0;
        default:
                pr_err("unhandled cmd for file ns %d on qid %d\n",
                       cmd->common.opcode, req->sq->qid);
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
        }
}
399}