/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"
#include "../nfs.h"
#include "../nfs42.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX	(15*HZ)
#define FF_LAYOUTRETURN_MAXERR		20


static struct group_info *ff_zero_group;

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr);
static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
		struct nfs42_layoutstat_devinfo *devinfo,
		int dev_limit);
static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
		const struct nfs42_layoutstat_devinfo *devinfo,
		struct nfs4_ff_layout_mirror *mirror);

static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (ffl) {
		INIT_LIST_HEAD(&ffl->error_list);
		INIT_LIST_HEAD(&ffl->mirrors);
		ffl->last_report_time = ktime_get();
		return &ffl->generic_hdr;
	} else
		return NULL;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
				 list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree(FF_LAYOUT_FROM_HDR(lo));
}

static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}

static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > sizeof(struct nfs_fh)) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}

/*
 * Currently only stringified uids and gids are accepted.
 * I.e., kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4)*/
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}

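/*
 * Illustrative sketch (an assumption added for clarity, not driver
 * logic): the wire format decode_name() consumes is a plain XDR opaque
 * carrying a stringified id. For example, uid 1000 arrives as
 *
 *	00 00 00 04  31 30 30 30	opaque_length(4), "1000"
 *
 * and nfs_map_string_to_numeric() turns "1000" into id == 1000.
 */
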
static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
		const struct nfs4_ff_layout_mirror *m2)
{
	int i, j;

	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
		return false;
	for (i = 0; i < m1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < m2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&m1->fh_versions[i],
					&m2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}

static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (atomic_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}

static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;
	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}

static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		atomic_set(&mirror->ref, 1);
		INIT_LIST_HEAD(&mirror->mirrors);
	}
	return mirror;
}

static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct rpc_cred *cred;

	ff_layout_remove_mirror(mirror);
	kfree(mirror->fh_versions);
	cred = rcu_access_pointer(mirror->ro_cred);
	if (cred)
		put_rpccred(cred);
	cred = rcu_access_pointer(mirror->rw_cred);
	if (cred)
		put_rpccred(cred);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}

static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && atomic_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}

static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	int i;

	if (fls->mirror_array) {
		for (i = 0; i < fls->mirror_array_cnt; i++) {
			/* normally mirror_ds is freed in
			 * .free_deviceid_node but we still do it here
			 * for .alloc_lseg error path */
			ff_layout_put_mirror(fls->mirror_array[i]);
		}
		kfree(fls->mirror_array);
		fls->mirror_array = NULL;
	}
}

static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
{
	int ret = 0;

	dprintk("--> %s\n", __func__);

	/* FIXME: remove this check when layout segment support is added */
	if (lgr->range.offset != 0 ||
	    lgr->range.length != NFS4_MAX_UINT64) {
		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
			__func__);
		ret = -EINVAL;
	}

	dprintk("--> %s returns %d\n", __func__, ret);
	return ret;
}

static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}

static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	u64 end1, end2;

	if (l1->iomode != l2->iomode)
		return l1->iomode != IOMODE_READ;
	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
	if (end1 < l2->offset)
		return false;
	if (end2 < l1->offset)
		return true;
	return l2->offset <= l1->offset;
}

static bool
ff_lseg_merge(struct pnfs_layout_segment *new,
		struct pnfs_layout_segment *old)
{
	u64 new_end, old_end;

	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
		return false;
	if (new->pls_range.iomode != old->pls_range.iomode)
		return false;
	old_end = pnfs_calc_offset_end(old->pls_range.offset,
			old->pls_range.length);
	if (old_end < new->pls_range.offset)
		return false;
	new_end = pnfs_calc_offset_end(new->pls_range.offset,
			new->pls_range.length);
	if (new_end < old->pls_range.offset)
		return false;

	/* Mergeable: copy info from 'old' to 'new' */
	if (new_end < old_end)
		new_end = old_end;
	if (new->pls_range.offset < old->pls_range.offset)
		new->pls_range.offset = old->pls_range.offset;
	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
			new_end);
	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
		set_bit(NFS_LSEG_ROC, &new->pls_flags);
	return true;
}

static void
ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	pnfs_generic_layout_insert_lseg(lo, lseg,
			ff_lseg_range_is_after,
			ff_lseg_merge,
			free_me);
}

static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}

static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(sizeof(*fls), gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;
	fls->mirror_array = kcalloc(fls->mirror_array_cnt,
				    sizeof(fls->mirror_array[0]), gfp_flags);
	if (fls->mirror_array == NULL)
		goto out_err_free;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_ff_layout_mirror *mirror;
		struct auth_cred acred = { .group_info = ff_zero_group };
		struct rpc_cred	__rcu *cred;
		u32 ds_count, fh_count, id;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		fls->mirror_array[i]->ds_count = ds_count;

		/* deviceid */
		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
		if (rc)
			goto out_err_free;

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		fls->mirror_array[i]->fh_versions =
			kzalloc(fh_count * sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		acred.uid = make_kuid(&init_user_ns, id);

		/* group */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		acred.gid = make_kgid(&init_user_ns, id);

		/* find the cred for it */
		rcu_assign_pointer(cred, rpc_lookup_generic_cred(&acred, 0, gfp_flags));
		if (IS_ERR(cred)) {
			rc = PTR_ERR(cred);
			goto out_err_free;
		}

		if (lgr->range.iomode == IOMODE_READ)
			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
		else
			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);

		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
		if (mirror != fls->mirror_array[i]) {
			/* swap cred ptrs so free_mirror will clean up old */
			if (lgr->range.iomode == IOMODE_READ) {
				cred = xchg(&mirror->ro_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
			} else {
				cred = xchg(&mirror->rw_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
			}
			ff_layout_free_mirror(fls->mirror_array[i]);
			fls->mirror_array[i] = mirror;
		}

		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
			from_kuid(&init_user_ns, acred.uid),
			from_kgid(&init_user_ns, acred.gid));
	}

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	fls->flags = be32_to_cpup(p);

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	for (i = 0; i < fls->mirror_array_cnt; i++)
		fls->mirror_array[i]->report_interval = be32_to_cpup(p);

out_sort_mirrors:
	ff_layout_sort_mirrors(fls);
	rc = ff_layout_check_layout(lgr);
	if (rc)
		goto out_err_free;
	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}

static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &layout->plh_segs, pls_list)
		if (lseg->pls_range.iomode == IOMODE_RW)
			return true;

	return false;
}

static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);

	dprintk("--> %s\n", __func__);

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		if (!ff_layout_has_rw_segments(lseg->pls_layout)) {
			ffl->commit_info.nbuckets = 0;
			kfree(ffl->commit_info.buckets);
			ffl->commit_info.buckets = NULL;
		}
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}

/* Return 1 until we have multiple lsegs support */
static int
ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls)
{
	return 1;
}

static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (!mirror->start_time)
		mirror->start_time = now;
	if (mirror->report_interval != 0)
		report_interval = (s64)mirror->report_interval * 1000LL;
	else if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
			report_interval) {
		ffl->last_report_time = now;
		return true;
	}

	return false;
}

static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
			ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
			ktime_add(iostat->aggregate_completion_time,
					completion_time);
}

static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_KERNEL);
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_NOIO);
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static int
ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
			    struct nfs_commit_info *cinfo,
			    gfp_t gfp_flags)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct pnfs_commit_bucket *buckets;
	int size;

	if (cinfo->ds->nbuckets != 0) {
		/* This assumes there is only one RW lseg per file.
		 * To support multiple lseg per file, we need to
		 * change struct pnfs_commit_bucket to allow dynamic
		 * increasing nbuckets.
		 */
		return 0;
	}

	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);

	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
			  gfp_flags);
	if (!buckets)
		return -ENOMEM;
	else {
		int i;

		spin_lock(&cinfo->inode->i_lock);
		if (cinfo->ds->nbuckets != 0)
			kfree(buckets);
		else {
			cinfo->ds->buckets = buckets;
			cinfo->ds->nbuckets = size;
			for (i = 0; i < size; i++) {
				INIT_LIST_HEAD(&buckets[i].written);
				INIT_LIST_HEAD(&buckets[i].committing);
				/* mark direct verifier as unset */
				buckets[i].direct_verf.committed =
					NFS_INVALID_STABLE_HOW;
			}
		}
		spin_unlock(&cinfo->inode->i_lock);
		return 0;
	}
}

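/*
 * A back-of-the-envelope sketch of the sizing above, under the current
 * single-lseg assumption: ff_layout_get_lseg_count() returns 1, so with
 * N mirrors we get nbuckets == N and a commit request can index its
 * bucket directly by mirror index. Supporting several RW lsegs would
 * require growing nbuckets dynamically, as the comment in the function
 * notes.
 */
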
static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
				  int start_idx,
				  int *best_idx)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct nfs4_pnfs_ds *ds;
	bool fail_return = false;
	int idx;

	/* mirrors are sorted by efficiency */
	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
		if (idx+1 == fls->mirror_array_cnt)
			fail_return = true;
		ds = nfs4_ff_layout_prepare_ds(lseg, idx, fail_return);
		if (ds) {
			*best_idx = idx;
			return ds;
		}
	}

	return NULL;
}

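/*
 * Worked example (a sketch, not extra logic): with three mirrors of
 * efficiency {10, 5, 1}, already ordered by ff_layout_sort_mirrors(),
 * a call with start_idx == 0 tries index 0 first; fail_return only
 * becomes true for the final index, so a temporarily unreachable "best"
 * mirror falls through to the next candidate instead of failing the
 * read outright.
 */
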
static void
ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
		      struct nfs_page *req,
		      bool strict_iomode)
{
retry_strict:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   0,
					   NFS4_MAX_UINT64,
					   IOMODE_READ,
					   strict_iomode,
					   GFP_KERNEL);
	if (IS_ERR(pgio->pg_lseg)) {
		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
	}

	/* If we got an IOMODE_RW segment without strict checking,
	 * and the server wants READs to avoid that segment,
	 * then retry with strict_iomode set!
	 */
	if (pgio->pg_lseg && !strict_iomode &&
	    ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
		strict_iomode = true;
		goto retry_strict;
	}
}

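/*
 * Sketch of the retry above (illustrative, no new behaviour): the first
 * pnfs_update_layout() call with strict_iomode == false may hand back a
 * cached IOMODE_RW lseg; if the server has flagged that segment so that
 * ff_layout_avoid_read_on_rw() returns true, we loop exactly once more
 * with strict_iomode == true to demand a genuine IOMODE_READ segment.
 */
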
static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
		       struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	int ds_idx;

retry:
	pnfs_generic_pg_check_layout(pgio);
	/* Use full layout for now */
	if (!pgio->pg_lseg)
		ff_layout_pg_get_read(pgio, req, false);
	else if (ff_layout_avoid_read_on_rw(pgio->pg_lseg))
		ff_layout_pg_get_read(pgio, req, true);

	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
	if (!ds) {
		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
			goto out_mds;
		pnfs_put_lseg(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
		/* Sleep for 1 second before retrying */
		ssleep(1);
		goto retry;
	}

	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);

	pgio->pg_mirror_idx = ds_idx;

	/* read always uses only one mirror - idx 0 for pgio layer */
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	return;
out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_read_mds(pgio);
}

static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs_commit_info cinfo;
	struct nfs4_pnfs_ds *ds;
	int i;
	int status;

retry:
	pnfs_generic_pg_check_layout(pgio);
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
	if (status < 0)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
		goto out_eagain;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
		if (!ds) {
			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
				goto out_mds;
			pnfs_put_lseg(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			/* Sleep for 1 second before retrying */
			ssleep(1);
			goto retry;
		}
		pgm = &pgio->pg_mirrors[i];
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	return;
out_eagain:
	pnfs_generic_pg_cleanup(pgio);
	pgio->pg_error = -EAGAIN;
	return;
out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_write_mds(pgio);
	pgio->pg_error = -EAGAIN;
}

static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			goto out;
		}
	}
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	/* no lseg means that pnfs is not in use, so no mirroring here */
	nfs_pageio_reset_write_mds(pgio);
out:
	return 1;
}

static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		hdr->completion_ops->reschedule_io(hdr);
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}

static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}

static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					   struct nfs4_state *state,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	switch (task->tk_status) {
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		break;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		break;
	/* Invalidate Layout errors */
	case -NFS4ERR_PNFS_NO_LAYOUT:
	case -ESTALE:           /* mapped NFS4ERR_STALE */
	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
	case -NFS4ERR_FHEXPIRED:
	case -NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	/* RPC connection errors */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		/* fall through */
	default:
		if (ff_layout_avoid_mds_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
reset:
		dprintk("%s Retry through MDS. Error %d\n", __func__,
			task->tk_status);
		return -NFS4ERR_RESET_TO_MDS;
	}
	task->tk_status = 0;
	return -EAGAIN;
}

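/*
 * Dispatch summary for the v4 handler above (descriptive sketch only):
 * session-level errors schedule session recovery and retry, DELAY/GRACE
 * simply delay the task, layout-invalidating errors destroy the layout
 * and reset the i/o to the MDS, and DS connection errors delete the
 * deviceid, then reset to pNFS when another usable DS remains or to the
 * MDS otherwise.
 */
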
/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	switch (task->tk_status) {
	/* File access problems. Don't mark the device as unavailable */
	case -EACCES:
	case -ESTALE:
	case -EISDIR:
	case -EBADHANDLE:
	case -ELOOP:
	case -ENOSPC:
		break;
	case -EJUKEBOX:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		goto out_retry;
	default:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
	}
	/* FIXME: Need to prevent infinite looping here. */
	return -NFS4ERR_RESET_TO_PNFS;
out_retry:
	task->tk_status = 0;
	rpc_restart_call_prepare(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}

static int ff_layout_async_handle_error(struct rpc_task *task,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					int idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	if (task->tk_status >= 0)
		return 0;

	/* Handle the case of an invalid layout segment */
	if (!pnfs_is_valid_lseg(lseg))
		return -NFS4ERR_RESET_TO_PNFS;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, state, clp,
						       lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}

static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					int idx, u64 offset, u64 length,
					u32 status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	int err;

	if (status == 0) {
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
			status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	switch (status) {
	case NFS4ERR_DELAY:
	case NFS4ERR_GRACE:
		return;
	default:
		break;
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       GFP_NOIO);
	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}

/* NFS_PROTO call done callback routines */
static int ff_layout_read_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	int err;

	trace_nfs4_pnfs_read(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_READ,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		if (ff_layout_choose_best_ds_for_read(hdr->lseg,
					hdr->pgio_mirror_idx + 1,
					&hdr->pgio_mirror_idx))
			goto out_eagain;
		ff_layout_read_record_layoutstats_done(task, hdr);
		pnfs_read_resend_pnfs(hdr);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		ff_layout_reset_read(hdr);
		return task->tk_status;
	case -EAGAIN:
		goto out_eagain;
	}

	return 0;
out_eagain:
	rpc_restart_call_prepare(task);
	return -EAGAIN;
}

static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 *
 * The flexfiles client should treat a DS-replied FILE_SYNC as DATA_SYNC, so
 * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send a layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct inode *inode,
		struct pnfs_layout_segment *lseg,
		loff_t end_offset)
{
	if (!ff_layout_need_layoutcommit(lseg))
		return;

	pnfs_set_layoutcommit(inode, lseg, end_offset);
	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
}

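/*
 * Worked example of the errata behaviour noted above (illustrative
 * numbers): a DS WRITE of 4096 bytes at offset 0 that comes back
 * FILE_SYNC is still treated as DATA_SYNC, so a LAYOUTCOMMIT with a
 * last write byte of 4096 is sent to the MDS rather than being skipped.
 */
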
static bool
ff_layout_device_unavailable(struct pnfs_layout_segment *lseg, int idx)
{
	/* No mirroring for now */
	struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);

	return ff_layout_test_devid_unavailable(node);
}

static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_read(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			hdr->res.count);
}

static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}
	if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
		rpc_exit(task, -EHOSTDOWN);
		return -EAGAIN;
	}

	ff_layout_read_record_layoutstats_start(task, hdr);
	return 0;
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

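/*
 * Sketch of the dense-offset remark above (hypothetical numbers): with
 * a dense stripe of unit 4K across two DSes, file offset 12288 would
 * map to per-DS offset 4096. Flexfiles only uses sparse-style
 * addressing today, so hdr->args.offset is sent to the DS unchanged.
 */
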
static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (nfs4_setup_sequence(hdr->ds_clp,
				&hdr->args.seq_args,
				&hdr->res.seq_res,
				task))
		return;

	ff_layout_read_prepare_common(task, hdr);
}

static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
}

static void ff_layout_read_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
	pnfs_generic_rw_release(data);
}


static int ff_layout_write_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	loff_t end_offs = 0;
	int err;

	trace_nfs4_pnfs_write(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_WRITE,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		ff_layout_reset_write(hdr, true);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		ff_layout_reset_write(hdr, false);
		return task->tk_status;
	case -EAGAIN:
		return -EAGAIN;
	}

	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
	    hdr->res.verf->committed == NFS_DATA_SYNC)
		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;

	/* Note: if the write is unstable, don't set end_offs until commit */
	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);

1443 hdr->fattr.valid = 0;
1444 if (task->tk_status >= 0)
1445 nfs_writeback_update_inode(hdr);
1446
1447 return 0;
1448}
1449
1450static int ff_layout_commit_done_cb(struct rpc_task *task,
1451 struct nfs_commit_data *data)
1452{
1453 int err;
1454
1455 trace_nfs4_pnfs_commit_ds(data, task->tk_status);
1456 if (task->tk_status < 0)
1457 ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1458 data->args.offset, data->args.count,
1459 data->res.op_status, OP_COMMIT,
1460 task->tk_status);
1461 err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1462 data->lseg, data->ds_commit_index);
1463
1464 switch (err) {
1465 case -NFS4ERR_RESET_TO_PNFS:
1466 pnfs_generic_prepare_to_resend_writes(data);
1467 return -EAGAIN;
1468 case -NFS4ERR_RESET_TO_MDS:
1469 pnfs_generic_prepare_to_resend_writes(data);
1470 return -EAGAIN;
1471 case -EAGAIN:
1472 rpc_restart_call_prepare(task);
1473 return -EAGAIN;
1474 }
1475
1476 ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1477
1478 return 0;
1479}
1480
1481static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1482 struct nfs_pgio_header *hdr)
1483{
1484 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1485 return;
1486 nfs4_ff_layout_stat_io_start_write(hdr->inode,
1487 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1488 hdr->args.count,
1489 task->tk_start);
1490}
1491
1492static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1493 struct nfs_pgio_header *hdr)
1494{
1495 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1496 return;
1497 nfs4_ff_layout_stat_io_end_write(task,
1498 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1499 hdr->args.count, hdr->res.count,
1500 hdr->res.verf->committed);
1501}
1502
1503static int ff_layout_write_prepare_common(struct rpc_task *task,
1504 struct nfs_pgio_header *hdr)
1505{
1506 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1507 rpc_exit(task, -EIO);
1508 return -EIO;
1509 }
1510
1511 if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
1512 rpc_exit(task, -EHOSTDOWN);
1513 return -EAGAIN;
1514 }
1515
1516 ff_layout_write_record_layoutstats_start(task, hdr);
1517 return 0;
1518}
1519
1520static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1521{
1522 struct nfs_pgio_header *hdr = data;
1523
1524 if (ff_layout_write_prepare_common(task, hdr))
1525 return;
1526
1527 rpc_call_start(task);
1528}
1529
1530static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1531{
1532 struct nfs_pgio_header *hdr = data;
1533
1534 if (nfs4_setup_sequence(hdr->ds_clp,
1535 &hdr->args.seq_args,
1536 &hdr->res.seq_res,
1537 task))
1538 return;
1539
1540 ff_layout_write_prepare_common(task, hdr);
1541}
1542
1543static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1544{
1545 struct nfs_pgio_header *hdr = data;
1546
1547 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1548 task->tk_status == 0) {
1549 nfs4_sequence_done(task, &hdr->res.seq_res);
1550 return;
1551 }
1552
1553 /* Note this may cause RPC to be resent */
1554 hdr->mds_ops->rpc_call_done(task, hdr);
1555}
1556
1557static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1558{
1559 struct nfs_pgio_header *hdr = data;
1560
1561 ff_layout_write_record_layoutstats_done(task, hdr);
1562 rpc_count_iostats_metrics(task,
1563 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1564}
1565
1566static void ff_layout_write_release(void *data)
1567{
1568 struct nfs_pgio_header *hdr = data;
1569
1570 ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1571 pnfs_generic_rw_release(data);
1572}
1573
1574static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1575 struct nfs_commit_data *cdata)
1576{
1577 if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1578 return;
1579 nfs4_ff_layout_stat_io_start_write(cdata->inode,
1580 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1581 0, task->tk_start);
1582}
1583
1584static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1585 struct nfs_commit_data *cdata)
1586{
1587 struct nfs_page *req;
1588 __u64 count = 0;
1589
1590 if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1591 return;
1592
1593 if (task->tk_status == 0) {
1594 list_for_each_entry(req, &cdata->pages, wb_list)
1595 count += req->wb_bytes;
1596 }
1597 nfs4_ff_layout_stat_io_end_write(task,
1598 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1599 count, count, NFS_FILE_SYNC);
1600}
1601
1602static void ff_layout_commit_prepare_common(struct rpc_task *task,
1603 struct nfs_commit_data *cdata)
1604{
1605 ff_layout_commit_record_layoutstats_start(task, cdata);
1606}
1607
1608static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1609{
1610 ff_layout_commit_prepare_common(task, data);
1611 rpc_call_start(task);
1612}
1613
1614static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1615{
1616 struct nfs_commit_data *wdata = data;
1617
1618 if (nfs4_setup_sequence(wdata->ds_clp,
1619 &wdata->args.seq_args,
1620 &wdata->res.seq_res,
1621 task))
1622 return;
1623 ff_layout_commit_prepare_common(task, data);
1624}
1625
1626static void ff_layout_commit_done(struct rpc_task *task, void *data)
1627{
1628 pnfs_generic_write_commit_done(task, data);
1629}
1630
1631static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1632{
1633 struct nfs_commit_data *cdata = data;
1634
1635 ff_layout_commit_record_layoutstats_done(task, cdata);
1636 rpc_count_iostats_metrics(task,
1637 &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1638}
1639
1640static void ff_layout_commit_release(void *data)
1641{
1642 struct nfs_commit_data *cdata = data;
1643
1644 ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1645 pnfs_generic_commit_release(data);
1646}
1647
1648static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1649 .rpc_call_prepare = ff_layout_read_prepare_v3,
1650 .rpc_call_done = ff_layout_read_call_done,
1651 .rpc_count_stats = ff_layout_read_count_stats,
1652 .rpc_release = ff_layout_read_release,
1653};
1654
1655static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1656 .rpc_call_prepare = ff_layout_read_prepare_v4,
1657 .rpc_call_done = ff_layout_read_call_done,
1658 .rpc_count_stats = ff_layout_read_count_stats,
1659 .rpc_release = ff_layout_read_release,
1660};
1661
1662static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1663 .rpc_call_prepare = ff_layout_write_prepare_v3,
1664 .rpc_call_done = ff_layout_write_call_done,
1665 .rpc_count_stats = ff_layout_write_count_stats,
1666 .rpc_release = ff_layout_write_release,
1667};
1668
1669static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1670 .rpc_call_prepare = ff_layout_write_prepare_v4,
1671 .rpc_call_done = ff_layout_write_call_done,
1672 .rpc_count_stats = ff_layout_write_count_stats,
1673 .rpc_release = ff_layout_write_release,
1674};
1675
1676static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1677 .rpc_call_prepare = ff_layout_commit_prepare_v3,
1678 .rpc_call_done = ff_layout_commit_done,
1679 .rpc_count_stats = ff_layout_commit_count_stats,
1680 .rpc_release = ff_layout_commit_release,
1681};
1682
1683static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1684 .rpc_call_prepare = ff_layout_commit_prepare_v4,
1685 .rpc_call_done = ff_layout_commit_done,
1686 .rpc_count_stats = ff_layout_commit_count_stats,
1687 .rpc_release = ff_layout_commit_release,
1688};
1689
1690static enum pnfs_try_status
1691ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1692{
1693 struct pnfs_layout_segment *lseg = hdr->lseg;
1694 struct nfs4_pnfs_ds *ds;
1695 struct rpc_clnt *ds_clnt;
1696 struct rpc_cred *ds_cred;
1697 loff_t offset = hdr->args.offset;
1698 u32 idx = hdr->pgio_mirror_idx;
1699 int vers;
1700 struct nfs_fh *fh;
1701
1702 dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1703 __func__, hdr->inode->i_ino,
1704 hdr->args.pgbase, (size_t)hdr->args.count, offset);
1705
1706 ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);
1707 if (!ds)
1708 goto out_failed;
1709
1710 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1711 hdr->inode);
1712 if (IS_ERR(ds_clnt))
1713 goto out_failed;
1714
1715 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1716 if (!ds_cred)
1717 goto out_failed;
1718
1719 vers = nfs4_ff_layout_ds_version(lseg, idx);
1720
1721 dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1722 ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);
1723
1724 hdr->pgio_done_cb = ff_layout_read_done_cb;
1725 atomic_inc(&ds->ds_clp->cl_count);
1726 hdr->ds_clp = ds->ds_clp;
1727 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1728 if (fh)
1729 hdr->args.fh = fh;
1730
1731 if (vers == 4 &&
1732 !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
1733 goto out_failed;
1734
1735 /*
1736 * Note that if we ever decide to split across DSes,
1737 * then we may need to handle dense-like offsets.
1738 */
1739 hdr->args.offset = offset;
1740 hdr->mds_offset = offset;
1741
1742 /* Perform an asynchronous read to ds */
1743 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1744 vers == 3 ? &ff_layout_read_call_ops_v3 :
1745 &ff_layout_read_call_ops_v4,
1746 0, RPC_TASK_SOFTCONN);
1747 put_rpccred(ds_cred);
1748 return PNFS_ATTEMPTED;
1749
1750out_failed:
1751 if (ff_layout_avoid_mds_available_ds(lseg))
1752 return PNFS_TRY_AGAIN;
1753 return PNFS_NOT_ATTEMPTED;
1754}
1755
1756/* Perform async writes. */
1757static enum pnfs_try_status
1758ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1759{
1760 struct pnfs_layout_segment *lseg = hdr->lseg;
1761 struct nfs4_pnfs_ds *ds;
1762 struct rpc_clnt *ds_clnt;
1763 struct rpc_cred *ds_cred;
1764 loff_t offset = hdr->args.offset;
1765 int vers;
1766 struct nfs_fh *fh;
1767 int idx = hdr->pgio_mirror_idx;
1768
1769 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1770 if (!ds)
1771 goto out_failed;
1772
1773 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1774 hdr->inode);
1775 if (IS_ERR(ds_clnt))
1776 goto out_failed;
1777
1778 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
1779 if (!ds_cred)
1780 goto out_failed;
1781
1782 vers = nfs4_ff_layout_ds_version(lseg, idx);
1783
1784 dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1785 __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1786 offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
1787 vers);
1788
1789 hdr->pgio_done_cb = ff_layout_write_done_cb;
1790 atomic_inc(&ds->ds_clp->cl_count);
1791 hdr->ds_clp = ds->ds_clp;
1792 hdr->ds_commit_idx = idx;
1793 fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
1794 if (fh)
1795 hdr->args.fh = fh;
1796
1797 if (vers == 4 &&
1798 !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
1799 goto out_failed;
1800
1801 /*
1802 * Note that if we ever decide to split across DSes,
1803 * then we may need to handle dense-like offsets.
1804 */
1805 hdr->args.offset = offset;
1806
1807 /* Perform an asynchronous write */
1808 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1809 vers == 3 ? &ff_layout_write_call_ops_v3 :
1810 &ff_layout_write_call_ops_v4,
1811 sync, RPC_TASK_SOFTCONN);
1812 put_rpccred(ds_cred);
1813 return PNFS_ATTEMPTED;
1814
1815out_failed:
1816 if (ff_layout_avoid_mds_available_ds(lseg))
1817 return PNFS_TRY_AGAIN;
1818 return PNFS_NOT_ATTEMPTED;
1819}
1820
1821static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1822{
1823 return i;
1824}
1825
1826static struct nfs_fh *
1827select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1828{
1829 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1830
1831 /* FIXME: Assume that there is only one NFS version available
1832 * for the DS.
1833 */
1834 return &flseg->mirror_array[i]->fh_versions[0];
1835}
1836
1837static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1838{
1839 struct pnfs_layout_segment *lseg = data->lseg;
1840 struct nfs4_pnfs_ds *ds;
1841 struct rpc_clnt *ds_clnt;
1842 struct rpc_cred *ds_cred;
1843 u32 idx;
1844 int vers, ret;
1845 struct nfs_fh *fh;
1846
1847 if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1848 test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1849 goto out_err;
1850
1851 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1852 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true);
1853 if (!ds)
1854 goto out_err;
1855
1856 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp,
1857 data->inode);
1858 if (IS_ERR(ds_clnt))
1859 goto out_err;
1860
1861 ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
1862 if (!ds_cred)
1863 goto out_err;
1864
1865 vers = nfs4_ff_layout_ds_version(lseg, idx);
1866
1867 dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
1868 data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
1869 vers);
1870 data->commit_done_cb = ff_layout_commit_done_cb;
1871 data->cred = ds_cred;
1872 atomic_inc(&ds->ds_clp->cl_count);
1873 data->ds_clp = ds->ds_clp;
1874 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
1875 if (fh)
1876 data->args.fh = fh;
1877
1878 ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
1879 vers == 3 ? &ff_layout_commit_call_ops_v3 :
1880 &ff_layout_commit_call_ops_v4,
1881 how, RPC_TASK_SOFTCONN);
1882 put_rpccred(ds_cred);
1883 return ret;
1884out_err:
1885 pnfs_generic_prepare_to_resend_writes(data);
1886 pnfs_generic_commit_release(data);
1887 return -EAGAIN;
1888}
1889
1890static int
1891ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1892 int how, struct nfs_commit_info *cinfo)
1893{
1894 return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
1895 ff_layout_initiate_commit);
1896}
1897
1898static struct pnfs_ds_commit_info *
1899ff_layout_get_ds_info(struct inode *inode)
1900{
1901 struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
1902
1903 if (layout == NULL)
1904 return NULL;
1905
1906 return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
1907}
1908
1909static void
1910ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
1911{
1912 nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
1913 id_node));
1914}
1915
1916static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
1917 const struct nfs4_layoutreturn_args *args,
1918 const struct nfs4_flexfile_layoutreturn_args *ff_args)
1919{
1920 __be32 *start;
1921
1922 start = xdr_reserve_space(xdr, 4);
1923 if (unlikely(!start))
1924 return -E2BIG;
1925
1926 *start = cpu_to_be32(ff_args->num_errors);
	/* This assumes we always return _ALL_ layouts */
	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
}

static void
encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
{
	WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
}

static void
ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
			    const nfs4_stateid *stateid,
			    const struct nfs42_layoutstat_devinfo *devinfo)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 8 + 8);
	p = xdr_encode_hyper(p, devinfo->offset);
	p = xdr_encode_hyper(p, devinfo->length);
	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
	p = xdr_reserve_space(xdr, 4*8);
	p = xdr_encode_hyper(p, devinfo->read_count);
	p = xdr_encode_hyper(p, devinfo->read_bytes);
	p = xdr_encode_hyper(p, devinfo->write_count);
	p = xdr_encode_hyper(p, devinfo->write_bytes);
	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
}

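/*
 * Resulting wire layout per entry (a readability note derived from the
 * encoder above): offset(8) length(8) stateid(16) read_count(8)
 * read_bytes(8) write_count(8) write_bytes(8) deviceid(16), followed by
 * the per-mirror layoutupdate body appended by
 * ff_layout_encode_ff_layoutupdate().
 */
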
static void
ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
			    const nfs4_stateid *stateid,
			    const struct nfs42_layoutstat_devinfo *devinfo)
{
	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
			devinfo->ld_private.data);
}

/* Encode the layoutstats devinfo entries gathered for this layoutreturn */
static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
			      const struct nfs4_layoutreturn_args *args,
			      struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *p;
	int i;

	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(ff_args->num_dev);
	for (i = 0; i < ff_args->num_dev; i++)
		ff_layout_encode_ff_iostat(xdr,
					   &args->layout->plh_stateid,
					   &ff_args->devinfo[i]);
}

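/* Release the ld_private data (mirror references) attached to each entry */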
static void
ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
			     unsigned int num_entries)
{
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		if (!devinfo[i].ld_private.ops)
			continue;
		if (!devinfo[i].ld_private.ops->free)
			continue;
		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}
}

static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
			      struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds *dsaddr;

	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}

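/*
 * The flexfile layoutreturn body has a variable length, so encode the
 * ioerr and iostats arrays into a scratch page first, then splice that
 * page into the main XDR stream as a length-prefixed opaque blob.
 */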
static void
ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
			      const void *voidargs,
			      const struct nfs4_xdr_opaque_data *ff_opaque)
{
	const struct nfs4_layoutreturn_args *args = voidargs;
	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
	struct xdr_buf tmp_buf = {
		.head = {
			[0] = {
				.iov_base = page_address(ff_args->pages[0]),
			},
		},
		.buflen = PAGE_SIZE,
	};
	struct xdr_stream tmp_xdr;
	__be32 *start;

	dprintk("%s: Begin\n", __func__);

	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL);

	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);

	start = xdr_reserve_space(xdr, 4);
	*start = cpu_to_be32(tmp_buf.len);
	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);

	dprintk("%s: Return\n", __func__);
}

static void
ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;

	if (!args->data)
		return;
	ff_args = args->data;
	args->data = NULL;

	ff_layout_free_ds_ioerr(&ff_args->errors);
	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);

	put_page(ff_args->pages[0]);
	kfree(ff_args);
}

static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
	.encode = ff_layout_encode_layoutreturn,
	.free = ff_layout_free_layoutreturn,
};

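/*
 * Collect the data server I/O errors and per-mirror statistics that will
 * accompany the LAYOUTRETURN, and hang them off args->ld_private so that
 * layoutreturn_ops can encode and free them later.
 */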
static int
ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);

	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
	if (!ff_args)
		goto out_nomem;
	ff_args->pages[0] = alloc_page(GFP_KERNEL);
	if (!ff_args->pages[0])
		goto out_nomem_free;

	INIT_LIST_HEAD(&ff_args->errors);
	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
			&args->range, &ff_args->errors,
			FF_LAYOUTRETURN_MAXERR);

	spin_lock(&args->inode->i_lock);
	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
			&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
	spin_unlock(&args->inode->i_lock);

	args->ld_private->ops = &layoutreturn_ops;
	args->ld_private->data = ff_args;
	return 0;
out_nomem_free:
	kfree(ff_args);
out_nomem:
	return -ENOMEM;
}

static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}

static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
			  const int buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
				&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}

/*
 * Encode a netaddr4: the netid ("tcp" or "tcp6") followed by the
 * universal address, i.e. the presentation address with the port number
 * appended as two dot-separated octets. Derived from rpc_sockaddr2uaddr().
 */
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	char *netid;
	unsigned short port;
	int len, netid_len;
	__be32 *p;

	switch (sap->sa_family) {
	case AF_INET:
		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		netid = "tcp";
		netid_len = 3;
		break;
	case AF_INET6:
		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		netid = "tcp6";
		netid_len = 4;
		break;
	default:
		/* we only support tcp and tcp6 */
		WARN_ON_ONCE(1);
		return;
	}

	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

	p = xdr_reserve_space(xdr, 4 + netid_len);
	xdr_encode_opaque(p, netid, netid_len);

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, addrbuf, len);
}

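/* Encode a ktime_t as an nfstime4: 64-bit seconds, 32-bit nanoseconds */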
static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
			 ktime_t t)
{
	struct timespec64 ts;
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	ts = ktime_to_timespec64(t);
	p = xdr_encode_hyper(p, ts.tv_sec);
	*p++ = cpu_to_be32(ts.tv_nsec);
}

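/*
 * Encode one I/O latency block: the requested/completed op and byte
 * counters, the bytes not delivered, then the busy time and aggregate
 * completion time as nfstime4 values.
 */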
static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
			    struct nfs4_ff_io_stat *stat)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 5 * 8);
	p = xdr_encode_hyper(p, stat->ops_requested);
	p = xdr_encode_hyper(p, stat->bytes_requested);
	p = xdr_encode_hyper(p, stat->ops_completed);
	p = xdr_encode_hyper(p, stat->bytes_completed);
	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}

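/*
 * Encode the layout statistics for a single mirror: the data server's
 * address and file handle, its read and write latency stats, how long
 * the mirror has been active, and a trailing boolean that this driver
 * always encodes as false.
 */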
static void
ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
				 const struct nfs42_layoutstat_devinfo *devinfo,
				 struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
	struct nfs_fh *fh = &mirror->fh_versions[0];
	__be32 *p;

	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
	dprintk("%s: DS %s: encoding address %s\n",
		__func__, ds->ds_remotestr, da->da_remotestr);
	/* netaddr4 */
	ff_layout_encode_netaddr(xdr, da);
	/* nfs_fh4 */
	p = xdr_reserve_space(xdr, 4 + fh->size);
	xdr_encode_opaque(p, fh->data, fh->size);
	/* ff_io_latency4 read */
	spin_lock(&mirror->lock);
	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
	/* ff_io_latency4 write */
	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
	spin_unlock(&mirror->lock);
	/* nfstime4 */
	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
	/* bool */
	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(false);
}

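/*
 * Encode the opaque layoutupdate body for LAYOUTSTATS: reserve the
 * length word, emit the payload, then back-patch the length from the
 * number of XDR words written after the reserved slot.
 */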
static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
			     const struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
			struct nfs42_layoutstat_devinfo, ld_private);
	__be32 *start;

	/* layoutupdate length */
	start = xdr_reserve_space(xdr, 4);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
}

static void
ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs4_ff_layout_mirror *mirror = opaque->data;

	ff_layout_put_mirror(mirror);
}

static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
	.encode = ff_layout_encode_layoutstats,
	.free = ff_layout_free_layoutstats,
};

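/*
 * Fill in up to dev_limit devinfo entries, one per mirror with freshly
 * updated statistics, and return the number of entries used. A reference
 * is taken on each reported mirror; it is dropped again in
 * ff_layout_free_layoutstats(). Called with the inode lock held.
 */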
static int
ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
			       struct nfs42_layoutstat_devinfo *devinfo,
			       int dev_limit)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *dev;
	int i = 0;

	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (i >= dev_limit)
			break;
		if (IS_ERR_OR_NULL(mirror->mirror_ds))
			continue;
		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
			continue;
		/* mirror refcount put in ff_layout_free_layoutstats() */
		if (!atomic_inc_not_zero(&mirror->ref))
			continue;
		dev = &mirror->mirror_ds->id_node;
		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
		devinfo->offset = 0;
		devinfo->length = NFS4_MAX_UINT64;
		spin_lock(&mirror->lock);
		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
		spin_unlock(&mirror->lock);
		devinfo->layout_type = LAYOUT_FLEX_FILES;
		devinfo->ld_private.ops = &layoutstat_ops;
		devinfo->ld_private.data = mirror;

		devinfo++;
		i++;
	}
	return i;
}

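/*
 * Allocate and populate the devinfo array for a LAYOUTSTATS call.
 * Returns -ENOENT when no mirror has new statistics to report.
 */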
static int
ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
	struct nfs4_flexfile_layout *ff_layout;
	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;

	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
	if (!args->devinfo)
		return -ENOMEM;

	spin_lock(&args->inode->i_lock);
	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
			&args->devinfo[0], dev_count);
	spin_unlock(&args->inode->i_lock);
	if (!args->num_dev) {
		kfree(args->devinfo);
		args->devinfo = NULL;
		return -ENOENT;
	}

	return 0;
}

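/* Advertise LAYOUTSTATS support whenever NFS v4.2 is enabled */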
static int
ff_layout_set_layoutdriver(struct nfs_server *server,
		const struct nfs_fh *dummy)
{
#if IS_ENABLED(CONFIG_NFS_V4_2)
	server->caps |= NFS_CAP_LAYOUTSTATS;
#endif
	return 0;
}

static struct pnfs_layoutdriver_type flexfilelayout_type = {
	.id			= LAYOUT_FLEX_FILES,
	.name			= "LAYOUT_FLEX_FILES",
	.owner			= THIS_MODULE,
	.set_layoutdriver	= ff_layout_set_layoutdriver,
	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
	.free_layout_hdr	= ff_layout_free_layout_hdr,
	.alloc_lseg		= ff_layout_alloc_lseg,
	.free_lseg		= ff_layout_free_lseg,
	.add_lseg		= ff_layout_add_lseg,
	.pg_read_ops		= &ff_layout_pg_read_ops,
	.pg_write_ops		= &ff_layout_pg_write_ops,
	.get_ds_info		= ff_layout_get_ds_info,
	.free_deviceid_node	= ff_layout_free_deviceid_node,
	.mark_request_commit	= pnfs_layout_mark_request_commit,
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.commit_pagelist	= ff_layout_commit_pagelist,
	.read_pagelist		= ff_layout_read_pagelist,
	.write_pagelist		= ff_layout_write_pagelist,
	.alloc_deviceid_node	= ff_layout_alloc_deviceid_node,
	.prepare_layoutreturn	= ff_layout_prepare_layoutreturn,
	.sync			= pnfs_nfs_generic_sync,
	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
};

static int __init nfs4flexfilelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
	       __func__);
	if (!ff_zero_group) {
		ff_zero_group = groups_alloc(0);
		if (!ff_zero_group)
			return -ENOMEM;
	}
	return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&flexfilelayout_type);
	if (ff_zero_group) {
		put_group_info(ff_zero_group);
		ff_zero_group = NULL;
	}
}

MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);