/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
	return offset + nd_btt->initial_offset;
}

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	/*
	 * infooff and info2off should always be at least 512B aligned.
	 * We rely on that to make sure rw_bytes does error clearing
	 * correctly, so make sure that is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
		"arena->infooff: %#llx is unaligned\n", arena->infooff);
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
		"arena->info2off: %#llx is unaligned\n", arena->info2off);

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb), 0);
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

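/*
 * Note: the info block is duplicated (at infooff and info2off), and
 * btt_info_write() above updates the backup copy before the primary,
 * presumably so that a crash mid-update always leaves at least one
 * intact copy to recover from.
 */
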
static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
		unsigned long flags)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}

static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping = ent_lba(mapping);

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we treat it as
		 * invalid to avoid confusion.
		 */
		dev_err_ratelimited(to_dev(arena),
			"Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}

static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error, unsigned long rwb_flags)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = ent_z_flag(raw_mapping);
	e_flag = ent_e_flag(raw_mapping);
	ze = (z_flag << 1) + e_flag;
	postmap = ent_lba(raw_mapping);

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}

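/*
 * Summary of the on-media (Z, E) flag encodings handled by
 * btt_map_write()/btt_map_read() above:
 *
 *   (Z=0, E=0) - initial state, identity mapping (premap == postmap)
 *   (Z=0, E=1) - error: the mapped block has a known media error
 *   (Z=1, E=0) - zero/trim: reads of this block return zeroes
 *   (Z=1, E=1) - normal mapping (MAP_ENT_NORMAL)
 */
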
static int btt_log_group_read(struct arena_info *arena, u32 lane,
			struct log_group *log)
{
	return arena_read_bytes(arena,
			arena->logoff + (lane * LOG_GRP_SIZE), log,
			LOG_GRP_SIZE, 0);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If, for some reason, parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
	debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
	debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

static u32 log_seq(struct log_group *log, int log_idx)
{
	return le32_to_cpu(log->ent[log_idx].seq);
}

/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * If the log group has never been used, it also initializes
 * the sequence number of the first slot.
 * Finally, it returns which of the entries was the older one.
 *
 * TODO: the logic feels a bit kludgey. Make it better.
 */
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
	int idx0 = a->log_index[0];
	int idx1 = a->log_index[1];
	int old;

	/*
	 * The first ever time this is seen, the entry goes into [0];
	 * the next time, the following logic works out to put this
	 * (next) entry into [1].
	 */
	if (log_seq(log, idx0) == 0) {
		log->ent[idx0].seq = cpu_to_le32(1);
		return 0;
	}

	if (log_seq(log, idx0) == log_seq(log, idx1))
		return -EINVAL;
	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
		return -EINVAL;

	if (log_seq(log, idx0) < log_seq(log, idx1)) {
		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}

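/*
 * A note on the sequence numbers compared in btt_log_get_old(): valid
 * values cycle 1 -> 2 -> 3 -> 1, with 0 reserved for a never-used slot,
 * so the two slots of a consistent group always differ by one step. A
 * difference of 1 means the larger value is newer, and the wraparound
 * pair (3, 1) means the slot holding 1 is newer. Equal values, or a sum
 * greater than 5 (impossible for any valid pair drawn from {1, 2, 3}),
 * indicate corruption.
 */
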
/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_group log;

	ret = btt_log_group_read(arena, lane, &log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(arena, &log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
			"log corruption (%d): lane %d seq [%d, %d]\n",
			old_ent, lane, log.ent[arena->log_index[0]].seq,
			log.ent[arena->log_index[1]].seq);
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media.
 * It does _not_ prepare the freelist entry for the next write;
 * btt_flog_write is the wrapper for updating the freelist elements.
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	u32 group_slot = arena->log_index[sub];
	unsigned int log_half = LOG_ENT_SIZE / 2;
	void *src = ent;
	u64 ns_off;

	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
		(group_slot * LOG_ENT_SIZE);
	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}

static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(le32_to_cpu(ent->old_map)))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry ent;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;
	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
		"arena->logoff: %#llx is unaligned\n", arena->logoff);

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
		cond_resched();
	}

	for (i = 0; i < arena->nfree; i++) {
		ent.lba = cpu_to_le32(i);
		ent.old_map = cpu_to_le32(arena->external_nlba + i);
		ent.new_map = cpu_to_le32(arena->external_nlba + i);
		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &ent, 0);
		if (ret)
			goto free;
	}

 free:
	kfree(zerobuf);
	return ret;
}

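/*
 * Note the shape of the initial entries written above: for lane i,
 * old_map == new_map == external_nlba + i, i.e. each lane's free block
 * is one of the 'nfree' internal blocks sitting past the externally
 * visible LBA space. btt_freelist_init() below relies on
 * old_map == new_map to recognize an untouched flog entry.
 */
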
static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
				chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}

static int btt_freelist_init(struct arena_info *arena)
{
	int new, ret;
	struct log_entry log_new;
	u32 i, map_entry, log_oldmap, log_newmap;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* old and new map entries with any flags stripped out */
		log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
		log_newmap = ent_lba(le32_to_cpu(log_new.new_map));

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = log_oldmap;

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
		    !ent_normal(le32_to_cpu(log_new.old_map))) {
			arena->freelist[i].has_err = 1;
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				dev_err_ratelimited(to_dev(arena),
					"Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_oldmap == log_newmap)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;

		/*
		 * The map_entry from btt_map_read is stripped of any flag
		 * bits, so use the stripped out versions from the log as
		 * well for testing whether recovery is needed. For
		 * restoration, use the 'raw' version of the log entries as
		 * that captured what we were going to write originally.
		 */
		if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static bool ent_is_padding(struct log_entry *ent)
{
	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
		&& (ent->seq == 0);
}

/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
static int log_set_indices(struct arena_info *arena)
{
	bool idx_set = false, initial_state = true;
	int ret, log_index[2] = {-1, -1};
	u32 i, j, next_idx = 0;
	struct log_group log;
	u32 pad_count = 0;

	for (i = 0; i < arena->nfree; i++) {
		ret = btt_log_group_read(arena, i, &log);
		if (ret < 0)
			return ret;

		for (j = 0; j < 4; j++) {
			if (!idx_set) {
				if (ent_is_padding(&log.ent[j])) {
					pad_count++;
					continue;
				} else {
					/* Skip if index has been recorded */
					if ((next_idx == 1) &&
						(j == log_index[0]))
						continue;
					/* valid entry, record index */
					log_index[next_idx] = j;
					next_idx++;
				}
				if (next_idx == 2) {
					/* two valid entries found */
					idx_set = true;
				} else if (next_idx > 2) {
					/* too many valid indices */
					return -ENXIO;
				}
			} else {
				/*
				 * once the indices have been set, just verify
				 * that all subsequent log groups are either in
				 * their initial state or follow the same
				 * indices.
				 */
				if (j == log_index[0]) {
					/* entry must be 'valid' */
					if (ent_is_padding(&log.ent[j]))
						return -ENXIO;
				} else if (j == log_index[1]) {
					;
					/*
					 * log_index[1] can be padding if the
					 * lane never got used and it is still
					 * in the initial state (three 'padding'
					 * entries)
					 */
				} else {
					/* entry must be invalid (padding) */
					if (!ent_is_padding(&log.ent[j]))
						return -ENXIO;
				}
			}
		}
		/*
		 * If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
		 * initial_state
		 */
		if (pad_count < 3)
			initial_state = false;
		pad_count = 0;
	}

	if (!initial_state && !idx_set)
		return -ENXIO;

	/*
	 * If all the entries in the log were in the initial state,
	 * assume new padding scheme
	 */
	if (initial_state)
		log_index[1] = 1;

	/*
	 * Only allow the known permutations of log/padding indices,
	 * i.e. (0, 1), and (0, 2)
	 */
	if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
		; /* known index possibilities */
	else {
		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
		return -ENXIO;
	}

	arena->log_index[0] = log_index[0];
	arena->log_index[1] = log_index[1];
	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
	return 0;
}

static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;

	/* Default log indices are (0,1) */
	arena->log_index[0] = 0;
	arena->log_index[1] = 1;
	return arena;
}

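/*
 * Resulting on-media layout of an arena, as computed above (offsets
 * relative to the start of the arena):
 *
 *   infooff  dataoff        mapoff    logoff    info2off
 *   | info   | data blocks  | map     | log     | info backup |
 *   | 1 page | datasize     | mapsize | logsize | 1 page      |
 */
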
static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function parses an existing, valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = log_set_indices(arena);
		if (ret) {
			dev_err(to_dev(arena),
				"Unable to deduce log/padding indices\n");
			goto out;
		}

		mutex_init(&arena->err_lock);
		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}

static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	memcpy(super->uuid, nd_btt->uuid, 16);
	memcpy(super->parent_uuid, parent_uuid, 16);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

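/*
 * On the lock index used above: map entries that share an L1 cache line
 * hash to the same lock, and the locks are striped modulo nfree. Distinct
 * premap LBAs may therefore contend on one lock, which is safe (merely
 * coarser), while updates to entries in the same cache line are always
 * serialized by the same lock.
 */
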
static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_atomic(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
					&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret) {
			/* Media error - set the e_flag */
			if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
				dev_warn_ratelimited(to_dev(arena),
					"Error persistently tracking bad blocks at %#x\n",
					premap);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}

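/*
 * Overview of the write path implemented below: each write goes to a free
 * block taken from the acquiring lane's freelist entry. The writer waits
 * for any in-flight readers of that block (tracked in the RTT) to drain,
 * writes the data and any integrity metadata, and then, under the map
 * lock, records the old->new transition in the flog before updating the
 * map entry itself. btt_freelist_init() uses that flog record to replay a
 * map update that a crash cut short.
 */
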
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

 retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			bool is_write, sector_t sector)
{
	int ret;

	if (!is_write) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = q->queuedata;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		if (len > PAGE_SIZE || len < btt->sector_size ||
				len % btt->sector_size) {
			dev_err_ratelimited(&btt->nd_btt->dev,
				"unaligned bio segment (len: %d)\n", len);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				op_is_write(bio_op(bio)), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
				"io error in %s sector %lld, len %d,\n",
				(op_is_write(bio_op(bio))) ? "WRITE" :
				"READ",
				(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, bool is_write)
{
	struct btt *btt = bdev->bd_disk->private_data;
	int rc;
	unsigned int len;

	len = hpage_nr_pages(page) * PAGE_SIZE;
	rc = btt_do_bvec(btt, NULL, page, len, 0, is_write, sector);
	if (rc == 0)
		page_endio(page, is_write, 0);

	return rc;
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner = THIS_MODULE,
	.rw_page = btt_rw_page,
	.getgeo = btt_getgeo,
	.revalidate_disk = nvdimm_revalidate_disk,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* create a new disk and request queue for btt */
	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
	if (!btt->btt_queue)
		return -ENOMEM;

	btt->btt_disk = alloc_disk(0);
	if (!btt->btt_disk) {
		blk_cleanup_queue(btt->btt_queue);
		return -ENOMEM;
	}

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;
	btt->btt_disk->queue = btt->btt_queue;
	btt->btt_disk->flags = GENHD_FL_EXT_DEVT;

	blk_queue_make_request(btt->btt_queue, btt_make_request);
	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
	btt->btt_queue->queuedata = btt;

	if (btt_meta_size(btt)) {
		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

		if (rc) {
			del_gendisk(btt->btt_disk);
			put_disk(btt->btt_disk);
			blk_cleanup_queue(btt->btt_queue);
			return rc;
		}
	}
	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	revalidate_disk(btt->btt_disk);

	return 0;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
	blk_cleanup_queue(btt->btt_queue);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @nd_region:	parent region of the backing device
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt_sb *btt_sb;
	struct btt *btt;
	size_t rawsize;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
	if (!btt_sb)
		return -ENOMEM;

	/*
	 * If this returns < 0, that is ok as it just means there wasn't
	 * an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version dependent fields in nd_btt to be
	 * set correctly based on the holder class
	 */
	nd_btt_version(nd_btt, ndns, btt_sb);

	rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev),
				ARENA_MIN_SIZE + nd_btt->initial_offset);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);