 1/*
2 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
3 *
4 * This file is released under the GPL.
5 */
6
7#include "dm-zoned.h"
8
9#include <linux/module.h>
10#include <linux/crc32.h>
11
12#define DM_MSG_PREFIX "zoned metadata"
13
14/*
15 * Metadata version.
16 */
17#define DMZ_META_VER 1
18
19/*
20 * On-disk super block magic.
21 */
22#define DMZ_MAGIC ((((unsigned int)('D')) << 24) | \
23 (((unsigned int)('Z')) << 16) | \
24 (((unsigned int)('B')) << 8) | \
25 ((unsigned int)('D')))
26
27/*
 28 * On-disk super block.
 29 * This uses only 512 B but occupies a full 4KB block on disk. This block is
30 * followed on disk by the mapping table of chunks to zones and the bitmap
31 * blocks indicating zone block validity.
32 * The overall resulting metadata format is:
33 * (1) Super block (1 block)
34 * (2) Chunk mapping table (nr_map_blocks)
35 * (3) Bitmap blocks (nr_bitmap_blocks)
36 * All metadata blocks are stored in conventional zones, starting from the
 37 * first conventional zone found on disk.
38 */
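/*
 * A layout sketch (illustrative only, assuming 4 KB metadata blocks,
 * 256 MB zones and 65536 chunks): block 0 holds the super block,
 * blocks 1..128 hold the chunk mapping table (65536 chunks * 8 B per
 * entry / 4096 B per block = 128 blocks), and the bitmap blocks follow,
 * using zone_nr_blocks / 8 bytes (here 2 blocks) per zone.
 */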
39struct dmz_super {
40 /* Magic number */
41 __le32 magic; /* 4 */
42
43 /* Metadata version number */
44 __le32 version; /* 8 */
45
46 /* Generation number */
47 __le64 gen; /* 16 */
48
49 /* This block number */
50 __le64 sb_block; /* 24 */
51
52 /* The number of metadata blocks, including this super block */
53 __le32 nr_meta_blocks; /* 28 */
54
55 /* The number of sequential zones reserved for reclaim */
56 __le32 nr_reserved_seq; /* 32 */
57
58 /* The number of entries in the mapping table */
59 __le32 nr_chunks; /* 36 */
60
61 /* The number of blocks used for the chunk mapping table */
62 __le32 nr_map_blocks; /* 40 */
63
64 /* The number of blocks used for the block bitmaps */
65 __le32 nr_bitmap_blocks; /* 44 */
66
67 /* Checksum */
68 __le32 crc; /* 48 */
69
70 /* Padding to full 512B sector */
71 u8 reserved[464]; /* 512 */
72};
73
74/*
75 * Chunk mapping entry: entries are indexed by chunk number
76 * and give the zone ID (dzone_id) mapping the chunk on disk.
77 * This zone may be sequential or random. If it is a sequential
78 * zone, a second zone (bzone_id) used as a write buffer may
79 * also be specified. This second zone will always be a randomly
80 * writeable zone.
81 */
82struct dmz_map {
83 __le32 dzone_id;
84 __le32 bzone_id;
85};
86
87/*
 88 * Chunk mapping table metadata: 512 8-byte entries per 4KB block.
89 */
90#define DMZ_MAP_ENTRIES (DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
91#define DMZ_MAP_ENTRIES_SHIFT (ilog2(DMZ_MAP_ENTRIES))
92#define DMZ_MAP_ENTRIES_MASK (DMZ_MAP_ENTRIES - 1)
93#define DMZ_MAP_UNMAPPED UINT_MAX
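
/*
 * Worked example (assuming 4 KB metadata blocks): struct dmz_map is 8 B,
 * so DMZ_MAP_ENTRIES is 512, DMZ_MAP_ENTRIES_SHIFT is 9 and
 * DMZ_MAP_ENTRIES_MASK is 0x1FF. The entry for chunk 1000 thus lives in
 * mapping block 1000 >> 9 = 1, at index 1000 & 0x1FF = 488, which is how
 * dmz_set_chunk_mapping() and dmz_get_chunk_mapping() locate it.
 */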
94
95/*
 96 * Metadata block descriptor (for cached metadata blocks).
97 */
98struct dmz_mblock {
99 struct rb_node node;
100 struct list_head link;
101 sector_t no;
102 unsigned int ref;
103 unsigned long state;
104 struct page *page;
105 void *data;
106};
107
108/*
109 * Metadata block state flags.
110 */
111enum {
112 DMZ_META_DIRTY,
113 DMZ_META_READING,
114 DMZ_META_WRITING,
115 DMZ_META_ERROR,
116};
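
/*
 * DMZ_META_READING and DMZ_META_WRITING are set while a read or write BIO
 * is in flight on the block and are cleared by dmz_mblock_bio_end_io().
 * DMZ_META_DIRTY is set by dmz_dirty_mblock() and cleared once the block
 * has been written out by dmz_flush_metadata(). DMZ_META_ERROR records a
 * failed BIO.
 */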
117
118/*
119 * Super block information (one per metadata set).
120 */
121struct dmz_sb {
122 sector_t block;
123 struct dmz_mblock *mblk;
124 struct dmz_super *sb;
125};
126
127/*
128 * In-memory metadata.
129 */
130struct dmz_metadata {
131 struct dmz_dev *dev;
132
133 sector_t zone_bitmap_size;
134 unsigned int zone_nr_bitmap_blocks;
135
136 unsigned int nr_bitmap_blocks;
137 unsigned int nr_map_blocks;
138
139 unsigned int nr_useable_zones;
140 unsigned int nr_meta_blocks;
141 unsigned int nr_meta_zones;
142 unsigned int nr_data_zones;
143 unsigned int nr_rnd_zones;
144 unsigned int nr_reserved_seq;
145 unsigned int nr_chunks;
146
147 /* Zone information array */
148 struct dm_zone *zones;
149
150 struct dm_zone *sb_zone;
151 struct dmz_sb sb[2];
152 unsigned int mblk_primary;
153 u64 sb_gen;
154 unsigned int min_nr_mblks;
155 unsigned int max_nr_mblks;
156 atomic_t nr_mblks;
157 struct rw_semaphore mblk_sem;
158 struct mutex mblk_flush_lock;
159 spinlock_t mblk_lock;
160 struct rb_root mblk_rbtree;
161 struct list_head mblk_lru_list;
162 struct list_head mblk_dirty_list;
163 struct shrinker mblk_shrinker;
164
165 /* Zone allocation management */
166 struct mutex map_lock;
167 struct dmz_mblock **map_mblk;
168 unsigned int nr_rnd;
169 atomic_t unmap_nr_rnd;
170 struct list_head unmap_rnd_list;
171 struct list_head map_rnd_list;
172
173 unsigned int nr_seq;
174 atomic_t unmap_nr_seq;
175 struct list_head unmap_seq_list;
176 struct list_head map_seq_list;
177
178 atomic_t nr_reserved_seq_zones;
179 struct list_head reserved_seq_zones_list;
180
181 wait_queue_head_t free_wq;
182};
183
184/*
185 * Various accessors
186 */
187unsigned int dmz_id(struct dmz_metadata *zmd, struct dm_zone *zone)
188{
189 return ((unsigned int)(zone - zmd->zones));
190}
191
192sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
193{
194 return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_sectors_shift;
195}
196
197sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
198{
199 return (sector_t)dmz_id(zmd, zone) << zmd->dev->zone_nr_blocks_shift;
200}
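
/*
 * For instance, assuming 256 MB zones (zone_nr_sectors_shift = 19,
 * zone_nr_blocks_shift = 16), zone 3 starts at 512 B sector
 * 3 << 19 = 1572864 and at 4 KB block 3 << 16 = 196608.
 */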
201
202unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
203{
204 return zmd->nr_chunks;
205}
206
207unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd)
208{
209 return zmd->nr_rnd;
210}
211
212unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd)
213{
214 return atomic_read(&zmd->unmap_nr_rnd);
215}
216
217/*
218 * Lock/unlock mapping table.
219 * The map lock also protects all the zone lists.
220 */
221void dmz_lock_map(struct dmz_metadata *zmd)
222{
223 mutex_lock(&zmd->map_lock);
224}
225
226void dmz_unlock_map(struct dmz_metadata *zmd)
227{
228 mutex_unlock(&zmd->map_lock);
229}
230
231/*
232 * Lock/unlock metadata access. This is a "read" lock on a semaphore
233 * that prevents metadata flush from running while metadata are being
234 * modified. The actual metadata write mutual exclusion is achieved with
 235 * the map lock and zone state management (active and reclaim state are
236 * mutually exclusive).
237 */
238void dmz_lock_metadata(struct dmz_metadata *zmd)
239{
240 down_read(&zmd->mblk_sem);
241}
242
243void dmz_unlock_metadata(struct dmz_metadata *zmd)
244{
245 up_read(&zmd->mblk_sem);
246}
247
248/*
249 * Lock/unlock flush: prevent concurrent executions
250 * of dmz_flush_metadata as well as metadata modification in reclaim
251 * while flush is being executed.
252 */
253void dmz_lock_flush(struct dmz_metadata *zmd)
254{
255 mutex_lock(&zmd->mblk_flush_lock);
256}
257
258void dmz_unlock_flush(struct dmz_metadata *zmd)
259{
260 mutex_unlock(&zmd->mblk_flush_lock);
261}
262
263/*
264 * Allocate a metadata block.
265 */
266static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
267 sector_t mblk_no)
268{
269 struct dmz_mblock *mblk = NULL;
270
271 /* See if we can reuse cached blocks */
272 if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
273 spin_lock(&zmd->mblk_lock);
274 mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
275 struct dmz_mblock, link);
276 if (mblk) {
277 list_del_init(&mblk->link);
278 rb_erase(&mblk->node, &zmd->mblk_rbtree);
279 mblk->no = mblk_no;
280 }
281 spin_unlock(&zmd->mblk_lock);
282 if (mblk)
283 return mblk;
284 }
285
286 /* Allocate a new block */
287 mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
288 if (!mblk)
289 return NULL;
290
291 mblk->page = alloc_page(GFP_NOIO);
292 if (!mblk->page) {
293 kfree(mblk);
294 return NULL;
295 }
296
297 RB_CLEAR_NODE(&mblk->node);
298 INIT_LIST_HEAD(&mblk->link);
299 mblk->ref = 0;
300 mblk->state = 0;
301 mblk->no = mblk_no;
302 mblk->data = page_address(mblk->page);
303
304 atomic_inc(&zmd->nr_mblks);
305
306 return mblk;
307}
308
309/*
310 * Free a metadata block.
311 */
312static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
313{
314 __free_pages(mblk->page, 0);
315 kfree(mblk);
316
317 atomic_dec(&zmd->nr_mblks);
318}
319
320/*
321 * Insert a metadata block in the rbtree.
322 */
323static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
324{
325 struct rb_root *root = &zmd->mblk_rbtree;
326 struct rb_node **new = &(root->rb_node), *parent = NULL;
327 struct dmz_mblock *b;
328
329 /* Figure out where to put the new node */
330 while (*new) {
331 b = container_of(*new, struct dmz_mblock, node);
332 parent = *new;
333 new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
334 }
335
336 /* Add new node and rebalance tree */
337 rb_link_node(&mblk->node, parent, new);
338 rb_insert_color(&mblk->node, root);
339}
340
341/*
342 * Lookup a metadata block in the rbtree. If the block is found, increment
343 * its reference count.
344 */
345static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
346 sector_t mblk_no)
347{
348 struct rb_root *root = &zmd->mblk_rbtree;
349 struct rb_node *node = root->rb_node;
350 struct dmz_mblock *mblk;
351
352 while (node) {
353 mblk = container_of(node, struct dmz_mblock, node);
354 if (mblk->no == mblk_no) {
355 /*
356 * If this is the first reference to the block,
357 * remove it from the LRU list.
358 */
359 mblk->ref++;
360 if (mblk->ref == 1 &&
361 !test_bit(DMZ_META_DIRTY, &mblk->state))
362 list_del_init(&mblk->link);
363 return mblk;
364 }
365 node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
366 }
367
368 return NULL;
369}
370
371/*
372 * Metadata block BIO end callback.
373 */
374static void dmz_mblock_bio_end_io(struct bio *bio)
375{
376 struct dmz_mblock *mblk = bio->bi_private;
377 int flag;
378
379 if (bio->bi_status)
380 set_bit(DMZ_META_ERROR, &mblk->state);
381
382 if (bio_op(bio) == REQ_OP_WRITE)
383 flag = DMZ_META_WRITING;
384 else
385 flag = DMZ_META_READING;
386
387 clear_bit_unlock(flag, &mblk->state);
388 smp_mb__after_atomic();
389 wake_up_bit(&mblk->state, flag);
390
391 bio_put(bio);
392}
393
394/*
395 * Read an uncached metadata block from disk and add it to the cache.
396 */
397static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
398 sector_t mblk_no)
399{
400 struct dmz_mblock *mblk, *m;
401 sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
402 struct bio *bio;
403
404 if (dmz_bdev_is_dying(zmd->dev))
405 return ERR_PTR(-EIO);
406
407 /* Get a new block and a BIO to read it */
408 mblk = dmz_alloc_mblock(zmd, mblk_no);
409 if (!mblk)
410 return ERR_PTR(-ENOMEM);
411
412 bio = bio_alloc(GFP_NOIO, 1);
413 if (!bio) {
414 dmz_free_mblock(zmd, mblk);
415 return ERR_PTR(-ENOMEM);
416 }
417
418 spin_lock(&zmd->mblk_lock);
419
420 /*
421 * Make sure that another context did not start reading
422 * the block already.
423 */
424 m = dmz_get_mblock_fast(zmd, mblk_no);
425 if (m) {
426 spin_unlock(&zmd->mblk_lock);
427 dmz_free_mblock(zmd, mblk);
428 bio_put(bio);
429 return m;
430 }
431
432 mblk->ref++;
433 set_bit(DMZ_META_READING, &mblk->state);
434 dmz_insert_mblock(zmd, mblk);
435
436 spin_unlock(&zmd->mblk_lock);
437
438 /* Submit read BIO */
439 bio->bi_iter.bi_sector = dmz_blk2sect(block);
440 bio_set_dev(bio, zmd->dev->bdev);
441 bio->bi_private = mblk;
442 bio->bi_end_io = dmz_mblock_bio_end_io;
443 bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
444 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
445 submit_bio(bio);
446
447 return mblk;
448}
449
450/*
451 * Free metadata blocks.
452 */
453static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
454 unsigned long limit)
455{
456 struct dmz_mblock *mblk;
457 unsigned long count = 0;
458
459 if (!zmd->max_nr_mblks)
460 return 0;
461
462 while (!list_empty(&zmd->mblk_lru_list) &&
463 atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks &&
464 count < limit) {
465 mblk = list_first_entry(&zmd->mblk_lru_list,
466 struct dmz_mblock, link);
467 list_del_init(&mblk->link);
468 rb_erase(&mblk->node, &zmd->mblk_rbtree);
469 dmz_free_mblock(zmd, mblk);
470 count++;
471 }
472
473 return count;
474}
475
476/*
477 * For mblock shrinker: get the number of unused metadata blocks in the cache.
478 */
479static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
480 struct shrink_control *sc)
481{
482 struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
483
484 return atomic_read(&zmd->nr_mblks);
485}
486
487/*
488 * For mblock shrinker: scan unused metadata blocks and shrink the cache.
489 */
490static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
491 struct shrink_control *sc)
492{
493 struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
494 unsigned long count;
495
496 spin_lock(&zmd->mblk_lock);
497 count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
498 spin_unlock(&zmd->mblk_lock);
499
500 return count ? count : SHRINK_STOP;
501}
502
503/*
504 * Release a metadata block.
505 */
506static void dmz_release_mblock(struct dmz_metadata *zmd,
507 struct dmz_mblock *mblk)
508{
509
510 if (!mblk)
511 return;
512
513 spin_lock(&zmd->mblk_lock);
514
515 mblk->ref--;
516 if (mblk->ref == 0) {
517 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
518 rb_erase(&mblk->node, &zmd->mblk_rbtree);
519 dmz_free_mblock(zmd, mblk);
520 } else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
521 list_add_tail(&mblk->link, &zmd->mblk_lru_list);
522 dmz_shrink_mblock_cache(zmd, 1);
523 }
524 }
525
526 spin_unlock(&zmd->mblk_lock);
527}
528
529/*
530 * Get a metadata block from the rbtree. If the block
531 * is not present, read it from disk.
532 */
533static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
534 sector_t mblk_no)
535{
536 struct dmz_mblock *mblk;
537
538 /* Check rbtree */
539 spin_lock(&zmd->mblk_lock);
540 mblk = dmz_get_mblock_fast(zmd, mblk_no);
541 spin_unlock(&zmd->mblk_lock);
542
543 if (!mblk) {
544 /* Cache miss: read the block from disk */
545 mblk = dmz_get_mblock_slow(zmd, mblk_no);
546 if (IS_ERR(mblk))
547 return mblk;
548 }
549
550 /* Wait for on-going read I/O and check for error */
551 wait_on_bit_io(&mblk->state, DMZ_META_READING,
552 TASK_UNINTERRUPTIBLE);
553 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
554 dmz_release_mblock(zmd, mblk);
555 dmz_check_bdev(zmd->dev);
556 return ERR_PTR(-EIO);
557 }
558
559 return mblk;
560}
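
/*
 * Typical use, as in dmz_validate_blocks() below: get the block, modify
 * mblk->data, mark it dirty with dmz_dirty_mblock() and drop the reference
 * with dmz_release_mblock(). The block then sits on the dirty list until
 * the next dmz_flush_metadata() call writes it out.
 */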
561
562/*
563 * Mark a metadata block dirty.
564 */
565static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
566{
567 spin_lock(&zmd->mblk_lock);
568 if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
569 list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
570 spin_unlock(&zmd->mblk_lock);
571}
572
573/*
574 * Issue a metadata block write BIO.
575 */
576static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
577 unsigned int set)
578{
579 sector_t block = zmd->sb[set].block + mblk->no;
580 struct bio *bio;
581
582 if (dmz_bdev_is_dying(zmd->dev))
583 return -EIO;
584
585 bio = bio_alloc(GFP_NOIO, 1);
586 if (!bio) {
587 set_bit(DMZ_META_ERROR, &mblk->state);
588 return -ENOMEM;
589 }
590
591 set_bit(DMZ_META_WRITING, &mblk->state);
592
593 bio->bi_iter.bi_sector = dmz_blk2sect(block);
594 bio_set_dev(bio, zmd->dev->bdev);
595 bio->bi_private = mblk;
596 bio->bi_end_io = dmz_mblock_bio_end_io;
597 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
598 bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
599 submit_bio(bio);
600
601 return 0;
602}
603
604/*
605 * Read/write a metadata block.
606 */
607static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
608 struct page *page)
609{
610 struct bio *bio;
611 int ret;
612
613 if (dmz_bdev_is_dying(zmd->dev))
614 return -EIO;
615
616 bio = bio_alloc(GFP_NOIO, 1);
617 if (!bio)
618 return -ENOMEM;
619
620 bio->bi_iter.bi_sector = dmz_blk2sect(block);
621 bio_set_dev(bio, zmd->dev->bdev);
622 bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
623 bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
624 ret = submit_bio_wait(bio);
625 bio_put(bio);
626
627 if (ret)
628 dmz_check_bdev(zmd->dev);
629 return ret;
630}
631
632/*
633 * Write super block of the specified metadata set.
634 */
635static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
636{
637 sector_t block = zmd->sb[set].block;
638 struct dmz_mblock *mblk = zmd->sb[set].mblk;
639 struct dmz_super *sb = zmd->sb[set].sb;
640 u64 sb_gen = zmd->sb_gen + 1;
641 int ret;
642
643 sb->magic = cpu_to_le32(DMZ_MAGIC);
644 sb->version = cpu_to_le32(DMZ_META_VER);
645
646 sb->gen = cpu_to_le64(sb_gen);
647
648 sb->sb_block = cpu_to_le64(block);
649 sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
650 sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
651 sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);
652
653 sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
654 sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);
655
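	/*
	 * The CRC is seeded with the generation number and computed over the
	 * whole 4 KB block with the crc field zeroed, matching the check done
	 * in dmz_check_sb().
	 */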
656 sb->crc = 0;
657 sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));
658
659 ret = dmz_rdwr_block(zmd, REQ_OP_WRITE, block, mblk->page);
660 if (ret == 0)
661 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
662
663 return ret;
664}
665
666/*
667 * Write dirty metadata blocks to the specified set.
668 */
669static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
670 struct list_head *write_list,
671 unsigned int set)
672{
673 struct dmz_mblock *mblk;
674 struct blk_plug plug;
675 int ret = 0, nr_mblks_submitted = 0;
676
677 /* Issue writes */
678 blk_start_plug(&plug);
679 list_for_each_entry(mblk, write_list, link) {
680 ret = dmz_write_mblock(zmd, mblk, set);
681 if (ret)
682 break;
683 nr_mblks_submitted++;
684 }
685 blk_finish_plug(&plug);
686
687 /* Wait for completion */
688 list_for_each_entry(mblk, write_list, link) {
689 if (!nr_mblks_submitted)
690 break;
691 wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
692 TASK_UNINTERRUPTIBLE);
693 if (test_bit(DMZ_META_ERROR, &mblk->state)) {
694 clear_bit(DMZ_META_ERROR, &mblk->state);
695 dmz_check_bdev(zmd->dev);
696 ret = -EIO;
697 }
698 nr_mblks_submitted--;
699 }
700
701 /* Flush drive cache (this will also sync data) */
702 if (ret == 0)
703 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
704
705 return ret;
706}
707
708/*
709 * Log dirty metadata blocks.
710 */
711static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd,
712 struct list_head *write_list)
713{
714 unsigned int log_set = zmd->mblk_primary ^ 0x1;
715 int ret;
716
717 /* Write dirty blocks to the log */
718 ret = dmz_write_dirty_mblocks(zmd, write_list, log_set);
719 if (ret)
720 return ret;
721
722 /*
723 * No error so far: now validate the log by updating the
724 * log index super block generation.
725 */
726 ret = dmz_write_sb(zmd, log_set);
727 if (ret)
728 return ret;
729
730 return 0;
731}
732
733/*
734 * Flush dirty metadata blocks.
735 */
736int dmz_flush_metadata(struct dmz_metadata *zmd)
737{
738 struct dmz_mblock *mblk;
739 struct list_head write_list;
740 int ret;
741
742 if (WARN_ON(!zmd))
743 return 0;
744
745 INIT_LIST_HEAD(&write_list);
746
747 /*
748 * Make sure that metadata blocks are stable before logging: take
749 * the write lock on the metadata semaphore to prevent target BIOs
750 * from modifying metadata.
751 */
752 down_write(&zmd->mblk_sem);
753
754 /*
755 * This is called from the target flush work and reclaim work.
756 * Concurrent execution is not allowed.
757 */
758 dmz_lock_flush(zmd);
759
760 if (dmz_bdev_is_dying(zmd->dev)) {
761 ret = -EIO;
762 goto out;
763 }
764
765 /* Get dirty blocks */
766 spin_lock(&zmd->mblk_lock);
767 list_splice_init(&zmd->mblk_dirty_list, &write_list);
768 spin_unlock(&zmd->mblk_lock);
769
770 /* If there are no dirty metadata blocks, just flush the device cache */
771 if (list_empty(&write_list)) {
772 ret = blkdev_issue_flush(zmd->dev->bdev, GFP_NOIO, NULL);
773 goto err;
774 }
775
776 /*
777 * The primary metadata set is still clean. Keep it this way until
778 * all updates are successful in the secondary set. That is, use
779 * the secondary set as a log.
780 */
781 ret = dmz_log_dirty_mblocks(zmd, &write_list);
782 if (ret)
783 goto err;
784
785 /*
786 * The log is on disk. It is now safe to update in place
787 * in the primary metadata set.
788 */
789 ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
790 if (ret)
791 goto err;
792
793 ret = dmz_write_sb(zmd, zmd->mblk_primary);
794 if (ret)
795 goto err;
796
797 while (!list_empty(&write_list)) {
798 mblk = list_first_entry(&write_list, struct dmz_mblock, link);
799 list_del_init(&mblk->link);
800
801 spin_lock(&zmd->mblk_lock);
802 clear_bit(DMZ_META_DIRTY, &mblk->state);
803 if (mblk->ref == 0)
804 list_add_tail(&mblk->link, &zmd->mblk_lru_list);
805 spin_unlock(&zmd->mblk_lock);
806 }
807
808 zmd->sb_gen++;
809out:
810 dmz_unlock_flush(zmd);
811 up_write(&zmd->mblk_sem);
812
813 return ret;
814
815err:
816 if (!list_empty(&write_list)) {
817 spin_lock(&zmd->mblk_lock);
818 list_splice(&write_list, &zmd->mblk_dirty_list);
819 spin_unlock(&zmd->mblk_lock);
820 }
821 if (!dmz_check_bdev(zmd->dev))
822 ret = -EIO;
823 goto out;
824}
825
826/*
827 * Check super block.
828 */
829static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_super *sb)
830{
831 unsigned int nr_meta_zones, nr_data_zones;
832 struct dmz_dev *dev = zmd->dev;
833 u32 crc, stored_crc;
834 u64 gen;
835
836 gen = le64_to_cpu(sb->gen);
837 stored_crc = le32_to_cpu(sb->crc);
838 sb->crc = 0;
839 crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
840 if (crc != stored_crc) {
841 dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)",
842 crc, stored_crc);
843 return -ENXIO;
844 }
845
846 if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
847 dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
848 DMZ_MAGIC, le32_to_cpu(sb->magic));
849 return -ENXIO;
850 }
851
852 if (le32_to_cpu(sb->version) != DMZ_META_VER) {
853 dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
854 DMZ_META_VER, le32_to_cpu(sb->version));
855 return -ENXIO;
856 }
857
858 nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + dev->zone_nr_blocks - 1)
859 >> dev->zone_nr_blocks_shift;
860 if (!nr_meta_zones ||
861 nr_meta_zones >= zmd->nr_rnd_zones) {
862 dmz_dev_err(dev, "Invalid number of metadata blocks");
863 return -ENXIO;
864 }
865
866 if (!le32_to_cpu(sb->nr_reserved_seq) ||
867 le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) {
868 dmz_dev_err(dev, "Invalid number of reserved sequential zones");
869 return -ENXIO;
870 }
871
872 nr_data_zones = zmd->nr_useable_zones -
873 (nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq));
874 if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) {
875 dmz_dev_err(dev, "Invalid number of chunks %u / %u",
876 le32_to_cpu(sb->nr_chunks), nr_data_zones);
877 return -ENXIO;
878 }
879
880 /* OK */
881 zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks);
882 zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq);
883 zmd->nr_chunks = le32_to_cpu(sb->nr_chunks);
884 zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks);
885 zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks);
886 zmd->nr_meta_zones = nr_meta_zones;
887 zmd->nr_data_zones = nr_data_zones;
888
889 return 0;
890}
891
892/*
893 * Read the first or second super block from disk.
894 */
895static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set)
896{
897 return dmz_rdwr_block(zmd, REQ_OP_READ, zmd->sb[set].block,
898 zmd->sb[set].mblk->page);
899}
900
901/*
 902 * Determine the position of the secondary super block on disk.
903 * This is used only if a corruption of the primary super block
904 * is detected.
905 */
906static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
907{
908 unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
909 struct dmz_mblock *mblk;
910 int i;
911
912 /* Allocate a block */
913 mblk = dmz_alloc_mblock(zmd, 0);
914 if (!mblk)
915 return -ENOMEM;
916
917 zmd->sb[1].mblk = mblk;
918 zmd->sb[1].sb = mblk->data;
919
920 /* Bad first super block: search for the second one */
921 zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
922 for (i = 0; i < zmd->nr_rnd_zones - 1; i++) {
923 if (dmz_read_sb(zmd, 1) != 0)
924 break;
925 if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
926 return 0;
927 zmd->sb[1].block += zone_nr_blocks;
928 }
929
930 dmz_free_mblock(zmd, mblk);
931 zmd->sb[1].mblk = NULL;
932
933 return -EIO;
934}
935
936/*
 937 * Allocate a cache block and read the first or second super block into it.
938 */
939static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set)
940{
941 struct dmz_mblock *mblk;
942 int ret;
943
944 /* Allocate a block */
945 mblk = dmz_alloc_mblock(zmd, 0);
946 if (!mblk)
947 return -ENOMEM;
948
949 zmd->sb[set].mblk = mblk;
950 zmd->sb[set].sb = mblk->data;
951
952 /* Read super block */
953 ret = dmz_read_sb(zmd, set);
954 if (ret) {
955 dmz_free_mblock(zmd, mblk);
956 zmd->sb[set].mblk = NULL;
957 return ret;
958 }
959
960 return 0;
961}
962
963/*
964 * Recover a metadata set.
965 */
966static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
967{
968 unsigned int src_set = dst_set ^ 0x1;
969 struct page *page;
970 int i, ret;
971
972 dmz_dev_warn(zmd->dev, "Metadata set %u invalid: recovering", dst_set);
973
974 if (dst_set == 0)
975 zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
976 else {
977 zmd->sb[1].block = zmd->sb[0].block +
978 (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
979 }
980
981 page = alloc_page(GFP_NOIO);
982 if (!page)
983 return -ENOMEM;
984
985 /* Copy metadata blocks */
986 for (i = 1; i < zmd->nr_meta_blocks; i++) {
987 ret = dmz_rdwr_block(zmd, REQ_OP_READ,
988 zmd->sb[src_set].block + i, page);
989 if (ret)
990 goto out;
991 ret = dmz_rdwr_block(zmd, REQ_OP_WRITE,
992 zmd->sb[dst_set].block + i, page);
993 if (ret)
994 goto out;
995 }
996
997 /* Finalize with the super block */
998 if (!zmd->sb[dst_set].mblk) {
999 zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
1000 if (!zmd->sb[dst_set].mblk) {
1001 ret = -ENOMEM;
1002 goto out;
1003 }
1004 zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
1005 }
1006
1007 ret = dmz_write_sb(zmd, dst_set);
1008out:
1009 __free_pages(page, 0);
1010
1011 return ret;
1012}
1013
1014/*
1015 * Get super block from disk.
1016 */
1017static int dmz_load_sb(struct dmz_metadata *zmd)
1018{
1019 bool sb_good[2] = {false, false};
1020 u64 sb_gen[2] = {0, 0};
1021 int ret;
1022
1023 /* Read and check the primary super block */
1024 zmd->sb[0].block = dmz_start_block(zmd, zmd->sb_zone);
1025 ret = dmz_get_sb(zmd, 0);
1026 if (ret) {
1027 dmz_dev_err(zmd->dev, "Read primary super block failed");
1028 return ret;
1029 }
1030
1031 ret = dmz_check_sb(zmd, zmd->sb[0].sb);
1032
1033 /* Read and check secondary super block */
1034 if (ret == 0) {
1035 sb_good[0] = true;
1036 zmd->sb[1].block = zmd->sb[0].block +
1037 (zmd->nr_meta_zones << zmd->dev->zone_nr_blocks_shift);
1038 ret = dmz_get_sb(zmd, 1);
1039 } else
1040 ret = dmz_lookup_secondary_sb(zmd);
1041
1042 if (ret) {
1043 dmz_dev_err(zmd->dev, "Read secondary super block failed");
1044 return ret;
1045 }
1046
1047 ret = dmz_check_sb(zmd, zmd->sb[1].sb);
1048 if (ret == 0)
1049 sb_good[1] = true;
1050
1051 /* Use highest generation sb first */
1052 if (!sb_good[0] && !sb_good[1]) {
1053 dmz_dev_err(zmd->dev, "No valid super block found");
1054 return -EIO;
1055 }
1056
1057 if (sb_good[0])
1058 sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
1059 else
1060 ret = dmz_recover_mblocks(zmd, 0);
1061
1062 if (sb_good[1])
1063 sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
1064 else
1065 ret = dmz_recover_mblocks(zmd, 1);
1066
1067 if (ret) {
1068 dmz_dev_err(zmd->dev, "Recovery failed");
1069 return -EIO;
1070 }
1071
1072 if (sb_gen[0] >= sb_gen[1]) {
1073 zmd->sb_gen = sb_gen[0];
1074 zmd->mblk_primary = 0;
1075 } else {
1076 zmd->sb_gen = sb_gen[1];
1077 zmd->mblk_primary = 1;
1078 }
1079
1080 dmz_dev_debug(zmd->dev, "Using super block %u (gen %llu)",
1081 zmd->mblk_primary, zmd->sb_gen);
1082
1083 return 0;
1084}
1085
1086/*
1087 * Initialize a zone descriptor.
1088 */
1089static int dmz_init_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
1090 struct blk_zone *blkz)
1091{
1092 struct dmz_dev *dev = zmd->dev;
1093
 1094 /* Ignore a possibly smaller last (runt) zone */
1095 if (blkz->len != dev->zone_nr_sectors) {
1096 if (blkz->start + blkz->len == dev->capacity)
1097 return 0;
1098 return -ENXIO;
1099 }
1100
1101 INIT_LIST_HEAD(&zone->link);
1102 atomic_set(&zone->refcount, 0);
1103 zone->chunk = DMZ_MAP_UNMAPPED;
1104
1105 if (blkz->type == BLK_ZONE_TYPE_CONVENTIONAL) {
1106 set_bit(DMZ_RND, &zone->flags);
1107 zmd->nr_rnd_zones++;
1108 } else if (blkz->type == BLK_ZONE_TYPE_SEQWRITE_REQ ||
1109 blkz->type == BLK_ZONE_TYPE_SEQWRITE_PREF) {
1110 set_bit(DMZ_SEQ, &zone->flags);
1111 } else
1112 return -ENXIO;
1113
1114 if (blkz->cond == BLK_ZONE_COND_OFFLINE)
1115 set_bit(DMZ_OFFLINE, &zone->flags);
1116 else if (blkz->cond == BLK_ZONE_COND_READONLY)
1117 set_bit(DMZ_READ_ONLY, &zone->flags);
1118
1119 if (dmz_is_rnd(zone))
1120 zone->wp_block = 0;
1121 else
1122 zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
1123
1124 if (!dmz_is_offline(zone) && !dmz_is_readonly(zone)) {
1125 zmd->nr_useable_zones++;
1126 if (dmz_is_rnd(zone)) {
1127 zmd->nr_rnd_zones++;
1128 if (!zmd->sb_zone) {
1129 /* Super block zone */
1130 zmd->sb_zone = zone;
1131 }
1132 }
1133 }
1134
1135 return 0;
1136}
1137
1138/*
1139 * Free zones descriptors.
1140 */
1141static void dmz_drop_zones(struct dmz_metadata *zmd)
1142{
1143 kfree(zmd->zones);
1144 zmd->zones = NULL;
1145}
1146
1147/*
1148 * The size of a zone report in number of zones.
1149 * This results in 4096*64B=256KB report zones commands.
1150 */
1151#define DMZ_REPORT_NR_ZONES 4096
1152
1153/*
1154 * Allocate and initialize zone descriptors using the zone
1155 * information from disk.
1156 */
1157static int dmz_init_zones(struct dmz_metadata *zmd)
1158{
1159 struct dmz_dev *dev = zmd->dev;
1160 struct dm_zone *zone;
1161 struct blk_zone *blkz;
1162 unsigned int nr_blkz;
1163 sector_t sector = 0;
1164 int i, ret = 0;
1165
1166 /* Init */
1167 zmd->zone_bitmap_size = dev->zone_nr_blocks >> 3;
1168 zmd->zone_nr_bitmap_blocks = zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT;
1169
1170 /* Allocate zone array */
1171 zmd->zones = kcalloc(dev->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
1172 if (!zmd->zones)
1173 return -ENOMEM;
1174
1175 dmz_dev_info(dev, "Using %zu B for zone information",
1176 sizeof(struct dm_zone) * dev->nr_zones);
1177
1178 /* Get zone information */
1179 nr_blkz = DMZ_REPORT_NR_ZONES;
1180 blkz = kcalloc(nr_blkz, sizeof(struct blk_zone), GFP_KERNEL);
1181 if (!blkz) {
1182 ret = -ENOMEM;
1183 goto out;
1184 }
1185
1186 /*
1187 * Get zone information and initialize zone descriptors.
1188 * At the same time, determine where the super block
1189 * should be: first block of the first randomly writable
1190 * zone.
1191 */
1192 zone = zmd->zones;
1193 while (sector < dev->capacity) {
1194 /* Get zone information */
1195 nr_blkz = DMZ_REPORT_NR_ZONES;
1196 ret = blkdev_report_zones(dev->bdev, sector, blkz,
1197 &nr_blkz, GFP_KERNEL);
1198 if (ret) {
1199 dmz_dev_err(dev, "Report zones failed %d", ret);
1200 goto out;
1201 }
1202
1203 if (!nr_blkz)
1204 break;
1205
1206 /* Process report */
1207 for (i = 0; i < nr_blkz; i++) {
1208 ret = dmz_init_zone(zmd, zone, &blkz[i]);
1209 if (ret)
1210 goto out;
1211 sector += dev->zone_nr_sectors;
1212 zone++;
1213 }
1214 }
1215
1216 /* The entire zone configuration of the disk should now be known */
1217 if (sector < dev->capacity) {
1218 dmz_dev_err(dev, "Failed to get correct zone information");
1219 ret = -ENXIO;
1220 }
1221out:
1222 kfree(blkz);
1223 if (ret)
1224 dmz_drop_zones(zmd);
1225
1226 return ret;
1227}
1228
1229/*
 1230 * Update a zone's information.
1231 */
1232static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1233{
1234 unsigned int nr_blkz = 1;
1235 struct blk_zone blkz;
1236 int ret;
1237
1238 /* Get zone information from disk */
1239 ret = blkdev_report_zones(zmd->dev->bdev, dmz_start_sect(zmd, zone),
1240 &blkz, &nr_blkz, GFP_NOIO);
1241 if (!nr_blkz)
1242 ret = -EIO;
1243 if (ret) {
1244 dmz_dev_err(zmd->dev, "Get zone %u report failed",
1245 dmz_id(zmd, zone));
1246 dmz_check_bdev(zmd->dev);
1247 return ret;
1248 }
1249
1250 clear_bit(DMZ_OFFLINE, &zone->flags);
1251 clear_bit(DMZ_READ_ONLY, &zone->flags);
1252 if (blkz.cond == BLK_ZONE_COND_OFFLINE)
1253 set_bit(DMZ_OFFLINE, &zone->flags);
1254 else if (blkz.cond == BLK_ZONE_COND_READONLY)
1255 set_bit(DMZ_READ_ONLY, &zone->flags);
1256
1257 if (dmz_is_seq(zone))
1258 zone->wp_block = dmz_sect2blk(blkz.wp - blkz.start);
1259 else
1260 zone->wp_block = 0;
1261
1262 return 0;
1263}
1264
1265/*
1266 * Check a zone write pointer position when the zone is marked
1267 * with the sequential write error flag.
1268 */
1269static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
1270 struct dm_zone *zone)
1271{
1272 unsigned int wp = 0;
1273 int ret;
1274
1275 wp = zone->wp_block;
1276 ret = dmz_update_zone(zmd, zone);
1277 if (ret)
1278 return ret;
1279
1280 dmz_dev_warn(zmd->dev, "Processing zone %u write error (zone wp %u/%u)",
1281 dmz_id(zmd, zone), zone->wp_block, wp);
1282
1283 if (zone->wp_block < wp) {
1284 dmz_invalidate_blocks(zmd, zone, zone->wp_block,
1285 wp - zone->wp_block);
1286 }
1287
1288 return 0;
1289}
1290
1291static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
1292{
1293 return &zmd->zones[zone_id];
1294}
1295
1296/*
1297 * Reset a zone write pointer.
1298 */
1299static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1300{
1301 int ret;
1302
1303 /*
1304 * Ignore offline zones, read only zones,
1305 * and conventional zones.
1306 */
1307 if (dmz_is_offline(zone) ||
1308 dmz_is_readonly(zone) ||
1309 dmz_is_rnd(zone))
1310 return 0;
1311
1312 if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
1313 struct dmz_dev *dev = zmd->dev;
1314
1315 ret = blkdev_reset_zones(dev->bdev,
1316 dmz_start_sect(zmd, zone),
1317 dev->zone_nr_sectors, GFP_NOIO);
1318 if (ret) {
1319 dmz_dev_err(dev, "Reset zone %u failed %d",
1320 dmz_id(zmd, zone), ret);
1321 return ret;
1322 }
1323 }
1324
1325 /* Clear write error bit and rewind write pointer position */
1326 clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
1327 zone->wp_block = 0;
1328
1329 return 0;
1330}
1331
1332static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
1333
1334/*
1335 * Initialize chunk mapping.
1336 */
1337static int dmz_load_mapping(struct dmz_metadata *zmd)
1338{
1339 struct dmz_dev *dev = zmd->dev;
1340 struct dm_zone *dzone, *bzone;
1341 struct dmz_mblock *dmap_mblk = NULL;
1342 struct dmz_map *dmap;
1343 unsigned int i = 0, e = 0, chunk = 0;
1344 unsigned int dzone_id;
1345 unsigned int bzone_id;
1346
1347 /* Metadata block array for the chunk mapping table */
1348 zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
 1349 sizeof(struct dmz_mblock *), GFP_KERNEL);
1350 if (!zmd->map_mblk)
1351 return -ENOMEM;
1352
1353 /* Get chunk mapping table blocks and initialize zone mapping */
1354 while (chunk < zmd->nr_chunks) {
1355 if (!dmap_mblk) {
1356 /* Get mapping block */
1357 dmap_mblk = dmz_get_mblock(zmd, i + 1);
1358 if (IS_ERR(dmap_mblk))
1359 return PTR_ERR(dmap_mblk);
1360 zmd->map_mblk[i] = dmap_mblk;
1361 dmap = (struct dmz_map *) dmap_mblk->data;
1362 i++;
1363 e = 0;
1364 }
1365
1366 /* Check data zone */
1367 dzone_id = le32_to_cpu(dmap[e].dzone_id);
1368 if (dzone_id == DMZ_MAP_UNMAPPED)
1369 goto next;
1370
1371 if (dzone_id >= dev->nr_zones) {
1372 dmz_dev_err(dev, "Chunk %u mapping: invalid data zone ID %u",
1373 chunk, dzone_id);
1374 return -EIO;
1375 }
1376
1377 dzone = dmz_get(zmd, dzone_id);
1378 set_bit(DMZ_DATA, &dzone->flags);
1379 dzone->chunk = chunk;
1380 dmz_get_zone_weight(zmd, dzone);
1381
1382 if (dmz_is_rnd(dzone))
1383 list_add_tail(&dzone->link, &zmd->map_rnd_list);
1384 else
1385 list_add_tail(&dzone->link, &zmd->map_seq_list);
1386
1387 /* Check buffer zone */
1388 bzone_id = le32_to_cpu(dmap[e].bzone_id);
1389 if (bzone_id == DMZ_MAP_UNMAPPED)
1390 goto next;
1391
1392 if (bzone_id >= dev->nr_zones) {
1393 dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone ID %u",
1394 chunk, bzone_id);
1395 return -EIO;
1396 }
1397
1398 bzone = dmz_get(zmd, bzone_id);
1399 if (!dmz_is_rnd(bzone)) {
1400 dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone %u",
1401 chunk, bzone_id);
1402 return -EIO;
1403 }
1404
1405 set_bit(DMZ_DATA, &bzone->flags);
1406 set_bit(DMZ_BUF, &bzone->flags);
1407 bzone->chunk = chunk;
1408 bzone->bzone = dzone;
1409 dzone->bzone = bzone;
1410 dmz_get_zone_weight(zmd, bzone);
1411 list_add_tail(&bzone->link, &zmd->map_rnd_list);
1412next:
1413 chunk++;
1414 e++;
1415 if (e >= DMZ_MAP_ENTRIES)
1416 dmap_mblk = NULL;
1417 }
1418
1419 /*
 1420 * At this point, only meta zones and mapped data zones have been
1421 * fully initialized. All remaining zones are unmapped data
1422 * zones. Finish initializing those here.
1423 */
1424 for (i = 0; i < dev->nr_zones; i++) {
1425 dzone = dmz_get(zmd, i);
1426 if (dmz_is_meta(dzone))
1427 continue;
1428
1429 if (dmz_is_rnd(dzone))
1430 zmd->nr_rnd++;
1431 else
1432 zmd->nr_seq++;
1433
1434 if (dmz_is_data(dzone)) {
1435 /* Already initialized */
1436 continue;
1437 }
1438
1439 /* Unmapped data zone */
1440 set_bit(DMZ_DATA, &dzone->flags);
1441 dzone->chunk = DMZ_MAP_UNMAPPED;
1442 if (dmz_is_rnd(dzone)) {
1443 list_add_tail(&dzone->link, &zmd->unmap_rnd_list);
1444 atomic_inc(&zmd->unmap_nr_rnd);
1445 } else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
1446 list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
1447 atomic_inc(&zmd->nr_reserved_seq_zones);
1448 zmd->nr_seq--;
1449 } else {
1450 list_add_tail(&dzone->link, &zmd->unmap_seq_list);
1451 atomic_inc(&zmd->unmap_nr_seq);
1452 }
1453 }
1454
1455 return 0;
1456}
1457
1458/*
1459 * Set a data chunk mapping.
1460 */
1461static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
1462 unsigned int dzone_id, unsigned int bzone_id)
1463{
1464 struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
1465 struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
1466 int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;
1467
1468 dmap[map_idx].dzone_id = cpu_to_le32(dzone_id);
1469 dmap[map_idx].bzone_id = cpu_to_le32(bzone_id);
1470 dmz_dirty_mblock(zmd, dmap_mblk);
1471}
1472
1473/*
1474 * The list of mapped zones is maintained in LRU order.
 1475 * This rotates a zone to the end of its map list.
1476 */
1477static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1478{
1479 if (list_empty(&zone->link))
1480 return;
1481
1482 list_del_init(&zone->link);
1483 if (dmz_is_seq(zone)) {
1484 /* LRU rotate sequential zone */
1485 list_add_tail(&zone->link, &zmd->map_seq_list);
1486 } else {
1487 /* LRU rotate random zone */
1488 list_add_tail(&zone->link, &zmd->map_rnd_list);
1489 }
1490}
1491
1492/*
1493 * The list of mapped random zones is maintained
 1494 * in LRU order. This rotates a zone to the end of the list.
1495 */
1496static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1497{
1498 __dmz_lru_zone(zmd, zone);
1499 if (zone->bzone)
1500 __dmz_lru_zone(zmd, zone->bzone);
1501}
1502
1503/*
1504 * Wait for any zone to be freed.
1505 */
1506static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
1507{
1508 DEFINE_WAIT(wait);
1509
1510 prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
1511 dmz_unlock_map(zmd);
1512 dmz_unlock_metadata(zmd);
1513
1514 io_schedule_timeout(HZ);
1515
1516 dmz_lock_metadata(zmd);
1517 dmz_lock_map(zmd);
1518 finish_wait(&zmd->free_wq, &wait);
1519}
1520
1521/*
1522 * Lock a zone for reclaim (set the zone RECLAIM bit).
 1523 * Returns 0 if the zone cannot be locked or if it is already locked,
1524 * and 1 otherwise.
1525 */
1526int dmz_lock_zone_reclaim(struct dm_zone *zone)
1527{
1528 /* Active zones cannot be reclaimed */
1529 if (dmz_is_active(zone))
1530 return 0;
1531
1532 return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
1533}
1534
1535/*
1536 * Clear a zone reclaim flag.
1537 */
1538void dmz_unlock_zone_reclaim(struct dm_zone *zone)
1539{
1540 WARN_ON(dmz_is_active(zone));
1541 WARN_ON(!dmz_in_reclaim(zone));
1542
1543 clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
1544 smp_mb__after_atomic();
1545 wake_up_bit(&zone->flags, DMZ_RECLAIM);
1546}
1547
1548/*
1549 * Wait for a zone reclaim to complete.
1550 */
1551static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
1552{
1553 dmz_unlock_map(zmd);
1554 dmz_unlock_metadata(zmd);
1555 wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
1556 dmz_lock_metadata(zmd);
1557 dmz_lock_map(zmd);
1558}
1559
1560/*
1561 * Select a random write zone for reclaim.
1562 */
1563static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1564{
1565 struct dm_zone *dzone = NULL;
1566 struct dm_zone *zone;
1567
1568 if (list_empty(&zmd->map_rnd_list))
1569 return ERR_PTR(-EBUSY);
1570
1571 list_for_each_entry(zone, &zmd->map_rnd_list, link) {
1572 if (dmz_is_buf(zone))
1573 dzone = zone->bzone;
1574 else
1575 dzone = zone;
1576 if (dmz_lock_zone_reclaim(dzone))
1577 return dzone;
1578 }
1579
1580 return ERR_PTR(-EBUSY);
1581}
1582
1583/*
1584 * Select a buffered sequential zone for reclaim.
1585 */
1586static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1587{
1588 struct dm_zone *zone;
1589
1590 if (list_empty(&zmd->map_seq_list))
1591 return ERR_PTR(-EBUSY);
1592
1593 list_for_each_entry(zone, &zmd->map_seq_list, link) {
1594 if (!zone->bzone)
1595 continue;
1596 if (dmz_lock_zone_reclaim(zone))
1597 return zone;
1598 }
1599
1600 return ERR_PTR(-EBUSY);
1601}
1602
1603/*
1604 * Select a zone for reclaim.
1605 */
1606struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
1607{
1608 struct dm_zone *zone;
1609
1610 /*
1611 * Search for a zone candidate to reclaim: 2 cases are possible.
 1612 * (1) There are no free sequential zones. Then a random data zone
1613 * cannot be reclaimed. So choose a sequential zone to reclaim so
1614 * that afterward a random zone can be reclaimed.
1615 * (2) At least one free sequential zone is available, then choose
1616 * the oldest random zone (data or buffer) that can be locked.
1617 */
1618 dmz_lock_map(zmd);
1619 if (list_empty(&zmd->reserved_seq_zones_list))
1620 zone = dmz_get_seq_zone_for_reclaim(zmd);
1621 else
1622 zone = dmz_get_rnd_zone_for_reclaim(zmd);
1623 dmz_unlock_map(zmd);
1624
1625 return zone;
1626}
1627
1628/*
1629 * Get the zone mapping a chunk, if the chunk is mapped already.
 1630 * If no mapping exists and the operation is WRITE, a zone is
1631 * allocated and used to map the chunk.
1632 * The zone returned will be set to the active state.
1633 */
1634struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op)
1635{
1636 struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
1637 struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
1638 int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
1639 unsigned int dzone_id;
1640 struct dm_zone *dzone = NULL;
1641 int ret = 0;
1642
1643 dmz_lock_map(zmd);
1644again:
1645 /* Get the chunk mapping */
1646 dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id);
1647 if (dzone_id == DMZ_MAP_UNMAPPED) {
1648 /*
 1649 * Reads and discards in unmapped chunks are fine. But for
1650 * writes, we need a mapping, so get one.
1651 */
1652 if (op != REQ_OP_WRITE)
1653 goto out;
1654
 1655 /* Allocate a random zone */
1656 dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1657 if (!dzone) {
1658 if (dmz_bdev_is_dying(zmd->dev)) {
1659 dzone = ERR_PTR(-EIO);
1660 goto out;
1661 }
1662 dmz_wait_for_free_zones(zmd);
1663 goto again;
1664 }
1665
1666 dmz_map_zone(zmd, dzone, chunk);
1667
1668 } else {
1669 /* The chunk is already mapped: get the mapping zone */
1670 dzone = dmz_get(zmd, dzone_id);
1671 if (dzone->chunk != chunk) {
1672 dzone = ERR_PTR(-EIO);
1673 goto out;
1674 }
1675
1676 /* Repair write pointer if the sequential dzone has error */
1677 if (dmz_seq_write_err(dzone)) {
1678 ret = dmz_handle_seq_write_err(zmd, dzone);
1679 if (ret) {
1680 dzone = ERR_PTR(-EIO);
1681 goto out;
1682 }
1683 clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags);
1684 }
1685 }
1686
1687 /*
1688 * If the zone is being reclaimed, the chunk mapping may change
1689 * to a different zone. So wait for reclaim and retry. Otherwise,
1690 * activate the zone (this will prevent reclaim from touching it).
1691 */
1692 if (dmz_in_reclaim(dzone)) {
1693 dmz_wait_for_reclaim(zmd, dzone);
1694 goto again;
1695 }
1696 dmz_activate_zone(dzone);
1697 dmz_lru_zone(zmd, dzone);
1698out:
1699 dmz_unlock_map(zmd);
1700
1701 return dzone;
1702}
1703
1704/*
1705 * Write and discard change the block validity of data zones and their buffer
1706 * zones. Check here that valid blocks are still present. If all blocks are
1707 * invalid, the zones can be unmapped on the fly without waiting for reclaim
1708 * to do it.
1709 */
1710void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone)
1711{
1712 struct dm_zone *bzone;
1713
1714 dmz_lock_map(zmd);
1715
1716 bzone = dzone->bzone;
1717 if (bzone) {
1718 if (dmz_weight(bzone))
1719 dmz_lru_zone(zmd, bzone);
1720 else {
1721 /* Empty buffer zone: reclaim it */
1722 dmz_unmap_zone(zmd, bzone);
1723 dmz_free_zone(zmd, bzone);
1724 bzone = NULL;
1725 }
1726 }
1727
1728 /* Deactivate the data zone */
1729 dmz_deactivate_zone(dzone);
1730 if (dmz_is_active(dzone) || bzone || dmz_weight(dzone))
1731 dmz_lru_zone(zmd, dzone);
1732 else {
1733 /* Unbuffered inactive empty data zone: reclaim it */
1734 dmz_unmap_zone(zmd, dzone);
1735 dmz_free_zone(zmd, dzone);
1736 }
1737
1738 dmz_unlock_map(zmd);
1739}
1740
1741/*
1742 * Allocate and map a random zone to buffer a chunk
1743 * already mapped to a sequential zone.
1744 */
1745struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
1746 struct dm_zone *dzone)
1747{
1748 struct dm_zone *bzone;
1749
1750 dmz_lock_map(zmd);
1751again:
1752 bzone = dzone->bzone;
1753 if (bzone)
1754 goto out;
1755
 1756 /* Allocate a random zone */
1757 bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1758 if (!bzone) {
1759 if (dmz_bdev_is_dying(zmd->dev)) {
1760 bzone = ERR_PTR(-EIO);
1761 goto out;
1762 }
1763 dmz_wait_for_free_zones(zmd);
1764 goto again;
1765 }
1766
1767 /* Update the chunk mapping */
1768 dmz_set_chunk_mapping(zmd, dzone->chunk, dmz_id(zmd, dzone),
1769 dmz_id(zmd, bzone));
1770
1771 set_bit(DMZ_BUF, &bzone->flags);
1772 bzone->chunk = dzone->chunk;
1773 bzone->bzone = dzone;
1774 dzone->bzone = bzone;
1775 list_add_tail(&bzone->link, &zmd->map_rnd_list);
1776out:
1777 dmz_unlock_map(zmd);
1778
1779 return bzone;
1780}
1781
1782/*
1783 * Get an unmapped (free) zone.
1784 * This must be called with the mapping lock held.
1785 */
1786struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)
1787{
1788 struct list_head *list;
1789 struct dm_zone *zone;
1790
1791 if (flags & DMZ_ALLOC_RND)
1792 list = &zmd->unmap_rnd_list;
1793 else
1794 list = &zmd->unmap_seq_list;
1795again:
1796 if (list_empty(list)) {
1797 /*
1798 * No free zone: if this is for reclaim, allow using the
1799 * reserved sequential zones.
1800 */
1801 if (!(flags & DMZ_ALLOC_RECLAIM) ||
1802 list_empty(&zmd->reserved_seq_zones_list))
1803 return NULL;
1804
1805 zone = list_first_entry(&zmd->reserved_seq_zones_list,
1806 struct dm_zone, link);
1807 list_del_init(&zone->link);
1808 atomic_dec(&zmd->nr_reserved_seq_zones);
1809 return zone;
1810 }
1811
1812 zone = list_first_entry(list, struct dm_zone, link);
1813 list_del_init(&zone->link);
1814
1815 if (dmz_is_rnd(zone))
1816 atomic_dec(&zmd->unmap_nr_rnd);
1817 else
1818 atomic_dec(&zmd->unmap_nr_seq);
1819
1820 if (dmz_is_offline(zone)) {
1821 dmz_dev_warn(zmd->dev, "Zone %u is offline", dmz_id(zmd, zone));
1822 zone = NULL;
1823 goto again;
1824 }
1825
1826 return zone;
1827}
1828
1829/*
1830 * Free a zone.
1831 * This must be called with the mapping lock held.
1832 */
1833void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1834{
1835 /* If this is a sequential zone, reset it */
1836 if (dmz_is_seq(zone))
1837 dmz_reset_zone(zmd, zone);
1838
1839 /* Return the zone to its type unmap list */
1840 if (dmz_is_rnd(zone)) {
1841 list_add_tail(&zone->link, &zmd->unmap_rnd_list);
1842 atomic_inc(&zmd->unmap_nr_rnd);
1843 } else if (atomic_read(&zmd->nr_reserved_seq_zones) <
1844 zmd->nr_reserved_seq) {
1845 list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
1846 atomic_inc(&zmd->nr_reserved_seq_zones);
1847 } else {
1848 list_add_tail(&zone->link, &zmd->unmap_seq_list);
1849 atomic_inc(&zmd->unmap_nr_seq);
1850 }
1851
1852 wake_up_all(&zmd->free_wq);
1853}
1854
1855/*
1856 * Map a chunk to a zone.
1857 * This must be called with the mapping lock held.
1858 */
1859void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
1860 unsigned int chunk)
1861{
1862 /* Set the chunk mapping */
1863 dmz_set_chunk_mapping(zmd, chunk, dmz_id(zmd, dzone),
1864 DMZ_MAP_UNMAPPED);
1865 dzone->chunk = chunk;
1866 if (dmz_is_rnd(dzone))
1867 list_add_tail(&dzone->link, &zmd->map_rnd_list);
1868 else
1869 list_add_tail(&dzone->link, &zmd->map_seq_list);
1870}
1871
1872/*
1873 * Unmap a zone.
1874 * This must be called with the mapping lock held.
1875 */
1876void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1877{
1878 unsigned int chunk = zone->chunk;
1879 unsigned int dzone_id;
1880
1881 if (chunk == DMZ_MAP_UNMAPPED) {
1882 /* Already unmapped */
1883 return;
1884 }
1885
1886 if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
1887 /*
1888 * Unmapping the chunk buffer zone: clear only
1889 * the chunk buffer mapping
1890 */
1891 dzone_id = dmz_id(zmd, zone->bzone);
1892 zone->bzone->bzone = NULL;
1893 zone->bzone = NULL;
1894
1895 } else {
1896 /*
1897 * Unmapping the chunk data zone: the zone must
1898 * not be buffered.
1899 */
1900 if (WARN_ON(zone->bzone)) {
1901 zone->bzone->bzone = NULL;
1902 zone->bzone = NULL;
1903 }
1904 dzone_id = DMZ_MAP_UNMAPPED;
1905 }
1906
1907 dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);
1908
1909 zone->chunk = DMZ_MAP_UNMAPPED;
1910 list_del_init(&zone->link);
1911}
1912
1913/*
1914 * Set @nr_bits bits in @bitmap starting from @bit.
1915 * Return the number of bits changed from 0 to 1.
1916 */
1917static unsigned int dmz_set_bits(unsigned long *bitmap,
1918 unsigned int bit, unsigned int nr_bits)
1919{
1920 unsigned long *addr;
1921 unsigned int end = bit + nr_bits;
1922 unsigned int n = 0;
1923
1924 while (bit < end) {
1925 if (((bit & (BITS_PER_LONG - 1)) == 0) &&
1926 ((end - bit) >= BITS_PER_LONG)) {
1927 /* Try to set the whole word at once */
1928 addr = bitmap + BIT_WORD(bit);
1929 if (*addr == 0) {
1930 *addr = ULONG_MAX;
1931 n += BITS_PER_LONG;
1932 bit += BITS_PER_LONG;
1933 continue;
1934 }
1935 }
1936
1937 if (!test_and_set_bit(bit, bitmap))
1938 n++;
1939 bit++;
1940 }
1941
1942 return n;
1943}
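
/*
 * For instance, dmz_set_bits(bitmap, 60, 200) sets bits 60..63 one at a
 * time, then covers bits 64..255 in whole-word steps (when those words are
 * currently zero), and finally sets bits 256..259 individually.
 */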
1944
1945/*
1946 * Get the bitmap block storing the bit for chunk_block in zone.
1947 */
1948static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
1949 struct dm_zone *zone,
1950 sector_t chunk_block)
1951{
1952 sector_t bitmap_block = 1 + zmd->nr_map_blocks +
1953 (sector_t)(dmz_id(zmd, zone) * zmd->zone_nr_bitmap_blocks) +
1954 (chunk_block >> DMZ_BLOCK_SHIFT_BITS);
1955
1956 return dmz_get_mblock(zmd, bitmap_block);
1957}
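
/*
 * A numeric sketch, reusing the assumptions from the layout example above
 * (128 chunk mapping blocks, 2 bitmap blocks per zone, 32768 bits per
 * bitmap block): the validity bit of block 40000 in zone 10 lives in
 * metadata block 1 + 128 + 10 * 2 + (40000 >> 15) = 150.
 */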
1958
1959/*
1960 * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone.
1961 */
1962int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
1963 struct dm_zone *to_zone)
1964{
1965 struct dmz_mblock *from_mblk, *to_mblk;
1966 sector_t chunk_block = 0;
1967
1968 /* Get the zones bitmap blocks */
1969 while (chunk_block < zmd->dev->zone_nr_blocks) {
1970 from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
1971 if (IS_ERR(from_mblk))
1972 return PTR_ERR(from_mblk);
1973 to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);
1974 if (IS_ERR(to_mblk)) {
1975 dmz_release_mblock(zmd, from_mblk);
1976 return PTR_ERR(to_mblk);
1977 }
1978
1979 memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
1980 dmz_dirty_mblock(zmd, to_mblk);
1981
1982 dmz_release_mblock(zmd, to_mblk);
1983 dmz_release_mblock(zmd, from_mblk);
1984
1985 chunk_block += DMZ_BLOCK_SIZE_BITS;
1986 }
1987
1988 to_zone->weight = from_zone->weight;
1989
1990 return 0;
1991}
1992
1993/*
1994 * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone,
1995 * starting from chunk_block.
1996 */
1997int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
1998 struct dm_zone *to_zone, sector_t chunk_block)
1999{
2000 unsigned int nr_blocks;
2001 int ret;
2002
2003 /* Get the zones bitmap blocks */
2004 while (chunk_block < zmd->dev->zone_nr_blocks) {
2005 /* Get a valid region from the source zone */
2006 ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
2007 if (ret <= 0)
2008 return ret;
2009
2010 nr_blocks = ret;
2011 ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks);
2012 if (ret)
2013 return ret;
2014
2015 chunk_block += nr_blocks;
2016 }
2017
2018 return 0;
2019}
2020
2021/*
2022 * Validate all the blocks in the range [block..block+nr_blocks-1].
2023 */
2024int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
2025 sector_t chunk_block, unsigned int nr_blocks)
2026{
2027 unsigned int count, bit, nr_bits;
2028 unsigned int zone_nr_blocks = zmd->dev->zone_nr_blocks;
2029 struct dmz_mblock *mblk;
2030 unsigned int n = 0;
2031
2032 dmz_dev_debug(zmd->dev, "=> VALIDATE zone %u, block %llu, %u blocks",
2033 dmz_id(zmd, zone), (unsigned long long)chunk_block,
2034 nr_blocks);
2035
2036 WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);
2037
2038 while (nr_blocks) {
2039 /* Get bitmap block */
2040 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2041 if (IS_ERR(mblk))
2042 return PTR_ERR(mblk);
2043
2044 /* Set bits */
2045 bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2046 nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
2047
2048 count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
2049 if (count) {
2050 dmz_dirty_mblock(zmd, mblk);
2051 n += count;
2052 }
2053 dmz_release_mblock(zmd, mblk);
2054
2055 nr_blocks -= nr_bits;
2056 chunk_block += nr_bits;
2057 }
2058
2059 if (likely(zone->weight + n <= zone_nr_blocks))
2060 zone->weight += n;
2061 else {
2062 dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be <= %u",
2063 dmz_id(zmd, zone), zone->weight,
2064 zone_nr_blocks - n);
2065 zone->weight = zone_nr_blocks;
2066 }
2067
2068 return 0;
2069}
2070
2071/*
2072 * Clear nr_bits bits in bitmap starting from bit.
2073 * Return the number of bits cleared.
2074 */
2075static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
2076{
2077 unsigned long *addr;
2078 int end = bit + nr_bits;
2079 int n = 0;
2080
2081 while (bit < end) {
2082 if (((bit & (BITS_PER_LONG - 1)) == 0) &&
2083 ((end - bit) >= BITS_PER_LONG)) {
2084 /* Try to clear whole word at once */
2085 addr = bitmap + BIT_WORD(bit);
2086 if (*addr == ULONG_MAX) {
2087 *addr = 0;
2088 n += BITS_PER_LONG;
2089 bit += BITS_PER_LONG;
2090 continue;
2091 }
2092 }
2093
2094 if (test_and_clear_bit(bit, bitmap))
2095 n++;
2096 bit++;
2097 }
2098
2099 return n;
2100}
2101
2102/*
2103 * Invalidate all the blocks in the range [block..block+nr_blocks-1].
2104 */
2105int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
2106 sector_t chunk_block, unsigned int nr_blocks)
2107{
2108 unsigned int count, bit, nr_bits;
2109 struct dmz_mblock *mblk;
2110 unsigned int n = 0;
2111
2112 dmz_dev_debug(zmd->dev, "=> INVALIDATE zone %u, block %llu, %u blocks",
2113 dmz_id(zmd, zone), (u64)chunk_block, nr_blocks);
2114
2115 WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);
2116
2117 while (nr_blocks) {
2118 /* Get bitmap block */
2119 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2120 if (IS_ERR(mblk))
2121 return PTR_ERR(mblk);
2122
2123 /* Clear bits */
2124 bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2125 nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
2126
2127 count = dmz_clear_bits((unsigned long *)mblk->data,
2128 bit, nr_bits);
2129 if (count) {
2130 dmz_dirty_mblock(zmd, mblk);
2131 n += count;
2132 }
2133 dmz_release_mblock(zmd, mblk);
2134
2135 nr_blocks -= nr_bits;
2136 chunk_block += nr_bits;
2137 }
2138
2139 if (zone->weight >= n)
2140 zone->weight -= n;
2141 else {
2142 dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be >= %u",
2143 dmz_id(zmd, zone), zone->weight, n);
2144 zone->weight = 0;
2145 }
2146
2147 return 0;
2148}
2149
2150/*
2151 * Get a block bit value.
2152 */
2153static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2154 sector_t chunk_block)
2155{
2156 struct dmz_mblock *mblk;
2157 int ret;
2158
2159 WARN_ON(chunk_block >= zmd->dev->zone_nr_blocks);
2160
2161 /* Get bitmap block */
2162 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2163 if (IS_ERR(mblk))
2164 return PTR_ERR(mblk);
2165
2166 /* Get offset */
2167 ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
2168 (unsigned long *) mblk->data) != 0;
2169
2170 dmz_release_mblock(zmd, mblk);
2171
2172 return ret;
2173}
2174
2175/*
2176 * Return the number of blocks from chunk_block to the first block with a bit
2177 * value specified by set. Search at most nr_blocks blocks from chunk_block.
2178 */
2179static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2180 sector_t chunk_block, unsigned int nr_blocks,
2181 int set)
2182{
2183 struct dmz_mblock *mblk;
2184 unsigned int bit, set_bit, nr_bits;
2185 unsigned long *bitmap;
2186 int n = 0;
2187
2188 WARN_ON(chunk_block + nr_blocks > zmd->dev->zone_nr_blocks);
2189
2190 while (nr_blocks) {
2191 /* Get bitmap block */
2192 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2193 if (IS_ERR(mblk))
2194 return PTR_ERR(mblk);
2195
2196 /* Get offset */
2197 bitmap = (unsigned long *) mblk->data;
2198 bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2199 nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
2200 if (set)
2201 set_bit = find_next_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
2202 else
2203 set_bit = find_next_zero_bit(bitmap, DMZ_BLOCK_SIZE_BITS, bit);
2204 dmz_release_mblock(zmd, mblk);
2205
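		/*
		 * find_next_bit()/find_next_zero_bit() return DMZ_BLOCK_SIZE_BITS
		 * when no matching bit exists in this bitmap block. A smaller
		 * value means the searched-for bit value was found, so stop;
		 * otherwise continue with the next bitmap block.
		 */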
2206 n += set_bit - bit;
2207 if (set_bit < DMZ_BLOCK_SIZE_BITS)
2208 break;
2209
2210 nr_blocks -= nr_bits;
2211 chunk_block += nr_bits;
2212 }
2213
2214 return n;
2215}
2216
2217/*
2218 * Test if chunk_block is valid. If it is, the number of consecutive
2219 * valid blocks from chunk_block will be returned.
2220 */
2221int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
2222 sector_t chunk_block)
2223{
2224 int valid;
2225
2226 valid = dmz_test_block(zmd, zone, chunk_block);
2227 if (valid <= 0)
2228 return valid;
2229
2230 /* The block is valid: get the number of valid blocks from block */
2231 return dmz_to_next_set_block(zmd, zone, chunk_block,
2232 zmd->dev->zone_nr_blocks - chunk_block, 0);
2233}
2234
2235/*
2236 * Find the first valid block from @chunk_block in @zone.
2237 * If such a block is found, its number is returned through @chunk_block
2238 * and the number of consecutive valid blocks starting at that block
2239 * is returned.
2240 */
2241int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2242 sector_t *chunk_block)
2243{
2244 sector_t start_block = *chunk_block;
2245 int ret;
2246
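	/*
	 * Two passes over the validity bitmap: first skip the run of invalid
	 * (zero) bits to reach the first valid block, then count the run of
	 * valid (set) bits starting at that block.
	 */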
2247 ret = dmz_to_next_set_block(zmd, zone, start_block,
2248 zmd->dev->zone_nr_blocks - start_block, 1);
2249 if (ret < 0)
2250 return ret;
2251
2252 start_block += ret;
2253 *chunk_block = start_block;
2254
2255 return dmz_to_next_set_block(zmd, zone, start_block,
2256 zmd->dev->zone_nr_blocks - start_block, 0);
2257}
2258
2259/*
2260 * Count the number of bits set starting from bit up to bit + nr_bits - 1.
2261 */
2262static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
2263{
2264 unsigned long *addr;
2265 int end = bit + nr_bits;
2266 int n = 0;
2267
2268 while (bit < end) {
2269 if (((bit & (BITS_PER_LONG - 1)) == 0) &&
2270 ((end - bit) >= BITS_PER_LONG)) {
2271 addr = (unsigned long *)bitmap + BIT_WORD(bit);
2272 if (*addr == ULONG_MAX) {
2273 n += BITS_PER_LONG;
2274 bit += BITS_PER_LONG;
2275 continue;
2276 }
2277 }
2278
2279 if (test_bit(bit, bitmap))
2280 n++;
2281 bit++;
2282 }
2283
2284 return n;
2285}
2286
2287/*
2288 * Compute a zone's weight, i.e. the number of valid blocks it contains.
2289 */
2290static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
2291{
2292 struct dmz_mblock *mblk;
2293 sector_t chunk_block = 0;
2294 unsigned int bit, nr_bits;
2295 unsigned int nr_blocks = zmd->dev->zone_nr_blocks;
2296 void *bitmap;
2297 int n = 0;
2298
2299 while (nr_blocks) {
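	/*
	 * Count the set bits over the zone's entire validity bitmap. If a
	 * bitmap block cannot be read, give up and report a weight of 0
	 * rather than a partial count.
	 */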
2300 /* Get bitmap block */
2301 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2302 if (IS_ERR(mblk)) {
2303 n = 0;
2304 break;
2305 }
2306
2307 /* Count bits in this block */
2308 bitmap = mblk->data;
2309 bit = chunk_block & DMZ_BLOCK_MASK_BITS;
2310 nr_bits = min(nr_blocks, DMZ_BLOCK_SIZE_BITS - bit);
2311 n += dmz_count_bits(bitmap, bit, nr_bits);
2312
2313 dmz_release_mblock(zmd, mblk);
2314
2315 nr_blocks -= nr_bits;
2316 chunk_block += nr_bits;
2317 }
2318
2319 zone->weight = n;
2320}
2321
2322/*
2323 * Cleanup the zoned metadata resources.
2324 */
2325static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
2326{
2327 struct rb_root *root;
2328 struct dmz_mblock *mblk, *next;
2329 int i;
2330
2331 /* Release zone mapping resources */
2332 if (zmd->map_mblk) {
2333 for (i = 0; i < zmd->nr_map_blocks; i++)
2334 dmz_release_mblock(zmd, zmd->map_mblk[i]);
2335 kfree(zmd->map_mblk);
2336 zmd->map_mblk = NULL;
2337 }
2338
2339 /* Release super blocks */
2340 for (i = 0; i < 2; i++) {
2341 if (zmd->sb[i].mblk) {
2342 dmz_free_mblock(zmd, zmd->sb[i].mblk);
2343 zmd->sb[i].mblk = NULL;
2344 }
2345 }
2346
2347 /* Free cached blocks */
2348 while (!list_empty(&zmd->mblk_dirty_list)) {
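	/*
	 * The dirty list is expected to be empty here: anything still on it
	 * was modified but never written back, hence the warning before it is
	 * freed.
	 */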
2349 mblk = list_first_entry(&zmd->mblk_dirty_list,
2350 struct dmz_mblock, link);
2351 dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
2352 (u64)mblk->no, mblk->ref);
2353 list_del_init(&mblk->link);
2354 rb_erase(&mblk->node, &zmd->mblk_rbtree);
2355 dmz_free_mblock(zmd, mblk);
2356 }
2357
2358 while (!list_empty(&zmd->mblk_lru_list)) {
2359 mblk = list_first_entry(&zmd->mblk_lru_list,
2360 struct dmz_mblock, link);
2361 list_del_init(&mblk->link);
2362 rb_erase(&mblk->node, &zmd->mblk_rbtree);
2363 dmz_free_mblock(zmd, mblk);
2364 }
2365
2366 /* Sanity checks: the mblock rbtree should now be empty */
2367 root = &zmd->mblk_rbtree;
2368 rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
2369 dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
2370 (u64)mblk->no, mblk->ref);
2371 mblk->ref = 0;
2372 dmz_free_mblock(zmd, mblk);
2373 }
2374
2375 /* Free the zone descriptors */
2376 dmz_drop_zones(zmd);
2377
2378 mutex_destroy(&zmd->mblk_flush_lock);
2379 mutex_destroy(&zmd->map_lock);
2380}
2381
2382/*
2383 * Initialize the zoned metadata.
2384 */
2385int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata)
2386{
2387 struct dmz_metadata *zmd;
2388 unsigned int i, zid;
2389 struct dm_zone *zone;
2390 int ret;
2391
2392 zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
2393 if (!zmd)
2394 return -ENOMEM;
2395
2396 zmd->dev = dev;
2397 zmd->mblk_rbtree = RB_ROOT;
2398 init_rwsem(&zmd->mblk_sem);
2399 mutex_init(&zmd->mblk_flush_lock);
2400 spin_lock_init(&zmd->mblk_lock);
2401 INIT_LIST_HEAD(&zmd->mblk_lru_list);
2402 INIT_LIST_HEAD(&zmd->mblk_dirty_list);
2403
2404 mutex_init(&zmd->map_lock);
2405 atomic_set(&zmd->unmap_nr_rnd, 0);
2406 INIT_LIST_HEAD(&zmd->unmap_rnd_list);
2407 INIT_LIST_HEAD(&zmd->map_rnd_list);
2408
2409 atomic_set(&zmd->unmap_nr_seq, 0);
2410 INIT_LIST_HEAD(&zmd->unmap_seq_list);
2411 INIT_LIST_HEAD(&zmd->map_seq_list);
2412
2413 atomic_set(&zmd->nr_reserved_seq_zones, 0);
2414 INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);
2415
2416 init_waitqueue_head(&zmd->free_wq);
2417
2418 /* Initialize zone descriptors */
2419 ret = dmz_init_zones(zmd);
2420 if (ret)
2421 goto err;
2422
2423 /* Get super block */
2424 ret = dmz_load_sb(zmd);
2425 if (ret)
2426 goto err;
2427
2428 /* Set metadata zones starting from sb_zone */
2429 zid = dmz_id(zmd, zmd->sb_zone);
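	/*
	 * Metadata is kept in two copies (primary and secondary set), so mark
	 * nr_meta_zones zones for each set, starting at the super block zone.
	 * All of these zones must be conventional (randomly writable) zones.
	 */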
2430 for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
2431 zone = dmz_get(zmd, zid + i);
2432		if (!dmz_is_rnd(zone)) {
			ret = -ENXIO;
2433			goto err;
		}
2434 set_bit(DMZ_META, &zone->flags);
2435 }
2436
2437 /* Load mapping table */
2438 ret = dmz_load_mapping(zmd);
2439 if (ret)
2440 goto err;
2441
2442 /*
2443 * Cache size boundaries: allow at least 2 super blocks, the chunk map
2444 * blocks and enough blocks to be able to cache the bitmap blocks of
2445 * up to 16 zones when idle (min_nr_mblks). When busy, allow the cache
2446 * to grow by up to 512 additional metadata blocks (max_nr_mblks).
2447 */
2448 zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
2449 zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
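	/*
	 * Illustrative sizing, not derived from this device: with 4KB
	 * metadata blocks a bitmap block covers 32768 chunk blocks, so a
	 * 256 MB zone (65536 blocks of 4KB) needs 2 bitmap blocks and caching
	 * the bitmaps of 16 idle zones costs 32 blocks on top of the 2 super
	 * blocks and the chunk mapping blocks.
	 */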
2450 zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
2451 zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
2452 zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
2453
2454 /* Metadata cache shrinker */
2455 ret = register_shrinker(&zmd->mblk_shrinker);
2456 if (ret) {
2457		dmz_dev_err(dev, "Failed to register metadata cache shrinker");
2458 goto err;
2459 }
2460
2461 dmz_dev_info(dev, "Host-%s zoned block device",
2462 bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
2463 "aware" : "managed");
2464 dmz_dev_info(dev, " %llu 512-byte logical sectors",
2465 (u64)dev->capacity);
2466 dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors",
2467 dev->nr_zones, (u64)dev->zone_nr_sectors);
2468 dmz_dev_info(dev, " %u metadata zones",
2469 zmd->nr_meta_zones * 2);
2470 dmz_dev_info(dev, " %u data zones for %u chunks",
2471 zmd->nr_data_zones, zmd->nr_chunks);
2472 dmz_dev_info(dev, " %u random zones (%u unmapped)",
2473 zmd->nr_rnd, atomic_read(&zmd->unmap_nr_rnd));
2474 dmz_dev_info(dev, " %u sequential zones (%u unmapped)",
2475 zmd->nr_seq, atomic_read(&zmd->unmap_nr_seq));
2476 dmz_dev_info(dev, " %u reserved sequential data zones",
2477 zmd->nr_reserved_seq);
2478
2479 dmz_dev_debug(dev, "Format:");
2480 dmz_dev_debug(dev, "%u metadata blocks per set (%u max cache)",
2481 zmd->nr_meta_blocks, zmd->max_nr_mblks);
2482 dmz_dev_debug(dev, " %u data zone mapping blocks",
2483 zmd->nr_map_blocks);
2484 dmz_dev_debug(dev, " %u bitmap blocks",
2485 zmd->nr_bitmap_blocks);
2486
2487 *metadata = zmd;
2488
2489 return 0;
2490err:
2491 dmz_cleanup_metadata(zmd);
2492 kfree(zmd);
2493 *metadata = NULL;
2494
2495 return ret;
2496}
2497
2498/*
2499 * Cleanup the zoned metadata resources.
2500 * Unregister the metadata cache shrinker and clean up the zoned metadata resources.
2501void dmz_dtr_metadata(struct dmz_metadata *zmd)
2502{
2503 unregister_shrinker(&zmd->mblk_shrinker);
2504 dmz_cleanup_metadata(zmd);
2505 kfree(zmd);
2506}
2507
2508/*
2509 * Check zone information on resume.
2510 */
2511int dmz_resume_metadata(struct dmz_metadata *zmd)
2512{
2513 struct dmz_dev *dev = zmd->dev;
2514 struct dm_zone *zone;
2515 sector_t wp_block;
2516 unsigned int i;
2517 int ret;
2518
2519 /* Check zones */
2520 for (i = 0; i < dev->nr_zones; i++) {
2521 zone = dmz_get(zmd, i);
2522 if (!zone) {
2523 dmz_dev_err(dev, "Unable to get zone %u", i);
2524 return -EIO;
2525 }
2526
2527 wp_block = zone->wp_block;
2528
2529 ret = dmz_update_zone(zmd, zone);
2530 if (ret) {
2531 dmz_dev_err(dev, "Broken zone %u", i);
2532 return ret;
2533 }
2534
2535 if (dmz_is_offline(zone)) {
2536 dmz_dev_warn(dev, "Zone %u is offline", i);
2537 continue;
2538 }
2539
2540 /* Check write pointer */
2541 if (!dmz_is_seq(zone))
2542 zone->wp_block = 0;
2543 else if (zone->wp_block != wp_block) {
2544 dmz_dev_err(dev, "Zone %u: Invalid wp (%llu / %llu)",
2545 i, (u64)zone->wp_block, (u64)wp_block);
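			/*
			 * The device write pointer no longer matches what the
			 * metadata expects: restore the expected position and
			 * invalidate every block from there to the end of the
			 * zone, as their contents can no longer be trusted.
			 */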
2546 zone->wp_block = wp_block;
2547 dmz_invalidate_blocks(zmd, zone, zone->wp_block,
2548 dev->zone_nr_blocks - zone->wp_block);
2549 }
2550 }
2551
2552 return 0;
2553}