/*
 * Copyright (C) 2018 Google Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-core.h"

#include <linux/crc32.h>
#include <linux/dm-bufio.h>
#include <linux/module.h>

#define DM_MSG_PREFIX "bow"
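
/*
 * dm-bow ("backup on write") preserves the original contents of any
 * block that is overwritten or trimmed while the target is in its
 * checkpoint state, so that userspace can roll the device back if the
 * checkpoint is never committed. Free space, reported by userspace as
 * discards during the TRIM state, holds the backup copies and the log
 * that maps them back to their original locations.
 */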

struct log_entry {
	u64 source;
	u64 dest;
	u32 size;
	u32 checksum;
} __packed;

struct log_sector {
	u32 magic;
	u16 header_version;
	u16 header_size;
	u32 block_size;
	u32 count;
	u32 sequence;
	sector_t sector0;
	struct log_entry entries[];
} __packed;

/*
 * MAGIC is BOW in ascii
 */
#define MAGIC 0x00574f42
#define HEADER_VERSION 0x0100
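
/*
 * The log lives in block 0. Each entry records the (source, dest, size,
 * checksum) tuple of one backed-up extent. When block 0 fills up,
 * backup_log_sector() copies it to a free block, bumps the sequence
 * number and starts a fresh log whose first entry points at that
 * backup, so the full history can be reconstructed in sequence order.
 */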

/*
 * A sorted set of ranges representing the state of the data on the device.
 * Uses an rb_tree for fast lookup of a given sector.
 * Consecutive ranges are always of different types; operations on this
 * set must merge matching consecutive ranges.
 *
 * The top range is always of type TOP.
 */
struct bow_range {
	struct rb_node node;
	sector_t sector;
	enum {
		INVALID,	/* Type not set */
		SECTOR0,	/* First sector - holds log record */
		SECTOR0_CURRENT,/* Live contents of sector0 */
		UNCHANGED,	/* Original contents */
		TRIMMED,	/* Range has been trimmed */
		CHANGED,	/* Range has been changed */
		BACKUP,		/* Range is being used as a backup */
		TOP,		/* Final range - sector is size of device */
	} type;
	struct list_head trimmed_list; /* list of TRIMMED ranges */
};

/* Human-readable type names for status output; TRIMMED prints as "Free" */
static const char * const readable_type[] = {
	"Invalid",
	"Sector0",
	"Sector0_current",
	"Unchanged",
	"Free",
	"Changed",
	"Backup",
	"Top",
};

enum state {
	TRIM,
	CHECKPOINT,
	COMMITTED,
};
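
/*
 * The target moves through these states in order, driven by userspace
 * writes to the sysfs "state" attribute: in TRIM, discards mark ranges
 * as free backup space; in CHECKPOINT, overwritten data is backed up
 * before the write is passed down; in COMMITTED, all I/O is passed
 * straight through to the backing device.
 */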

struct bow_context {
	struct dm_dev *dev;
	u32 block_size;
	u32 block_shift;
	struct workqueue_struct *workqueue;
	struct dm_bufio_client *bufio;
	struct mutex ranges_lock; /* Hold to access this struct and/or ranges */
	struct rb_root ranges;
	struct dm_kobject_holder kobj_holder; /* for sysfs attributes */
	atomic_t state; /* One of the enum state values above */
	u64 trims_total;
	struct log_sector *log_sector;
	struct list_head trimmed_list;
	bool forward_trims;
};

static sector_t range_top(struct bow_range *br)
{
	return container_of(rb_next(&br->node), struct bow_range, node)
		->sector;
}

static u64 range_size(struct bow_range *br)
{
	return (range_top(br) - br->sector) * SECTOR_SIZE;
}

static sector_t bvec_top(struct bvec_iter *bi_iter)
{
	return bi_iter->bi_sector + bi_iter->bi_size / SECTOR_SIZE;
}

/*
 * Find the first range that overlaps with bi_iter
 * bi_iter is set to the size of the overlapping sub-range
 */
static struct bow_range *find_first_overlapping_range(struct rb_root *ranges,
						      struct bvec_iter *bi_iter)
{
	struct rb_node *node = ranges->rb_node;
	struct bow_range *br;

	while (node) {
		br = container_of(node, struct bow_range, node);

		if (br->sector <= bi_iter->bi_sector
		    && bi_iter->bi_sector < range_top(br))
			break;

		if (bi_iter->bi_sector < br->sector)
			node = node->rb_left;
		else
			node = node->rb_right;
	}

	WARN_ON(!node);
	if (!node)
		return NULL;

	if (range_top(br) - bi_iter->bi_sector
	    < bi_iter->bi_size >> SECTOR_SHIFT)
		bi_iter->bi_size = (range_top(br) - bi_iter->bi_sector)
			<< SECTOR_SHIFT;

	return br;
}

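/*
 * Insert new_br immediately before existing: descend to the rightmost
 * slot in existing's left subtree (its in-order predecessor position)
 * and link the new node there.
 */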
static void add_before(struct rb_root *ranges, struct bow_range *new_br,
		       struct bow_range *existing)
{
	struct rb_node *parent = &(existing->node);
	struct rb_node **link = &(parent->rb_left);

	while (*link) {
		parent = *link;
		link = &((*link)->rb_right);
	}

	rb_link_node(&new_br->node, parent, link);
	rb_insert_color(&new_br->node, ranges);
}

/*
 * Given a range br returned by find_first_overlapping_range, split br into a
 * leading range, a range matching the bi_iter and a trailing range.
 * Leading and trailing may end up size 0 and will then be deleted. The
 * new range matching the bi_iter is then returned and should have its type
 * and type specific fields populated.
 * If bi_iter runs off the end of the range, bi_iter is truncated accordingly
 */
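/*
 * Example: if *br spans [0, 100) and bi_iter covers sectors [10, 30),
 * the result is a leading range [0, 10), a new range [10, 30) returned
 * through *br, and the original node shrunk to the trailing [30, 100).
 */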
static int split_range(struct bow_context *bc, struct bow_range **br,
		       struct bvec_iter *bi_iter)
{
	struct bow_range *new_br;

	if (bi_iter->bi_sector < (*br)->sector) {
		WARN_ON(true);
		return BLK_STS_IOERR;
	}

	if (bi_iter->bi_sector > (*br)->sector) {
		struct bow_range *leading_br =
			kzalloc(sizeof(*leading_br), GFP_KERNEL);

		if (!leading_br)
			return BLK_STS_RESOURCE;

		*leading_br = **br;
		if (leading_br->type == TRIMMED)
			list_add(&leading_br->trimmed_list, &bc->trimmed_list);

		add_before(&bc->ranges, leading_br, *br);
		(*br)->sector = bi_iter->bi_sector;
	}

	if (bvec_top(bi_iter) >= range_top(*br)) {
		bi_iter->bi_size = (range_top(*br) - (*br)->sector)
			* SECTOR_SIZE;
		return BLK_STS_OK;
	}

	/* new_br will be the beginning, existing br will be the tail */
	new_br = kzalloc(sizeof(*new_br), GFP_KERNEL);
	if (!new_br)
		return BLK_STS_RESOURCE;

	new_br->sector = (*br)->sector;
	(*br)->sector = bvec_top(bi_iter);
	add_before(&bc->ranges, new_br, *br);
	*br = new_br;

	return BLK_STS_OK;
}

/*
 * Sets type of a range. May merge range into surrounding ranges
 * Since br may be invalidated, always sets br to NULL to prevent
 * usage after this is called
 */
static void set_type(struct bow_context *bc, struct bow_range **br, int type)
{
	struct bow_range *prev = container_of(rb_prev(&(*br)->node),
					      struct bow_range, node);
	struct bow_range *next = container_of(rb_next(&(*br)->node),
					      struct bow_range, node);

	if ((*br)->type == TRIMMED) {
		bc->trims_total -= range_size(*br);
		list_del(&(*br)->trimmed_list);
	}

	if (type == TRIMMED) {
		bc->trims_total += range_size(*br);
		list_add(&(*br)->trimmed_list, &bc->trimmed_list);
	}

	(*br)->type = type;

	if (next->type == type) {
		if (type == TRIMMED)
			list_del(&next->trimmed_list);
		rb_erase(&next->node, &bc->ranges);
		kfree(next);
	}

	if (prev->type == type) {
		if (type == TRIMMED)
			list_del(&(*br)->trimmed_list);
		rb_erase(&(*br)->node, &bc->ranges);
		kfree(*br);
	}

	*br = NULL;
}

static struct bow_range *find_free_range(struct bow_context *bc)
{
	if (list_empty(&bc->trimmed_list)) {
		DMERR("Unable to find free space to back up to");
		return NULL;
	}

	return list_first_entry(&bc->trimmed_list, struct bow_range,
				trimmed_list);
}

static sector_t sector_to_page(struct bow_context const *bc, sector_t sector)
{
	WARN_ON((sector & (((sector_t)1 << (bc->block_shift - SECTOR_SHIFT)) - 1))
		!= 0);
	return sector >> (bc->block_shift - SECTOR_SHIFT);
}

static int copy_data(struct bow_context const *bc,
		     struct bow_range *source, struct bow_range *dest,
		     u32 *checksum)
{
	int i;

	if (range_size(source) != range_size(dest)) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	if (checksum)
		*checksum = sector_to_page(bc, source->sector);

	for (i = 0; i < range_size(source) >> bc->block_shift; ++i) {
		struct dm_buffer *read_buffer, *write_buffer;
		u8 *read, *write;
		sector_t page = sector_to_page(bc, source->sector) + i;

		read = dm_bufio_read(bc->bufio, page, &read_buffer);
		if (IS_ERR(read)) {
			DMERR("Cannot read page %llu",
			      (unsigned long long)page);
			return PTR_ERR(read);
		}

		if (checksum)
			*checksum = crc32(*checksum, read, bc->block_size);

		write = dm_bufio_new(bc->bufio,
				     sector_to_page(bc, dest->sector) + i,
				     &write_buffer);
		if (IS_ERR(write)) {
			DMERR("Cannot write sector");
			dm_bufio_release(read_buffer);
			return PTR_ERR(write);
		}

		memcpy(write, read, bc->block_size);

		dm_bufio_mark_buffer_dirty(write_buffer);
		dm_bufio_release(write_buffer);
		dm_bufio_release(read_buffer);
	}

	dm_bufio_write_dirty_buffers(bc->bufio);
	return BLK_STS_OK;
}

/****** logging functions ******/

static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
			 unsigned int size, u32 checksum);

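/*
 * Relocate the current log block to a free range and start a new log
 * whose first entry records the move. add_log_entry() and
 * backup_log_sector() are mutually recursive: adding an entry to a full
 * log triggers a backup, which itself adds one entry to the fresh log.
 */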
static int backup_log_sector(struct bow_context *bc)
{
	struct bow_range *first_br, *free_br;
	struct bvec_iter bi_iter;
	u32 checksum = 0;
	int ret;

	first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);

	if (first_br->type != SECTOR0) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	if (range_size(first_br) != bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	free_br = find_free_range(bc);
	/* No space left - return this error to userspace */
	if (!free_br)
		return BLK_STS_NOSPC;
	bi_iter.bi_sector = free_br->sector;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &free_br, &bi_iter);
	if (ret)
		return ret;
	if (bi_iter.bi_size != bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	ret = copy_data(bc, first_br, free_br, &checksum);
	if (ret)
		return ret;

	bc->log_sector->count = 0;
	bc->log_sector->sequence++;
	ret = add_log_entry(bc, first_br->sector, free_br->sector,
			    range_size(first_br), checksum);
	if (ret)
		return ret;

	set_type(bc, &free_br, BACKUP);
	return BLK_STS_OK;
}

static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
			 unsigned int size, u32 checksum)
{
	struct dm_buffer *sector_buffer;
	u8 *sector;

	if (sizeof(struct log_sector)
	    + sizeof(struct log_entry) * (bc->log_sector->count + 1)
	    > bc->block_size) {
		int ret = backup_log_sector(bc);

		if (ret)
			return ret;
	}

	sector = dm_bufio_new(bc->bufio, 0, &sector_buffer);
	if (IS_ERR(sector)) {
		DMERR("Cannot write boot sector");
		dm_bufio_release(sector_buffer);
		return BLK_STS_NOSPC;
	}

	bc->log_sector->entries[bc->log_sector->count].source = source;
	bc->log_sector->entries[bc->log_sector->count].dest = dest;
	bc->log_sector->entries[bc->log_sector->count].size = size;
	bc->log_sector->entries[bc->log_sector->count].checksum = checksum;
	bc->log_sector->count++;

	memcpy(sector, bc->log_sector, bc->block_size);
	dm_bufio_mark_buffer_dirty(sector_buffer);
	dm_bufio_release(sector_buffer);
	dm_bufio_write_dirty_buffers(bc->bufio);
	return BLK_STS_OK;
}

static int prepare_log(struct bow_context *bc)
{
	struct bow_range *free_br, *first_br;
	struct bvec_iter bi_iter;
	u32 checksum = 0;
	int ret;

	/* Carve out first sector as log sector */
	first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);
	if (first_br->type != UNCHANGED) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	if (range_size(first_br) < bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
	bi_iter.bi_sector = 0;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &first_br, &bi_iter);
	if (ret)
		return ret;
	first_br->type = SECTOR0;
	if (range_size(first_br) != bc->block_size) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	/* Find free sector for active sector0 reads/writes */
	free_br = find_free_range(bc);
	if (!free_br)
		return BLK_STS_NOSPC;
	bi_iter.bi_sector = free_br->sector;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &free_br, &bi_iter);
	if (ret)
		return ret;
	free_br->type = SECTOR0_CURRENT;

	/* Copy data */
	ret = copy_data(bc, first_br, free_br, NULL);
	if (ret)
		return ret;

	bc->log_sector->sector0 = free_br->sector;

	/* Find free sector to back up original sector zero */
	free_br = find_free_range(bc);
	if (!free_br)
		return BLK_STS_NOSPC;
	bi_iter.bi_sector = free_br->sector;
	bi_iter.bi_size = bc->block_size;
	ret = split_range(bc, &free_br, &bi_iter);
	if (ret)
		return ret;

	/* Back up */
	ret = copy_data(bc, first_br, free_br, &checksum);
	if (ret)
		return ret;

	/*
	 * Set up our replacement boot sector - it will get written when we
	 * add the first log entry, which we do immediately
	 */
	bc->log_sector->magic = MAGIC;
	bc->log_sector->header_version = HEADER_VERSION;
	bc->log_sector->header_size = sizeof(*bc->log_sector);
	bc->log_sector->block_size = bc->block_size;
	bc->log_sector->count = 0;
	bc->log_sector->sequence = 0;

	/* Add log entry */
	ret = add_log_entry(bc, first_br->sector, free_br->sector,
			    range_size(first_br), checksum);
	if (ret)
		return ret;

	set_type(bc, &free_br, BACKUP);
	return BLK_STS_OK;
}

static struct bow_range *find_sector0_current(struct bow_context *bc)
{
	struct bvec_iter bi_iter;

	bi_iter.bi_sector = bc->log_sector->sector0;
	bi_iter.bi_size = bc->block_size;
	return find_first_overlapping_range(&bc->ranges, &bi_iter);
}

/****** sysfs interface functions ******/

static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	struct bow_context *bc = container_of(kobj, struct bow_context,
					      kobj_holder.kobj);

	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&bc->state));
}

static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct bow_context *bc = container_of(kobj, struct bow_context,
					      kobj_holder.kobj);
	enum state state, original_state;
	int ret;

	state = buf[0] - '0';
	if (state < TRIM || state > COMMITTED) {
		DMERR("State value %d out of range", state);
		return -EINVAL;
	}

	mutex_lock(&bc->ranges_lock);
	original_state = atomic_read(&bc->state);
	if (state != original_state + 1) {
		DMERR("Invalid state change from %d to %d",
		      original_state, state);
		ret = -EINVAL;
		goto bad;
	}

	DMINFO("Switching to state %s", state == CHECKPOINT ? "Checkpoint"
	       : state == COMMITTED ? "Committed" : "Unknown");

	if (state == CHECKPOINT) {
		ret = prepare_log(bc);
		if (ret) {
			DMERR("Failed to switch to checkpoint state");
			goto bad;
		}
	} else if (state == COMMITTED) {
		struct bow_range *br = find_sector0_current(bc);
		struct bow_range *sector0_br =
			container_of(rb_first(&bc->ranges), struct bow_range,
				     node);

		ret = copy_data(bc, br, sector0_br, NULL);
		if (ret) {
			DMERR("Failed to switch to committed state");
			goto bad;
		}
	}
	atomic_inc(&bc->state);
	ret = count;

bad:
	mutex_unlock(&bc->ranges_lock);
	return ret;
}

static ssize_t free_show(struct kobject *kobj, struct kobj_attribute *attr,
			 char *buf)
{
	struct bow_context *bc = container_of(kobj, struct bow_context,
					      kobj_holder.kobj);
	u64 trims_total;

	mutex_lock(&bc->ranges_lock);
	trims_total = bc->trims_total;
	mutex_unlock(&bc->ranges_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", trims_total);
}

static struct kobj_attribute attr_state = __ATTR_RW(state);
static struct kobj_attribute attr_free = __ATTR_RO(free);

static struct attribute *bow_attrs[] = {
	&attr_state.attr,
	&attr_free.attr,
	NULL
};

static struct kobj_type bow_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = bow_attrs,
	.release = dm_kobject_release
};
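
/*
 * The attributes appear under the dm device's block directory; an
 * illustrative session, assuming the target is dm-0:
 *
 *	cat /sys/block/dm-0/bow/free		# bytes of trimmed space left
 *	echo 1 > /sys/block/dm-0/bow/state	# TRIM -> CHECKPOINT
 *	echo 2 > /sys/block/dm-0/bow/state	# CHECKPOINT -> COMMITTED
 */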

/****** constructor/destructor ******/

static void dm_bow_dtr(struct dm_target *ti)
{
	struct bow_context *bc = (struct bow_context *) ti->private;
	struct kobject *kobj;

	while (rb_first(&bc->ranges)) {
		struct bow_range *br = container_of(rb_first(&bc->ranges),
						    struct bow_range, node);

		rb_erase(&br->node, &bc->ranges);
		kfree(br);
	}
	if (bc->workqueue)
		destroy_workqueue(bc->workqueue);
	if (bc->bufio)
		dm_bufio_client_destroy(bc->bufio);

	kobj = &bc->kobj_holder.kobj;
	if (kobj->state_initialized) {
		kobject_put(kobj);
		wait_for_completion(dm_get_completion_from_kobject(kobj));
	}

	kfree(bc->log_sector);
	kfree(bc);
}

static int dm_bow_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct bow_context *bc;
	struct bow_range *br;
	int ret;
	struct mapped_device *md = dm_table_get_md(ti->table);

	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	bc = kzalloc(sizeof(*bc), GFP_KERNEL);
	if (!bc) {
		ti->error = "Cannot allocate bow context";
		return -ENOMEM;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	ti->private = bc;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &bc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (bc->dev->bdev->bd_queue->limits.max_discard_sectors == 0) {
		bc->dev->bdev->bd_queue->limits.discard_granularity = 1 << 12;
		bc->dev->bdev->bd_queue->limits.max_hw_discard_sectors = 1 << 15;
		bc->dev->bdev->bd_queue->limits.max_discard_sectors = 1 << 15;
		bc->forward_trims = false;
	} else {
		bc->forward_trims = true;
	}

	bc->block_size = bc->dev->bdev->bd_queue->limits.logical_block_size;
	bc->block_shift = ilog2(bc->block_size);
	bc->log_sector = kzalloc(bc->block_size, GFP_KERNEL);
	if (!bc->log_sector) {
		ti->error = "Cannot allocate log sector";
		ret = -ENOMEM;
		goto bad;
	}

	init_completion(&bc->kobj_holder.completion);
	ret = kobject_init_and_add(&bc->kobj_holder.kobj, &bow_ktype,
				   &disk_to_dev(dm_disk(md))->kobj, "%s",
				   "bow");
	if (ret) {
		ti->error = "Cannot create sysfs node";
		goto bad;
	}

	mutex_init(&bc->ranges_lock);
	bc->ranges = RB_ROOT;
	bc->bufio = dm_bufio_client_create(bc->dev->bdev, bc->block_size, 1, 0,
					   NULL, NULL);
	if (IS_ERR(bc->bufio)) {
		ti->error = "Cannot initialize dm-bufio";
		ret = PTR_ERR(bc->bufio);
		bc->bufio = NULL;
		goto bad;
	}

	bc->workqueue = alloc_workqueue("dm-bow",
					WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM
					| WQ_UNBOUND, num_online_cpus());
	if (!bc->workqueue) {
		ti->error = "Cannot allocate workqueue";
		ret = -ENOMEM;
		goto bad;
	}

	INIT_LIST_HEAD(&bc->trimmed_list);

	br = kzalloc(sizeof(*br), GFP_KERNEL);
	if (!br) {
		ti->error = "Cannot allocate ranges";
		ret = -ENOMEM;
		goto bad;
	}

	br->sector = ti->len;
	br->type = TOP;
	rb_link_node(&br->node, NULL, &bc->ranges.rb_node);
	rb_insert_color(&br->node, &bc->ranges);

	br = kzalloc(sizeof(*br), GFP_KERNEL);
	if (!br) {
		ti->error = "Cannot allocate ranges";
		ret = -ENOMEM;
		goto bad;
	}

	br->sector = 0;
	br->type = UNCHANGED;
	rb_link_node(&br->node, bc->ranges.rb_node,
		     &bc->ranges.rb_node->rb_left);
	rb_insert_color(&br->node, &bc->ranges);

	ti->discards_supported = true;

	return 0;

bad:
	dm_bow_dtr(ti);
	return ret;
}
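
/*
 * The constructor takes a single argument, the underlying device. A
 * hypothetical invocation for an 8388608-sector (4 GiB) partition:
 *
 *	dmsetup create bow --table "0 8388608 bow /dev/block/by-name/userdata"
 *
 * The device node path is illustrative; any block device works.
 */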

/****** Handle writes ******/

static int prepare_unchanged_range(struct bow_context *bc, struct bow_range *br,
				   struct bvec_iter *bi_iter,
				   bool record_checksum)
{
	struct bow_range *backup_br;
	struct bvec_iter backup_bi;
	sector_t log_source, log_dest;
	unsigned int log_size;
	u32 checksum = 0;
	int ret;
	int original_type;
	sector_t sector0;

	/* Find a free range */
	backup_br = find_free_range(bc);
	if (!backup_br)
		return BLK_STS_NOSPC;

	/* Carve out a backup range. This may be smaller than the br given */
	backup_bi.bi_sector = backup_br->sector;
	backup_bi.bi_size = min(range_size(backup_br), (u64) bi_iter->bi_size);
	ret = split_range(bc, &backup_br, &backup_bi);
	if (ret)
		return ret;

	/*
	 * Carve out a changed range. This will not be smaller than the backup
	 * br since the backup br is smaller than the source range and iterator
	 */
	bi_iter->bi_size = backup_bi.bi_size;
	ret = split_range(bc, &br, bi_iter);
	if (ret)
		return ret;
	if (range_size(br) != range_size(backup_br)) {
		WARN_ON(1);
		return BLK_STS_IOERR;
	}

	/* Copy data over */
	ret = copy_data(bc, br, backup_br, record_checksum ? &checksum : NULL);
	if (ret)
		return ret;

	/* Add an entry to the log */
	log_source = br->sector;
	log_dest = backup_br->sector;
	log_size = range_size(br);

	/*
	 * Set the types. Note that since set_type also amalgamates ranges
	 * we have to set both sectors to their final type before calling
	 * set_type on either
	 */
	original_type = br->type;
	sector0 = backup_br->sector;
	if (backup_br->type == TRIMMED)
		list_del(&backup_br->trimmed_list);
	backup_br->type = br->type == SECTOR0_CURRENT ? SECTOR0_CURRENT
						      : BACKUP;
	br->type = CHANGED;
	set_type(bc, &backup_br, backup_br->type);

	/*
	 * Add the log entry after marking the backup sector, since adding a log
	 * can cause another backup
	 */
	ret = add_log_entry(bc, log_source, log_dest, log_size, checksum);
	if (ret) {
		br->type = original_type;
		return ret;
	}

	/* Now it is safe to mark this backup successful */
	if (original_type == SECTOR0_CURRENT)
		bc->log_sector->sector0 = sector0;

	set_type(bc, &br, br->type);
	return ret;
}

static int prepare_free_range(struct bow_context *bc, struct bow_range *br,
			      struct bvec_iter *bi_iter)
{
	int ret;

	ret = split_range(bc, &br, bi_iter);
	if (ret)
		return ret;
	set_type(bc, &br, CHANGED);
	return BLK_STS_OK;
}

static int prepare_changed_range(struct bow_context *bc, struct bow_range *br,
				 struct bvec_iter *bi_iter)
{
	/* Nothing to do ... */
	return BLK_STS_OK;
}

static int prepare_one_range(struct bow_context *bc,
			     struct bvec_iter *bi_iter)
{
	struct bow_range *br = find_first_overlapping_range(&bc->ranges,
							    bi_iter);
	switch (br->type) {
	case CHANGED:
		return prepare_changed_range(bc, br, bi_iter);

	case TRIMMED:
		return prepare_free_range(bc, br, bi_iter);

	case UNCHANGED:
	case BACKUP:
		return prepare_unchanged_range(bc, br, bi_iter, true);

	/*
	 * We cannot track the checksum for the active sector0, since it
	 * may change at any point.
	 */
	case SECTOR0_CURRENT:
		return prepare_unchanged_range(bc, br, bi_iter, false);

	case SECTOR0:	/* Handled in the dm_bow_map */
	case TOP:	/* Illegal - top is off the end of the device */
	default:
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}

struct write_work {
	struct work_struct work;
	struct bow_context *bc;
	struct bio *bio;
};

static void bow_write(struct work_struct *work)
{
	struct write_work *ww = container_of(work, struct write_work, work);
	struct bow_context *bc = ww->bc;
	struct bio *bio = ww->bio;
	struct bvec_iter bi_iter = bio->bi_iter;
	int ret = BLK_STS_OK;

	kfree(ww);

	mutex_lock(&bc->ranges_lock);
	do {
		ret = prepare_one_range(bc, &bi_iter);
		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
		bi_iter.bi_size = bio->bi_iter.bi_size
			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
			* SECTOR_SIZE;
	} while (!ret && bi_iter.bi_size);

	mutex_unlock(&bc->ranges_lock);

	if (!ret) {
		bio_set_dev(bio, bc->dev->bdev);
		submit_bio(bio);
	} else {
		DMERR("Write failure with error %d", -ret);
		bio->bi_status = ret;
		bio_endio(bio);
	}
}

static int queue_write(struct bow_context *bc, struct bio *bio)
{
	struct write_work *ww = kmalloc(sizeof(*ww), GFP_NOIO | __GFP_NORETRY
					| __GFP_NOMEMALLOC | __GFP_NOWARN);
	if (!ww) {
		DMERR("Failed to allocate write_work");
		return -ENOMEM;
	}

	INIT_WORK(&ww->work, bow_write);
	ww->bc = bc;
	ww->bio = bio;
	queue_work(bc->workqueue, &ww->work);
	return DM_MAPIO_SUBMITTED;
}

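/*
 * Reads and writes that touch sector 0 are redirected to the
 * SECTOR0_CURRENT range, since block 0 now holds the log. A bio larger
 * than one block is split so only its first block is redirected; the
 * remainder is handled like any other I/O.
 */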
static int handle_sector0(struct bow_context *bc, struct bio *bio)
{
	int ret = DM_MAPIO_REMAPPED;

	if (bio->bi_iter.bi_size > bc->block_size) {
		struct bio *split = bio_split(bio,
					      bc->block_size >> SECTOR_SHIFT,
					      GFP_NOIO,
					      &fs_bio_set);
		if (!split) {
			DMERR("Failed to split bio");
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			return DM_MAPIO_SUBMITTED;
		}

		bio_chain(split, bio);
		split->bi_iter.bi_sector = bc->log_sector->sector0;
		bio_set_dev(split, bc->dev->bdev);
		submit_bio(split);

		if (bio_data_dir(bio) == WRITE)
			ret = queue_write(bc, bio);
	} else {
		bio->bi_iter.bi_sector = bc->log_sector->sector0;
	}

	return ret;
}

static int add_trim(struct bow_context *bc, struct bio *bio)
{
	struct bow_range *br;
	struct bvec_iter bi_iter = bio->bi_iter;

	DMDEBUG("add_trim: %llu, %u",
		(unsigned long long)bio->bi_iter.bi_sector,
		bio->bi_iter.bi_size);

	do {
		br = find_first_overlapping_range(&bc->ranges, &bi_iter);

		switch (br->type) {
		case UNCHANGED:
			if (!split_range(bc, &br, &bi_iter))
				set_type(bc, &br, TRIMMED);
			break;

		case TRIMMED:
			/* Nothing to do */
			break;

		default:
			/* No other case is legal in TRIM state */
			WARN_ON(true);
			break;
		}

		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
		bi_iter.bi_size = bio->bi_iter.bi_size
			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
			* SECTOR_SIZE;

	} while (bi_iter.bi_size);

	bio_endio(bio);
	return DM_MAPIO_SUBMITTED;
}

static int remove_trim(struct bow_context *bc, struct bio *bio)
{
	struct bow_range *br;
	struct bvec_iter bi_iter = bio->bi_iter;

	DMDEBUG("remove_trim: %llu, %u",
		(unsigned long long)bio->bi_iter.bi_sector,
		bio->bi_iter.bi_size);

	do {
		br = find_first_overlapping_range(&bc->ranges, &bi_iter);

		switch (br->type) {
		case UNCHANGED:
			/* Nothing to do */
			break;

		case TRIMMED:
			if (!split_range(bc, &br, &bi_iter))
				set_type(bc, &br, UNCHANGED);
			break;

		default:
			/* No other case is legal in TRIM state */
			WARN_ON(true);
			break;
		}

		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
		bi_iter.bi_size = bio->bi_iter.bi_size
			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
			* SECTOR_SIZE;

	} while (bi_iter.bi_size);

	return DM_MAPIO_REMAPPED;
}

static int remap_unless_illegal_trim(struct bow_context *bc, struct bio *bio)
{
	if (!bc->forward_trims && bio_op(bio) == REQ_OP_DISCARD) {
		bio->bi_status = BLK_STS_NOTSUPP;
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	} else {
		bio_set_dev(bio, bc->dev->bdev);
		return DM_MAPIO_REMAPPED;
	}
}

/****** dm interface ******/

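/*
 * Fast path: once COMMITTED, and for any read that cannot touch block
 * 0, I/O is remapped straight to the backing device. Otherwise, in the
 * TRIM state discards grow the free list and writes shrink it, while in
 * the CHECKPOINT state block 0 is redirected and writes are queued so
 * their original contents can be backed up first.
 */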
static int dm_bow_map(struct dm_target *ti, struct bio *bio)
{
	int ret = DM_MAPIO_REMAPPED;
	struct bow_context *bc = ti->private;

	if (likely(atomic_read(&bc->state) == COMMITTED))
		return remap_unless_illegal_trim(bc, bio);

	if (bio_data_dir(bio) == READ && bio->bi_iter.bi_sector != 0)
		return remap_unless_illegal_trim(bc, bio);

	if (atomic_read(&bc->state) != COMMITTED) {
		enum state state;

		mutex_lock(&bc->ranges_lock);
		state = atomic_read(&bc->state);
		if (state == TRIM) {
			if (bio_op(bio) == REQ_OP_DISCARD)
				ret = add_trim(bc, bio);
			else if (bio_data_dir(bio) == WRITE)
				ret = remove_trim(bc, bio);
			else
				/* pass-through */;
		} else if (state == CHECKPOINT) {
			if (bio->bi_iter.bi_sector == 0)
				ret = handle_sector0(bc, bio);
			else if (bio_data_dir(bio) == WRITE)
				ret = queue_write(bc, bio);
			else
				/* pass-through */;
		} else {
			/* pass-through */
		}
		mutex_unlock(&bc->ranges_lock);
	}

	if (ret == DM_MAPIO_REMAPPED)
		return remap_unless_illegal_trim(bc, bio);

	return ret;
}

static void dm_bow_tablestatus(struct dm_target *ti, char *result,
			       unsigned int maxlen)
{
	char *end = result + maxlen;
	struct bow_context *bc = ti->private;
	struct rb_node *i;
	int trimmed_list_length = 0;
	int trimmed_range_count = 0;
	struct bow_range *br;

	if (maxlen == 0)
		return;
	result[0] = 0;

	list_for_each_entry(br, &bc->trimmed_list, trimmed_list)
		if (br->type == TRIMMED) {
			++trimmed_list_length;
		} else {
			scnprintf(result, end - result,
				  "ERROR: non-trimmed entry in trimmed_list");
			return;
		}

	if (!rb_first(&bc->ranges)) {
		scnprintf(result, end - result, "ERROR: Empty ranges");
		return;
	}

	if (container_of(rb_first(&bc->ranges), struct bow_range, node)
	    ->sector) {
		scnprintf(result, end - result,
			  "ERROR: First range does not start at sector 0");
		return;
	}

	for (i = rb_first(&bc->ranges); i; i = rb_next(i)) {
		struct bow_range *br = container_of(i, struct bow_range, node);

		result += scnprintf(result, end - result, "%s: %llu",
				    readable_type[br->type],
				    (unsigned long long)br->sector);
		if (result >= end)
			return;

		result += scnprintf(result, end - result, "\n");
		if (result >= end)
			return;

		if (br->type == TRIMMED)
			++trimmed_range_count;

		if (br->type == TOP) {
			if (br->sector != ti->len) {
				scnprintf(result, end - result,
					  "\nERROR: Top sector is incorrect");
			}

			if (&br->node != rb_last(&bc->ranges)) {
				scnprintf(result, end - result,
					  "\nERROR: Top sector is not last");
			}

			break;
		}

		if (!rb_next(i)) {
			scnprintf(result, end - result,
				  "\nERROR: Last range not of type TOP");
			return;
		}

		if (br->sector > range_top(br)) {
			scnprintf(result, end - result,
				  "\nERROR: sectors out of order");
			return;
		}
	}

	if (trimmed_range_count != trimmed_list_length)
		scnprintf(result, end - result,
			  "\nERROR: not all trimmed ranges in trimmed list");
}

static void dm_bow_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result,
			  unsigned int maxlen)
{
	switch (type) {
	case STATUSTYPE_INFO:
		if (maxlen)
			result[0] = 0;
		break;

	case STATUSTYPE_TABLE:
		dm_bow_tablestatus(ti, result, maxlen);
		break;
	}
}

static int dm_bow_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
	struct bow_context *bc = ti->private;
	struct dm_dev *dev = bc->dev;

	*bdev = dev->bdev;
	/* Only pass ioctls through if the device sizes match exactly. */
	return ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

static int dm_bow_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct bow_context *bc = ti->private;

	return fn(ti, bc->dev, 0, ti->len, data);
}

static struct target_type bow_target = {
	.name = "bow",
	.version = {1, 1, 1},
	.module = THIS_MODULE,
	.ctr = dm_bow_ctr,
	.dtr = dm_bow_dtr,
	.map = dm_bow_map,
	.status = dm_bow_status,
	.prepare_ioctl = dm_bow_prepare_ioctl,
	.iterate_devices = dm_bow_iterate_devices,
};

static int __init dm_bow_init(void)
{
	int r = dm_register_target(&bow_target);

	if (r < 0)
		DMERR("registering bow failed %d", r);
	return r;
}

static void __exit dm_bow_exit(void)
{
	dm_unregister_target(&bow_target);
}

MODULE_LICENSE("GPL");

module_init(dm_bow_init);
module_exit(dm_bow_exit);