/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef _LINUX_DM_BLOCK_MANAGER_H
#define _LINUX_DM_BLOCK_MANAGER_H

#include <linux/types.h>
#include <linux/blkdev.h>

/*----------------------------------------------------------------*/

/*
 * Block number.
 */
typedef uint64_t dm_block_t;
struct dm_block;

dm_block_t dm_block_location(struct dm_block *b);
void *dm_block_data(struct dm_block *b);

/*----------------------------------------------------------------*/

/*
 * @block_size is the size of a metadata block in bytes.
 *
 * @max_held_per_thread should be the maximum number of locks, read or
 * write, that an individual thread holds at any one time.
 */
struct dm_block_manager;
struct dm_block_manager *dm_block_manager_create(
	struct block_device *bdev, unsigned block_size,
	unsigned max_held_per_thread);
void dm_block_manager_destroy(struct dm_block_manager *bm);

unsigned dm_bm_block_size(struct dm_block_manager *bm);
dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm);

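/*
 * Illustrative sketch (not part of the interface): creating and tearing
 * down a block manager.  The 4096-byte block size and the lock count of 5
 * are arbitrary example values, and @bdev is assumed to be an
 * already-opened struct block_device.
 *
 *	struct dm_block_manager *bm;
 *
 *	bm = dm_block_manager_create(bdev, 4096, 5);
 *	if (IS_ERR(bm))
 *		return PTR_ERR(bm);
 *
 *	pr_info("metadata device has %llu blocks of %u bytes\n",
 *		(unsigned long long) dm_bm_nr_blocks(bm),
 *		dm_bm_block_size(bm));
 *
 *	dm_block_manager_destroy(bm);
 */
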
/*----------------------------------------------------------------*/

/*
 * The validator allows the caller to verify newly-read data and modify
 * the data just before writing, e.g. to calculate checksums.  It's
 * important to be consistent with your use of validators.  The only time
 * you can change validators is if you call dm_bm_write_lock_zero.
 */
struct dm_block_validator {
	const char *name;
	void (*prepare_for_write)(struct dm_block_validator *v, struct dm_block *b, size_t block_size);

	/*
	 * Return 0 if the checksum is valid or < 0 on error.
	 */
	int (*check)(struct dm_block_validator *v, struct dm_block *b, size_t block_size);
};

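/*
 * Illustrative sketch (not part of the interface): a minimal validator
 * that keeps a checksum at the front of each block.  The example_header
 * layout and the zero XOR seed are invented for this sketch; real users
 * define their own on-disk layout.
 *
 *	struct example_header {
 *		__le32 csum;
 *	} __packed;
 *
 *	static void example_prepare_for_write(struct dm_block_validator *v,
 *					      struct dm_block *b,
 *					      size_t block_size)
 *	{
 *		struct example_header *h = dm_block_data(b);
 *
 *		h->csum = cpu_to_le32(dm_bm_checksum(&h->csum + 1,
 *						     block_size - sizeof(h->csum),
 *						     0));
 *	}
 *
 *	static int example_check(struct dm_block_validator *v,
 *				 struct dm_block *b, size_t block_size)
 *	{
 *		struct example_header *h = dm_block_data(b);
 *		__le32 csum = cpu_to_le32(dm_bm_checksum(&h->csum + 1,
 *							 block_size - sizeof(h->csum),
 *							 0));
 *
 *		return csum == h->csum ? 0 : -EILSEQ;
 *	}
 *
 *	static struct dm_block_validator example_validator = {
 *		.name = "example",
 *		.prepare_for_write = example_prepare_for_write,
 *		.check = example_check,
 *	};
 */
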
/*----------------------------------------------------------------*/

/*
 * You can have multiple concurrent readers or a single writer holding a
 * block lock.
 */

/*
 * dm_bm_read_lock() and dm_bm_write_lock() lock a block and return,
 * through @result, a pointer to memory that holds a copy of that block.
 * If you have write-locked the block then any changes you make to memory
 * pointed to by @result will be written back to the disk sometime after
 * dm_bm_unlock is called.
 */
int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
		    struct dm_block_validator *v,
		    struct dm_block **result);

int dm_bm_write_lock(struct dm_block_manager *bm, dm_block_t b,
		     struct dm_block_validator *v,
		     struct dm_block **result);

/*
 * The *_try_lock variants return -EWOULDBLOCK if the block isn't
 * available immediately.
 */
int dm_bm_read_try_lock(struct dm_block_manager *bm, dm_block_t b,
			struct dm_block_validator *v,
			struct dm_block **result);

/*
 * Use dm_bm_write_lock_zero() when you know you're going to
 * overwrite the block completely.  It saves a disk read.
 */
int dm_bm_write_lock_zero(struct dm_block_manager *bm, dm_block_t b,
			  struct dm_block_validator *v,
			  struct dm_block **result);

void dm_bm_unlock(struct dm_block *b);

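/*
 * Illustrative sketch (not part of the interface): the basic lock /
 * inspect / unlock pattern.  Block number 1 and example_validator are
 * assumptions carried over from the sketch above.
 *
 *	struct dm_block *b;
 *	int r;
 *
 *	Take a read lock and inspect the cached copy:
 *
 *	r = dm_bm_read_lock(bm, 1, &example_validator, &b);
 *	if (r)
 *		return r;
 *	inspect dm_block_data(b) here, then release the lock:
 *	dm_bm_unlock(b);
 *
 *	Take a write lock; changes made through dm_block_data(b) are
 *	written back some time after the unlock:
 *
 *	r = dm_bm_write_lock(bm, 1, &example_validator, &b);
 *	if (r)
 *		return r;
 *	modify dm_block_data(b) here, then:
 *	dm_bm_unlock(b);
 */
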
/*
 * It's a common idiom to have a superblock that should be committed last.
 *
 * dm_bm_flush() writes out and waits for all dirty blocks, so the caller
 * can guarantee they are written and flushed before the superblock is
 * unlocked and written.
 *
 * This method always blocks.
 */
int dm_bm_flush(struct dm_block_manager *bm);

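/*
 * Illustrative sketch (not part of the interface): the superblock-last
 * commit idiom.  Block 0 as the superblock location and sb_validator are
 * assumptions made for this sketch.
 *
 *	Write and flush everything that must land before the superblock:
 *
 *	r = dm_bm_flush(bm);
 *	if (r)
 *		return r;
 *
 *	Now rewrite the superblock and flush again so it reaches the disk
 *	after all the blocks it references:
 *
 *	r = dm_bm_write_lock_zero(bm, 0, &sb_validator, &sb);
 *	if (r)
 *		return r;
 *	fill in dm_block_data(sb), then:
 *	dm_bm_unlock(sb);
 *
 *	r = dm_bm_flush(bm);
 */
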
/*
 * Request data is prefetched into the cache.
 */
void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);

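/*
 * Illustrative sketch (not part of the interface): prefetching a run of
 * blocks that is about to be read, so the subsequent read locks are more
 * likely to hit the cache.  @first and @count are assumed values.
 *
 *	dm_block_t blk;
 *
 *	for (blk = first; blk < first + count; blk++)
 *		dm_bm_prefetch(bm, blk);
 */
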
/*
 * Switches the bm to read-only mode.  Once read-only mode has been
 * entered the following functions will return -EPERM:
 *
 *   dm_bm_write_lock
 *   dm_bm_write_lock_zero
 *   dm_bm_flush
 */
bool dm_bm_is_read_only(struct dm_block_manager *bm);
void dm_bm_set_read_only(struct dm_block_manager *bm);
void dm_bm_set_read_write(struct dm_block_manager *bm);

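/*
 * Illustrative sketch (not part of the interface): dropping into
 * read-only mode after a metadata failure so further writes are refused
 * until the mode is explicitly cleared.  metadata_error is an assumed
 * caller-side flag.
 *
 *	if (metadata_error && !dm_bm_is_read_only(bm))
 *		dm_bm_set_read_only(bm);
 *
 *	later, once the metadata has been repaired:
 *
 *	dm_bm_set_read_write(bm);
 */
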
u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor);

/*----------------------------------------------------------------*/

#endif	/* _LINUX_DM_BLOCK_MANAGER_H */