/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#ifndef DM_BUFIO_H
#define DM_BUFIO_H

#include <linux/blkdev.h>
#include <linux/types.h>

/*----------------------------------------------------------------*/

struct dm_bufio_client;
struct dm_buffer;

/*
 * Create a buffered IO cache on a given device.
 */
struct dm_bufio_client *
dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
                       unsigned reserved_buffers, unsigned aux_size,
                       void (*alloc_callback)(struct dm_buffer *),
                       void (*write_callback)(struct dm_buffer *));

/*
 * Release a buffered IO cache.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c);
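
/*
 * Example (a hypothetical usage sketch, not part of this API): create a
 * client for 4096-byte blocks with one reserved buffer and no auxiliary
 * per-buffer data, then tear it down.  The block size, the reserved count
 * and the "bdev" variable are illustrative assumptions, and the error
 * handling assumes the usual ERR_PTR convention.
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *
 *	... issue reads and writes through the client ...
 *
 *	dm_bufio_client_destroy(c);
 */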

/*
 * WARNING: to avoid deadlocks, these conditions are observed:
 *
 * - At most one thread can hold up to "reserved_buffers" buffers
 *   simultaneously.
 * - All other threads can hold at most one buffer each.
 * - Threads which call only dm_bufio_get can hold an unlimited number of
 *   buffers.
 */

/*
 * Read a given block from disk.  Returns a pointer to the data.  Also
 * returns, in *bp, a pointer to the dm_buffer that can be used to release
 * the buffer or to mark it dirty.
 */
void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
                    struct dm_buffer **bp);
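
/*
 * Example (a hypothetical sketch, assuming dm_bufio_read reports I/O
 * errors with the ERR_PTR convention): read one block, inspect it, and
 * drop the reference again.  "c" and "block" are assumed to exist.
 *
 *	struct dm_buffer *bp;
 *	void *data;
 *
 *	data = dm_bufio_read(c, block, &bp);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	... examine the block contents in "data" ...
 *
 *	dm_bufio_release(bp);
 */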

/*
 * Like dm_bufio_read, but return the buffer only if it is already in the
 * cache; don't read it from disk.  If the buffer is not in the cache,
 * return NULL.
 */
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
                   struct dm_buffer **bp);
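
/*
 * Example (a hypothetical sketch): probe the cache and fall back to a
 * full read when the block is not resident.  The fallback policy is an
 * illustrative assumption, not something this API requires.
 *
 *	data = dm_bufio_get(c, block, &bp);
 *	if (!data)
 *		data = dm_bufio_read(c, block, &bp);
 */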

/*
 * Like dm_bufio_read, but don't read anything from the disk.  It is
 * expected that the caller initializes the buffer and marks it dirty.
 */
void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
                   struct dm_buffer **bp);
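
/*
 * Example (a hypothetical sketch, assuming the ERR_PTR error convention):
 * allocate a fresh block, initialize it, and mark it dirty so that a
 * later flush writes it out.
 *
 *	data = dm_bufio_new(c, block, &bp);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	memset(data, 0, dm_bufio_get_block_size(c));
 *	dm_bufio_mark_buffer_dirty(bp);
 *	dm_bufio_release(bp);
 */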

/*
 * Prefetch the specified blocks into the cache.
 * The function starts reading the blocks and returns without waiting for
 * the I/O to finish.
 */
void dm_bufio_prefetch(struct dm_bufio_client *c,
                       sector_t block, unsigned n_blocks);
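
/*
 * Example (a hypothetical sketch): warm the cache with the next 8 blocks
 * before they are needed, then read them one by one.  The count and the
 * loop structure are illustrative assumptions.
 *
 *	dm_bufio_prefetch(c, block, 8);
 *
 *	for (i = 0; i < 8; i++) {
 *		data = dm_bufio_read(c, block + i, &bp);
 *		...
 *		dm_bufio_release(bp);
 *	}
 */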

/*
 * Release a reference obtained with dm_bufio_{read,get,new}.  The data
 * pointer and the dm_buffer pointer are no longer valid after this call.
 */
void dm_bufio_release(struct dm_buffer *b);

/*
 * Mark a buffer dirty.  It should be called after the buffer is modified.
 *
 * In case of memory pressure, the buffer may be written after
 * dm_bufio_mark_buffer_dirty, but before dm_bufio_write_dirty_buffers.  So
 * dm_bufio_write_dirty_buffers guarantees that the buffer is on disk, but
 * the actual write may occur earlier.
 */
void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);
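
/*
 * Example (a hypothetical sketch): the usual read-modify-write sequence.
 * The modification step is an illustrative placeholder.
 *
 *	data = dm_bufio_read(c, block, &bp);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	... modify the block contents in "data" ...
 *
 *	dm_bufio_mark_buffer_dirty(bp);
 *	dm_bufio_release(bp);
 */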

/*
 * Initiate writing of dirty buffers, without waiting for completion.
 */
void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);

/*
 * Write all dirty buffers.  Guarantees that all dirty buffers created prior
 * to this call are on disk when this call exits.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);
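
/*
 * Example (a hypothetical sketch, assuming both functions return 0 on
 * success and a negative errno on failure): commit all dirty buffers and
 * then flush the device's hardware write cache.
 *
 *	r = dm_bufio_write_dirty_buffers(c);
 *	if (r)
 *		return r;
 *
 *	r = dm_bufio_issue_flush(c);
 *	if (r)
 *		return r;
 */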

/*
 * Send an empty write barrier to the device to flush the hardware disk
 * cache.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c);

/*
 * Like dm_bufio_release but also move the buffer to the new
 * block.  dm_bufio_write_dirty_buffers is needed to commit the new block.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block);
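
/*
 * Example (a hypothetical sketch): copy a block to a new location by
 * reading it, redirecting the buffer, and committing.  The "old_block"
 * and "new_block" names are illustrative assumptions.
 *
 *	data = dm_bufio_read(c, old_block, &bp);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	dm_bufio_release_move(bp, new_block);
 *	return dm_bufio_write_dirty_buffers(c);
 */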

/*
 * Accessors for client and per-buffer properties.
 */
unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
sector_t dm_bufio_get_block_number(struct dm_buffer *b);
void *dm_bufio_get_block_data(struct dm_buffer *b);
void *dm_bufio_get_aux_data(struct dm_buffer *b);
struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b);
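
/*
 * Example (a hypothetical sketch): given a dm_buffer obtained elsewhere,
 * recover the client it belongs to and log which block it caches.  The
 * log message itself is an illustrative assumption.
 *
 *	struct dm_bufio_client *c = dm_bufio_get_client(bp);
 *
 *	DMINFO("block %llu of %llu, block size %u",
 *	       (unsigned long long)dm_bufio_get_block_number(bp),
 *	       (unsigned long long)dm_bufio_get_device_size(c),
 *	       dm_bufio_get_block_size(c));
 */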

/*----------------------------------------------------------------*/

#endif