#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
#include "extent_map.h"


static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("extent_map",
			sizeof(struct extent_map), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree: tree to initialize
 *
 * Initialize the extent map tree @tree.  Should be called for each new
 * inode or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT;
	rwlock_init(&tree->lock);
}

/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	em->in_tree = 0;
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	atomic_set(&em->refs, 1);
	return em;
}
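
/*
 * Note: alloc_extent_map() initializes only in_tree, flags, compress_type
 * and refs; callers are expected to fill in start, len, block_start and
 * the remaining fields themselves.
 */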

/**
 * free_extent_map - drop reference count of an extent_map
 * @em: extent map being released
 *
 * Drops the reference count on @em by one and frees the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(atomic_read(&em->refs) == 0);
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}

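/*
 * tree_insert - link @node into @root at the slot covering @offset
 *
 * Returns the existing node whose range contains @offset if that slot is
 * already occupied (in which case @node is not inserted), or NULL once
 * @node has been linked and recolored.
 */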
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct extent_map, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * Search through the tree for an extent_map covering the given offset.  If
 * none can be found, report the neighboring extents through @prev_ret and
 * @next_ret instead.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start == EXTENT_MAP_DELALLOC &&
	      prev->block_start == EXTENT_MAP_DELALLOC) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}

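/*
 * try_merge_map - merge @em with its neighbors in @tree where possible
 *
 * Extends @em backward over a mergeable predecessor and forward over a
 * mergeable successor, unlinking each absorbed neighbor from the tree and
 * dropping its reference.
 */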
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			merge->in_tree = 0;
			rb_erase(&merge->rb_node, &tree->map);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		/* extend by the neighbor's block_len, matching the backward
		 * merge above; the original used merge->len here */
		em->block_len += merge->block_len;
		rb_erase(&merge->rb_node, &tree->map);
		merge->in_tree = 0;
		free_extent_map(merge);
	}
}

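/*
 * unpin_extent_cache - unpin an extent range in the cache
 *
 * Looks up the extent map starting at @start, clears EXTENT_FLAG_PINNED
 * and attempts a merge with the neighboring mappings.  Unlike
 * add_extent_mapping(), this takes tree->lock itself.
 */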
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
{
	int ret = 0;
	struct extent_map *em;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	clear_bit(EXTENT_FLAG_PINNED, &em->flags);

	try_merge_map(tree, em);

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree: tree to insert new map in
 * @em: map to insert
 *
 * Insert @em into @tree, or fail with -EEXIST if the range is already
 * mapped.  On success an additional reference is taken on @em, and any
 * mergeable neighbors are folded into it with their references dropped.
 */
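/*
 * The caller must already hold tree->lock for writing; neither the lookup
 * nor the insert below takes it (see unpin_extent_cache() above for the
 * locking pattern).
 */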
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct rb_node *rb;
	struct extent_map *exist;

	exist = lookup_extent_mapping(tree, em->start, em->len);
	if (exist) {
		free_extent_map(exist);
		ret = -EEXIST;
		goto out;
	}
	rb = tree_insert(&tree->map, em->start, &em->rb_node);
	if (rb) {
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);

	try_merge_map(tree, em);
out:
	return ret;
}

/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

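/*
 * __lookup_extent_mapping - shared lookup helper
 *
 * With @strict set, return only a mapping that actually intersects
 * [start, start + len); otherwise fall back to the nearest neighbor found
 * during the search.  A reference is taken on the returned map.
 */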
struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
					   u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map, start, &prev, &next);
	if (!rb_node) {
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	atomic_inc(&em->refs);
	return em;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree: tree to lookup in
 * @start: byte offset to start the search
 * @len: length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.  There may be additional objects in the tree
 * that intersect, so check the object returned carefully to make sure that
 * no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}
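
/*
 * Typical lookup pattern (a minimal sketch; the caller is responsible for
 * the tree lock, mirroring how unpin_extent_cache() above takes it):
 *
 *	read_lock(&tree->lock);
 *	em = lookup_extent_mapping(tree, start, len);
 *	read_unlock(&tree->lock);
 *	if (em) {
 *		... use em->start, em->len, em->block_start ...
 *		free_extent_map(em);	(drops the reference from the lookup)
 *	}
 */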

/**
 * search_extent_mapping - find a nearby extent map
 * @tree: tree to lookup in
 * @start: byte offset to start the search
 * @len: length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) range.
 *
 * If one can't be found, any nearby extent may be returned.
 */
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree: extent tree to remove from
 * @em: extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret = 0;

	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase(&em->rb_node, &tree->map);
	em->in_tree = 0;
	return ret;
}