// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	}

	return NULL;
}

bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
		size_t comp_len = strlen(btrfs_compress_types[i]);

		if (len < comp_len)
			continue;

		if (!strncmp(btrfs_compress_types[i], str, comp_len))
			return true;
	}
	return false;
}
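
/*
 * Usage sketch (illustrative values): only the prefix of @str is compared
 * above, so a mount option with a level suffix still validates. The loop
 * starts at index 1 to skip the empty string standing for "none", since
 * strncmp() with length 0 would match anything:
 *
 *	btrfs_compress_is_valid_type("zlib:9", 6);	-> true
 *	btrfs_compress_is_valid_type("zstd", 4);	-> true
 *	btrfs_compress_is_valid_type("gzip", 4);	-> false
 */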

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
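
/*
 * Worked example (sizes assumed, not taken from a specific config): with a
 * 4KiB sectorsize and 4-byte crc32c checksums, a 128KiB compressed extent
 * needs 128KiB / 4KiB = 32 checksums, so the allocation reserves
 * sizeof(struct compressed_bio) + 32 * 4 bytes and the per-sector sums are
 * stored inline right behind the struct.
 */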

static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
						    *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb have failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, lets start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long,
					    nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_status ?
					 BLK_STS_OK : BLK_STS_NOTSUPP);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
					   unsigned long len, u64 disk_start,
					   unsigned long compressed_len,
					   struct page **compressed_pages,
					   unsigned long nr_pages,
					   unsigned int write_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	blk_status_t ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = btrfs_bio_alloc(bdev, first_byte);
	bio->bi_opf = REQ_OP_WRITE | write_flags;
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE, bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count. Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio = btrfs_bio_alloc(bdev, first_byte);
			bio->bi_opf = REQ_OP_WRITE | write_flags;
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
				   "bytes left %lu compress len %lu nr %lu",
				   bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	return 0;
}
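
/*
 * Reference counting pattern used above (sketch): cb->pending_bios starts at
 * 1 for the bio currently being filled, and every submitted bio takes an
 * extra reference *before* btrfs_map_bio(), so end_compressed_bio_write()
 * can never see the count drop to zero (and free @cb) while the extent is
 * still being split into more bios:
 *
 *	refcount_set(&cb->pending_bios, 1);
 *	...
 *	refcount_inc(&cb->pending_bios);	// before each submission
 *	btrfs_map_bio(fs_info, bio, 0, 1);	// may complete immediately
 */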

static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->i_pages, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
					  int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio_first_page_all(bio)),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
	comp_bio->bi_opf = REQ_OP_READ;
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE,
						      comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
			comp_bio->bi_opf = REQ_OP_READ;
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

/*
 * The heuristic uses systematic sampling to collect data from the input data
 * range, and the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many consecutive bytes are copied for each sample
 * @SAMPLING_INTERVAL  - distance between the starts of consecutive samples
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
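
/*
 * Worked example: BTRFS_MAX_UNCOMPRESSED is 128KiB, so the bound works out
 * to 131072 * 16 / 256 = 8192 bytes, ie. at most 512 samples of 16 bytes,
 * matching the "8KB of data per data range" estimate above.
 */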

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(void)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

struct workspaces_list {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
};

static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static struct workspaces_list btrfs_heuristic_ws;

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

void __init btrfs_init_compress(void)
{
	struct list_head *workspace;
	int i;

	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);

	workspace = alloc_heuristic_ws();
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
	} else {
		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
		btrfs_heuristic_ws.free_ws = 1;
		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation provides a forward progress guarantee, so we do not return
 * errors.
 */
static struct list_head *__find_workspace(int type, bool heuristic)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws = &btrfs_heuristic_ws.idle_ws;
		ws_lock = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait = &btrfs_heuristic_ws.ws_wait;
		free_ws = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws = &btrfs_comp_ws[idx].idle_ws;
		ws_lock = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait = &btrfs_comp_ws[idx].ws_wait;
		free_ws = &btrfs_comp_ws[idx].free_ws;
	}

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	if (heuristic)
		workspace = alloc_heuristic_ws();
	else
		workspace = btrfs_compress_op[idx]->alloc_workspace();
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *find_workspace(int type)
{
	return __find_workspace(type, false);
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void __free_workspace(int type, struct list_head *workspace,
			     bool heuristic)
{
	int idx = type - 1;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws = &btrfs_heuristic_ws.idle_ws;
		ws_lock = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait = &btrfs_heuristic_ws.ws_wait;
		free_ws = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws = &btrfs_comp_ws[idx].idle_ws;
		ws_lock = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait = &btrfs_comp_ws[idx].ws_wait;
		free_ws = &btrfs_comp_ws[idx].free_ws;
	}

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	if (heuristic)
		free_heuristic_ws(workspace);
	else
		btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	cond_wake_up(ws_wait);
}

static void free_workspace(int type, struct list_head *ws)
{
	return __free_workspace(type, ws, false);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
		workspace = btrfs_heuristic_ws.idle_ws.next;
		list_del(workspace);
		free_heuristic_ws(workspace);
		atomic_dec(&btrfs_heuristic_ws.total_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is stored in bits 0-3
 * - the level is stored in bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read. It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	struct list_head *workspace;
	int ret;
	int type = type_level & 0xF;

	workspace = find_workspace(type);

	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
	ret = btrfs_compress_op[type - 1]->compress_pages(workspace, mapping,
							  start, pages,
							  out_pages,
							  total_in, total_out);
	free_workspace(type, workspace);
	return ret;
}
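
/*
 * Illustrative caller sketch (variable names assumed, not the actual call
 * sites): the algorithm occupies the low 4 bits of @type_level and the level
 * bits 4-7, so a zlib level 3 request could be encoded as:
 *
 *	unsigned int type_level = BTRFS_COMPRESS_ZLIB | (3 << 4);
 *
 *	ret = btrfs_compress_pages(type_level, inode->i_mapping, start,
 *				   pages, &nr_pages, &total_in, &total_out);
 */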

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = find_workspace(type);
	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
	free_workspace(type, workspace);

	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->decompress(workspace, data_in,
						      dest_page, start_byte,
						      srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void __cold btrfs_exit_compress(void)
{
	free_workspaces();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of the working buffer within the uncompressed
 * data (ie. the number of decompressed bytes that preceded this buffer)
 *
 * total_out is the offset of the end of the working buffer within the
 * uncompressed data
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
			      PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, the sample has low byte
 *			    entropy and is likely compressible
 *
 * @ENTROPY_LVL_HIGH - the data is likely not compressible
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)
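
/*
 * For reference, the quantity approximated by shannon_entropy() below is the
 * classic Shannon entropy of the sampled byte distribution, normalized to a
 * percentage of the 8 bits/byte maximum:
 *
 *	H = -sum(p_i * log2(p_i)),	p_i = count_i / sample_size
 *	result = 100 * H / 8
 *
 * The sum is computed with integer math only. Note that the loop in
 * shannon_entropy() may stop at the first empty bucket because the buckets
 * have already been sorted in descending order by byte_core_set_size().
 */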

/*
 * For increased precision in the shannon_entropy calculation, compute
 * pow(n, M) to preserve more digits after the decimal point:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}
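
/*
 * Example: ilog2_w(10) = ilog2(10 * 10 * 10 * 10) = ilog2(10000) = 13,
 * roughly 4 * log2(10) = 13.28, whereas a plain ilog2(10) = 3 would throw
 * away the fractional part entirely.
 */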

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}

#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift)
{
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}
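
/*
 * Example: for num = 0x1234 and shift = 4, num >> 4 = 0x123 and
 * 0x123 % 16 = 3, so the returned digit is 15 - 3 = 12. Inverting each digit
 * this way makes the radix sort below produce a descending order without a
 * separate reversal pass.
 */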

/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try to avoid useless loop iterations for small numbers stored in big
	 * counters.  Example: 48 33 4 ... in a 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * A normal radix sort moves the data from the temporary array
		 * back to the main one, but that costs some CPU time.  Avoid
		 * the memcpy() by doing another sort iteration that scatters
		 * back into the original array instead.
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}
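
/*
 * Design note on the loop above: every while iteration performs two 4-bit
 * digit passes, the first scattering @array into @array_buf and the second
 * scattering @array_buf back into @array. After bitlen/RADIX_BASE digit
 * passes the sorted result therefore already resides in @array and no final
 * copy is needed.
 */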

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * The other possibility is a normal (Gaussian) distribution, where the data
 * could be potentially compressible, but we have to take a few more steps to
 * decide how much.
 *
 * @BYTE_CORE_SET_LOW  - the main part of the byte values repeats frequently,
 *			 a compression algo can easily fix that
 * @BYTE_CORE_SET_HIGH - the data has a uniform distribution and with high
 *			 probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data the byte set is restricted to a limited
 * number of possible characters, and that restriction in most cases makes
 * the data easy to compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting the count of byte values in buckets. If the
	 * byte set size is bigger than the threshold, it's pointless to
	 * continue, the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}

static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}
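
/*
 * Example: a sample taken from a zero-filled or otherwise block-replicated
 * region has two identical halves, so the memcmp() above reports a match
 * and the heuristic can declare the data compressible right away, without
 * ever filling the buckets.
 */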

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
			       SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap(page);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = __find_workspace(0, true);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 * values, every bucket has counter at level ~54. The heuristic would
	 * be confused. This can happen when data have some internal repeated
	 * patterns like "abbacbbc...". This can be detected by analyzing
	 * pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	__free_workspace(0, ws_list, true);
	return ret;
}

unsigned int btrfs_compress_str2level(const char *str)
{
	if (strncmp(str, "zlib", 4) != 0)
		return 0;

	/* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
		return str[5] - '0';

	return BTRFS_ZLIB_DEFAULT_LEVEL;
}
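
/*
 * Usage sketch (illustrative):
 *
 *	btrfs_compress_str2level("zlib:3");	-> 3
 *	btrfs_compress_str2level("zlib");	-> BTRFS_ZLIB_DEFAULT_LEVEL
 *	btrfs_compress_str2level("zlib:42");	-> BTRFS_ZLIB_DEFAULT_LEVEL
 *	btrfs_compress_str2level("zstd");	-> 0 (only zlib has levels here)
 */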