// SPDX-License-Identifier: GPL-2.0
/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	Andrew Morton
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/android_fs.h>

EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_start);
EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_end);
EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_start);
EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_end);
EXPORT_TRACEPOINT_SYMBOL(android_fs_fsync_start);
EXPORT_TRACEPOINT_SYMBOL(android_fs_fsync_end);

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	if (trace_android_fs_dataread_end_enabled() &&
	    (bio_data_dir(bio) == READ)) {
		struct page *first_page = bio->bi_io_vec[0].bv_page;

		if (first_page != NULL)
			trace_android_fs_dataread_end(first_page->mapping->host,
						      page_offset(first_page),
						      bio->bi_iter.bi_size);
	}

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		page_endio(page, bio_op(bio),
			   blk_status_to_errno(bio->bi_status));
	}

	bio_put(bio);
}

static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
{
	if (trace_android_fs_dataread_start_enabled() && (op == REQ_OP_READ)) {
		struct page *first_page = bio->bi_io_vec[0].bv_page;

		if (first_page != NULL) {
			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

			path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    first_page->mapping->host);
			trace_android_fs_dataread_start(
				first_page->mapping->host,
				page_offset(first_page),
				bio->bi_iter.bi_size,
				current->pid,
				path,
				current->comm);
		}
	}
	bio->bi_end_io = mpage_end_io;
	bio_set_op_attrs(bio, op, op_flags);
	guard_bio_eod(op, bio);
	submit_bio(bio);
	return NULL;
}

static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	/* Restrict the given (page cache) mask for slab allocations */
	gfp_flags &= GFP_KERNEL;
	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = first_sector;
	}
	return bio;
}

/*
 * support function for mpage_readpages.  The fs-supplied get_block might
 * return an up-to-date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, i_blocksize(inode), 0);
	}
	head = page_buffers(page);
	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

struct mpage_readpage_args {
	struct bio *bio;
	struct page *page;
	unsigned int nr_pages;
	bool is_readahead;
	sector_t last_block_in_bio;
	struct buffer_head map_bh;
	unsigned long first_logical_block;
	get_block_t *get_block;
};

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructs the largest possible BIOs, submitting them for IO
 * if the blocks are not contiguous on the disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
{
	struct page *page = args->page;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	struct buffer_head *map_bh = &args->map_bh;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	int op_flags;
	unsigned nblocks;
	unsigned relative_block;
	gfp_t gfp;

	if (args->is_readahead) {
		op_flags = REQ_RAHEAD;
		gfp = readahead_gfp_mask(page->mapping);
	} else {
		op_flags = 0;
		gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
	}

	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = block_in_file + args->nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) &&
	    block_in_file > args->first_logical_block &&
	    block_in_file < (args->first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - args->first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (args->get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			args->first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
	    cleancache_get_page(page) == 0) {
		SetPageUptodate(page);
		goto confused;
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (args->bio && (args->last_block_in_bio != blocks[0] - 1))
		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);

alloc_new:
	if (args->bio == NULL) {
		if (first_hole == blocks_per_page) {
			if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
								page))
				goto out;
		}
		args->bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
					min_t(int, args->nr_pages,
					      BIO_MAX_PAGES),
					gfp);
		if (args->bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(args->bio, page, length, 0) < length) {
		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
		goto alloc_new;
	}

	relative_block = block_in_file - args->first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
	else
		args->last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return args->bio;

confused:
	if (args->bio)
		args->bio = mpage_bio_submit(REQ_OP_READ, op_flags, args->bio);
	if (!PageUptodate(page))
		block_read_full_page(page, args->get_block);
	else
		unlock_page(page);
	goto out;
}

/**
 * mpage_readpages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *
 * 	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct mpage_readpage_args args = {
		.get_block = get_block,
		.is_readahead = true,
	};
	unsigned page_idx;

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index,
					readahead_gfp_mask(mapping))) {
			args.page = page;
			args.nr_pages = nr_pages - page_idx;
			args.bio = do_mpage_readpage(&args);
		}
		put_page(page);
	}
	BUG_ON(!list_empty(pages));
	if (args.bio)
		mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, args.bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);
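
/*
 * Illustrative sketch only (guarded out, not part of mpage.c): one way a
 * block-mapped filesystem's get_block() might set BH_Boundary as described
 * in the comment above, so that accumulated read I/O is pushed before the
 * next indirect/metadata block is read.  myfs_get_block() and
 * myfs_map_block() are hypothetical names used purely for this example;
 * real filesystems have their own mapping helpers.
 */
#if 0	/* example only, never compiled */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	sector_t phys;
	bool last_in_indirect;
	int err;

	/* hypothetical helper: translate a file block to a disk block */
	err = myfs_map_block(inode, iblock, &phys, &last_in_indirect);
	if (err)
		return err;

	map_bh(bh_result, inode->i_sb, phys);
	if (last_in_indirect)
		/* next mapping needs metadata I/O: submit what we have */
		set_buffer_boundary(bh_result);
	return 0;
}
#endif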

/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct mpage_readpage_args args = {
		.page = page,
		.nr_pages = 1,
		.get_block = get_block,
	};

	args.bio = do_mpage_readpage(&args);
	if (args.bio)
		mpage_bio_submit(REQ_OP_READ, 0, args.bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpage);
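
/*
 * Illustrative sketch only (guarded out, not part of mpage.c): how a
 * filesystem typically wires these read entry points into its
 * address_space_operations, passing its own block mapper.  The myfs_*
 * names are hypothetical; see a real user such as fs/ext2/inode.c for
 * the genuine article.
 */
#if 0	/* example only, never compiled */
static int myfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, myfs_get_block);
}

static int myfs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, myfs_get_block);
}
#endif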

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */

struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
	unsigned use_writepage;
};

/*
 * We have our BIO, so we can now mark the buffers clean.  Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct page *page, unsigned first_unmapped)
{
	unsigned buffer_counter = 0;
	struct buffer_head *bh, *head;
	if (!page_has_buffers(page))
		return;
	head = page_buffers(page);
	bh = head;

	do {
		if (buffer_counter++ == first_unmapped)
			break;
		clear_buffer_dirty(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * we cannot drop the bh if the page is not uptodate or a concurrent
	 * readpage would fail to serialize with the bh and it would read from
	 * disk before we reach the platter.
	 */
	if (buffer_heads_over_limit && PageUptodate(page))
		try_to_free_buffers(page);
}

/*
 * For situations where we want to clean all buffers attached to a page.
 * We don't need to calculate how many buffers are attached to the page,
 * we just need to specify a number larger than the maximum number of buffers.
 */
void clean_page_buffers(struct page *page)
{
	clean_buffers(page, ~0U);
}

static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
		      void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;
	int op_flags = wbc_to_write_flags(wbc);

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * __set_page_dirty_buffers -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_page().  If this address_space is also
		 * using mpage_readpages then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (buffer_new(&map_bh))
			clean_bdev_bh_alias(&map_bh);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	end_index = i_size >> PAGE_SHIFT;
	if (page->index >= end_index) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		unsigned offset = i_size & (PAGE_SIZE - 1);

		if (page->index > end_index || !offset)
			goto confused;
		zero_user_segment(page, offset, PAGE_SIZE);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);

alloc_new:
	if (bio == NULL) {
		if (first_unmapped == blocks_per_page) {
			if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
								page, wbc))
				goto out;
		}
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
		if (bio == NULL)
			goto confused;

		wbc_init_bio(wbc, bio);
		bio->bi_write_hint = inode->i_write_hint;
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	wbc_account_io(wbc, page, PAGE_SIZE);
	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
		goto alloc_new;
	}

	clean_buffers(page, first_unmapped);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);

	if (mpd->use_writepage) {
		ret = mapping->a_ops->writepage(page, wbc);
	} else {
		ret = -EAGAIN;
		goto out;
	}
	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made gets new I/O started against it.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);

	if (!get_block)
		ret = generic_writepages(mapping, wbc);
	else {
		struct mpage_data mpd = {
			.bio = NULL,
			.last_block_in_bio = 0,
			.get_block = get_block,
			.use_writepage = 1,
		};

		ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
		if (mpd.bio) {
			int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
					REQ_SYNC : 0);
			mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
		}
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);

int mpage_writepage(struct page *page, get_block_t get_block,
	struct writeback_control *wbc)
{
	struct mpage_data mpd = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = get_block,
		.use_writepage = 0,
	};
	int ret = __mpage_writepage(page, wbc, &mpd);
	if (mpd.bio) {
		int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
				REQ_SYNC : 0);
		mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
	}
	return ret;
}
EXPORT_SYMBOL(mpage_writepage);
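
/*
 * Illustrative sketch only (guarded out, not part of mpage.c): the matching
 * write-side wiring, again with hypothetical myfs_* names.  A filesystem
 * whose blocks can be mapped by a simple get_block usually hooks all four
 * mpage helpers into its address_space_operations like this.
 */
#if 0	/* example only, never compiled */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return mpage_writepage(page, myfs_get_block, wbc);
}

static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.readpage	= myfs_readpage,
	.readpages	= myfs_readpages,
	.writepage	= myfs_writepage,
	.writepages	= myfs_writepages,
	/* ... plus write_begin/write_end, bmap, etc. */
};
#endif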