/*
 * unix_io.c --- This is the Unix (well, really POSIX) implementation
 *	of the I/O manager.
 *
 * Implements a one-block write-through cache.
 *
 * Includes support for Windows NT under Cygwin.
 *
 * Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
 *	2002 by Theodore Ts'o.
 *
 * %Begin-Header%
 * This file may be redistributed under the terms of the GNU Library
 * General Public License, version 2.
 * %End-Header%
 */

#define _LARGEFILE_SOURCE
#define _LARGEFILE64_SOURCE
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif

#include "config.h"
#include <stdio.h>
#include <string.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#if HAVE_ERRNO_H
#include <errno.h>
#endif
#include <fcntl.h>
#include <time.h>
#ifdef __linux__
#include <sys/utsname.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
#if HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#if HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#if HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#if HAVE_LINUX_FALLOC_H
#include <linux/falloc.h>
#endif

#if defined(__linux__) && defined(_IO) && !defined(BLKROGET)
#define BLKROGET	_IO(0x12, 94) /* Get read-only status (0 = read_write). */
#endif

#undef ALIGN_DEBUG

#include "ext2_fs.h"
#include "ext2fs.h"

/*
 * For checking structure magic numbers...
 */

#define EXT2_CHECK_MAGIC(struct, code) \
	  if ((struct)->magic != (code)) return (code)
struct unix_cache {
	char		*buf;
	unsigned long long block;
	int		access_time;
	unsigned	dirty:1;
	unsigned	in_use:1;
};

#define CACHE_SIZE 8
#define WRITE_DIRECT_SIZE 4	/* Must be smaller than CACHE_SIZE */
#define READ_DIRECT_SIZE 4	/* Should be smaller than CACHE_SIZE */
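
/*
 * The cache consists of CACHE_SIZE single-block entries.  Requests
 * larger than WRITE_DIRECT_SIZE blocks, as well as byte-sized
 * requests (negative counts), bypass the cache entirely and go
 * through the raw I/O routines below; READ_DIRECT_SIZE bounds the
 * scratch array used when a cached read refills several entries at
 * once.
 */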

struct unix_private_data {
	int	magic;
	int	dev;
	int	flags;
	int	align;
	int	access_time;
	ext2_loff_t offset;
	struct unix_cache cache[CACHE_SIZE];
	void	*bounce;
	struct struct_io_stats io_stats;
};

#define IS_ALIGNED(n, align) ((((unsigned long) n) & \
			       ((unsigned long) ((align)-1))) == 0)
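
/*
 * Note that IS_ALIGNED assumes a power-of-two alignment: it simply
 * tests whether the low bits of n are clear, e.g. IS_ALIGNED(0x2200, 512)
 * is true while IS_ALIGNED(0x2201, 512) is false.  This is what the
 * O_DIRECT buffer and size checks below rely on.
 */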

static errcode_t unix_open(const char *name, int flags, io_channel *channel);
static errcode_t unix_close(io_channel channel);
static errcode_t unix_set_blksize(io_channel channel, int blksize);
static errcode_t unix_read_blk(io_channel channel, unsigned long block,
			       int count, void *data);
static errcode_t unix_write_blk(io_channel channel, unsigned long block,
				int count, const void *data);
static errcode_t unix_flush(io_channel channel);
static errcode_t unix_write_byte(io_channel channel, unsigned long offset,
				 int size, const void *data);
static errcode_t unix_set_option(io_channel channel, const char *option,
				 const char *arg);
static errcode_t unix_get_stats(io_channel channel, io_stats *stats);
static void reuse_cache(io_channel channel, struct unix_private_data *data,
			struct unix_cache *cache, unsigned long long block);
static errcode_t unix_read_blk64(io_channel channel, unsigned long long block,
				 int count, void *data);
static errcode_t unix_write_blk64(io_channel channel, unsigned long long block,
				  int count, const void *data);
static errcode_t unix_discard(io_channel channel, unsigned long long block,
			      unsigned long long count);

static struct struct_io_manager struct_unix_manager = {
	EXT2_ET_MAGIC_IO_MANAGER,
	"Unix I/O Manager",
	unix_open,
	unix_close,
	unix_set_blksize,
	unix_read_blk,
	unix_write_blk,
	unix_flush,
	unix_write_byte,
	unix_set_option,
	unix_get_stats,
	unix_read_blk64,
	unix_write_blk64,
	unix_discard,
};

io_manager unix_io_manager = &struct_unix_manager;
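
/*
 * Callers normally reach this I/O manager through the higher-level
 * ext2fs interfaces rather than by calling the handlers above
 * directly; a rough sketch (assuming the usual ext2fs.h entry points,
 * with the image path, block number and buffer purely illustrative)
 * looks like:
 *
 *	io_channel chan;
 *	errcode_t err;
 *
 *	err = unix_io_manager->open("/tmp/fs.img", IO_FLAG_RW, &chan);
 *	if (!err) {
 *		err = io_channel_read_blk64(chan, blkno, 1, buf);
 *		...
 *		io_channel_close(chan);
 *	}
 */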

static errcode_t unix_get_stats(io_channel channel, io_stats *stats)
{
	errcode_t	retval = 0;

	struct unix_private_data *data;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (stats)
		*stats = &data->io_stats;

	return retval;
}

/*
 * Here are the raw I/O functions
 */
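/*
 * Note the count convention used throughout: a positive count is a
 * number of block_size blocks, while a negative count means "-count
 * bytes" (used elsewhere in ext2fs for odd-sized objects such as the
 * superblock).  Both raw_read_blk() and raw_write_blk() honor this.
 */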
static errcode_t raw_read_blk(io_channel channel,
			      struct unix_private_data *data,
			      unsigned long long block,
			      int count, void *bufv)
{
	errcode_t	retval;
	ssize_t		size;
	ext2_loff_t	location;
	int		actual = 0;
	unsigned char	*buf = bufv;

	size = (count < 0) ? -count : count * channel->block_size;
	data->io_stats.bytes_read += size;
	location = ((ext2_loff_t) block * channel->block_size) + data->offset;
	if (ext2fs_llseek(data->dev, location, SEEK_SET) != location) {
		retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
		goto error_out;
	}
	if ((channel->align == 0) ||
	    (IS_ALIGNED(buf, channel->align) &&
	     IS_ALIGNED(size, channel->align))) {
		actual = read(data->dev, buf, size);
		if (actual != size) {
		short_read:
			if (actual < 0)
				actual = 0;
			retval = EXT2_ET_SHORT_READ;
			goto error_out;
		}
		return 0;
	}

#ifdef ALIGN_DEBUG
	printf("raw_read_blk: O_DIRECT fallback: %p %lu\n", buf,
	       (unsigned long) size);
#endif

	/*
	 * The buffer or size which we're trying to read isn't aligned
	 * to the O_DIRECT rules, so we need to do this the hard way...
	 */
	while (size > 0) {
		actual = read(data->dev, data->bounce, channel->block_size);
		if (actual != channel->block_size)
			goto short_read;
		actual = size;
		if (size > channel->block_size)
			actual = channel->block_size;
		memcpy(buf, data->bounce, actual);
		size -= actual;
		buf += actual;
	}
	return 0;

error_out:
	memset((char *) buf+actual, 0, size-actual);
	if (channel->read_error)
		retval = (channel->read_error)(channel, block, count, buf,
					       size, actual, retval);
	return retval;
}

static errcode_t raw_write_blk(io_channel channel,
			       struct unix_private_data *data,
			       unsigned long long block,
			       int count, const void *bufv)
{
	ssize_t		size;
	ext2_loff_t	location;
	int		actual = 0;
	errcode_t	retval;
	const unsigned char *buf = bufv;

	if (count == 1)
		size = channel->block_size;
	else {
		if (count < 0)
			size = -count;
		else
			size = count * channel->block_size;
	}
	data->io_stats.bytes_written += size;

	location = ((ext2_loff_t) block * channel->block_size) + data->offset;
	if (ext2fs_llseek(data->dev, location, SEEK_SET) != location) {
		retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
		goto error_out;
	}

	if ((channel->align == 0) ||
	    (IS_ALIGNED(buf, channel->align) &&
	     IS_ALIGNED(size, channel->align))) {
		actual = write(data->dev, buf, size);
		if (actual != size) {
		short_write:
			retval = EXT2_ET_SHORT_WRITE;
			goto error_out;
		}
		return 0;
	}

#ifdef ALIGN_DEBUG
	printf("raw_write_blk: O_DIRECT fallback: %p %lu\n", buf,
	       (unsigned long) size);
#endif
	/*
	 * The buffer or size which we're trying to write isn't aligned
	 * to the O_DIRECT rules, so we need to do this the hard way...
	 */
	while (size > 0) {
		if (size < channel->block_size) {
			actual = read(data->dev, data->bounce,
				      channel->block_size);
			if (actual != channel->block_size) {
				retval = EXT2_ET_SHORT_READ;
				goto error_out;
			}
		}
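		/*
		 * For a partial trailing block we just pre-read the
		 * on-disk contents into the bounce buffer above, so
		 * that the full-block write below only changes the
		 * bytes supplied by the caller.
		 */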
		actual = size;
		if (size > channel->block_size)
			actual = channel->block_size;
		memcpy(data->bounce, buf, actual);
		actual = write(data->dev, data->bounce, channel->block_size);
		if (actual != channel->block_size)
			goto short_write;
		size -= actual;
		buf += actual;
	}
	return 0;

error_out:
	if (channel->write_error)
		retval = (channel->write_error)(channel, block, count, buf,
						size, actual, retval);
	return retval;
}


/*
 * Here we implement the cache functions
 */
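
/*
 * To summarize the design: each cache entry holds exactly one block
 * and replacement is least-recently-used, via the monotonically
 * increasing access_time counter.  Dirty entries are written back
 * when they are evicted (reuse_cache()) or when the cache is flushed
 * (flush_cached_blocks()), unless the channel is in write-through
 * mode (CHANNEL_FLAGS_WRITETHROUGH), in which case unix_write_blk64()
 * pushes the data straight to disk and never marks entries dirty.
 */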

/* Allocate the cache buffers */
static errcode_t alloc_cache(io_channel channel,
			     struct unix_private_data *data)
{
	errcode_t		retval;
	struct unix_cache	*cache;
	int			i;

	data->access_time = 0;
	for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
		cache->block = 0;
		cache->access_time = 0;
		cache->dirty = 0;
		cache->in_use = 0;
		if (cache->buf)
			ext2fs_free_mem(&cache->buf);
		retval = io_channel_alloc_buf(channel, 0, &cache->buf);
		if (retval)
			return retval;
	}
	if (channel->align) {
		if (data->bounce)
			ext2fs_free_mem(&data->bounce);
		retval = io_channel_alloc_buf(channel, 0, &data->bounce);
	}
	return retval;
}

/* Free the cache buffers */
static void free_cache(struct unix_private_data *data)
{
	struct unix_cache	*cache;
	int			i;

	data->access_time = 0;
	for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
		cache->block = 0;
		cache->access_time = 0;
		cache->dirty = 0;
		cache->in_use = 0;
		if (cache->buf)
			ext2fs_free_mem(&cache->buf);
	}
	if (data->bounce)
		ext2fs_free_mem(&data->bounce);
}

#ifndef NO_IO_CACHE
/*
 * Try to find a block in the cache.  If the block is not found, and
 * eldest is a non-zero pointer, then fill in eldest with the cache
 * entry that should be reused.
 */
static struct unix_cache *find_cached_block(struct unix_private_data *data,
					    unsigned long long block,
					    struct unix_cache **eldest)
{
	struct unix_cache	*cache, *unused_cache, *oldest_cache;
	int			i;

	unused_cache = oldest_cache = 0;
	for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
		if (!cache->in_use) {
			if (!unused_cache)
				unused_cache = cache;
			continue;
		}
		if (cache->block == block) {
			cache->access_time = ++data->access_time;
			return cache;
		}
		if (!oldest_cache ||
		    (cache->access_time < oldest_cache->access_time))
			oldest_cache = cache;
	}
	if (eldest)
		*eldest = (unused_cache) ? unused_cache : oldest_cache;
	return 0;
}

/*
 * Reuse a particular cache entry for another block.
 */
static void reuse_cache(io_channel channel, struct unix_private_data *data,
			struct unix_cache *cache, unsigned long long block)
{
	if (cache->dirty && cache->in_use)
		raw_write_blk(channel, data, cache->block, 1, cache->buf);

	cache->in_use = 1;
	cache->dirty = 0;
	cache->block = block;
	cache->access_time = ++data->access_time;
}

/*
 * Flush all of the blocks in the cache
 */
static errcode_t flush_cached_blocks(io_channel channel,
				     struct unix_private_data *data,
				     int invalidate)
{
	struct unix_cache	*cache;
	errcode_t		retval, retval2;
	int			i;

	retval2 = 0;
	for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
		if (!cache->in_use)
			continue;

		if (invalidate)
			cache->in_use = 0;

		if (!cache->dirty)
			continue;

		retval = raw_write_blk(channel, data,
				       cache->block, 1, cache->buf);
		if (retval)
			retval2 = retval;
		else
			cache->dirty = 0;
	}
	return retval2;
}
#endif /* NO_IO_CACHE */

#ifdef __linux__
#ifndef BLKDISCARDZEROES
#define BLKDISCARDZEROES _IO(0x12,124)
#endif
#endif

int ext2fs_open_file(const char *pathname, int flags, mode_t mode)
{
	if (mode)
#if defined(HAVE_OPEN64) && !defined(__OSX_AVAILABLE_BUT_DEPRECATED)
		return open64(pathname, flags, mode);
	else
		return open64(pathname, flags);
#else
		return open(pathname, flags, mode);
	else
		return open(pathname, flags);
#endif
}

int ext2fs_stat(const char *path, ext2fs_struct_stat *buf)
{
#if defined(HAVE_FSTAT64) && !defined(__OSX_AVAILABLE_BUT_DEPRECATED)
	return stat64(path, buf);
#else
	return stat(path, buf);
#endif
}

int ext2fs_fstat(int fd, ext2fs_struct_stat *buf)
{
#if defined(HAVE_FSTAT64) && !defined(__OSX_AVAILABLE_BUT_DEPRECATED)
	return fstat64(fd, buf);
#else
	return fstat(fd, buf);
#endif
}

static errcode_t unix_open(const char *name, int flags, io_channel *channel)
{
	io_channel	io = NULL;
	struct unix_private_data *data = NULL;
	errcode_t	retval;
	int		open_flags;
	int		f_nocache = 0;
	ext2fs_struct_stat st;
#ifdef __linux__
	struct		utsname ut;
#endif

	if (name == 0)
		return EXT2_ET_BAD_DEVICE_NAME;
	retval = ext2fs_get_mem(sizeof(struct struct_io_channel), &io);
	if (retval)
		goto cleanup;
	memset(io, 0, sizeof(struct struct_io_channel));
	io->magic = EXT2_ET_MAGIC_IO_CHANNEL;
	retval = ext2fs_get_mem(sizeof(struct unix_private_data), &data);
	if (retval)
		goto cleanup;

	io->manager = unix_io_manager;
	retval = ext2fs_get_mem(strlen(name)+1, &io->name);
	if (retval)
		goto cleanup;

	strcpy(io->name, name);
	io->private_data = data;
	io->block_size = 1024;
	io->read_error = 0;
	io->write_error = 0;
	io->refcount = 1;

	memset(data, 0, sizeof(struct unix_private_data));
	data->magic = EXT2_ET_MAGIC_UNIX_IO_CHANNEL;
	data->io_stats.num_fields = 2;
	data->dev = -1;

	open_flags = (flags & IO_FLAG_RW) ? O_RDWR : O_RDONLY;
	if (flags & IO_FLAG_EXCLUSIVE)
		open_flags |= O_EXCL;
#if defined(O_DIRECT)
	if (flags & IO_FLAG_DIRECT_IO) {
		open_flags |= O_DIRECT;
		io->align = ext2fs_get_dio_alignment(data->dev);
	}
#elif defined(F_NOCACHE)
	if (flags & IO_FLAG_DIRECT_IO) {
		f_nocache = F_NOCACHE;
		io->align = 4096;
	}
#endif
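	/*
	 * Two ways of getting direct I/O, depending on the platform:
	 * with O_DIRECT the flag is passed at open time and the
	 * required buffer alignment is queried from the kernel, while
	 * with only F_NOCACHE available (Mac OS X, for instance) the
	 * cache is disabled via fcntl() after the open below and a
	 * 4096-byte alignment is simply assumed.
	 */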
	data->flags = flags;

	data->dev = ext2fs_open_file(io->name, open_flags, 0);
	if (data->dev < 0) {
		retval = errno;
		goto cleanup;
	}
	if (f_nocache) {
		if (fcntl(data->dev, f_nocache, 1) < 0) {
			retval = errno;
			goto cleanup;
		}
	}

	/*
	 * If the device is really a block device, then set the
	 * appropriate flag; otherwise we can set the DISCARD_ZEROES
	 * flag, because we are going to use punch hole instead of
	 * discard, and if that succeeds, subsequent reads from the
	 * sparse area return zeroes.
	 */
	if (ext2fs_stat(io->name, &st) == 0) {
		if (S_ISBLK(st.st_mode))
			io->flags |= CHANNEL_FLAGS_BLOCK_DEVICE;
		else
			io->flags |= CHANNEL_FLAGS_DISCARD_ZEROES;
	}

#ifdef BLKDISCARDZEROES
	{
		int zeroes = 0;
		if (ioctl(data->dev, BLKDISCARDZEROES, &zeroes) == 0 &&
		    zeroes)
			io->flags |= CHANNEL_FLAGS_DISCARD_ZEROES;
	}
#endif

#if defined(__CYGWIN__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
	/*
	 * Some operating systems require that the buffers be aligned,
	 * regardless of O_DIRECT
	 */
	if (!io->align)
		io->align = 512;
#endif


	if ((retval = alloc_cache(io, data)))
		goto cleanup;

#ifdef BLKROGET
	if (flags & IO_FLAG_RW) {
		int error;
		int readonly = 0;

		/* Is the block device actually writable? */
		error = ioctl(data->dev, BLKROGET, &readonly);
		if (!error && readonly) {
			retval = EPERM;
			goto cleanup;
		}
	}
#endif

#ifdef __linux__
#undef RLIM_INFINITY
#if (defined(__alpha__) || ((defined(__sparc__) || defined(__mips__)) && (SIZEOF_LONG == 4)))
#define RLIM_INFINITY	((unsigned long)(~0UL>>1))
#else
#define RLIM_INFINITY	(~0UL)
#endif
	/*
	 * Work around a bug in 2.4.10-2.4.18 kernels where writes to
	 * block devices are wrongly getting hit by the filesize
	 * limit.  This workaround isn't perfect, since it won't work
	 * if glibc wasn't built against 2.2 header files.  (Sigh.)
	 *
	 */
	if ((flags & IO_FLAG_RW) &&
	    (uname(&ut) == 0) &&
	    ((ut.release[0] == '2') && (ut.release[1] == '.') &&
	     (ut.release[2] == '4') && (ut.release[3] == '.') &&
	     (ut.release[4] == '1') && (ut.release[5] >= '0') &&
	     (ut.release[5] < '8')) &&
	    (ext2fs_stat(io->name, &st) == 0) &&
	    (S_ISBLK(st.st_mode))) {
		struct rlimit	rlim;

		rlim.rlim_cur = rlim.rlim_max = (unsigned long) RLIM_INFINITY;
		setrlimit(RLIMIT_FSIZE, &rlim);
		getrlimit(RLIMIT_FSIZE, &rlim);
		if (((unsigned long) rlim.rlim_cur) <
		    ((unsigned long) rlim.rlim_max)) {
			rlim.rlim_cur = rlim.rlim_max;
			setrlimit(RLIMIT_FSIZE, &rlim);
		}
	}
#endif
	*channel = io;
	return 0;

cleanup:
	if (data) {
		if (data->dev >= 0)
			close(data->dev);
		free_cache(data);
		ext2fs_free_mem(&data);
	}
	if (io) {
		if (io->name) {
			ext2fs_free_mem(&io->name);
		}
		ext2fs_free_mem(&io);
	}
	return retval;
}

static errcode_t unix_close(io_channel channel)
{
	struct unix_private_data *data;
	errcode_t	retval = 0;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (--channel->refcount > 0)
		return 0;

#ifndef NO_IO_CACHE
	retval = flush_cached_blocks(channel, data, 0);
#endif

	if (close(data->dev) < 0)
		retval = errno;
	free_cache(data);

	ext2fs_free_mem(&channel->private_data);
	if (channel->name)
		ext2fs_free_mem(&channel->name);
	ext2fs_free_mem(&channel);
	return retval;
}

static errcode_t unix_set_blksize(io_channel channel, int blksize)
{
	struct unix_private_data *data;
	errcode_t	retval;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (channel->block_size != blksize) {
#ifndef NO_IO_CACHE
		if ((retval = flush_cached_blocks(channel, data, 0)))
			return retval;
#endif

		channel->block_size = blksize;
		free_cache(data);
		if ((retval = alloc_cache(channel, data)))
			return retval;
	}
	return 0;
}

static errcode_t unix_read_blk64(io_channel channel, unsigned long long block,
				 int count, void *buf)
{
	struct unix_private_data *data;
	struct unix_cache *cache, *reuse[READ_DIRECT_SIZE];
	errcode_t	retval;
	char		*cp;
	int		i, j;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

#ifdef NO_IO_CACHE
	return raw_read_blk(channel, data, block, count, buf);
#else
	/*
	 * If we're doing an odd-sized read or a very large read,
	 * flush out the cache and then do a direct read.
	 */
	if (count < 0 || count > WRITE_DIRECT_SIZE) {
		if ((retval = flush_cached_blocks(channel, data, 0)))
			return retval;
		return raw_read_blk(channel, data, block, count, buf);
	}

	cp = buf;
	while (count > 0) {
		/* If it's in the cache, use it! */
		if ((cache = find_cached_block(data, block, &reuse[0]))) {
#ifdef DEBUG
			printf("Using cached block %llu\n", block);
#endif
			memcpy(cp, cache->buf, channel->block_size);
			count--;
			block++;
			cp += channel->block_size;
			continue;
		}
		if (count == 1) {
			/*
			 * Special case where we read directly into the
			 * cache buffer; important in the O_DIRECT case
			 */
			cache = reuse[0];
			reuse_cache(channel, data, cache, block);
			if ((retval = raw_read_blk(channel, data, block, 1,
						   cache->buf))) {
				cache->in_use = 0;
				return retval;
			}
			memcpy(cp, cache->buf, channel->block_size);
			return 0;
		}

		/*
		 * Find the number of uncached blocks so we can do a
		 * single read request
		 */
		for (i=1; i < count; i++)
			if (find_cached_block(data, block+i, &reuse[i]))
				break;
#ifdef DEBUG
		printf("Reading %d blocks starting at %llu\n", i, block);
#endif
		if ((retval = raw_read_blk(channel, data, block, i, cp)))
			return retval;

		/* Save the results in the cache */
		for (j=0; j < i; j++) {
			count--;
			cache = reuse[j];
			reuse_cache(channel, data, cache, block++);
			memcpy(cache->buf, cp, channel->block_size);
			cp += channel->block_size;
		}
	}
	return 0;
#endif /* NO_IO_CACHE */
}

static errcode_t unix_read_blk(io_channel channel, unsigned long block,
			       int count, void *buf)
{
	return unix_read_blk64(channel, block, count, buf);
}

static errcode_t unix_write_blk64(io_channel channel, unsigned long long block,
				  int count, const void *buf)
{
	struct unix_private_data *data;
	struct unix_cache *cache, *reuse;
	errcode_t	retval = 0;
	const char	*cp;
	int		writethrough;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

#ifdef NO_IO_CACHE
	return raw_write_blk(channel, data, block, count, buf);
#else
	/*
	 * If we're doing an odd-sized write or a very large write,
	 * flush out the cache completely and then do a direct write.
	 */
	if (count < 0 || count > WRITE_DIRECT_SIZE) {
		if ((retval = flush_cached_blocks(channel, data, 1)))
			return retval;
		return raw_write_blk(channel, data, block, count, buf);
	}

	/*
	 * For a moderate-sized multi-block write, first force a write
	 * if we're in write-through cache mode, and then fill the
	 * cache with the blocks.
	 */
	writethrough = channel->flags & CHANNEL_FLAGS_WRITETHROUGH;
	if (writethrough)
		retval = raw_write_blk(channel, data, block, count, buf);

	cp = buf;
	while (count > 0) {
		cache = find_cached_block(data, block, &reuse);
		if (!cache) {
			cache = reuse;
			reuse_cache(channel, data, cache, block);
		}
		memcpy(cache->buf, cp, channel->block_size);
		cache->dirty = !writethrough;
		count--;
		block++;
		cp += channel->block_size;
	}
	return retval;
#endif /* NO_IO_CACHE */
}

static errcode_t unix_write_blk(io_channel channel, unsigned long block,
				int count, const void *buf)
{
	return unix_write_blk64(channel, block, count, buf);
}

static errcode_t unix_write_byte(io_channel channel, unsigned long offset,
				 int size, const void *buf)
{
	struct unix_private_data *data;
	errcode_t	retval = 0;
	ssize_t		actual;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (channel->align != 0) {
#ifdef ALIGN_DEBUG
		printf("unix_write_byte: O_DIRECT fallback\n");
#endif
		return EXT2_ET_UNIMPLEMENTED;
	}

#ifndef NO_IO_CACHE
	/*
	 * Flush out the cache completely
	 */
	if ((retval = flush_cached_blocks(channel, data, 1)))
		return retval;
#endif

	if (lseek(data->dev, offset + data->offset, SEEK_SET) < 0)
		return errno;

	actual = write(data->dev, buf, size);
	if (actual != size)
		return EXT2_ET_SHORT_WRITE;

	return 0;
}

/*
 * Flush data buffers to disk.
 */
static errcode_t unix_flush(io_channel channel)
{
	struct unix_private_data *data;
	errcode_t retval = 0;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

#ifndef NO_IO_CACHE
	retval = flush_cached_blocks(channel, data, 0);
#endif
	fsync(data->dev);
	return retval;
}

static errcode_t unix_set_option(io_channel channel, const char *option,
				 const char *arg)
{
	struct unix_private_data *data;
	unsigned long long tmp;
	char *end;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (!strcmp(option, "offset")) {
		if (!arg)
			return EXT2_ET_INVALID_ARGUMENT;

		tmp = strtoull(arg, &end, 0);
		if (*end)
			return EXT2_ET_INVALID_ARGUMENT;
		data->offset = tmp;
		if (data->offset < 0)
			return EXT2_ET_INVALID_ARGUMENT;
		return 0;
	}
	return EXT2_ET_INVALID_ARGUMENT;
}
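
/*
 * The "offset" option above shifts every read and write by a byte
 * offset into the underlying file, which is how filesystem images
 * embedded at a non-zero position inside a larger file are handled.
 * Callers typically reach this through io_channel_set_options(); a
 * hypothetical example would be io_channel_set_options(chan,
 * "offset=1048576") to skip a 1 MiB header.
 */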

#if defined(__linux__) && !defined(BLKDISCARD)
#define BLKDISCARD	_IO(0x12,119)
#endif

static errcode_t unix_discard(io_channel channel, unsigned long long block,
			      unsigned long long count)
{
	struct unix_private_data *data;
	int		ret;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (channel->flags & CHANNEL_FLAGS_BLOCK_DEVICE) {
#ifdef BLKDISCARD
		__uint64_t range[2];

		range[0] = (__uint64_t)(block) * channel->block_size;
		range[1] = (__uint64_t)(count) * channel->block_size;

		ret = ioctl(data->dev, BLKDISCARD, &range);
#else
		goto unimplemented;
#endif
	} else {
#if defined(HAVE_FALLOCATE) && defined(FALLOC_FL_PUNCH_HOLE)
		/*
		 * If we are not on a block device, try to use punch
		 * hole to reclaim free space.
		 */
		ret = fallocate(data->dev,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				(off_t)(block) * channel->block_size,
				(off_t)(count) * channel->block_size);
#else
		goto unimplemented;
#endif
	}
	if (ret < 0) {
		if (errno == EOPNOTSUPP)
			goto unimplemented;
		return errno;
	}
	return 0;
unimplemented:
	return EXT2_ET_UNIMPLEMENTED;
}