1/*
2 * inode.c
3 *
4 * PURPOSE
5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
6 *
7 * COPYRIGHT
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
12 *
13 * (C) 1998 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
16 *
17 * HISTORY
18 *
19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map
23 * and udf_read_inode
24 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
25 * block boundaries (which is not actually allowed)
26 * 12/20/98 added support for strategy 4096
27 * 03/07/99 rewrote udf_block_map (again)
28 * New funcs, inode_bmap, udf_next_aext
29 * 04/19/99 Support for writing device EA's for major/minor #
30 */
31
32#include "udfdecl.h"
33#include <linux/mm.h>
34#include <linux/module.h>
35#include <linux/pagemap.h>
36#include <linux/writeback.h>
37#include <linux/slab.h>
38#include <linux/crc-itu-t.h>
39#include <linux/mpage.h>
40#include <linux/uio.h>
41#include <linux/bio.h>
42
43#include "udf_i.h"
44#include "udf_sb.h"
45
46#define EXTENT_MERGE_SIZE 5
47
48#define FE_MAPPED_PERMS (FE_PERM_U_READ | FE_PERM_U_WRITE | FE_PERM_U_EXEC | \
49 FE_PERM_G_READ | FE_PERM_G_WRITE | FE_PERM_G_EXEC | \
50 FE_PERM_O_READ | FE_PERM_O_WRITE | FE_PERM_O_EXEC)
51
52#define FE_DELETE_PERMS (FE_PERM_U_DELETE | FE_PERM_G_DELETE | \
53 FE_PERM_O_DELETE)
54
55static umode_t udf_convert_permissions(struct fileEntry *);
56static int udf_update_inode(struct inode *, int);
57static int udf_sync_inode(struct inode *inode);
58static int udf_alloc_i_data(struct inode *inode, size_t size);
59static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
60static int udf_insert_aext(struct inode *, struct extent_position,
61 struct kernel_lb_addr, uint32_t);
62static void udf_split_extents(struct inode *, int *, int, udf_pblk_t,
63 struct kernel_long_ad *, int *);
64static void udf_prealloc_extents(struct inode *, int, int,
65 struct kernel_long_ad *, int *);
66static void udf_merge_extents(struct inode *, struct kernel_long_ad *, int *);
67static int udf_update_extents(struct inode *, struct kernel_long_ad *, int,
68 int, struct extent_position *);
69static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
70
71static void __udf_clear_extent_cache(struct inode *inode)
72{
73 struct udf_inode_info *iinfo = UDF_I(inode);
74
75 if (iinfo->cached_extent.lstart != -1) {
76 brelse(iinfo->cached_extent.epos.bh);
77 iinfo->cached_extent.lstart = -1;
78 }
79}
80
81/* Invalidate extent cache */
82static void udf_clear_extent_cache(struct inode *inode)
83{
84 struct udf_inode_info *iinfo = UDF_I(inode);
85
86 spin_lock(&iinfo->i_extent_cache_lock);
87 __udf_clear_extent_cache(inode);
88 spin_unlock(&iinfo->i_extent_cache_lock);
89}
90
91/* Return contents of extent cache */
92static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
93 loff_t *lbcount, struct extent_position *pos)
94{
95 struct udf_inode_info *iinfo = UDF_I(inode);
96 int ret = 0;
97
98 spin_lock(&iinfo->i_extent_cache_lock);
99 if ((iinfo->cached_extent.lstart <= bcount) &&
100 (iinfo->cached_extent.lstart != -1)) {
101 /* Cache hit */
102 *lbcount = iinfo->cached_extent.lstart;
103 memcpy(pos, &iinfo->cached_extent.epos,
104 sizeof(struct extent_position));
105 if (pos->bh)
106 get_bh(pos->bh);
107 ret = 1;
108 }
109 spin_unlock(&iinfo->i_extent_cache_lock);
110 return ret;
111}
112
113/* Add extent to extent cache */
114static void udf_update_extent_cache(struct inode *inode, loff_t estart,
115 struct extent_position *pos)
116{
117 struct udf_inode_info *iinfo = UDF_I(inode);
118
119 spin_lock(&iinfo->i_extent_cache_lock);
120 /* Invalidate previously cached extent */
121 __udf_clear_extent_cache(inode);
122 if (pos->bh)
123 get_bh(pos->bh);
124 memcpy(&iinfo->cached_extent.epos, pos, sizeof(*pos));
125 iinfo->cached_extent.lstart = estart;
126 switch (iinfo->i_alloc_type) {
127 case ICBTAG_FLAG_AD_SHORT:
128 iinfo->cached_extent.epos.offset -= sizeof(struct short_ad);
129 break;
130 case ICBTAG_FLAG_AD_LONG:
131 iinfo->cached_extent.epos.offset -= sizeof(struct long_ad);
132 break;
133 }
134 spin_unlock(&iinfo->i_extent_cache_lock);
135}
136
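/*
 * Final teardown of an inode. Unlinked inodes are truncated to zero size,
 * updated on disk one last time and then released via udf_free_inode(); in
 * all cases the page cache and the in-ICB data buffer are freed.
 */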
137void udf_evict_inode(struct inode *inode)
138{
139 struct udf_inode_info *iinfo = UDF_I(inode);
140 int want_delete = 0;
141
142 if (!is_bad_inode(inode)) {
143 if (!inode->i_nlink) {
144 want_delete = 1;
145 udf_setsize(inode, 0);
146 udf_update_inode(inode, IS_SYNC(inode));
147 }
148 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
149 inode->i_size != iinfo->i_lenExtents) {
150 udf_warn(inode->i_sb,
151 "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
152 inode->i_ino, inode->i_mode,
153 (unsigned long long)inode->i_size,
154 (unsigned long long)iinfo->i_lenExtents);
155 }
156 }
157 truncate_inode_pages_final(&inode->i_data);
158 invalidate_inode_buffers(inode);
159 clear_inode(inode);
160 kfree(iinfo->i_ext.i_data);
161 iinfo->i_ext.i_data = NULL;
162 udf_clear_extent_cache(inode);
163 if (want_delete) {
164 udf_free_inode(inode);
165 }
166}
167
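/*
 * Undo the effects of a failed or short write: drop page cache beyond i_size
 * and trim any extents that were instantiated past the end of the file.
 */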
168static void udf_write_failed(struct address_space *mapping, loff_t to)
169{
170 struct inode *inode = mapping->host;
171 struct udf_inode_info *iinfo = UDF_I(inode);
172 loff_t isize = inode->i_size;
173
174 if (to > isize) {
175 truncate_pagecache(inode, isize);
176 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
177 down_write(&iinfo->i_data_sem);
178 udf_clear_extent_cache(inode);
179 udf_truncate_extents(inode);
180 up_write(&iinfo->i_data_sem);
181 }
182 }
183}
184
185static int udf_writepage(struct page *page, struct writeback_control *wbc)
186{
187 return block_write_full_page(page, udf_get_block, wbc);
188}
189
190static int udf_writepages(struct address_space *mapping,
191 struct writeback_control *wbc)
192{
193 return mpage_writepages(mapping, wbc, udf_get_block);
194}
195
196static int udf_readpage(struct file *file, struct page *page)
197{
198 return mpage_readpage(page, udf_get_block);
199}
200
201static int udf_readpages(struct file *file, struct address_space *mapping,
202 struct list_head *pages, unsigned nr_pages)
203{
204 return mpage_readpages(mapping, pages, nr_pages, udf_get_block);
205}
206
207static int udf_write_begin(struct file *file, struct address_space *mapping,
208 loff_t pos, unsigned len, unsigned flags,
209 struct page **pagep, void **fsdata)
210{
211 int ret;
212
213 ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block);
214 if (unlikely(ret))
215 udf_write_failed(mapping, pos + len);
216 return ret;
217}
218
219static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
220{
221 struct file *file = iocb->ki_filp;
222 struct address_space *mapping = file->f_mapping;
223 struct inode *inode = mapping->host;
224 size_t count = iov_iter_count(iter);
225 ssize_t ret;
226
227 ret = blockdev_direct_IO(iocb, inode, iter, udf_get_block);
228 if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
229 udf_write_failed(mapping, iocb->ki_pos + count);
230 return ret;
231}
232
233static sector_t udf_bmap(struct address_space *mapping, sector_t block)
234{
235 return generic_block_bmap(mapping, block, udf_get_block);
236}
237
238const struct address_space_operations udf_aops = {
239 .readpage = udf_readpage,
240 .readpages = udf_readpages,
241 .writepage = udf_writepage,
242 .writepages = udf_writepages,
243 .write_begin = udf_write_begin,
244 .write_end = generic_write_end,
245 .direct_IO = udf_direct_IO,
246 .bmap = udf_bmap,
247};
248
249/*
250 * Expand a file stored in the ICB into a normal one-block file
251 *
252 * This function requires i_data_sem for writing and releases it.
253 * The caller must hold i_mutex.
254 */
255int udf_expand_file_adinicb(struct inode *inode)
256{
257 struct page *page;
258 char *kaddr;
259 struct udf_inode_info *iinfo = UDF_I(inode);
260 int err;
261
262 WARN_ON_ONCE(!inode_is_locked(inode));
263 if (!iinfo->i_lenAlloc) {
264 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
265 iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
266 else
267 iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
268 /* from now on we have normal address_space methods */
269 inode->i_data.a_ops = &udf_aops;
270 up_write(&iinfo->i_data_sem);
271 mark_inode_dirty(inode);
272 return 0;
273 }
274 /*
275 * Release i_data_sem so that we can lock a page - page lock ranks
276 * above i_data_sem. i_mutex still protects us against file changes.
277 */
278 up_write(&iinfo->i_data_sem);
279
280 page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
281 if (!page)
282 return -ENOMEM;
283
284 if (!PageUptodate(page)) {
285 kaddr = kmap_atomic(page);
286 memset(kaddr + iinfo->i_lenAlloc, 0x00,
287 PAGE_SIZE - iinfo->i_lenAlloc);
288 memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
289 iinfo->i_lenAlloc);
290 flush_dcache_page(page);
291 SetPageUptodate(page);
292 kunmap_atomic(kaddr);
293 }
294 down_write(&iinfo->i_data_sem);
295 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
296 iinfo->i_lenAlloc);
297 iinfo->i_lenAlloc = 0;
298 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
299 iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
300 else
301 iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
302 /* from now on we have normal address_space methods */
303 inode->i_data.a_ops = &udf_aops;
304 set_page_dirty(page);
305 unlock_page(page);
306 up_write(&iinfo->i_data_sem);
307 err = filemap_fdatawrite(inode->i_mapping);
308 if (err) {
309 /* Restore everything back so that we don't lose data... */
310 lock_page(page);
311 down_write(&iinfo->i_data_sem);
312 kaddr = kmap_atomic(page);
313 memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr,
314 inode->i_size);
315 kunmap_atomic(kaddr);
316 unlock_page(page);
317 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
318 inode->i_data.a_ops = &udf_adinicb_aops;
319 iinfo->i_lenAlloc = inode->i_size;
320 up_write(&iinfo->i_data_sem);
321 }
322 put_page(page);
323 mark_inode_dirty(inode);
324
325 return err;
326}
327
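/*
 * Convert a directory whose entries are stored inside the ICB into a normal
 * one-block directory: allocate a data block, copy the file identifiers into
 * it and record it as the directory's single extent.  Returns the
 * buffer_head of the new block, or NULL on failure.
 */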
328struct buffer_head *udf_expand_dir_adinicb(struct inode *inode,
329 udf_pblk_t *block, int *err)
330{
331 udf_pblk_t newblock;
332 struct buffer_head *dbh = NULL;
333 struct kernel_lb_addr eloc;
334 uint8_t alloctype;
335 struct extent_position epos;
336
337 struct udf_fileident_bh sfibh, dfibh;
338 loff_t f_pos = udf_ext0_offset(inode);
339 int size = udf_ext0_offset(inode) + inode->i_size;
340 struct fileIdentDesc cfi, *sfi, *dfi;
341 struct udf_inode_info *iinfo = UDF_I(inode);
342
343 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
344 alloctype = ICBTAG_FLAG_AD_SHORT;
345 else
346 alloctype = ICBTAG_FLAG_AD_LONG;
347
348 if (!inode->i_size) {
349 iinfo->i_alloc_type = alloctype;
350 mark_inode_dirty(inode);
351 return NULL;
352 }
353
354 /* alloc block, and copy data to it */
355 *block = udf_new_block(inode->i_sb, inode,
356 iinfo->i_location.partitionReferenceNum,
357 iinfo->i_location.logicalBlockNum, err);
358 if (!(*block))
359 return NULL;
360 newblock = udf_get_pblock(inode->i_sb, *block,
361 iinfo->i_location.partitionReferenceNum,
362 0);
363 if (!newblock)
364 return NULL;
365 dbh = udf_tgetblk(inode->i_sb, newblock);
366 if (!dbh)
367 return NULL;
368 lock_buffer(dbh);
369 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
370 set_buffer_uptodate(dbh);
371 unlock_buffer(dbh);
372 mark_buffer_dirty_inode(dbh, inode);
373
374 sfibh.soffset = sfibh.eoffset =
375 f_pos & (inode->i_sb->s_blocksize - 1);
376 sfibh.sbh = sfibh.ebh = NULL;
377 dfibh.soffset = dfibh.eoffset = 0;
378 dfibh.sbh = dfibh.ebh = dbh;
379 while (f_pos < size) {
380 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
381 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL,
382 NULL, NULL, NULL);
383 if (!sfi) {
384 brelse(dbh);
385 return NULL;
386 }
387 iinfo->i_alloc_type = alloctype;
388 sfi->descTag.tagLocation = cpu_to_le32(*block);
389 dfibh.soffset = dfibh.eoffset;
390 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
391 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
392 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
393 sfi->fileIdent +
394 le16_to_cpu(sfi->lengthOfImpUse))) {
395 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
396 brelse(dbh);
397 return NULL;
398 }
399 }
400 mark_buffer_dirty_inode(dbh, inode);
401
402 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0,
403 iinfo->i_lenAlloc);
404 iinfo->i_lenAlloc = 0;
405 eloc.logicalBlockNum = *block;
406 eloc.partitionReferenceNum =
407 iinfo->i_location.partitionReferenceNum;
408 iinfo->i_lenExtents = inode->i_size;
409 epos.bh = NULL;
410 epos.block = iinfo->i_location;
411 epos.offset = udf_file_entry_alloc_offset(inode);
412 udf_add_aext(inode, &epos, &eloc, inode->i_size, 0);
413 /* UniqueID stuff */
414
415 brelse(epos.bh);
416 mark_inode_dirty(inode);
417 return dbh;
418}
419
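/*
 * get_block callback used by the buffered and direct I/O paths above.  Reads
 * only map blocks that already exist; with 'create' set a block is allocated
 * through inode_getblk() under i_data_sem.
 */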
420static int udf_get_block(struct inode *inode, sector_t block,
421 struct buffer_head *bh_result, int create)
422{
423 int err, new;
424 sector_t phys = 0;
425 struct udf_inode_info *iinfo;
426
427 if (!create) {
428 phys = udf_block_map(inode, block);
429 if (phys)
430 map_bh(bh_result, inode->i_sb, phys);
431 return 0;
432 }
433
434 err = -EIO;
435 new = 0;
436 iinfo = UDF_I(inode);
437
438 down_write(&iinfo->i_data_sem);
439 if (block == iinfo->i_next_alloc_block + 1) {
440 iinfo->i_next_alloc_block++;
441 iinfo->i_next_alloc_goal++;
442 }
443
444 /*
445 * Block beyond EOF and prealloc extents? Just discard preallocation
446 * as it is not useful and complicates things.
447 */
448 if (((loff_t)block) << inode->i_blkbits > iinfo->i_lenExtents)
449 udf_discard_prealloc(inode);
450 udf_clear_extent_cache(inode);
451 phys = inode_getblk(inode, block, &err, &new);
452 if (!phys)
453 goto abort;
454
455 if (new)
456 set_buffer_new(bh_result);
457 map_bh(bh_result, inode->i_sb, phys);
458
459abort:
460 up_write(&iinfo->i_data_sem);
461 return err;
462}
463
464static struct buffer_head *udf_getblk(struct inode *inode, udf_pblk_t block,
465 int create, int *err)
466{
467 struct buffer_head *bh;
468 struct buffer_head dummy;
469
470 dummy.b_state = 0;
471 dummy.b_blocknr = -1000;
472 *err = udf_get_block(inode, block, &dummy, create);
473 if (!*err && buffer_mapped(&dummy)) {
474 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
475 if (buffer_new(&dummy)) {
476 lock_buffer(bh);
477 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
478 set_buffer_uptodate(bh);
479 unlock_buffer(bh);
480 mark_buffer_dirty_inode(bh, inode);
481 }
482 return bh;
483 }
484
485 return NULL;
486}
487
488/* Extend the file with new blocks totaling 'new_block_bytes',
489 * return the number of extents added
490 */
491static int udf_do_extend_file(struct inode *inode,
492 struct extent_position *last_pos,
493 struct kernel_long_ad *last_ext,
494 loff_t new_block_bytes)
495{
496 uint32_t add;
497 int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
498 struct super_block *sb = inode->i_sb;
499 struct udf_inode_info *iinfo;
500 int err;
501
502 /* The previous extent is fake and we should not extend by anything
503 * - there's nothing to do... */
504 if (!new_block_bytes && fake)
505 return 0;
506
507 iinfo = UDF_I(inode);
508 /* Round the last extent up to a multiple of block size */
509 if (last_ext->extLength & (sb->s_blocksize - 1)) {
510 last_ext->extLength =
511 (last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
512 (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
513 sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
514 iinfo->i_lenExtents =
515 (iinfo->i_lenExtents + sb->s_blocksize - 1) &
516 ~(sb->s_blocksize - 1);
517 }
518
519 /* Can we merge with the previous extent? */
520 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
521 EXT_NOT_RECORDED_NOT_ALLOCATED) {
522 add = (1 << 30) - sb->s_blocksize -
523 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
524 if (add > new_block_bytes)
525 add = new_block_bytes;
526 new_block_bytes -= add;
527 last_ext->extLength += add;
528 }
529
530 if (fake) {
531 err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
532 last_ext->extLength, 1);
533 if (err < 0)
534 goto out_err;
535 count++;
536 } else {
537 struct kernel_lb_addr tmploc;
538 uint32_t tmplen;
539
540 udf_write_aext(inode, last_pos, &last_ext->extLocation,
541 last_ext->extLength, 1);
542
543 /*
544 * We've rewritten the last extent. If we are going to add
545 * more extents, we may need to enter possible following
546 * empty indirect extent.
547 */
548 if (new_block_bytes)
549 udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
550 }
551
552 /* Managed to do everything necessary? */
553 if (!new_block_bytes)
554 goto out;
555
556 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
557 last_ext->extLocation.logicalBlockNum = 0;
558 last_ext->extLocation.partitionReferenceNum = 0;
559 add = (1 << 30) - sb->s_blocksize;
560 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | add;
561
562 /* Create enough extents to cover the whole hole */
563 while (new_block_bytes > add) {
564 new_block_bytes -= add;
565 err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
566 last_ext->extLength, 1);
567 if (err)
568 goto out_err;
569 count++;
570 }
571 if (new_block_bytes) {
572 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
573 new_block_bytes;
574 err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
575 last_ext->extLength, 1);
576 if (err)
577 goto out_err;
578 count++;
579 }
580
581out:
582 /* last_pos should point to the last written extent... */
583 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
584 last_pos->offset -= sizeof(struct short_ad);
585 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
586 last_pos->offset -= sizeof(struct long_ad);
587 else
588 return -EIO;
589
590 return count;
591out_err:
592 /* Remove extents we've created so far */
593 udf_clear_extent_cache(inode);
594 udf_truncate_extents(inode);
595 return err;
596}
597
598/* Extend the last extent of the file to new_elen bytes */
599static void udf_do_extend_final_block(struct inode *inode,
600 struct extent_position *last_pos,
601 struct kernel_long_ad *last_ext,
602 uint32_t new_elen)
603{
604 uint32_t added_bytes;
605
606 /*
607 * Extent already large enough? It may be already rounded up to block
608 * size...
609 */
610 if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK))
611 return;
612 added_bytes = new_elen - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
613 last_ext->extLength += added_bytes;
614 UDF_I(inode)->i_lenExtents += added_bytes;
615
616 udf_write_aext(inode, last_pos, &last_ext->extLocation,
617 last_ext->extLength, 1);
618}
619
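/*
 * Make the inode's extents describe at least 'newsize' bytes.  If newsize
 * still falls within the last extent, that extent is merely enlarged;
 * otherwise not-recorded extents are appended to cover the hole.
 */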
620static int udf_extend_file(struct inode *inode, loff_t newsize)
621{
622
623 struct extent_position epos;
624 struct kernel_lb_addr eloc;
625 uint32_t elen;
626 int8_t etype;
627 struct super_block *sb = inode->i_sb;
628 sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
629 loff_t new_elen;
630 int adsize;
631 struct udf_inode_info *iinfo = UDF_I(inode);
632 struct kernel_long_ad extent;
633 int err = 0;
634 bool within_last_ext;
635
636 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
637 adsize = sizeof(struct short_ad);
638 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
639 adsize = sizeof(struct long_ad);
640 else
641 BUG();
642
643 /*
644 * When creating hole in file, just don't bother with preserving
645 * preallocation. It likely won't be very useful anyway.
646 */
647 udf_discard_prealloc(inode);
648
649 etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
650 within_last_ext = (etype != -1);
651 /* We don't expect extents past EOF... */
652 WARN_ON_ONCE(within_last_ext &&
653 elen > ((loff_t)offset + 1) << inode->i_blkbits);
654
655 if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
656 (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
657 /* File has no extents at all or has empty last
658 * indirect extent! Create a fake extent... */
659 extent.extLocation.logicalBlockNum = 0;
660 extent.extLocation.partitionReferenceNum = 0;
661 extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
662 } else {
663 epos.offset -= adsize;
664 etype = udf_next_aext(inode, &epos, &extent.extLocation,
665 &extent.extLength, 0);
666 extent.extLength |= etype << 30;
667 }
668
669 new_elen = ((loff_t)offset << inode->i_blkbits) |
670 (newsize & (sb->s_blocksize - 1));
671
672 /* File has extent covering the new size (could happen when extending
673 * inside a block)?
674 */
675 if (within_last_ext) {
676 /* Extending file within the last file block */
677 udf_do_extend_final_block(inode, &epos, &extent, new_elen);
678 } else {
679 err = udf_do_extend_file(inode, &epos, &extent, new_elen);
680 }
681
682 if (err < 0)
683 goto out;
684 err = 0;
685 iinfo->i_lenExtents = newsize;
686out:
687 brelse(epos.bh);
688 return err;
689}
690
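/*
 * Map logical block 'block' to a physical block, allocating one when it is
 * not recorded yet.  The surrounding extents are split, optionally
 * preallocated (regular files only) and merged before being written back.
 * Returns the physical block number, or 0 with *err set on failure; *new is
 * set when a fresh block was allocated.
 */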
691static sector_t inode_getblk(struct inode *inode, sector_t block,
692 int *err, int *new)
693{
694 struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
695 struct extent_position prev_epos, cur_epos, next_epos;
696 int count = 0, startnum = 0, endnum = 0;
697 uint32_t elen = 0, tmpelen;
698 struct kernel_lb_addr eloc, tmpeloc;
699 int c = 1;
700 loff_t lbcount = 0, b_off = 0;
701 udf_pblk_t newblocknum, newblock = 0;
702 sector_t offset = 0;
703 int8_t etype;
704 struct udf_inode_info *iinfo = UDF_I(inode);
705 udf_pblk_t goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
706 int lastblock = 0;
707 bool isBeyondEOF;
708
709 *err = 0;
710 *new = 0;
711 prev_epos.offset = udf_file_entry_alloc_offset(inode);
712 prev_epos.block = iinfo->i_location;
713 prev_epos.bh = NULL;
714 cur_epos = next_epos = prev_epos;
715 b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
716
717 /* find the extent which contains the block we are looking for.
718 alternate between laarr[0] and laarr[1] for locations of the
719 current extent, and the previous extent */
720 do {
721 if (prev_epos.bh != cur_epos.bh) {
722 brelse(prev_epos.bh);
723 get_bh(cur_epos.bh);
724 prev_epos.bh = cur_epos.bh;
725 }
726 if (cur_epos.bh != next_epos.bh) {
727 brelse(cur_epos.bh);
728 get_bh(next_epos.bh);
729 cur_epos.bh = next_epos.bh;
730 }
731
732 lbcount += elen;
733
734 prev_epos.block = cur_epos.block;
735 cur_epos.block = next_epos.block;
736
737 prev_epos.offset = cur_epos.offset;
738 cur_epos.offset = next_epos.offset;
739
740 etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
741 if (etype == -1)
742 break;
743
744 c = !c;
745
746 laarr[c].extLength = (etype << 30) | elen;
747 laarr[c].extLocation = eloc;
748
749 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
750 pgoal = eloc.logicalBlockNum +
751 ((elen + inode->i_sb->s_blocksize - 1) >>
752 inode->i_sb->s_blocksize_bits);
753
754 count++;
755 } while (lbcount + elen <= b_off);
756
757 b_off -= lbcount;
758 offset = b_off >> inode->i_sb->s_blocksize_bits;
759 /*
760 * Move prev_epos and cur_epos into indirect extent if we are at
761 * the pointer to it
762 */
763 udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
764 udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
765
766	/* if the extent is allocated and recorded, return the block;
767	   if the extent is not a multiple of the blocksize, round up */
768
769 if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
770 if (elen & (inode->i_sb->s_blocksize - 1)) {
771 elen = EXT_RECORDED_ALLOCATED |
772 ((elen + inode->i_sb->s_blocksize - 1) &
773 ~(inode->i_sb->s_blocksize - 1));
774 udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
775 }
776 newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
777 goto out_free;
778 }
779
780 /* Are we beyond EOF and preallocated extent? */
781 if (etype == -1) {
782 int ret;
783 loff_t hole_len;
784
785 isBeyondEOF = true;
786 if (count) {
787 if (c)
788 laarr[0] = laarr[1];
789 startnum = 1;
790 } else {
791 /* Create a fake extent when there's not one */
792 memset(&laarr[0].extLocation, 0x00,
793 sizeof(struct kernel_lb_addr));
794 laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
795 /* Will udf_do_extend_file() create real extent from
796 a fake one? */
797 startnum = (offset > 0);
798 }
799 /* Create extents for the hole between EOF and offset */
800 hole_len = (loff_t)offset << inode->i_blkbits;
801 ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
802 if (ret < 0) {
803 *err = ret;
804 goto out_free;
805 }
806 c = 0;
807 offset = 0;
808 count += ret;
809 /*
810 * Is there any real extent? - otherwise we overwrite the fake
811 * one...
812 */
813 if (count)
814 c = !c;
815 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
816 inode->i_sb->s_blocksize;
817 memset(&laarr[c].extLocation, 0x00,
818 sizeof(struct kernel_lb_addr));
819 count++;
820 endnum = c + 1;
821 lastblock = 1;
822 } else {
823 isBeyondEOF = false;
824 endnum = startnum = ((count > 2) ? 2 : count);
825
826 /* if the current extent is in position 0,
827 swap it with the previous */
828 if (!c && count != 1) {
829 laarr[2] = laarr[0];
830 laarr[0] = laarr[1];
831 laarr[1] = laarr[2];
832 c = 1;
833 }
834
835 /* if the current block is located in an extent,
836 read the next extent */
837 etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
838 if (etype != -1) {
839 laarr[c + 1].extLength = (etype << 30) | elen;
840 laarr[c + 1].extLocation = eloc;
841 count++;
842 startnum++;
843 endnum++;
844 } else
845 lastblock = 1;
846 }
847
848 /* if the current extent is not recorded but allocated, get the
849 * block in the extent corresponding to the requested block */
850 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
851 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
852 else { /* otherwise, allocate a new block */
853 if (iinfo->i_next_alloc_block == block)
854 goal = iinfo->i_next_alloc_goal;
855
856 if (!goal) {
857 if (!(goal = pgoal)) /* XXX: what was intended here? */
858 goal = iinfo->i_location.logicalBlockNum + 1;
859 }
860
861 newblocknum = udf_new_block(inode->i_sb, inode,
862 iinfo->i_location.partitionReferenceNum,
863 goal, err);
864 if (!newblocknum) {
865 *err = -ENOSPC;
866 goto out_free;
867 }
868 if (isBeyondEOF)
869 iinfo->i_lenExtents += inode->i_sb->s_blocksize;
870 }
871
872	/* if the extent the requested block is located in contains multiple
873	 * blocks, split the extent into at most three extents: blocks prior
874 * to requested block, requested block, and blocks after requested
875 * block */
876 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
877
878 /* We preallocate blocks only for regular files. It also makes sense
879 * for directories but there's a problem when to drop the
880 * preallocation. We might use some delayed work for that but I feel
881 * it's overengineering for a filesystem like UDF. */
882 if (S_ISREG(inode->i_mode))
883 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
884
885 /* merge any continuous blocks in laarr */
886 udf_merge_extents(inode, laarr, &endnum);
887
888 /* write back the new extents, inserting new extents if the new number
889 * of extents is greater than the old number, and deleting extents if
890 * the new number of extents is less than the old number */
891 *err = udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
892 if (*err < 0)
893 goto out_free;
894
895 newblock = udf_get_pblock(inode->i_sb, newblocknum,
896 iinfo->i_location.partitionReferenceNum, 0);
897 if (!newblock) {
898 *err = -EIO;
899 goto out_free;
900 }
901 *new = 1;
902 iinfo->i_next_alloc_block = block;
903 iinfo->i_next_alloc_goal = newblocknum;
904 inode->i_ctime = current_time(inode);
905
906 if (IS_SYNC(inode))
907 udf_sync_inode(inode);
908 else
909 mark_inode_dirty(inode);
910out_free:
911 brelse(prev_epos.bh);
912 brelse(cur_epos.bh);
913 brelse(next_epos.bh);
914 return newblock;
915}
916
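/*
 * Split the extent laarr[*c] around the block being mapped: the blocks in
 * front of 'offset', the newly recorded block itself and the remaining tail
 * become separate laarr[] entries, updating *c and *endnum accordingly.
 */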
917static void udf_split_extents(struct inode *inode, int *c, int offset,
918 udf_pblk_t newblocknum,
919 struct kernel_long_ad *laarr, int *endnum)
920{
921 unsigned long blocksize = inode->i_sb->s_blocksize;
922 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
923
924 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
925 (laarr[*c].extLength >> 30) ==
926 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
927 int curr = *c;
928 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
929 blocksize - 1) >> blocksize_bits;
930 int8_t etype = (laarr[curr].extLength >> 30);
931
932 if (blen == 1)
933 ;
934 else if (!offset || blen == offset + 1) {
935 laarr[curr + 2] = laarr[curr + 1];
936 laarr[curr + 1] = laarr[curr];
937 } else {
938 laarr[curr + 3] = laarr[curr + 1];
939 laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
940 }
941
942 if (offset) {
943 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
944 udf_free_blocks(inode->i_sb, inode,
945 &laarr[curr].extLocation,
946 0, offset);
947 laarr[curr].extLength =
948 EXT_NOT_RECORDED_NOT_ALLOCATED |
949 (offset << blocksize_bits);
950 laarr[curr].extLocation.logicalBlockNum = 0;
951 laarr[curr].extLocation.
952 partitionReferenceNum = 0;
953 } else
954 laarr[curr].extLength = (etype << 30) |
955 (offset << blocksize_bits);
956 curr++;
957 (*c)++;
958 (*endnum)++;
959 }
960
961 laarr[curr].extLocation.logicalBlockNum = newblocknum;
962 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
963 laarr[curr].extLocation.partitionReferenceNum =
964 UDF_I(inode)->i_location.partitionReferenceNum;
965 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
966 blocksize;
967 curr++;
968
969 if (blen != offset + 1) {
970 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
971 laarr[curr].extLocation.logicalBlockNum +=
972 offset + 1;
973 laarr[curr].extLength = (etype << 30) |
974 ((blen - (offset + 1)) << blocksize_bits);
975 curr++;
976 (*endnum)++;
977 }
978 }
979}
980
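/*
 * Preallocate blocks after the extent that has just been mapped so that
 * subsequent sequential writes do not have to consult the free space map for
 * every block; the blocks obtained are folded into the laarr[] description.
 */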
981static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
982 struct kernel_long_ad *laarr,
983 int *endnum)
984{
985 int start, length = 0, currlength = 0, i;
986
987 if (*endnum >= (c + 1)) {
988 if (!lastblock)
989 return;
990 else
991 start = c;
992 } else {
993 if ((laarr[c + 1].extLength >> 30) ==
994 (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
995 start = c + 1;
996 length = currlength =
997 (((laarr[c + 1].extLength &
998 UDF_EXTENT_LENGTH_MASK) +
999 inode->i_sb->s_blocksize - 1) >>
1000 inode->i_sb->s_blocksize_bits);
1001 } else
1002 start = c;
1003 }
1004
1005 for (i = start + 1; i <= *endnum; i++) {
1006 if (i == *endnum) {
1007 if (lastblock)
1008 length += UDF_DEFAULT_PREALLOC_BLOCKS;
1009 } else if ((laarr[i].extLength >> 30) ==
1010 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
1011 length += (((laarr[i].extLength &
1012 UDF_EXTENT_LENGTH_MASK) +
1013 inode->i_sb->s_blocksize - 1) >>
1014 inode->i_sb->s_blocksize_bits);
1015 } else
1016 break;
1017 }
1018
1019 if (length) {
1020 int next = laarr[start].extLocation.logicalBlockNum +
1021 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
1022 inode->i_sb->s_blocksize - 1) >>
1023 inode->i_sb->s_blocksize_bits);
1024 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
1025 laarr[start].extLocation.partitionReferenceNum,
1026 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
1027 length : UDF_DEFAULT_PREALLOC_BLOCKS) -
1028 currlength);
1029 if (numalloc) {
1030 if (start == (c + 1))
1031 laarr[start].extLength +=
1032 (numalloc <<
1033 inode->i_sb->s_blocksize_bits);
1034 else {
1035 memmove(&laarr[c + 2], &laarr[c + 1],
1036 sizeof(struct long_ad) * (*endnum - (c + 1)));
1037 (*endnum)++;
1038 laarr[c + 1].extLocation.logicalBlockNum = next;
1039 laarr[c + 1].extLocation.partitionReferenceNum =
1040 laarr[c].extLocation.
1041 partitionReferenceNum;
1042 laarr[c + 1].extLength =
1043 EXT_NOT_RECORDED_ALLOCATED |
1044 (numalloc <<
1045 inode->i_sb->s_blocksize_bits);
1046 start = c + 1;
1047 }
1048
1049 for (i = start + 1; numalloc && i < *endnum; i++) {
1050 int elen = ((laarr[i].extLength &
1051 UDF_EXTENT_LENGTH_MASK) +
1052 inode->i_sb->s_blocksize - 1) >>
1053 inode->i_sb->s_blocksize_bits;
1054
1055 if (elen > numalloc) {
1056 laarr[i].extLength -=
1057 (numalloc <<
1058 inode->i_sb->s_blocksize_bits);
1059 numalloc = 0;
1060 } else {
1061 numalloc -= elen;
1062 if (*endnum > (i + 1))
1063 memmove(&laarr[i],
1064 &laarr[i + 1],
1065 sizeof(struct long_ad) *
1066 (*endnum - (i + 1)));
1067 i--;
1068 (*endnum)--;
1069 }
1070 }
1071 UDF_I(inode)->i_lenExtents +=
1072 numalloc << inode->i_sb->s_blocksize_bits;
1073 }
1074 }
1075}
1076
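/*
 * Merge neighbouring laarr[] entries that describe contiguous space of the
 * same type, and free the disk blocks behind allocated-but-unrecorded
 * extents that end up as plain holes.
 */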
1077static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr,
1078 int *endnum)
1079{
1080 int i;
1081 unsigned long blocksize = inode->i_sb->s_blocksize;
1082 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
1083
1084 for (i = 0; i < (*endnum - 1); i++) {
1085 struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
1086 struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];
1087
1088 if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
1089 (((li->extLength >> 30) ==
1090 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
1091 ((lip1->extLocation.logicalBlockNum -
1092 li->extLocation.logicalBlockNum) ==
1093 (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
1094 blocksize - 1) >> blocksize_bits)))) {
1095
1096 if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
1097 (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
1098 blocksize - 1) <= UDF_EXTENT_LENGTH_MASK) {
1099 li->extLength = lip1->extLength +
1100 (((li->extLength &
1101 UDF_EXTENT_LENGTH_MASK) +
1102 blocksize - 1) & ~(blocksize - 1));
1103 if (*endnum > (i + 2))
1104 memmove(&laarr[i + 1], &laarr[i + 2],
1105 sizeof(struct long_ad) *
1106 (*endnum - (i + 2)));
1107 i--;
1108 (*endnum)--;
1109 }
1110 } else if (((li->extLength >> 30) ==
1111 (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
1112 ((lip1->extLength >> 30) ==
1113 (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
1114 udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
1115 ((li->extLength &
1116 UDF_EXTENT_LENGTH_MASK) +
1117 blocksize - 1) >> blocksize_bits);
1118 li->extLocation.logicalBlockNum = 0;
1119 li->extLocation.partitionReferenceNum = 0;
1120
1121 if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
1122 (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
1123 blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
1124 lip1->extLength = (lip1->extLength -
1125 (li->extLength &
1126 UDF_EXTENT_LENGTH_MASK) +
1127 UDF_EXTENT_LENGTH_MASK) &
1128 ~(blocksize - 1);
1129 li->extLength = (li->extLength &
1130 UDF_EXTENT_FLAG_MASK) +
1131 (UDF_EXTENT_LENGTH_MASK + 1) -
1132 blocksize;
1133 } else {
1134 li->extLength = lip1->extLength +
1135 (((li->extLength &
1136 UDF_EXTENT_LENGTH_MASK) +
1137 blocksize - 1) & ~(blocksize - 1));
1138 if (*endnum > (i + 2))
1139 memmove(&laarr[i + 1], &laarr[i + 2],
1140 sizeof(struct long_ad) *
1141 (*endnum - (i + 2)));
1142 i--;
1143 (*endnum)--;
1144 }
1145 } else if ((li->extLength >> 30) ==
1146 (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
1147 udf_free_blocks(inode->i_sb, inode,
1148 &li->extLocation, 0,
1149 ((li->extLength &
1150 UDF_EXTENT_LENGTH_MASK) +
1151 blocksize - 1) >> blocksize_bits);
1152 li->extLocation.logicalBlockNum = 0;
1153 li->extLocation.partitionReferenceNum = 0;
1154 li->extLength = (li->extLength &
1155 UDF_EXTENT_LENGTH_MASK) |
1156 EXT_NOT_RECORDED_NOT_ALLOCATED;
1157 }
1158 }
1159}
1160
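/*
 * Write laarr[] back to the on-disk allocation descriptors at *epos,
 * inserting or deleting descriptors when the number of extents has changed
 * (endnum versus startnum).
 */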
1161static int udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr,
1162 int startnum, int endnum,
1163 struct extent_position *epos)
1164{
1165 int start = 0, i;
1166 struct kernel_lb_addr tmploc;
1167 uint32_t tmplen;
1168 int err;
1169
1170 if (startnum > endnum) {
1171 for (i = 0; i < (startnum - endnum); i++)
1172 udf_delete_aext(inode, *epos);
1173 } else if (startnum < endnum) {
1174 for (i = 0; i < (endnum - startnum); i++) {
1175 err = udf_insert_aext(inode, *epos,
1176 laarr[i].extLocation,
1177 laarr[i].extLength);
1178 /*
1179 * If we fail here, we are likely corrupting the extent
1180 * list and leaking blocks. At least stop early to
1181 * limit the damage.
1182 */
1183 if (err < 0)
1184 return err;
1185 udf_next_aext(inode, epos, &laarr[i].extLocation,
1186 &laarr[i].extLength, 1);
1187 start++;
1188 }
1189 }
1190
1191 for (i = start; i < endnum; i++) {
1192 udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
1193 udf_write_aext(inode, epos, &laarr[i].extLocation,
1194 laarr[i].extLength, 1);
1195 }
1196 return 0;
1197}
1198
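/*
 * Return an up-to-date buffer_head for the given file block (allocating the
 * block first when 'create' is set), or NULL on failure.
 */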
1199struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
1200 int create, int *err)
1201{
1202 struct buffer_head *bh = NULL;
1203
1204 bh = udf_getblk(inode, block, create, err);
1205 if (!bh)
1206 return NULL;
1207
1208 if (buffer_uptodate(bh))
1209 return bh;
1210
1211 ll_rw_block(REQ_OP_READ, 0, 1, &bh);
1212
1213 wait_on_buffer(bh);
1214 if (buffer_uptodate(bh))
1215 return bh;
1216
1217 brelse(bh);
1218 *err = -EIO;
1219 return NULL;
1220}
1221
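/*
 * Set a new file size: when growing, expand in-ICB data to a real block if
 * needed and extend the extent list; when shrinking, truncate the page cache
 * and the extents.  Timestamps are updated and the inode is marked dirty.
 */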
1222int udf_setsize(struct inode *inode, loff_t newsize)
1223{
1224 int err;
1225 struct udf_inode_info *iinfo;
1226 unsigned int bsize = i_blocksize(inode);
1227
1228 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1229 S_ISLNK(inode->i_mode)))
1230 return -EINVAL;
1231 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1232 return -EPERM;
1233
1234 iinfo = UDF_I(inode);
1235 if (newsize > inode->i_size) {
1236 down_write(&iinfo->i_data_sem);
1237 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1238 if (bsize <
1239 (udf_file_entry_alloc_offset(inode) + newsize)) {
1240 err = udf_expand_file_adinicb(inode);
1241 if (err)
1242 return err;
1243 down_write(&iinfo->i_data_sem);
1244 } else {
1245 iinfo->i_lenAlloc = newsize;
1246 goto set_size;
1247 }
1248 }
1249 err = udf_extend_file(inode, newsize);
1250 if (err) {
1251 up_write(&iinfo->i_data_sem);
1252 return err;
1253 }
1254set_size:
1255 up_write(&iinfo->i_data_sem);
1256 truncate_setsize(inode, newsize);
1257 } else {
1258 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1259 down_write(&iinfo->i_data_sem);
1260 udf_clear_extent_cache(inode);
1261 memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize,
1262 0x00, bsize - newsize -
1263 udf_file_entry_alloc_offset(inode));
1264 iinfo->i_lenAlloc = newsize;
1265 truncate_setsize(inode, newsize);
1266 up_write(&iinfo->i_data_sem);
1267 goto update_time;
1268 }
1269 err = block_truncate_page(inode->i_mapping, newsize,
1270 udf_get_block);
1271 if (err)
1272 return err;
1273 truncate_setsize(inode, newsize);
1274 down_write(&iinfo->i_data_sem);
1275 udf_clear_extent_cache(inode);
1276 err = udf_truncate_extents(inode);
1277 up_write(&iinfo->i_data_sem);
1278 if (err)
1279 return err;
1280 }
1281update_time:
1282 inode->i_mtime = inode->i_ctime = current_time(inode);
1283 if (IS_SYNC(inode))
1284 udf_sync_inode(inode);
1285 else
1286 mark_inode_dirty(inode);
1287 return 0;
1288}
1289
1290/*
1291 * Maximum length of linked list formed by ICB hierarchy. The chosen number is
1292 * arbitrary - just that we hopefully don't limit any real use of rewritten
1293 * inode on write-once media but avoid looping for too long on corrupted media.
1294 */
1295#define UDF_MAX_ICB_NESTING 1024
1296
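/*
 * Read and validate the on-disk (extended) file entry for 'inode' and fill
 * in the VFS inode and udf_inode_info from it.  Strategy 4096 indirect ICBs
 * are followed, up to UDF_MAX_ICB_NESTING levels.
 */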
1297static int udf_read_inode(struct inode *inode, bool hidden_inode)
1298{
1299 struct buffer_head *bh = NULL;
1300 struct fileEntry *fe;
1301 struct extendedFileEntry *efe;
1302 uint16_t ident;
1303 struct udf_inode_info *iinfo = UDF_I(inode);
1304 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1305 struct kernel_lb_addr *iloc = &iinfo->i_location;
1306 unsigned int link_count;
1307 unsigned int indirections = 0;
1308 int bs = inode->i_sb->s_blocksize;
1309 int ret = -EIO;
1310 uint32_t uid, gid;
1311
1312reread:
1313 if (iloc->partitionReferenceNum >= sbi->s_partitions) {
1314 udf_debug("partition reference: %u > logical volume partitions: %u\n",
1315 iloc->partitionReferenceNum, sbi->s_partitions);
1316 return -EIO;
1317 }
1318
1319 if (iloc->logicalBlockNum >=
1320 sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
1321 udf_debug("block=%u, partition=%u out of range\n",
1322 iloc->logicalBlockNum, iloc->partitionReferenceNum);
1323 return -EIO;
1324 }
1325
1326 /*
1327 * Set defaults, but the inode is still incomplete!
1328 * Note: get_new_inode() sets the following on a new inode:
1329 * i_sb = sb
1330 * i_no = ino
1331 * i_flags = sb->s_flags
1332 * i_state = 0
1333 * clean_inode(): zero fills and sets
1334 * i_count = 1
1335 * i_nlink = 1
1336 * i_op = NULL;
1337 */
1338 bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
1339 if (!bh) {
1340 udf_err(inode->i_sb, "(ino %lu) failed !bh\n", inode->i_ino);
1341 return -EIO;
1342 }
1343
1344 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1345 ident != TAG_IDENT_USE) {
1346 udf_err(inode->i_sb, "(ino %lu) failed ident=%u\n",
1347 inode->i_ino, ident);
1348 goto out;
1349 }
1350
1351 fe = (struct fileEntry *)bh->b_data;
1352 efe = (struct extendedFileEntry *)bh->b_data;
1353
1354 if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
1355 struct buffer_head *ibh;
1356
1357 ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
1358 if (ident == TAG_IDENT_IE && ibh) {
1359 struct kernel_lb_addr loc;
1360 struct indirectEntry *ie;
1361
1362 ie = (struct indirectEntry *)ibh->b_data;
1363 loc = lelb_to_cpu(ie->indirectICB.extLocation);
1364
1365 if (ie->indirectICB.extLength) {
1366 brelse(ibh);
1367 memcpy(&iinfo->i_location, &loc,
1368 sizeof(struct kernel_lb_addr));
1369 if (++indirections > UDF_MAX_ICB_NESTING) {
1370 udf_err(inode->i_sb,
1371 "too many ICBs in ICB hierarchy"
1372 " (max %d supported)\n",
1373 UDF_MAX_ICB_NESTING);
1374 goto out;
1375 }
1376 brelse(bh);
1377 goto reread;
1378 }
1379 }
1380 brelse(ibh);
1381 } else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
1382 udf_err(inode->i_sb, "unsupported strategy type: %u\n",
1383 le16_to_cpu(fe->icbTag.strategyType));
1384 goto out;
1385 }
1386 if (fe->icbTag.strategyType == cpu_to_le16(4))
1387 iinfo->i_strat4096 = 0;
1388 else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
1389 iinfo->i_strat4096 = 1;
1390
1391 iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
1392 ICBTAG_FLAG_AD_MASK;
1393 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
1394 iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
1395 iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
1396 ret = -EIO;
1397 goto out;
1398 }
1399 iinfo->i_hidden = hidden_inode;
1400 iinfo->i_unique = 0;
1401 iinfo->i_lenEAttr = 0;
1402 iinfo->i_lenExtents = 0;
1403 iinfo->i_lenAlloc = 0;
1404 iinfo->i_next_alloc_block = 0;
1405 iinfo->i_next_alloc_goal = 0;
1406 if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
1407 iinfo->i_efe = 1;
1408 iinfo->i_use = 0;
1409 ret = udf_alloc_i_data(inode, bs -
1410 sizeof(struct extendedFileEntry));
1411 if (ret)
1412 goto out;
1413 memcpy(iinfo->i_ext.i_data,
1414 bh->b_data + sizeof(struct extendedFileEntry),
1415 bs - sizeof(struct extendedFileEntry));
1416 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
1417 iinfo->i_efe = 0;
1418 iinfo->i_use = 0;
1419 ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
1420 if (ret)
1421 goto out;
1422 memcpy(iinfo->i_ext.i_data,
1423 bh->b_data + sizeof(struct fileEntry),
1424 bs - sizeof(struct fileEntry));
1425 } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
1426 iinfo->i_efe = 0;
1427 iinfo->i_use = 1;
1428 iinfo->i_lenAlloc = le32_to_cpu(
1429 ((struct unallocSpaceEntry *)bh->b_data)->
1430 lengthAllocDescs);
1431 ret = udf_alloc_i_data(inode, bs -
1432 sizeof(struct unallocSpaceEntry));
1433 if (ret)
1434 goto out;
1435 memcpy(iinfo->i_ext.i_data,
1436 bh->b_data + sizeof(struct unallocSpaceEntry),
1437 bs - sizeof(struct unallocSpaceEntry));
1438 return 0;
1439 }
1440
1441 ret = -EIO;
1442 read_lock(&sbi->s_cred_lock);
1443 uid = le32_to_cpu(fe->uid);
1444 if (uid == UDF_INVALID_ID ||
1445 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
1446 inode->i_uid = sbi->s_uid;
1447 else
1448 i_uid_write(inode, uid);
1449
1450 gid = le32_to_cpu(fe->gid);
1451 if (gid == UDF_INVALID_ID ||
1452 UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
1453 inode->i_gid = sbi->s_gid;
1454 else
1455 i_gid_write(inode, gid);
1456
1457 if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
1458 sbi->s_fmode != UDF_INVALID_MODE)
1459 inode->i_mode = sbi->s_fmode;
1460 else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
1461 sbi->s_dmode != UDF_INVALID_MODE)
1462 inode->i_mode = sbi->s_dmode;
1463 else
1464 inode->i_mode = udf_convert_permissions(fe);
1465 inode->i_mode &= ~sbi->s_umask;
1466 iinfo->i_extraPerms = le32_to_cpu(fe->permissions) & ~FE_MAPPED_PERMS;
1467
1468 read_unlock(&sbi->s_cred_lock);
1469
1470 link_count = le16_to_cpu(fe->fileLinkCount);
1471 if (!link_count) {
1472 if (!hidden_inode) {
1473 ret = -ESTALE;
1474 goto out;
1475 }
1476 link_count = 1;
1477 }
1478 set_nlink(inode, link_count);
1479
1480 inode->i_size = le64_to_cpu(fe->informationLength);
1481 iinfo->i_lenExtents = inode->i_size;
1482
1483 if (iinfo->i_efe == 0) {
1484 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1485 (inode->i_sb->s_blocksize_bits - 9);
1486
1487 udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime);
1488 udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime);
1489 udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime);
1490
1491 iinfo->i_unique = le64_to_cpu(fe->uniqueID);
1492 iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
1493 iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
1494 iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
1495 iinfo->i_streamdir = 0;
1496 iinfo->i_lenStreams = 0;
1497 } else {
1498 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1499 (inode->i_sb->s_blocksize_bits - 9);
1500
1501 udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime);
1502 udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime);
1503 udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime);
1504 udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime);
1505
1506 iinfo->i_unique = le64_to_cpu(efe->uniqueID);
1507 iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
1508 iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
1509 iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
1510
1511 /* Named streams */
1512 iinfo->i_streamdir = (efe->streamDirectoryICB.extLength != 0);
1513 iinfo->i_locStreamdir =
1514 lelb_to_cpu(efe->streamDirectoryICB.extLocation);
1515 iinfo->i_lenStreams = le64_to_cpu(efe->objectSize);
1516 if (iinfo->i_lenStreams >= inode->i_size)
1517 iinfo->i_lenStreams -= inode->i_size;
1518 else
1519 iinfo->i_lenStreams = 0;
1520 }
1521 inode->i_generation = iinfo->i_unique;
1522
1523 /*
1524 * Sanity check length of allocation descriptors and extended attrs to
1525 * avoid integer overflows
1526 */
1527 if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
1528 goto out;
1529 /* Now do exact checks */
1530 if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
1531 goto out;
1532 /* Sanity checks for files in ICB so that we don't get confused later */
1533 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
1534 /*
1535 * For file in ICB data is stored in allocation descriptor
1536 * so sizes should match
1537 */
1538 if (iinfo->i_lenAlloc != inode->i_size)
1539 goto out;
1540 /* File in ICB has to fit in there... */
1541 if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
1542 goto out;
1543 }
1544
1545 switch (fe->icbTag.fileType) {
1546 case ICBTAG_FILE_TYPE_DIRECTORY:
1547 inode->i_op = &udf_dir_inode_operations;
1548 inode->i_fop = &udf_dir_operations;
1549 inode->i_mode |= S_IFDIR;
1550 inc_nlink(inode);
1551 break;
1552 case ICBTAG_FILE_TYPE_REALTIME:
1553 case ICBTAG_FILE_TYPE_REGULAR:
1554 case ICBTAG_FILE_TYPE_UNDEF:
1555 case ICBTAG_FILE_TYPE_VAT20:
1556 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1557 inode->i_data.a_ops = &udf_adinicb_aops;
1558 else
1559 inode->i_data.a_ops = &udf_aops;
1560 inode->i_op = &udf_file_inode_operations;
1561 inode->i_fop = &udf_file_operations;
1562 inode->i_mode |= S_IFREG;
1563 break;
1564 case ICBTAG_FILE_TYPE_BLOCK:
1565 inode->i_mode |= S_IFBLK;
1566 break;
1567 case ICBTAG_FILE_TYPE_CHAR:
1568 inode->i_mode |= S_IFCHR;
1569 break;
1570 case ICBTAG_FILE_TYPE_FIFO:
1571 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1572 break;
1573 case ICBTAG_FILE_TYPE_SOCKET:
1574 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1575 break;
1576 case ICBTAG_FILE_TYPE_SYMLINK:
1577 inode->i_data.a_ops = &udf_symlink_aops;
1578 inode->i_op = &udf_symlink_inode_operations;
1579 inode_nohighmem(inode);
1580 inode->i_mode = S_IFLNK | 0777;
1581 break;
1582 case ICBTAG_FILE_TYPE_MAIN:
1583 udf_debug("METADATA FILE-----\n");
1584 break;
1585 case ICBTAG_FILE_TYPE_MIRROR:
1586 udf_debug("METADATA MIRROR FILE-----\n");
1587 break;
1588 case ICBTAG_FILE_TYPE_BITMAP:
1589 udf_debug("METADATA BITMAP FILE-----\n");
1590 break;
1591 default:
1592 udf_err(inode->i_sb, "(ino %lu) failed unknown file type=%u\n",
1593 inode->i_ino, fe->icbTag.fileType);
1594 goto out;
1595 }
1596 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1597 struct deviceSpec *dsea =
1598 (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1599 if (dsea) {
1600 init_special_inode(inode, inode->i_mode,
1601 MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
1602 le32_to_cpu(dsea->minorDeviceIdent)));
1603 /* Developer ID ??? */
1604 } else
1605 goto out;
1606 }
1607 ret = 0;
1608out:
1609 brelse(bh);
1610 return ret;
1611}
1612
1613static int udf_alloc_i_data(struct inode *inode, size_t size)
1614{
1615 struct udf_inode_info *iinfo = UDF_I(inode);
1616 iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL);
1617 if (!iinfo->i_ext.i_data)
1618 return -ENOMEM;
1619 return 0;
1620}
1621
1622static umode_t udf_convert_permissions(struct fileEntry *fe)
1623{
1624 umode_t mode;
1625 uint32_t permissions;
1626 uint32_t flags;
1627
1628 permissions = le32_to_cpu(fe->permissions);
1629 flags = le16_to_cpu(fe->icbTag.flags);
1630
1631 mode = ((permissions) & 0007) |
1632 ((permissions >> 2) & 0070) |
1633 ((permissions >> 4) & 0700) |
1634 ((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1635 ((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1636 ((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1637
1638 return mode;
1639}
1640
1641void udf_update_extra_perms(struct inode *inode, umode_t mode)
1642{
1643 struct udf_inode_info *iinfo = UDF_I(inode);
1644
1645 /*
1646 * UDF 2.01 sec. 3.3.3.3 Note 2:
1647 * In Unix, delete permission tracks write
1648 */
1649 iinfo->i_extraPerms &= ~FE_DELETE_PERMS;
1650 if (mode & 0200)
1651 iinfo->i_extraPerms |= FE_PERM_U_DELETE;
1652 if (mode & 0020)
1653 iinfo->i_extraPerms |= FE_PERM_G_DELETE;
1654 if (mode & 0002)
1655 iinfo->i_extraPerms |= FE_PERM_O_DELETE;
1656}
1657
1658int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
1659{
1660 return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
1661}
1662
1663static int udf_sync_inode(struct inode *inode)
1664{
1665 return udf_update_inode(inode, 1);
1666}
1667
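/* Make sure the recorded creation time is not later than the given stamp. */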
1668static void udf_adjust_time(struct udf_inode_info *iinfo, struct timespec64 time)
1669{
1670 if (iinfo->i_crtime.tv_sec > time.tv_sec ||
1671 (iinfo->i_crtime.tv_sec == time.tv_sec &&
1672 iinfo->i_crtime.tv_nsec > time.tv_nsec))
1673 iinfo->i_crtime = time;
1674}
1675
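/*
 * Write the in-core inode back into its on-disk (extended) file entry or
 * unallocated space entry, recomputing the descriptor CRC and checksum.
 * When 'do_sync' is set the buffer is written out synchronously.
 */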
1676static int udf_update_inode(struct inode *inode, int do_sync)
1677{
1678 struct buffer_head *bh = NULL;
1679 struct fileEntry *fe;
1680 struct extendedFileEntry *efe;
1681 uint64_t lb_recorded;
1682 uint32_t udfperms;
1683 uint16_t icbflags;
1684 uint16_t crclen;
1685 int err = 0;
1686 struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
1687 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
1688 struct udf_inode_info *iinfo = UDF_I(inode);
1689
1690 bh = udf_tgetblk(inode->i_sb,
1691 udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
1692 if (!bh) {
1693 udf_debug("getblk failure\n");
1694 return -EIO;
1695 }
1696
1697 lock_buffer(bh);
1698 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
1699 fe = (struct fileEntry *)bh->b_data;
1700 efe = (struct extendedFileEntry *)bh->b_data;
1701
1702 if (iinfo->i_use) {
1703 struct unallocSpaceEntry *use =
1704 (struct unallocSpaceEntry *)bh->b_data;
1705
1706 use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1707 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
1708 iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
1709 sizeof(struct unallocSpaceEntry));
1710 use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
1711 crclen = sizeof(struct unallocSpaceEntry);
1712
1713 goto finish;
1714 }
1715
1716 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1717 fe->uid = cpu_to_le32(UDF_INVALID_ID);
1718 else
1719 fe->uid = cpu_to_le32(i_uid_read(inode));
1720
1721 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1722 fe->gid = cpu_to_le32(UDF_INVALID_ID);
1723 else
1724 fe->gid = cpu_to_le32(i_gid_read(inode));
1725
1726 udfperms = ((inode->i_mode & 0007)) |
1727 ((inode->i_mode & 0070) << 2) |
1728 ((inode->i_mode & 0700) << 4);
1729
1730 udfperms |= iinfo->i_extraPerms;
1731 fe->permissions = cpu_to_le32(udfperms);
1732
1733 if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
1734 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1735 else {
1736 if (iinfo->i_hidden)
1737 fe->fileLinkCount = cpu_to_le16(0);
1738 else
1739 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1740 }
1741
1742 fe->informationLength = cpu_to_le64(inode->i_size);
1743
1744 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1745 struct regid *eid;
1746 struct deviceSpec *dsea =
1747 (struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
1748 if (!dsea) {
1749 dsea = (struct deviceSpec *)
1750 udf_add_extendedattr(inode,
1751 sizeof(struct deviceSpec) +
1752 sizeof(struct regid), 12, 0x3);
1753 dsea->attrType = cpu_to_le32(12);
1754 dsea->attrSubtype = 1;
1755 dsea->attrLength = cpu_to_le32(
1756 sizeof(struct deviceSpec) +
1757 sizeof(struct regid));
1758 dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
1759 }
1760 eid = (struct regid *)dsea->impUse;
1761 memset(eid, 0, sizeof(*eid));
1762 strcpy(eid->ident, UDF_ID_DEVELOPER);
1763 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1764 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1765 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1766 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1767 }
1768
1769 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
1770 lb_recorded = 0; /* No extents => no blocks! */
1771 else
1772 lb_recorded =
1773 (inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
1774 (blocksize_bits - 9);
1775
1776 if (iinfo->i_efe == 0) {
1777 memcpy(bh->b_data + sizeof(struct fileEntry),
1778 iinfo->i_ext.i_data,
1779 inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1780 fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
1781
1782 udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
1783 udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
1784 udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
1785 memset(&(fe->impIdent), 0, sizeof(struct regid));
1786 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1787 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1788 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1789 fe->uniqueID = cpu_to_le64(iinfo->i_unique);
1790 fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1791 fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1792 fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
1793 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1794 crclen = sizeof(struct fileEntry);
1795 } else {
1796 memcpy(bh->b_data + sizeof(struct extendedFileEntry),
1797 iinfo->i_ext.i_data,
1798 inode->i_sb->s_blocksize -
1799 sizeof(struct extendedFileEntry));
1800 efe->objectSize =
1801 cpu_to_le64(inode->i_size + iinfo->i_lenStreams);
1802 efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
1803
1804 if (iinfo->i_streamdir) {
1805 struct long_ad *icb_lad = &efe->streamDirectoryICB;
1806
1807 icb_lad->extLocation =
1808 cpu_to_lelb(iinfo->i_locStreamdir);
1809 icb_lad->extLength =
1810 cpu_to_le32(inode->i_sb->s_blocksize);
1811 }
1812
1813 udf_adjust_time(iinfo, inode->i_atime);
1814 udf_adjust_time(iinfo, inode->i_mtime);
1815 udf_adjust_time(iinfo, inode->i_ctime);
1816
1817 udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
1818 udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
1819 udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
1820 udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
1821
1822 memset(&(efe->impIdent), 0, sizeof(efe->impIdent));
1823 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1824 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1825 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1826 efe->uniqueID = cpu_to_le64(iinfo->i_unique);
1827 efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
1828 efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
1829 efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
1830 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1831 crclen = sizeof(struct extendedFileEntry);
1832 }
1833
1834finish:
1835 if (iinfo->i_strat4096) {
1836 fe->icbTag.strategyType = cpu_to_le16(4096);
1837 fe->icbTag.strategyParameter = cpu_to_le16(1);
1838 fe->icbTag.numEntries = cpu_to_le16(2);
1839 } else {
1840 fe->icbTag.strategyType = cpu_to_le16(4);
1841 fe->icbTag.numEntries = cpu_to_le16(1);
1842 }
1843
1844 if (iinfo->i_use)
1845 fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
1846 else if (S_ISDIR(inode->i_mode))
1847 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1848 else if (S_ISREG(inode->i_mode))
1849 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1850 else if (S_ISLNK(inode->i_mode))
1851 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1852 else if (S_ISBLK(inode->i_mode))
1853 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1854 else if (S_ISCHR(inode->i_mode))
1855 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1856 else if (S_ISFIFO(inode->i_mode))
1857 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1858 else if (S_ISSOCK(inode->i_mode))
1859 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1860
1861 icbflags = iinfo->i_alloc_type |
1862 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1863 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1864 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1865 (le16_to_cpu(fe->icbTag.flags) &
1866 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1867 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1868
1869 fe->icbTag.flags = cpu_to_le16(icbflags);
1870 if (sbi->s_udfrev >= 0x0200)
1871 fe->descTag.descVersion = cpu_to_le16(3);
1872 else
1873 fe->descTag.descVersion = cpu_to_le16(2);
1874 fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
1875 fe->descTag.tagLocation = cpu_to_le32(
1876 iinfo->i_location.logicalBlockNum);
1877 crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
1878 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1879 fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
1880 crclen));
1881 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
1882
1883 set_buffer_uptodate(bh);
1884 unlock_buffer(bh);
1885
1886	/* write the file entry descriptor buffer out */
1887 mark_buffer_dirty(bh);
1888 if (do_sync) {
1889 sync_dirty_buffer(bh);
1890 if (buffer_write_io_error(bh)) {
1891 udf_warn(inode->i_sb, "IO error syncing udf inode [%08lx]\n",
1892 inode->i_ino);
1893 err = -EIO;
1894 }
1895 }
1896 brelse(bh);
1897
1898 return err;
1899}
1900
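/*
 * Get the inode at the given on-disc location. A cached inode is returned
 * only if its hidden flag matches @hidden_inode (otherwise -EFSCORRUPTED);
 * a new inode is read from disc via udf_read_inode(). Returns the inode or
 * an ERR_PTR() value on failure.
 */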
1901struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
1902 bool hidden_inode)
1903{
1904 unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1905 struct inode *inode = iget_locked(sb, block);
1906 int err;
1907
1908 if (!inode)
1909 return ERR_PTR(-ENOMEM);
1910
1911 if (!(inode->i_state & I_NEW)) {
1912 if (UDF_I(inode)->i_hidden != hidden_inode) {
1913 iput(inode);
1914 return ERR_PTR(-EFSCORRUPTED);
1915 }
1916 return inode;
1917 }
1918
1919 memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
1920 err = udf_read_inode(inode, hidden_inode);
1921 if (err < 0) {
1922 iget_failed(inode);
1923 return ERR_PTR(err);
1924 }
1925 unlock_new_inode(inode);
1926
1927 return inode;
1928}
1929
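/*
 * Set up a new allocation extent descriptor in @block and make @epos point
 * into it. If the current descriptor area has no room left for the pointer
 * descriptor, the current last extent is copied into the new block and its
 * old slot is reused for the pointer to the new block.
 */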
1930int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
1931 struct extent_position *epos)
1932{
1933 struct super_block *sb = inode->i_sb;
1934 struct buffer_head *bh;
1935 struct allocExtDesc *aed;
1936 struct extent_position nepos;
1937 struct kernel_lb_addr neloc;
1938 int ver, adsize;
1939
1940 if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
1941 adsize = sizeof(struct short_ad);
1942 else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
1943 adsize = sizeof(struct long_ad);
1944 else
1945 return -EIO;
1946
1947 neloc.logicalBlockNum = block;
1948 neloc.partitionReferenceNum = epos->block.partitionReferenceNum;
1949
1950 bh = udf_tgetblk(sb, udf_get_lb_pblock(sb, &neloc, 0));
1951 if (!bh)
1952 return -EIO;
1953 lock_buffer(bh);
1954 memset(bh->b_data, 0x00, sb->s_blocksize);
1955 set_buffer_uptodate(bh);
1956 unlock_buffer(bh);
1957 mark_buffer_dirty_inode(bh, inode);
1958
1959 aed = (struct allocExtDesc *)(bh->b_data);
1960 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT)) {
1961 aed->previousAllocExtLocation =
1962 cpu_to_le32(epos->block.logicalBlockNum);
1963 }
1964 aed->lengthAllocDescs = cpu_to_le32(0);
1965 if (UDF_SB(sb)->s_udfrev >= 0x0200)
1966 ver = 3;
1967 else
1968 ver = 2;
1969 udf_new_tag(bh->b_data, TAG_IDENT_AED, ver, 1, block,
1970 sizeof(struct tag));
1971
1972 nepos.block = neloc;
1973 nepos.offset = sizeof(struct allocExtDesc);
1974 nepos.bh = bh;
1975
1976 /*
1977	 * Do we have to copy the current last extent to make space for the
1978	 * indirect one?
1979 */
1980 if (epos->offset + adsize > sb->s_blocksize) {
1981 struct kernel_lb_addr cp_loc;
1982 uint32_t cp_len;
1983 int cp_type;
1984
1985 epos->offset -= adsize;
1986 cp_type = udf_current_aext(inode, epos, &cp_loc, &cp_len, 0);
1987 cp_len |= ((uint32_t)cp_type) << 30;
1988
1989 __udf_add_aext(inode, &nepos, &cp_loc, cp_len, 1);
1990 udf_write_aext(inode, epos, &nepos.block,
1991 sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDECS, 0);
1992 } else {
1993 __udf_add_aext(inode, epos, &nepos.block,
1994 sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDECS, 0);
1995 }
1996
1997 brelse(epos->bh);
1998 *epos = nepos;
1999
2000 return 0;
2001}
2002
2003/*
2004 * Append extent at the given position - should be the first free one in inode
2005 * / indirect extent. This function assumes there is enough space in the inode
2006 * or indirect extent. Use udf_add_aext() if you didn't check for this before.
2007 */
2008int __udf_add_aext(struct inode *inode, struct extent_position *epos,
2009 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
2010{
2011 struct udf_inode_info *iinfo = UDF_I(inode);
2012 struct allocExtDesc *aed;
2013 int adsize;
2014
2015 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
2016 adsize = sizeof(struct short_ad);
2017 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
2018 adsize = sizeof(struct long_ad);
2019 else
2020 return -EIO;
2021
2022 if (!epos->bh) {
2023 WARN_ON(iinfo->i_lenAlloc !=
2024 epos->offset - udf_file_entry_alloc_offset(inode));
2025 } else {
2026 aed = (struct allocExtDesc *)epos->bh->b_data;
2027 WARN_ON(le32_to_cpu(aed->lengthAllocDescs) !=
2028 epos->offset - sizeof(struct allocExtDesc));
2029 WARN_ON(epos->offset + adsize > inode->i_sb->s_blocksize);
2030 }
2031
2032 udf_write_aext(inode, epos, eloc, elen, inc);
2033
2034 if (!epos->bh) {
2035 iinfo->i_lenAlloc += adsize;
2036 mark_inode_dirty(inode);
2037 } else {
2038 aed = (struct allocExtDesc *)epos->bh->b_data;
2039 le32_add_cpu(&aed->lengthAllocDescs, adsize);
2040 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2041 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2042 udf_update_tag(epos->bh->b_data,
2043 epos->offset + (inc ? 0 : adsize));
2044 else
2045 udf_update_tag(epos->bh->b_data,
2046 sizeof(struct allocExtDesc));
2047 mark_buffer_dirty_inode(epos->bh, inode);
2048 }
2049
2050 return 0;
2051}
2052
2053/*
2054 * Append an extent at the given position - should be the first free one in inode
2055 * / indirect extent. Takes care of allocating and linking indirect blocks.
2056 */
2057int udf_add_aext(struct inode *inode, struct extent_position *epos,
2058 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
2059{
2060 int adsize;
2061 struct super_block *sb = inode->i_sb;
2062
2063 if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
2064 adsize = sizeof(struct short_ad);
2065 else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
2066 adsize = sizeof(struct long_ad);
2067 else
2068 return -EIO;
2069
2070 if (epos->offset + (2 * adsize) > sb->s_blocksize) {
2071 int err;
2072 udf_pblk_t new_block;
2073
2074 new_block = udf_new_block(sb, NULL,
2075 epos->block.partitionReferenceNum,
2076 epos->block.logicalBlockNum, &err);
2077 if (!new_block)
2078 return -ENOSPC;
2079
2080 err = udf_setup_indirect_aext(inode, new_block, epos);
2081 if (err)
2082 return err;
2083 }
2084
2085 return __udf_add_aext(inode, epos, eloc, elen, inc);
2086}
2087
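/*
 * Write the extent @eloc/@elen into the allocation descriptor at @epos,
 * handling both short and long descriptors, and advance @epos past it when
 * @inc is set. The containing buffer or the inode is marked dirty.
 */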
2088void udf_write_aext(struct inode *inode, struct extent_position *epos,
2089 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
2090{
2091 int adsize;
2092 uint8_t *ptr;
2093 struct short_ad *sad;
2094 struct long_ad *lad;
2095 struct udf_inode_info *iinfo = UDF_I(inode);
2096
2097 if (!epos->bh)
2098 ptr = iinfo->i_ext.i_data + epos->offset -
2099 udf_file_entry_alloc_offset(inode) +
2100 iinfo->i_lenEAttr;
2101 else
2102 ptr = epos->bh->b_data + epos->offset;
2103
2104 switch (iinfo->i_alloc_type) {
2105 case ICBTAG_FLAG_AD_SHORT:
2106 sad = (struct short_ad *)ptr;
2107 sad->extLength = cpu_to_le32(elen);
2108 sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
2109 adsize = sizeof(struct short_ad);
2110 break;
2111 case ICBTAG_FLAG_AD_LONG:
2112 lad = (struct long_ad *)ptr;
2113 lad->extLength = cpu_to_le32(elen);
2114 lad->extLocation = cpu_to_lelb(*eloc);
2115 memset(lad->impUse, 0x00, sizeof(lad->impUse));
2116 adsize = sizeof(struct long_ad);
2117 break;
2118 default:
2119 return;
2120 }
2121
2122 if (epos->bh) {
2123 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2124 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
2125 struct allocExtDesc *aed =
2126 (struct allocExtDesc *)epos->bh->b_data;
2127 udf_update_tag(epos->bh->b_data,
2128 le32_to_cpu(aed->lengthAllocDescs) +
2129 sizeof(struct allocExtDesc));
2130 }
2131 mark_buffer_dirty_inode(epos->bh, inode);
2132 } else {
2133 mark_inode_dirty(inode);
2134 }
2135
2136 if (inc)
2137 epos->offset += adsize;
2138}
2139
2140/*
2141 * Only 1 indirect extent in a row really makes sense, but allow up to 16 in case
2142 * someone does some weird stuff.
2143 */
2144#define UDF_MAX_INDIR_EXTS 16
2145
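/*
 * Return the type of the extent at @epos and load it into @eloc/@elen,
 * transparently following pointers into indirect allocation extent blocks
 * (at most UDF_MAX_INDIR_EXTS in a row). Returns -1 on error or when there
 * are no more extents.
 */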
2146int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
2147 struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
2148{
2149 int8_t etype;
2150 unsigned int indirections = 0;
2151
2152 while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
2153 (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
2154 udf_pblk_t block;
2155
2156 if (++indirections > UDF_MAX_INDIR_EXTS) {
2157 udf_err(inode->i_sb,
2158 "too many indirect extents in inode %lu\n",
2159 inode->i_ino);
2160 return -1;
2161 }
2162
2163 epos->block = *eloc;
2164 epos->offset = sizeof(struct allocExtDesc);
2165 brelse(epos->bh);
2166 block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
2167 epos->bh = udf_tread(inode->i_sb, block);
2168 if (!epos->bh) {
2169 udf_debug("reading block %u failed!\n", block);
2170 return -1;
2171 }
2172 }
2173
2174 return etype;
2175}
2176
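/*
 * Decode the allocation descriptor at @epos into @eloc/@elen and return its
 * extent type without following indirect extents; @epos is advanced when
 * @inc is set. Returns -1 if the descriptor cannot be read.
 */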
2177int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
2178 struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
2179{
2180 int alen;
2181 int8_t etype;
2182 uint8_t *ptr;
2183 struct short_ad *sad;
2184 struct long_ad *lad;
2185 struct udf_inode_info *iinfo = UDF_I(inode);
2186
2187 if (!epos->bh) {
2188 if (!epos->offset)
2189 epos->offset = udf_file_entry_alloc_offset(inode);
2190 ptr = iinfo->i_ext.i_data + epos->offset -
2191 udf_file_entry_alloc_offset(inode) +
2192 iinfo->i_lenEAttr;
2193 alen = udf_file_entry_alloc_offset(inode) +
2194 iinfo->i_lenAlloc;
2195 } else {
2196 struct allocExtDesc *header =
2197 (struct allocExtDesc *)epos->bh->b_data;
2198
2199 if (!epos->offset)
2200 epos->offset = sizeof(struct allocExtDesc);
2201 ptr = epos->bh->b_data + epos->offset;
2202 if (check_add_overflow(sizeof(struct allocExtDesc),
2203 le32_to_cpu(header->lengthAllocDescs), &alen))
2204 return -1;
2205 }
2206
2207 switch (iinfo->i_alloc_type) {
2208 case ICBTAG_FLAG_AD_SHORT:
2209 sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
2210 if (!sad)
2211 return -1;
2212 etype = le32_to_cpu(sad->extLength) >> 30;
2213 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
2214 eloc->partitionReferenceNum =
2215 iinfo->i_location.partitionReferenceNum;
2216 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
2217 break;
2218 case ICBTAG_FLAG_AD_LONG:
2219 lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
2220 if (!lad)
2221 return -1;
2222 etype = le32_to_cpu(lad->extLength) >> 30;
2223 *eloc = lelb_to_cpu(lad->extLocation);
2224 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
2225 break;
2226 default:
2227 udf_debug("alloc_type = %u unsupported\n", iinfo->i_alloc_type);
2228 return -1;
2229 }
2230
2231 return etype;
2232}
2233
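/*
 * Insert the extent @neloc/@nelen at @epos, shifting the extent that was
 * there and all following extents one slot forward.
 */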
2234static int udf_insert_aext(struct inode *inode, struct extent_position epos,
2235 struct kernel_lb_addr neloc, uint32_t nelen)
2236{
2237 struct kernel_lb_addr oeloc;
2238 uint32_t oelen;
2239 int8_t etype;
2240 int err;
2241
2242 if (epos.bh)
2243 get_bh(epos.bh);
2244
2245 while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
2246 udf_write_aext(inode, &epos, &neloc, nelen, 1);
2247 neloc = oeloc;
2248 nelen = (etype << 30) | oelen;
2249 }
2250 err = udf_add_aext(inode, &epos, &neloc, nelen, 1);
2251 brelse(epos.bh);
2252
2253 return err;
2254}
2255
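/*
 * Delete the extent at @epos by copying each following extent one slot back
 * and clearing the slot(s) freed at the end; an allocation extent block left
 * without useful descriptors is released.
 */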
2256int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
2257{
2258 struct extent_position oepos;
2259 int adsize;
2260 int8_t etype;
2261 struct allocExtDesc *aed;
2262 struct udf_inode_info *iinfo;
2263 struct kernel_lb_addr eloc;
2264 uint32_t elen;
2265
2266 if (epos.bh) {
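		/*
		 * Both epos and oepos (assigned below) will reference this
		 * buffer, and each reference is dropped with brelse() before
		 * returning, so grab two references.
		 */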
2267 get_bh(epos.bh);
2268 get_bh(epos.bh);
2269 }
2270
2271 iinfo = UDF_I(inode);
2272 if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
2273 adsize = sizeof(struct short_ad);
2274 else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
2275 adsize = sizeof(struct long_ad);
2276 else
2277 adsize = 0;
2278
2279 oepos = epos;
2280 if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
2281 return -1;
2282
2283 while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
2284 udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
2285 if (oepos.bh != epos.bh) {
2286 oepos.block = epos.block;
2287 brelse(oepos.bh);
2288 get_bh(epos.bh);
2289 oepos.bh = epos.bh;
2290 oepos.offset = epos.offset - adsize;
2291 }
2292 }
2293 memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
2294 elen = 0;
2295
2296 if (epos.bh != oepos.bh) {
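		/*
		 * The final allocation extent block no longer holds any
		 * useful descriptors: free it, then clear both the stale
		 * copy of the last extent and the descriptor that pointed
		 * to the freed block.
		 */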
2297 udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
2298 udf_write_aext(inode, &oepos, &eloc, elen, 1);
2299 udf_write_aext(inode, &oepos, &eloc, elen, 1);
2300 if (!oepos.bh) {
2301 iinfo->i_lenAlloc -= (adsize * 2);
2302 mark_inode_dirty(inode);
2303 } else {
2304 aed = (struct allocExtDesc *)oepos.bh->b_data;
2305 le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
2306 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2307 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2308 udf_update_tag(oepos.bh->b_data,
2309 oepos.offset - (2 * adsize));
2310 else
2311 udf_update_tag(oepos.bh->b_data,
2312 sizeof(struct allocExtDesc));
2313 mark_buffer_dirty_inode(oepos.bh, inode);
2314 }
2315 } else {
2316 udf_write_aext(inode, &oepos, &eloc, elen, 1);
2317 if (!oepos.bh) {
2318 iinfo->i_lenAlloc -= adsize;
2319 mark_inode_dirty(inode);
2320 } else {
2321 aed = (struct allocExtDesc *)oepos.bh->b_data;
2322 le32_add_cpu(&aed->lengthAllocDescs, -adsize);
2323 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
2324 UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
2325 udf_update_tag(oepos.bh->b_data,
2326 epos.offset - adsize);
2327 else
2328 udf_update_tag(oepos.bh->b_data,
2329 sizeof(struct allocExtDesc));
2330 mark_buffer_dirty_inode(oepos.bh, inode);
2331 }
2332 }
2333
2334 brelse(epos.bh);
2335 brelse(oepos.bh);
2336
2337 return (elen >> 30);
2338}
2339
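/*
 * Map file block @block to the extent covering it: the extent is returned in
 * @eloc/@elen, @offset receives the block offset within that extent, and the
 * return value is the extent type (-1 if the block lies beyond the last
 * extent). The per-inode extent cache is consulted and updated.
 */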
2340int8_t inode_bmap(struct inode *inode, sector_t block,
2341 struct extent_position *pos, struct kernel_lb_addr *eloc,
2342 uint32_t *elen, sector_t *offset)
2343{
2344 unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
2345 loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits;
2346 int8_t etype;
2347 struct udf_inode_info *iinfo;
2348
2349 iinfo = UDF_I(inode);
2350 if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
2351 pos->offset = 0;
2352 pos->block = iinfo->i_location;
2353 pos->bh = NULL;
2354 }
2355 *elen = 0;
2356 do {
2357 etype = udf_next_aext(inode, pos, eloc, elen, 1);
2358 if (etype == -1) {
2359 *offset = (bcount - lbcount) >> blocksize_bits;
2360 iinfo->i_lenExtents = lbcount;
2361 return -1;
2362 }
2363 lbcount += *elen;
2364 } while (lbcount <= bcount);
2365 /* update extent cache */
2366 udf_update_extent_cache(inode, lbcount - *elen, pos);
2367 *offset = (bcount + *elen - lbcount) >> blocksize_bits;
2368
2369 return etype;
2370}
2371
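/*
 * Return the physical block backing file block @block, or 0 when the block
 * is not recorded and allocated. With UDF_FLAG_VARCONV set, the result is
 * converted with udf_fixed_to_variable().
 */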
2372udf_pblk_t udf_block_map(struct inode *inode, sector_t block)
2373{
2374 struct kernel_lb_addr eloc;
2375 uint32_t elen;
2376 sector_t offset;
2377 struct extent_position epos = {};
2378 udf_pblk_t ret;
2379
2380 down_read(&UDF_I(inode)->i_data_sem);
2381
2382 if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
2383 (EXT_RECORDED_ALLOCATED >> 30))
2384 ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
2385 else
2386 ret = 0;
2387
2388 up_read(&UDF_I(inode)->i_data_sem);
2389 brelse(epos.bh);
2390
2391 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2392 return udf_fixed_to_variable(ret);
2393 else
2394 return ret;
2395}