blob: a1962c93bd262a38f516c9af8616d32bc08ddeaa [file] [log] [blame]
/*
2 * super.c
3 *
4 * PURPOSE
5 * Super block routines for the OSTA-UDF(tm) filesystem.
6 *
7 * DESCRIPTION
8 * OSTA-UDF(tm) = Optical Storage Technology Association
9 * Universal Disk Format.
10 *
11 * This code is based on version 2.00 of the UDF specification,
12 * and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
13 * http://www.osta.org/
14 * http://www.ecma.ch/
15 * http://www.iso.org/
16 *
17 * COPYRIGHT
18 * This file is distributed under the terms of the GNU General Public
19 * License (GPL). Copies of the GPL can be obtained from:
20 * ftp://prep.ai.mit.edu/pub/gnu/GPL
21 * Each contributing author retains all rights to their own work.
22 *
23 * (C) 1998 Dave Boynton
24 * (C) 1998-2004 Ben Fennema
25 * (C) 2000 Stelias Computing Inc
26 *
27 * HISTORY
28 *
29 * 09/24/98 dgb changed to allow compiling outside of kernel, and
30 * added some debugging.
31 * 10/01/98 dgb updated to allow (some) possibility of compiling w/2.0.34
32 * 10/16/98 attempting some multi-session support
33 * 10/17/98 added freespace count for "df"
34 * 11/11/98 gr added novrs option
35 * 11/26/98 dgb added fileset,anchor mount options
36 * 12/06/98 blf really hosed things royally. vat/sparing support. sequenced
37 * vol descs. rewrote option handling based on isofs
38 * 12/20/98 find the free space bitmap (if it exists)
39 */
40
41#include "udfdecl.h"
42
43#include <linux/blkdev.h>
44#include <linux/slab.h>
45#include <linux/kernel.h>
46#include <linux/module.h>
47#include <linux/parser.h>
48#include <linux/stat.h>
49#include <linux/cdrom.h>
50#include <linux/nls.h>
51#include <linux/vfs.h>
52#include <linux/vmalloc.h>
53#include <linux/errno.h>
54#include <linux/mount.h>
55#include <linux/seq_file.h>
56#include <linux/bitmap.h>
57#include <linux/crc-itu-t.h>
58#include <linux/log2.h>
59#include <asm/byteorder.h>
60#include <linux/iversion.h>
61
62#include "udf_sb.h"
63#include "udf_i.h"
64
65#include <linux/init.h>
66#include <linux/uaccess.h>
67
/*
 * Indices into the array tracking descriptors found while scanning the
 * Volume Descriptor Sequence; VDS_POS_LENGTH is the array size.
 */
enum {
	VDS_POS_PRIMARY_VOL_DESC,
	VDS_POS_UNALLOC_SPACE_DESC,
	VDS_POS_LOGICAL_VOL_DESC,
	VDS_POS_IMP_USE_VOL_DESC,
	VDS_POS_LENGTH		/* number of tracked descriptor types */
};
75
76#define VSD_FIRST_SECTOR_OFFSET 32768
77#define VSD_MAX_SECTOR_OFFSET 0x800000
78
79/*
80 * Maximum number of Terminating Descriptor / Logical Volume Integrity
81 * Descriptor redirections. The chosen numbers are arbitrary - just that we
82 * hopefully don't limit any real use of rewritten inode on write-once media
83 * but avoid looping for too long on corrupted media.
84 */
85#define UDF_MAX_TD_NESTING 64
86#define UDF_MAX_LVID_NESTING 1000
87
88enum { UDF_MAX_LINKS = 0xffff };
89/*
90 * We limit filesize to 4TB. This is arbitrary as the on-disk format supports
91 * more but because the file space is described by a linked list of extents,
92 * each of which can have at most 1GB, the creation and handling of extents
93 * gets unusably slow beyond certain point...
94 */
95#define UDF_MAX_FILESIZE (1ULL << 42)
96
97/* These are the "meat" - everything else is stuffing */
98static int udf_fill_super(struct super_block *, void *, int);
99static void udf_put_super(struct super_block *);
100static int udf_sync_fs(struct super_block *, int);
101static int udf_remount_fs(struct super_block *, int *, char *);
102static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
103static void udf_open_lvid(struct super_block *);
104static void udf_close_lvid(struct super_block *);
105static unsigned int udf_count_free(struct super_block *);
106static int udf_statfs(struct dentry *, struct kstatfs *);
107static int udf_show_options(struct seq_file *, struct dentry *);
108
109struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
110{
111 struct logicalVolIntegrityDesc *lvid;
112 unsigned int partnum;
113 unsigned int offset;
114
115 if (!UDF_SB(sb)->s_lvid_bh)
116 return NULL;
117 lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
118 partnum = le32_to_cpu(lvid->numOfPartitions);
119 /* The offset is to skip freeSpaceTable and sizeTable arrays */
120 offset = partnum * 2 * sizeof(uint32_t);
121 return (struct logicalVolIntegrityDescImpUse *)
122 (((uint8_t *)(lvid + 1)) + offset);
123}
124
/* UDF filesystem type */
/*
 * Mount entry point: delegate to the generic block-device mount helper,
 * which reads the device and calls udf_fill_super() to validate it.
 */
static struct dentry *udf_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, udf_fill_super);
}
131
/* Registration record for the "udf" filesystem (requires a block device). */
static struct file_system_type udf_fstype = {
	.owner		= THIS_MODULE,
	.name		= "udf",
	.mount		= udf_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("udf");
140
141static struct kmem_cache *udf_inode_cachep;
142
143static struct inode *udf_alloc_inode(struct super_block *sb)
144{
145 struct udf_inode_info *ei;
146 ei = kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
147 if (!ei)
148 return NULL;
149
150 ei->i_unique = 0;
151 ei->i_lenExtents = 0;
152 ei->i_lenStreams = 0;
153 ei->i_next_alloc_block = 0;
154 ei->i_next_alloc_goal = 0;
155 ei->i_strat4096 = 0;
156 ei->i_streamdir = 0;
157 ei->i_hidden = 0;
158 init_rwsem(&ei->i_data_sem);
159 ei->cached_extent.lstart = -1;
160 spin_lock_init(&ei->i_extent_cache_lock);
161 inode_set_iversion(&ei->vfs_inode, 1);
162
163 return &ei->vfs_inode;
164}
165
/* Return the in-core inode to the slab cache (->free_inode callback). */
static void udf_free_in_core_inode(struct inode *inode)
{
	kmem_cache_free(udf_inode_cachep, UDF_I(inode));
}
170
/*
 * Slab constructor: runs once when an object is first placed in the cache,
 * not on every allocation — only fields that survive free/realloc cycles
 * belong here.
 */
static void init_once(void *foo)
{
	struct udf_inode_info *ei = (struct udf_inode_info *)foo;

	ei->i_ext.i_data = NULL;
	inode_init_once(&ei->vfs_inode);
}
178
179static int __init init_inodecache(void)
180{
181 udf_inode_cachep = kmem_cache_create("udf_inode_cache",
182 sizeof(struct udf_inode_info),
183 0, (SLAB_RECLAIM_ACCOUNT |
184 SLAB_MEM_SPREAD |
185 SLAB_ACCOUNT),
186 init_once);
187 if (!udf_inode_cachep)
188 return -ENOMEM;
189 return 0;
190}
191
/* Destroy the inode slab cache (module unload or failed registration). */
static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(udf_inode_cachep);
}
201
/* Superblock operations */
static const struct super_operations udf_sb_ops = {
	.alloc_inode	= udf_alloc_inode,
	.free_inode	= udf_free_in_core_inode,
	.write_inode	= udf_write_inode,
	.evict_inode	= udf_evict_inode,
	.put_super	= udf_put_super,
	.sync_fs	= udf_sync_fs,
	.statfs		= udf_statfs,
	.remount_fs	= udf_remount_fs,
	.show_options	= udf_show_options,
};
214
/*
 * Parsed mount options, filled in by udf_parse_options() before being
 * committed to struct udf_sb_info.
 */
struct udf_options {
	unsigned char novrs;		/* skip volume recognition sequence */
	unsigned int blocksize;		/* bs= block size in bytes */
	unsigned int session;		/* session=; 0xFFFFFFFF = last session */
	unsigned int lastblock;		/* lastblock= override */
	unsigned int anchor;		/* anchor= override */
	unsigned int flags;		/* UDF_FLAG_* bits */
	umode_t umask;			/* umask= for on-disk permissions */
	kgid_t gid;			/* gid= default group */
	kuid_t uid;			/* uid= default owner */
	umode_t fmode;			/* mode= forced file permissions */
	umode_t dmode;			/* dmode= forced directory permissions */
	struct nls_table *nls_map;	/* iocharset=; NULL means UTF-8 */
};
229
230static int __init init_udf_fs(void)
231{
232 int err;
233
234 err = init_inodecache();
235 if (err)
236 goto out1;
237 err = register_filesystem(&udf_fstype);
238 if (err)
239 goto out;
240
241 return 0;
242
243out:
244 destroy_inodecache();
245
246out1:
247 return err;
248}
249
/* Module exit: undo init_udf_fs() in reverse order. */
static void __exit exit_udf_fs(void)
{
	unregister_filesystem(&udf_fstype);
	destroy_inodecache();
}
255
256static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
257{
258 struct udf_sb_info *sbi = UDF_SB(sb);
259
260 sbi->s_partmaps = kcalloc(count, sizeof(*sbi->s_partmaps), GFP_KERNEL);
261 if (!sbi->s_partmaps) {
262 sbi->s_partitions = 0;
263 return -ENOMEM;
264 }
265
266 sbi->s_partitions = count;
267 return 0;
268}
269
270static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
271{
272 int i;
273 int nr_groups = bitmap->s_nr_groups;
274
275 for (i = 0; i < nr_groups; i++)
276 if (!IS_ERR_OR_NULL(bitmap->s_block_bitmap[i]))
277 brelse(bitmap->s_block_bitmap[i]);
278
279 kvfree(bitmap);
280}
281
282static void udf_free_partition(struct udf_part_map *map)
283{
284 int i;
285 struct udf_meta_data *mdata;
286
287 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
288 iput(map->s_uspace.s_table);
289 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
290 udf_sb_free_bitmap(map->s_uspace.s_bitmap);
291 if (map->s_partition_type == UDF_SPARABLE_MAP15)
292 for (i = 0; i < 4; i++)
293 brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
294 else if (map->s_partition_type == UDF_METADATA_MAP25) {
295 mdata = &map->s_type_specific.s_metadata;
296 iput(mdata->s_metadata_fe);
297 mdata->s_metadata_fe = NULL;
298
299 iput(mdata->s_mirror_fe);
300 mdata->s_mirror_fe = NULL;
301
302 iput(mdata->s_bitmap_fe);
303 mdata->s_bitmap_fe = NULL;
304 }
305}
306
307static void udf_sb_free_partitions(struct super_block *sb)
308{
309 struct udf_sb_info *sbi = UDF_SB(sb);
310 int i;
311
312 if (!sbi->s_partmaps)
313 return;
314 for (i = 0; i < sbi->s_partitions; i++)
315 udf_free_partition(&sbi->s_partmaps[i]);
316 kfree(sbi->s_partmaps);
317 sbi->s_partmaps = NULL;
318}
319
/*
 * Emit the non-default mount options for /proc/mounts (->show_options).
 * Only options that differ from the defaults are printed; the iocharset
 * line is always emitted (UTF-8 when no NLS table is loaded).
 */
static int udf_show_options(struct seq_file *seq, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct udf_sb_info *sbi = UDF_SB(sb);

	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
		seq_puts(seq, ",nostrict");
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET))
		seq_printf(seq, ",bs=%lu", sb->s_blocksize);
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
		seq_puts(seq, ",unhide");
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
		seq_puts(seq, ",undelete");
	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB))
		seq_puts(seq, ",noadinicb");
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD))
		seq_puts(seq, ",shortad");
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
		seq_puts(seq, ",uid=forget");
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
		seq_puts(seq, ",gid=forget");
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
		seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid));
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
		seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid));
	if (sbi->s_umask != 0)
		seq_printf(seq, ",umask=%ho", sbi->s_umask);
	if (sbi->s_fmode != UDF_INVALID_MODE)
		seq_printf(seq, ",mode=%ho", sbi->s_fmode);
	if (sbi->s_dmode != UDF_INVALID_MODE)
		seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
		seq_printf(seq, ",session=%d", sbi->s_session);
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
		seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
	if (sbi->s_anchor != 0)
		seq_printf(seq, ",anchor=%u", sbi->s_anchor);
	if (sbi->s_nls_map)
		seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
	else
		seq_puts(seq, ",iocharset=utf8");

	return 0;
}
364
365/*
366 * udf_parse_options
367 *
368 * PURPOSE
369 * Parse mount options.
370 *
371 * DESCRIPTION
372 * The following mount options are supported:
373 *
374 * gid= Set the default group.
375 * umask= Set the default umask.
376 * mode= Set the default file permissions.
377 * dmode= Set the default directory permissions.
378 * uid= Set the default user.
379 * bs= Set the block size.
380 * unhide Show otherwise hidden files.
381 * undelete Show deleted files in lists.
382 * adinicb Embed data in the inode (default)
383 * noadinicb Don't embed data in the inode
384 * shortad Use short ad's
385 * longad Use long ad's (default)
386 * nostrict Unset strict conformance
387 * iocharset= Set the NLS character set
388 *
389 * The remaining are for debugging and disaster recovery:
390 *
391 * novrs Skip volume sequence recognition
392 *
 * The following expect an offset from 0.
394 *
395 * session= Set the CDROM session (default= last session)
396 * anchor= Override standard anchor location. (default= 256)
397 * volume= Override the VolumeDesc location. (unused)
398 * partition= Override the PartitionDesc location. (unused)
 * lastblock= Set the last block of the filesystem.
400 *
 * The following expect an offset from the partition root.
402 *
403 * fileset= Override the fileset block location. (unused)
404 * rootdir= Override the root directory location. (unused)
405 * WARNING: overriding the rootdir to a non-directory may
406 * yield highly unpredictable results.
407 *
408 * PRE-CONDITIONS
409 * options Pointer to mount options string.
410 * uopts Pointer to mount options variable.
411 *
412 * POST-CONDITIONS
413 * <return> 1 Mount options parsed okay.
414 * <return> 0 Error parsing mount options.
415 *
416 * HISTORY
417 * July 1, 1997 - Andrew E. Mileski
418 * Written, tested, and released.
419 */
420
/* Token identifiers for mount-option parsing via match_token(). */
enum {
	Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
	Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
	Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
	Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
	Opt_rootdir, Opt_utf8, Opt_iocharset,
	Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore,
	Opt_fmode, Opt_dmode
};
430
/*
 * Mount-option pattern table.  Note the literal "uid=forget"/"uid=ignore"
 * (and gid equivalents) entries are listed before the "uid=%u"/"gid=%u"
 * patterns so the literal forms are matched first.
 */
static const match_table_t tokens = {
	{Opt_novrs,	"novrs"},
	{Opt_nostrict,	"nostrict"},
	{Opt_bs,	"bs=%u"},
	{Opt_unhide,	"unhide"},
	{Opt_undelete,	"undelete"},
	{Opt_noadinicb,	"noadinicb"},
	{Opt_adinicb,	"adinicb"},
	{Opt_shortad,	"shortad"},
	{Opt_longad,	"longad"},
	{Opt_uforget,	"uid=forget"},
	{Opt_uignore,	"uid=ignore"},
	{Opt_gforget,	"gid=forget"},
	{Opt_gignore,	"gid=ignore"},
	{Opt_gid,	"gid=%u"},
	{Opt_uid,	"uid=%u"},
	{Opt_umask,	"umask=%o"},
	{Opt_session,	"session=%u"},
	{Opt_lastblock,	"lastblock=%u"},
	{Opt_anchor,	"anchor=%u"},
	{Opt_volume,	"volume=%u"},
	{Opt_partition,	"partition=%u"},
	{Opt_fileset,	"fileset=%u"},
	{Opt_rootdir,	"rootdir=%u"},
	{Opt_utf8,	"utf8"},
	{Opt_iocharset,	"iocharset=%s"},
	{Opt_fmode,	"mode=%o"},
	{Opt_dmode,	"dmode=%o"},
	{Opt_err,	NULL}
};
461
/*
 * Parse the comma-separated mount option string into *uopt.
 *
 * Returns 1 on success, 0 on any parse error (bad token, bad value,
 * unknown NLS charset).  On remount, options that cannot change
 * (session, lastblock flags, iocharset) are parsed but not applied.
 */
static int udf_parse_options(char *options, struct udf_options *uopt,
			     bool remount)
{
	char *p;
	int option;

	/* Reset the options that are re-derived on every parse */
	uopt->novrs = 0;
	uopt->session = 0xFFFFFFFF;
	uopt->lastblock = 0;
	uopt->anchor = 0;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		int token;
		unsigned n;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_novrs:
			uopt->novrs = 1;
			break;
		case Opt_bs:
			if (match_int(&args[0], &option))
				return 0;
			n = option;
			/* Only power-of-two sector sizes the code supports */
			if (n != 512 && n != 1024 && n != 2048 && n != 4096)
				return 0;
			uopt->blocksize = n;
			uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
			break;
		case Opt_unhide:
			uopt->flags |= (1 << UDF_FLAG_UNHIDE);
			break;
		case Opt_undelete:
			uopt->flags |= (1 << UDF_FLAG_UNDELETE);
			break;
		case Opt_noadinicb:
			uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
			break;
		case Opt_adinicb:
			uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
			break;
		case Opt_shortad:
			uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
			break;
		case Opt_longad:
			uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
			break;
		case Opt_gid:
			if (match_int(args, &option))
				return 0;
			uopt->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(uopt->gid))
				return 0;
			uopt->flags |= (1 << UDF_FLAG_GID_SET);
			break;
		case Opt_uid:
			if (match_int(args, &option))
				return 0;
			uopt->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(uopt->uid))
				return 0;
			uopt->flags |= (1 << UDF_FLAG_UID_SET);
			break;
		case Opt_umask:
			if (match_octal(args, &option))
				return 0;
			uopt->umask = option;
			break;
		case Opt_nostrict:
			uopt->flags &= ~(1 << UDF_FLAG_STRICT);
			break;
		case Opt_session:
			if (match_int(args, &option))
				return 0;
			uopt->session = option;
			if (!remount)
				uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
			break;
		case Opt_lastblock:
			if (match_int(args, &option))
				return 0;
			uopt->lastblock = option;
			if (!remount)
				uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
			break;
		case Opt_anchor:
			if (match_int(args, &option))
				return 0;
			uopt->anchor = option;
			break;
		case Opt_volume:
		case Opt_partition:
		case Opt_fileset:
		case Opt_rootdir:
			/* Ignored (never implemented properly) */
			break;
		case Opt_utf8:
			if (!remount) {
				unload_nls(uopt->nls_map);
				uopt->nls_map = NULL;
			}
			break;
		case Opt_iocharset:
			if (!remount) {
				unload_nls(uopt->nls_map);
				uopt->nls_map = NULL;
			}
			/* When nls_map is not loaded then UTF-8 is used */
			if (!remount && strcmp(args[0].from, "utf8") != 0) {
				uopt->nls_map = load_nls(args[0].from);
				if (!uopt->nls_map) {
					pr_err("iocharset %s not found\n",
						args[0].from);
					return 0;
				}
			}
			break;
		case Opt_uforget:
			uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
			break;
		case Opt_uignore:
		case Opt_gignore:
			/* These options are superseded by uid=<number> */
			break;
		case Opt_gforget:
			uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
			break;
		case Opt_fmode:
			if (match_octal(args, &option))
				return 0;
			uopt->fmode = option & 0777;
			break;
		case Opt_dmode:
			if (match_octal(args, &option))
				return 0;
			uopt->dmode = option & 0777;
			break;
		default:
			pr_err("bad mount option \"%s\" or missing value\n", p);
			return 0;
		}
	}
	return 1;
}
612
/*
 * Remount: re-parse options and apply the changeable ones, then open or
 * close the Logical Volume Integrity Descriptor if the ro/rw state flips.
 * A filesystem flagged RW_INCOMPAT can never be remounted read-write.
 */
static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
{
	struct udf_options uopt;
	struct udf_sb_info *sbi = UDF_SB(sb);
	int error = 0;

	if (!(*flags & SB_RDONLY) && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
		return -EACCES;

	sync_filesystem(sb);

	/* Seed uopt with the current settings so unchanged options persist */
	uopt.flags = sbi->s_flags;
	uopt.uid   = sbi->s_uid;
	uopt.gid   = sbi->s_gid;
	uopt.umask = sbi->s_umask;
	uopt.fmode = sbi->s_fmode;
	uopt.dmode = sbi->s_dmode;
	uopt.nls_map = NULL;	/* iocharset cannot change on remount */

	if (!udf_parse_options(options, &uopt, true))
		return -EINVAL;

	write_lock(&sbi->s_cred_lock);
	sbi->s_flags = uopt.flags;
	sbi->s_uid   = uopt.uid;
	sbi->s_gid   = uopt.gid;
	sbi->s_umask = uopt.umask;
	sbi->s_fmode = uopt.fmode;
	sbi->s_dmode = uopt.dmode;
	write_unlock(&sbi->s_cred_lock);

	/* No ro/rw transition - nothing more to do */
	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
		goto out_unlock;

	if (*flags & SB_RDONLY)
		udf_close_lvid(sb);
	else
		udf_open_lvid(sb);

out_unlock:
	return error;
}
655
656/*
657 * Check VSD descriptor. Returns -1 in case we are at the end of volume
658 * recognition area, 0 if the descriptor is valid but non-interesting, 1 if
659 * we found one of NSR descriptors we are looking for.
660 */
661static int identify_vsd(const struct volStructDesc *vsd)
662{
663 int ret = 0;
664
665 if (!memcmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) {
666 switch (vsd->structType) {
667 case 0:
668 udf_debug("ISO9660 Boot Record found\n");
669 break;
670 case 1:
671 udf_debug("ISO9660 Primary Volume Descriptor found\n");
672 break;
673 case 2:
674 udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
675 break;
676 case 3:
677 udf_debug("ISO9660 Volume Partition Descriptor found\n");
678 break;
679 case 255:
680 udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
681 break;
682 default:
683 udf_debug("ISO9660 VRS (%u) found\n", vsd->structType);
684 break;
685 }
686 } else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN))
687 ; /* ret = 0 */
688 else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN))
689 ret = 1;
690 else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN))
691 ret = 1;
692 else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BOOT2, VSD_STD_ID_LEN))
693 ; /* ret = 0 */
694 else if (!memcmp(vsd->stdIdent, VSD_STD_ID_CDW02, VSD_STD_ID_LEN))
695 ; /* ret = 0 */
696 else {
697 /* TEA01 or invalid id : end of volume recognition area */
698 ret = -1;
699 }
700
701 return ret;
702}
703
/*
 * Check Volume Structure Descriptors (ECMA 167 2/9.1)
 * We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1)
 * @return 1 if NSR02 or NSR03 found,
 *	   -1 if first sector read error, 0 otherwise
 */
static int udf_check_vsd(struct super_block *sb)
{
	struct volStructDesc *vsd = NULL;
	loff_t sector = VSD_FIRST_SECTOR_OFFSET;
	int sectorsize;
	struct buffer_head *bh = NULL;
	int nsr = 0;
	struct udf_sb_info *sbi;
	loff_t session_offset;

	sbi = UDF_SB(sb);
	/* VSDs are at least sizeof(struct volStructDesc) apart */
	if (sb->s_blocksize < sizeof(struct volStructDesc))
		sectorsize = sizeof(struct volStructDesc);
	else
		sectorsize = sb->s_blocksize;

	/* Start the scan relative to the selected CD session */
	session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits;
	sector += session_offset;

	udf_debug("Starting at sector %u (%lu byte sectors)\n",
		  (unsigned int)(sector >> sb->s_blocksize_bits),
		  sb->s_blocksize);
	/* Process the sequence (if applicable). The hard limit on the sector
	 * offset is arbitrary, hopefully large enough so that all valid UDF
	 * filesystems will be recognised. There is no mention of an upper
	 * bound to the size of the volume recognition area in the standard.
	 * The limit will prevent the code to read all the sectors of a
	 * specially crafted image (like a bluray disc full of CD001 sectors),
	 * potentially causing minutes or even hours of uninterruptible I/O
	 * activity. This actually happened with uninitialised SSD partitions
	 * (all 0xFF) before the check for the limit and all valid IDs were
	 * added */
	for (; !nsr && sector < VSD_MAX_SECTOR_OFFSET; sector += sectorsize) {
		/* Read a block */
		bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
		if (!bh)
			break;

		vsd = (struct volStructDesc *)(bh->b_data +
					      (sector & (sb->s_blocksize - 1)));
		nsr = identify_vsd(vsd);
		/* Found NSR or end? */
		if (nsr) {
			brelse(bh);
			break;
		}
		/*
		 * Special handling for improperly formatted VRS (e.g., Win10)
		 * where components are separated by 2048 bytes even though
		 * sectors are 4K
		 */
		if (sb->s_blocksize == 4096) {
			nsr = identify_vsd(vsd + 1);
			/* Ignore unknown IDs... */
			if (nsr < 0)
				nsr = 0;
		}
		brelse(bh);
	}

	if (nsr > 0)
		return 1;
	/* Read failure on the very first sector of the recognition area */
	else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)
		return -1;
	else
		return 0;
}
777
/*
 * Verify the OSTA UDF domain identifier of a descriptor ('dname' names the
 * descriptor for log messages).  Non-compliant, dirty, or write-protected
 * descriptors force a read-only mount: returns -EACCES when the fs is
 * mounted read-write, otherwise sets UDF_FLAG_RW_INCOMPAT and returns 0.
 */
static int udf_verify_domain_identifier(struct super_block *sb,
					struct regid *ident, char *dname)
{
	struct domainEntityIDSuffix *suffix;

	if (memcmp(ident->ident, UDF_ID_COMPLIANT, strlen(UDF_ID_COMPLIANT))) {
		udf_warn(sb, "Not OSTA UDF compliant %s descriptor.\n", dname);
		goto force_ro;
	}
	if (ident->flags & (1 << ENTITYID_FLAGS_DIRTY)) {
		udf_warn(sb, "Possibly not OSTA UDF compliant %s descriptor.\n",
			 dname);
		goto force_ro;
	}
	suffix = (struct domainEntityIDSuffix *)ident->identSuffix;
	if (suffix->flags & (1 << ENTITYIDSUFFIX_FLAGS_HARDWRITEPROTECT) ||
	    suffix->flags & (1 << ENTITYIDSUFFIX_FLAGS_SOFTWRITEPROTECT)) {
		if (!sb_rdonly(sb)) {
			udf_warn(sb, "Descriptor for %s marked write protected."
				 " Forcing read only mount.\n", dname);
		}
		goto force_ro;
	}
	return 0;

force_ro:
	if (!sb_rdonly(sb))
		return -EACCES;
	UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
	return 0;
}
809
810static int udf_load_fileset(struct super_block *sb, struct fileSetDesc *fset,
811 struct kernel_lb_addr *root)
812{
813 int ret;
814
815 ret = udf_verify_domain_identifier(sb, &fset->domainIdent, "file set");
816 if (ret < 0)
817 return ret;
818
819 *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
820 UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
821
822 udf_debug("Rootdir at block=%u, partition=%u\n",
823 root->logicalBlockNum, root->partitionReferenceNum);
824 return 0;
825}
826
827static int udf_find_fileset(struct super_block *sb,
828 struct kernel_lb_addr *fileset,
829 struct kernel_lb_addr *root)
830{
831 struct buffer_head *bh = NULL;
832 uint16_t ident;
833 int ret;
834
835 if (fileset->logicalBlockNum == 0xFFFFFFFF &&
836 fileset->partitionReferenceNum == 0xFFFF)
837 return -EINVAL;
838
839 bh = udf_read_ptagged(sb, fileset, 0, &ident);
840 if (!bh)
841 return -EIO;
842 if (ident != TAG_IDENT_FSD) {
843 brelse(bh);
844 return -EINVAL;
845 }
846
847 udf_debug("Fileset at block=%u, partition=%u\n",
848 fileset->logicalBlockNum, fileset->partitionReferenceNum);
849
850 UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
851 ret = udf_load_fileset(sb, (struct fileSetDesc *)bh->b_data, root);
852 brelse(bh);
853 return ret;
854}
855
856/*
857 * Load primary Volume Descriptor Sequence
858 *
859 * Return <0 on error, 0 on success. -EAGAIN is special meaning next sequence
860 * should be tried.
861 */
862static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
863{
864 struct primaryVolDesc *pvoldesc;
865 uint8_t *outstr;
866 struct buffer_head *bh;
867 uint16_t ident;
868 int ret = -ENOMEM;
869 struct timestamp *ts;
870
871 outstr = kmalloc(128, GFP_NOFS);
872 if (!outstr)
873 return -ENOMEM;
874
875 bh = udf_read_tagged(sb, block, block, &ident);
876 if (!bh) {
877 ret = -EAGAIN;
878 goto out2;
879 }
880
881 if (ident != TAG_IDENT_PVD) {
882 ret = -EIO;
883 goto out_bh;
884 }
885
886 pvoldesc = (struct primaryVolDesc *)bh->b_data;
887
888 udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
889 pvoldesc->recordingDateAndTime);
890 ts = &pvoldesc->recordingDateAndTime;
891 udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n",
892 le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
893 ts->minute, le16_to_cpu(ts->typeAndTimezone));
894
895 ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32);
896 if (ret < 0) {
897 strcpy(UDF_SB(sb)->s_volume_ident, "InvalidName");
898 pr_warn("incorrect volume identification, setting to "
899 "'InvalidName'\n");
900 } else {
901 strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
902 }
903 udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
904
905 ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128);
906 if (ret < 0) {
907 ret = 0;
908 goto out_bh;
909 }
910 outstr[ret] = 0;
911 udf_debug("volSetIdent[] = '%s'\n", outstr);
912
913 ret = 0;
914out_bh:
915 brelse(bh);
916out2:
917 kfree(outstr);
918 return ret;
919}
920
921struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
922 u32 meta_file_loc, u32 partition_ref)
923{
924 struct kernel_lb_addr addr;
925 struct inode *metadata_fe;
926
927 addr.logicalBlockNum = meta_file_loc;
928 addr.partitionReferenceNum = partition_ref;
929
930 metadata_fe = udf_iget_special(sb, &addr);
931
932 if (IS_ERR(metadata_fe)) {
933 udf_warn(sb, "metadata inode efe not found\n");
934 return metadata_fe;
935 }
936 if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
937 udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
938 iput(metadata_fe);
939 return ERR_PTR(-EIO);
940 }
941
942 return metadata_fe;
943}
944
/*
 * Load the metadata partition's file entries: the metadata file (or, if
 * that fails, its mirror) and optionally the metadata bitmap file.
 * 'type1_index' is the index of the underlying physical (type 1)
 * partition that the metadata partition maps onto.
 * Returns 0 on success or a negative error.
 */
static int udf_load_metadata_files(struct super_block *sb, int partition,
				   int type1_index)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	struct udf_meta_data *mdata;
	struct kernel_lb_addr addr;
	struct inode *fe;

	map = &sbi->s_partmaps[partition];
	mdata = &map->s_type_specific.s_metadata;
	mdata->s_phys_partition_ref = type1_index;

	/* metadata address */
	udf_debug("Metadata file location: block = %u part = %u\n",
		  mdata->s_meta_file_loc, mdata->s_phys_partition_ref);

	fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
					 mdata->s_phys_partition_ref);
	if (IS_ERR(fe)) {
		/* mirror file entry */
		udf_debug("Mirror metadata file location: block = %u part = %u\n",
			  mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);

		fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
						 mdata->s_phys_partition_ref);

		if (IS_ERR(fe)) {
			udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
			return PTR_ERR(fe);
		}
		mdata->s_mirror_fe = fe;
	} else
		mdata->s_metadata_fe = fe;


	/*
	 * bitmap file entry
	 * Note:
	 * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
	 */
	if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
		addr.logicalBlockNum = mdata->s_bitmap_file_loc;
		addr.partitionReferenceNum = mdata->s_phys_partition_ref;

		udf_debug("Bitmap file location: block = %u part = %u\n",
			  addr.logicalBlockNum, addr.partitionReferenceNum);

		fe = udf_iget_special(sb, &addr);
		if (IS_ERR(fe)) {
			/* Missing bitmap is tolerable only when we never write */
			if (sb_rdonly(sb))
				udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
			else {
				udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
				return PTR_ERR(fe);
			}
		} else
			mdata->s_bitmap_fe = fe;
	}

	udf_debug("udf_load_metadata_files Ok\n");
	return 0;
}
1008
/*
 * Number of bitmap block groups needed for a partition: one group per
 * block's worth of bits (8 per byte), counting both the partition blocks
 * and the bits occupied by the spaceBitmapDesc header.
 */
int udf_compute_nr_groups(struct super_block *sb, u32 partition)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
	return DIV_ROUND_UP(map->s_partition_len +
			    (sizeof(struct spaceBitmapDesc) << 3),
			    sb->s_blocksize * 8);
}
1016
1017static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
1018{
1019 struct udf_bitmap *bitmap;
1020 int nr_groups;
1021 int size;
1022
1023 nr_groups = udf_compute_nr_groups(sb, index);
1024 size = sizeof(struct udf_bitmap) +
1025 (sizeof(struct buffer_head *) * nr_groups);
1026
1027 if (size <= PAGE_SIZE)
1028 bitmap = kzalloc(size, GFP_KERNEL);
1029 else
1030 bitmap = vzalloc(size); /* TODO: get rid of vzalloc */
1031
1032 if (!bitmap)
1033 return NULL;
1034
1035 bitmap->s_nr_groups = nr_groups;
1036 return bitmap;
1037}
1038
/*
 * Sanity-check a Partition Descriptor against what this implementation can
 * write.  Anything unsupported (non-writable access type, missing NSR
 * partition contents, freed-space tracking, mixed table+bitmap, virtual
 * partitions) forces a read-only mount: -EACCES when mounted read-write,
 * otherwise UDF_FLAG_RW_INCOMPAT is set and 0 returned.
 */
static int check_partition_desc(struct super_block *sb,
				struct partitionDesc *p,
				struct udf_part_map *map)
{
	bool umap, utable, fmap, ftable;
	struct partitionHeaderDesc *phd;

	switch (le32_to_cpu(p->accessType)) {
	case PD_ACCESS_TYPE_READ_ONLY:
	case PD_ACCESS_TYPE_WRITE_ONCE:
	case PD_ACCESS_TYPE_NONE:
		goto force_ro;
	}

	/* No Partition Header Descriptor? */
	if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
	    strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
		goto force_ro;

	phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
	utable = phd->unallocSpaceTable.extLength;
	umap = phd->unallocSpaceBitmap.extLength;
	ftable = phd->freedSpaceTable.extLength;
	fmap = phd->freedSpaceBitmap.extLength;

	/* No allocation info? */
	if (!utable && !umap && !ftable && !fmap)
		goto force_ro;

	/* We don't support blocks that require erasing before overwrite */
	if (ftable || fmap)
		goto force_ro;
	/* UDF 2.60: 2.3.3 - no mixing of tables & bitmaps, no VAT. */
	if (utable && umap)
		goto force_ro;

	if (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
	    map->s_partition_type == UDF_VIRTUAL_MAP20)
		goto force_ro;

	return 0;
force_ro:
	if (!sb_rdonly(sb))
		return -EACCES;
	UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
	return 0;
}
1086
/*
 * Fill the in-memory partition map entry 'p_index' from an on-disk Partition
 * Descriptor: start/length, access-type flags, and — unless the mount was
 * forced read-only as RW-incompatible — the unallocated space table inode or
 * bitmap used for block allocation.
 *
 * Returns 0 on success, -EFSCORRUPTED for insane on-disk geometry, or a
 * negative error from inode lookup / memory allocation.
 */
static int udf_fill_partdesc_info(struct super_block *sb,
				  struct partitionDesc *p, int p_index)
{
	struct udf_part_map *map;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct partitionHeaderDesc *phd;
	u32 sum;
	int err;

	map = &sbi->s_partmaps[p_index];

	map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
	map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
	/* Reject a partition whose extent wraps the 32-bit block space. */
	if (check_add_overflow(map->s_partition_root, map->s_partition_len,
			       &sum)) {
		udf_err(sb, "Partition %d has invalid location %u + %u\n",
			p_index, map->s_partition_root, map->s_partition_len);
		return -EFSCORRUPTED;
	}

	/* Translate the descriptor's access type into partition flags. */
	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
		map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
		map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
		map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
	if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
		map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;

	udf_debug("Partition (%d type %x) starts at physical %u, block length %u\n",
		  p_index, map->s_partition_type,
		  map->s_partition_root, map->s_partition_len);

	/* May force a read-only mount or fail an RW mount outright. */
	err = check_partition_desc(sb, p, map);
	if (err)
		return err;

	/*
	 * Skip loading allocation info it we cannot ever write to the fs.
	 * This is a correctness thing as we may have decided to force ro mount
	 * to avoid allocation info we don't support.
	 */
	if (UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
		return 0;

	phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
	/* Unallocated space tracked by a table: load its special inode. */
	if (phd->unallocSpaceTable.extLength) {
		struct kernel_lb_addr loc = {
			.logicalBlockNum = le32_to_cpu(
				phd->unallocSpaceTable.extPosition),
			.partitionReferenceNum = p_index,
		};
		struct inode *inode;

		inode = udf_iget_special(sb, &loc);
		if (IS_ERR(inode)) {
			udf_debug("cannot load unallocSpaceTable (part %d)\n",
				  p_index);
			return PTR_ERR(inode);
		}
		map->s_uspace.s_table = inode;
		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
		udf_debug("unallocSpaceTable (part %d) @ %lu\n",
			  p_index, map->s_uspace.s_table->i_ino);
	}

	/* Unallocated space tracked by a bitmap: remember its location. */
	if (phd->unallocSpaceBitmap.extLength) {
		struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
		if (!bitmap)
			return -ENOMEM;
		map->s_uspace.s_bitmap = bitmap;
		bitmap->s_extPosition = le32_to_cpu(
				phd->unallocSpaceBitmap.extPosition);
		map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
		/* Check whether math over bitmap won't overflow. */
		if (check_add_overflow(map->s_partition_len,
				       sizeof(struct spaceBitmapDesc) << 3,
				       &sum)) {
			udf_err(sb, "Partition %d is too long (%u)\n", p_index,
				map->s_partition_len);
			return -EFSCORRUPTED;
		}
		udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
			  p_index, bitmap->s_extPosition);
	}

	return 0;
}
1175
1176static void udf_find_vat_block(struct super_block *sb, int p_index,
1177 int type1_index, sector_t start_block)
1178{
1179 struct udf_sb_info *sbi = UDF_SB(sb);
1180 struct udf_part_map *map = &sbi->s_partmaps[p_index];
1181 sector_t vat_block;
1182 struct kernel_lb_addr ino;
1183 struct inode *inode;
1184
1185 /*
1186 * VAT file entry is in the last recorded block. Some broken disks have
1187 * it a few blocks before so try a bit harder...
1188 */
1189 ino.partitionReferenceNum = type1_index;
1190 for (vat_block = start_block;
1191 vat_block >= map->s_partition_root &&
1192 vat_block >= start_block - 3; vat_block--) {
1193 ino.logicalBlockNum = vat_block - map->s_partition_root;
1194 inode = udf_iget_special(sb, &ino);
1195 if (!IS_ERR(inode)) {
1196 sbi->s_vat_inode = inode;
1197 break;
1198 }
1199 }
1200}
1201
/*
 * Locate the Virtual Allocation Table inode for a virtual partition and
 * record where its remapping entries start (s_start_offset) and how many
 * there are (s_num_entries, 4 bytes each).
 *
 * For UDF_VIRTUAL_MAP15 the entries start at offset 0 and the inode's last
 * 36 bytes are not entries; for UDF_VIRTUAL_MAP20 the header length is read
 * from the VAT itself (first mapped block, or in-ICB data).
 *
 * Returns 0 on success, -EIO if the VAT inode or its first block cannot be
 * read.
 */
static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map = &sbi->s_partmaps[p_index];
	struct buffer_head *bh = NULL;
	struct udf_inode_info *vati;
	uint32_t pos;
	struct virtualAllocationTable20 *vat20;
	sector_t blocks = i_size_read(sb->s_bdev->bd_inode) >>
			  sb->s_blocksize_bits;

	/* First look near the last recorded block... */
	udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
	if (!sbi->s_vat_inode &&
	    sbi->s_last_block != blocks - 1) {
		/* ...then retry near the physical end of the device. */
		pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n",
			  (unsigned long)sbi->s_last_block,
			  (unsigned long)blocks - 1);
		udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
	}
	if (!sbi->s_vat_inode)
		return -EIO;

	if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
		map->s_type_specific.s_virtual.s_start_offset = 0;
		map->s_type_specific.s_virtual.s_num_entries =
			(sbi->s_vat_inode->i_size - 36) >> 2;
	} else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
		vati = UDF_I(sbi->s_vat_inode);
		if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
			/* VAT data lives in mapped blocks: read block 0. */
			pos = udf_block_map(sbi->s_vat_inode, 0);
			bh = sb_bread(sb, pos);
			if (!bh)
				return -EIO;
			vat20 = (struct virtualAllocationTable20 *)bh->b_data;
		} else {
			/* VAT data is embedded in the ICB itself. */
			vat20 = (struct virtualAllocationTable20 *)
							vati->i_ext.i_data;
		}

		map->s_type_specific.s_virtual.s_start_offset =
			le16_to_cpu(vat20->lengthHeader);
		map->s_type_specific.s_virtual.s_num_entries =
			(sbi->s_vat_inode->i_size -
				map->s_type_specific.s_virtual.
					s_start_offset) >> 2;
		/* brelse(NULL) is a no-op for the in-ICB case. */
		brelse(bh);
	}
	return 0;
}
1251
1252/*
1253 * Load partition descriptor block
1254 *
1255 * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor
1256 * sequence.
1257 */
1258static int udf_load_partdesc(struct super_block *sb, sector_t block)
1259{
1260 struct buffer_head *bh;
1261 struct partitionDesc *p;
1262 struct udf_part_map *map;
1263 struct udf_sb_info *sbi = UDF_SB(sb);
1264 int i, type1_idx;
1265 uint16_t partitionNumber;
1266 uint16_t ident;
1267 int ret;
1268
1269 bh = udf_read_tagged(sb, block, block, &ident);
1270 if (!bh)
1271 return -EAGAIN;
1272 if (ident != TAG_IDENT_PD) {
1273 ret = 0;
1274 goto out_bh;
1275 }
1276
1277 p = (struct partitionDesc *)bh->b_data;
1278 partitionNumber = le16_to_cpu(p->partitionNumber);
1279
1280 /* First scan for TYPE1 and SPARABLE partitions */
1281 for (i = 0; i < sbi->s_partitions; i++) {
1282 map = &sbi->s_partmaps[i];
1283 udf_debug("Searching map: (%u == %u)\n",
1284 map->s_partition_num, partitionNumber);
1285 if (map->s_partition_num == partitionNumber &&
1286 (map->s_partition_type == UDF_TYPE1_MAP15 ||
1287 map->s_partition_type == UDF_SPARABLE_MAP15))
1288 break;
1289 }
1290
1291 if (i >= sbi->s_partitions) {
1292 udf_debug("Partition (%u) not found in partition map\n",
1293 partitionNumber);
1294 ret = 0;
1295 goto out_bh;
1296 }
1297
1298 ret = udf_fill_partdesc_info(sb, p, i);
1299 if (ret < 0)
1300 goto out_bh;
1301
1302 /*
1303 * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
1304 * PHYSICAL partitions are already set up
1305 */
1306 type1_idx = i;
1307 map = NULL; /* supress 'maybe used uninitialized' warning */
1308 for (i = 0; i < sbi->s_partitions; i++) {
1309 map = &sbi->s_partmaps[i];
1310
1311 if (map->s_partition_num == partitionNumber &&
1312 (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
1313 map->s_partition_type == UDF_VIRTUAL_MAP20 ||
1314 map->s_partition_type == UDF_METADATA_MAP25))
1315 break;
1316 }
1317
1318 if (i >= sbi->s_partitions) {
1319 ret = 0;
1320 goto out_bh;
1321 }
1322
1323 ret = udf_fill_partdesc_info(sb, p, i);
1324 if (ret < 0)
1325 goto out_bh;
1326
1327 if (map->s_partition_type == UDF_METADATA_MAP25) {
1328 ret = udf_load_metadata_files(sb, i, type1_idx);
1329 if (ret < 0) {
1330 udf_err(sb, "error loading MetaData partition map %d\n",
1331 i);
1332 goto out_bh;
1333 }
1334 } else {
1335 /*
1336 * If we have a partition with virtual map, we don't handle
1337 * writing to it (we overwrite blocks instead of relocating
1338 * them).
1339 */
1340 if (!sb_rdonly(sb)) {
1341 ret = -EACCES;
1342 goto out_bh;
1343 }
1344 UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
1345 ret = udf_load_vat(sb, i, type1_idx);
1346 if (ret < 0)
1347 goto out_bh;
1348 }
1349 ret = 0;
1350out_bh:
1351 /* In case loading failed, we handle cleanup in udf_fill_super */
1352 brelse(bh);
1353 return ret;
1354}
1355
/*
 * Parse a Sparable Partition Map: validate packet length and sparing table
 * limits, then read up to 4 sparing tables into sdata->s_spar_map[]. A table
 * that cannot be read or fails validation is skipped (its slot stays NULL)
 * rather than failing the mount.
 *
 * Returns 0 on success, -EIO on invalid map parameters.
 */
static int udf_load_sparable_map(struct super_block *sb,
				 struct udf_part_map *map,
				 struct sparablePartitionMap *spm)
{
	uint32_t loc;
	uint16_t ident;
	struct sparingTable *st;
	struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
	int i;
	struct buffer_head *bh;

	map->s_partition_type = UDF_SPARABLE_MAP15;
	sdata->s_packet_len = le16_to_cpu(spm->packetLength);
	/* Packet length is used for alignment math; require a power of 2. */
	if (!is_power_of_2(sdata->s_packet_len)) {
		udf_err(sb, "error loading logical volume descriptor: "
			"Invalid packet length %u\n",
			(unsigned)sdata->s_packet_len);
		return -EIO;
	}
	/* s_spar_map[] has 4 slots; reject anything claiming more tables. */
	if (spm->numSparingTables > 4) {
		udf_err(sb, "error loading logical volume descriptor: "
			"Too many sparing tables (%d)\n",
			(int)spm->numSparingTables);
		return -EIO;
	}
	/* Each table is read with udf_read_tagged into one block. */
	if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) {
		udf_err(sb, "error loading logical volume descriptor: "
			"Too big sparing table size (%u)\n",
			le32_to_cpu(spm->sizeSparingTable));
		return -EIO;
	}

	for (i = 0; i < spm->numSparingTables; i++) {
		loc = le32_to_cpu(spm->locSparingTable[i]);
		bh = udf_read_tagged(sb, loc, loc, &ident);
		if (!bh)
			continue;

		/* Verify identifier and that the table fits in the block. */
		st = (struct sparingTable *)bh->b_data;
		if (ident != 0 ||
		    strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
			    strlen(UDF_ID_SPARING)) ||
		    sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
							sb->s_blocksize) {
			brelse(bh);
			continue;
		}

		/* Keep the buffer_head; released when the map is torn down. */
		sdata->s_spar_map[i] = bh;
	}
	map->s_partition_func = udf_get_pblock_spar15;
	return 0;
}
1409
/*
 * Parse the Logical Volume Descriptor at 'block': allocate the partition
 * map array and fill each map entry (type 1 physical maps, and type 2
 * virtual / sparable / metadata maps), extract the fileset location, and
 * kick off loading of the Logical Volume Integrity Descriptor sequence.
 *
 * Returns 0 on success, -EAGAIN if the block cannot be read (try the next
 * descriptor sequence), -EACCES when a missing LVID forces read-only, or
 * another negative error.
 */
static int udf_load_logicalvol(struct super_block *sb, sector_t block,
			       struct kernel_lb_addr *fileset)
{
	struct logicalVolDesc *lvd;
	int i, offset;
	uint8_t type;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct genericPartitionMap *gpm;
	uint16_t ident;
	struct buffer_head *bh;
	unsigned int table_len;
	int ret;

	bh = udf_read_tagged(sb, block, block, &ident);
	if (!bh)
		return -EAGAIN;
	/* Caller recorded this block as an LVD; anything else is a bug. */
	BUG_ON(ident != TAG_IDENT_LVD);
	lvd = (struct logicalVolDesc *)bh->b_data;
	table_len = le32_to_cpu(lvd->mapTableLength);
	if (table_len > sb->s_blocksize - sizeof(*lvd)) {
		udf_err(sb, "error loading logical volume descriptor: "
			"Partition table too long (%u > %lu)\n", table_len,
			sb->s_blocksize - sizeof(*lvd));
		ret = -EIO;
		goto out_bh;
	}

	ret = udf_verify_domain_identifier(sb, &lvd->domainIdent,
					   "logical volume");
	if (ret)
		goto out_bh;
	ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
	if (ret)
		goto out_bh;

	/* Walk the map table; each entry advances by its own on-disk length. */
	for (i = 0, offset = 0;
	     i < sbi->s_partitions && offset < table_len;
	     i++, offset += gpm->partitionMapLength) {
		struct udf_part_map *map = &sbi->s_partmaps[i];
		gpm = (struct genericPartitionMap *)
				&(lvd->partitionMaps[offset]);
		type = gpm->partitionMapType;
		if (type == 1) {
			/* Type 1: plain physical partition reference. */
			struct genericPartitionMap1 *gpm1 =
				(struct genericPartitionMap1 *)gpm;
			map->s_partition_type = UDF_TYPE1_MAP15;
			map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
			map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
			map->s_partition_func = NULL;
		} else if (type == 2) {
			/* Type 2: identified by the entry's ident string. */
			struct udfPartitionMap2 *upm2 =
						(struct udfPartitionMap2 *)gpm;
			if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL,
						strlen(UDF_ID_VIRTUAL))) {
				/* Ident suffix < 0x0200 selects VAT 1.50. */
				u16 suf =
					le16_to_cpu(((__le16 *)upm2->partIdent.
							identSuffix)[0]);
				if (suf < 0x0200) {
					map->s_partition_type =
							UDF_VIRTUAL_MAP15;
					map->s_partition_func =
							udf_get_pblock_virt15;
				} else {
					map->s_partition_type =
							UDF_VIRTUAL_MAP20;
					map->s_partition_func =
							udf_get_pblock_virt20;
				}
			} else if (!strncmp(upm2->partIdent.ident,
						UDF_ID_SPARABLE,
						strlen(UDF_ID_SPARABLE))) {
				ret = udf_load_sparable_map(sb, map,
					(struct sparablePartitionMap *)gpm);
				if (ret < 0)
					goto out_bh;
			} else if (!strncmp(upm2->partIdent.ident,
						UDF_ID_METADATA,
						strlen(UDF_ID_METADATA))) {
				struct udf_meta_data *mdata =
					&map->s_type_specific.s_metadata;
				struct metadataPartitionMap *mdm =
						(struct metadataPartitionMap *)
						&(lvd->partitionMaps[offset]);
				udf_debug("Parsing Logical vol part %d type %u id=%s\n",
					  i, type, UDF_ID_METADATA);

				map->s_partition_type = UDF_METADATA_MAP25;
				map->s_partition_func = udf_get_pblock_meta25;

				mdata->s_meta_file_loc =
					le32_to_cpu(mdm->metadataFileLoc);
				mdata->s_mirror_file_loc =
					le32_to_cpu(mdm->metadataMirrorFileLoc);
				mdata->s_bitmap_file_loc =
					le32_to_cpu(mdm->metadataBitmapFileLoc);
				mdata->s_alloc_unit_size =
					le32_to_cpu(mdm->allocUnitSize);
				mdata->s_align_unit_size =
					le16_to_cpu(mdm->alignUnitSize);
				/* Bit 0 set: metadata file is duplicated. */
				if (mdm->flags & 0x01)
					mdata->s_flags |= MF_DUPLICATE_MD;

				udf_debug("Metadata Ident suffix=0x%x\n",
					  le16_to_cpu(*(__le16 *)
						      mdm->partIdent.identSuffix));
				udf_debug("Metadata part num=%u\n",
					  le16_to_cpu(mdm->partitionNum));
				udf_debug("Metadata part alloc unit size=%u\n",
					  le32_to_cpu(mdm->allocUnitSize));
				udf_debug("Metadata file loc=%u\n",
					  le32_to_cpu(mdm->metadataFileLoc));
				udf_debug("Mirror file loc=%u\n",
					  le32_to_cpu(mdm->metadataMirrorFileLoc));
				udf_debug("Bitmap file loc=%u\n",
					  le32_to_cpu(mdm->metadataBitmapFileLoc));
				udf_debug("Flags: %d %u\n",
					  mdata->s_flags, mdm->flags);
			} else {
				udf_debug("Unknown ident: %s\n",
					  upm2->partIdent.ident);
				continue;
			}
			map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
			map->s_partition_num = le16_to_cpu(upm2->partitionNum);
		}
		udf_debug("Partition (%d:%u) type %u on volume %u\n",
			  i, map->s_partition_num, type, map->s_volumeseqnum);
	}

	if (fileset) {
		/* Root fileset extent lives in logicalVolContentsUse. */
		struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);

		*fileset = lelb_to_cpu(la->extLocation);
		udf_debug("FileSet found in LogicalVolDesc at block=%u, partition=%u\n",
			  fileset->logicalBlockNum,
			  fileset->partitionReferenceNum);
	}
	if (lvd->integritySeqExt.extLength)
		udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
	ret = 0;

	if (!sbi->s_lvid_bh) {
		/* We can't generate unique IDs without a valid LVID */
		if (sb_rdonly(sb)) {
			UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
		} else {
			udf_warn(sb, "Damaged or missing LVID, forcing "
				 "readonly mount\n");
			ret = -EACCES;
		}
	}
out_bh:
	brelse(bh);
	return ret;
}
1565
1566/*
1567 * Find the prevailing Logical Volume Integrity Descriptor.
1568 */
1569static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
1570{
1571 struct buffer_head *bh, *final_bh;
1572 uint16_t ident;
1573 struct udf_sb_info *sbi = UDF_SB(sb);
1574 struct logicalVolIntegrityDesc *lvid;
1575 int indirections = 0;
1576 u32 parts, impuselen;
1577
1578 while (++indirections <= UDF_MAX_LVID_NESTING) {
1579 final_bh = NULL;
1580 while (loc.extLength > 0 &&
1581 (bh = udf_read_tagged(sb, loc.extLocation,
1582 loc.extLocation, &ident))) {
1583 if (ident != TAG_IDENT_LVID) {
1584 brelse(bh);
1585 break;
1586 }
1587
1588 brelse(final_bh);
1589 final_bh = bh;
1590
1591 loc.extLength -= sb->s_blocksize;
1592 loc.extLocation++;
1593 }
1594
1595 if (!final_bh)
1596 return;
1597
1598 brelse(sbi->s_lvid_bh);
1599 sbi->s_lvid_bh = final_bh;
1600
1601 lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
1602 if (lvid->nextIntegrityExt.extLength == 0)
1603 goto check;
1604
1605 loc = leea_to_cpu(lvid->nextIntegrityExt);
1606 }
1607
1608 udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
1609 UDF_MAX_LVID_NESTING);
1610out_err:
1611 brelse(sbi->s_lvid_bh);
1612 sbi->s_lvid_bh = NULL;
1613 return;
1614check:
1615 parts = le32_to_cpu(lvid->numOfPartitions);
1616 impuselen = le32_to_cpu(lvid->lengthOfImpUse);
1617 if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize ||
1618 sizeof(struct logicalVolIntegrityDesc) + impuselen +
1619 2 * parts * sizeof(u32) > sb->s_blocksize) {
1620 udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), "
1621 "ignoring.\n", parts, impuselen);
1622 goto out_err;
1623 }
1624}
1625
1626/*
1627 * Step for reallocation of table of partition descriptor sequence numbers.
1628 * Must be power of 2.
1629 */
1630#define PART_DESC_ALLOC_STEP 32
1631
/* Tracks the prevailing Partition Descriptor for one partition number. */
struct part_desc_seq_scan_data {
	struct udf_vds_record rec;	/* Location/seqnum of the prevailing PD */
	u32 partnum;			/* On-disk partition number it belongs to */
};
1636
/* Scratch state accumulated while scanning a Volume Descriptor Sequence. */
struct desc_seq_scan_data {
	struct udf_vds_record vds[VDS_POS_LENGTH];	/* Prevailing non-PD descriptors */
	unsigned int size_part_descs;	/* Capacity of part_descs_loc[] */
	unsigned int num_part_descs;	/* Entries used in part_descs_loc[] */
	struct part_desc_seq_scan_data *part_descs_loc;	/* Dynamically grown PD table */
};
1643
1644static struct udf_vds_record *handle_partition_descriptor(
1645 struct buffer_head *bh,
1646 struct desc_seq_scan_data *data)
1647{
1648 struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
1649 int partnum;
1650 int i;
1651
1652 partnum = le16_to_cpu(desc->partitionNumber);
1653 for (i = 0; i < data->num_part_descs; i++)
1654 if (partnum == data->part_descs_loc[i].partnum)
1655 return &(data->part_descs_loc[i].rec);
1656 if (data->num_part_descs >= data->size_part_descs) {
1657 struct part_desc_seq_scan_data *new_loc;
1658 unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
1659
1660 new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
1661 if (!new_loc)
1662 return ERR_PTR(-ENOMEM);
1663 memcpy(new_loc, data->part_descs_loc,
1664 data->size_part_descs * sizeof(*new_loc));
1665 kfree(data->part_descs_loc);
1666 data->part_descs_loc = new_loc;
1667 data->size_part_descs = new_size;
1668 }
1669 return &(data->part_descs_loc[data->num_part_descs++].rec);
1670}
1671
1672
1673static struct udf_vds_record *get_volume_descriptor_record(uint16_t ident,
1674 struct buffer_head *bh, struct desc_seq_scan_data *data)
1675{
1676 switch (ident) {
1677 case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1678 return &(data->vds[VDS_POS_PRIMARY_VOL_DESC]);
1679 case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1680 return &(data->vds[VDS_POS_IMP_USE_VOL_DESC]);
1681 case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1682 return &(data->vds[VDS_POS_LOGICAL_VOL_DESC]);
1683 case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1684 return &(data->vds[VDS_POS_UNALLOC_SPACE_DESC]);
1685 case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1686 return handle_partition_descriptor(bh, data);
1687 }
1688 return NULL;
1689}
1690
1691/*
1692 * Process a main/reserve volume descriptor sequence.
1693 * @block First block of first extent of the sequence.
1694 * @lastblock Lastblock of first extent of the sequence.
1695 * @fileset There we store extent containing root fileset
1696 *
1697 * Returns <0 on error, 0 on success. -EAGAIN is special - try next descriptor
1698 * sequence
1699 */
1700static noinline int udf_process_sequence(
1701 struct super_block *sb,
1702 sector_t block, sector_t lastblock,
1703 struct kernel_lb_addr *fileset)
1704{
1705 struct buffer_head *bh = NULL;
1706 struct udf_vds_record *curr;
1707 struct generic_desc *gd;
1708 struct volDescPtr *vdp;
1709 bool done = false;
1710 uint32_t vdsn;
1711 uint16_t ident;
1712 int ret;
1713 unsigned int indirections = 0;
1714 struct desc_seq_scan_data data;
1715 unsigned int i;
1716
1717 memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
1718 data.size_part_descs = PART_DESC_ALLOC_STEP;
1719 data.num_part_descs = 0;
1720 data.part_descs_loc = kcalloc(data.size_part_descs,
1721 sizeof(*data.part_descs_loc),
1722 GFP_KERNEL);
1723 if (!data.part_descs_loc)
1724 return -ENOMEM;
1725
1726 /*
1727 * Read the main descriptor sequence and find which descriptors
1728 * are in it.
1729 */
1730 for (; (!done && block <= lastblock); block++) {
1731 bh = udf_read_tagged(sb, block, block, &ident);
1732 if (!bh)
1733 break;
1734
1735 /* Process each descriptor (ISO 13346 3/8.3-8.4) */
1736 gd = (struct generic_desc *)bh->b_data;
1737 vdsn = le32_to_cpu(gd->volDescSeqNum);
1738 switch (ident) {
1739 case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
1740 if (++indirections > UDF_MAX_TD_NESTING) {
1741 udf_err(sb, "too many Volume Descriptor "
1742 "Pointers (max %u supported)\n",
1743 UDF_MAX_TD_NESTING);
1744 brelse(bh);
1745 ret = -EIO;
1746 goto out;
1747 }
1748
1749 vdp = (struct volDescPtr *)bh->b_data;
1750 block = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation);
1751 lastblock = le32_to_cpu(
1752 vdp->nextVolDescSeqExt.extLength) >>
1753 sb->s_blocksize_bits;
1754 lastblock += block - 1;
1755 /* For loop is going to increment 'block' again */
1756 block--;
1757 break;
1758 case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1759 case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1760 case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1761 case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1762 case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1763 curr = get_volume_descriptor_record(ident, bh, &data);
1764 if (IS_ERR(curr)) {
1765 brelse(bh);
1766 ret = PTR_ERR(curr);
1767 goto out;
1768 }
1769 /* Descriptor we don't care about? */
1770 if (!curr)
1771 break;
1772 if (vdsn >= curr->volDescSeqNum) {
1773 curr->volDescSeqNum = vdsn;
1774 curr->block = block;
1775 }
1776 break;
1777 case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
1778 done = true;
1779 break;
1780 }
1781 brelse(bh);
1782 }
1783 /*
1784 * Now read interesting descriptors again and process them
1785 * in a suitable order
1786 */
1787 if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) {
1788 udf_err(sb, "Primary Volume Descriptor not found!\n");
1789 ret = -EAGAIN;
1790 goto out;
1791 }
1792 ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block);
1793 if (ret < 0)
1794 goto out;
1795
1796 if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) {
1797 ret = udf_load_logicalvol(sb,
1798 data.vds[VDS_POS_LOGICAL_VOL_DESC].block,
1799 fileset);
1800 if (ret < 0)
1801 goto out;
1802 }
1803
1804 /* Now handle prevailing Partition Descriptors */
1805 for (i = 0; i < data.num_part_descs; i++) {
1806 ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
1807 if (ret < 0)
1808 goto out;
1809 }
1810 ret = 0;
1811out:
1812 kfree(data.part_descs_loc);
1813 return ret;
1814}
1815
1816/*
1817 * Load Volume Descriptor Sequence described by anchor in bh
1818 *
1819 * Returns <0 on error, 0 on success
1820 */
1821static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
1822 struct kernel_lb_addr *fileset)
1823{
1824 struct anchorVolDescPtr *anchor;
1825 sector_t main_s, main_e, reserve_s, reserve_e;
1826 int ret;
1827
1828 anchor = (struct anchorVolDescPtr *)bh->b_data;
1829
1830 /* Locate the main sequence */
1831 main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
1832 main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
1833 main_e = main_e >> sb->s_blocksize_bits;
1834 main_e += main_s - 1;
1835
1836 /* Locate the reserve sequence */
1837 reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
1838 reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
1839 reserve_e = reserve_e >> sb->s_blocksize_bits;
1840 reserve_e += reserve_s - 1;
1841
1842 /* Process the main & reserve sequences */
1843 /* responsible for finding the PartitionDesc(s) */
1844 ret = udf_process_sequence(sb, main_s, main_e, fileset);
1845 if (ret != -EAGAIN)
1846 return ret;
1847 udf_sb_free_partitions(sb);
1848 ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
1849 if (ret < 0) {
1850 udf_sb_free_partitions(sb);
1851 /* No sequence was OK, return -EIO */
1852 if (ret == -EAGAIN)
1853 ret = -EIO;
1854 }
1855 return ret;
1856}
1857
1858/*
1859 * Check whether there is an anchor block in the given block and
1860 * load Volume Descriptor Sequence if so.
1861 *
1862 * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor
1863 * block
1864 */
1865static int udf_check_anchor_block(struct super_block *sb, sector_t block,
1866 struct kernel_lb_addr *fileset)
1867{
1868 struct buffer_head *bh;
1869 uint16_t ident;
1870 int ret;
1871
1872 if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
1873 udf_fixed_to_variable(block) >=
1874 i_size_read(sb->s_bdev->bd_inode) >> sb->s_blocksize_bits)
1875 return -EAGAIN;
1876
1877 bh = udf_read_tagged(sb, block, block, &ident);
1878 if (!bh)
1879 return -EAGAIN;
1880 if (ident != TAG_IDENT_AVDP) {
1881 brelse(bh);
1882 return -EAGAIN;
1883 }
1884 ret = udf_load_sequence(sb, bh, fileset);
1885 brelse(bh);
1886 return ret;
1887}
1888
1889/*
1890 * Search for an anchor volume descriptor pointer.
1891 *
1892 * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set
1893 * of anchors.
1894 */
1895static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock,
1896 struct kernel_lb_addr *fileset)
1897{
1898 sector_t last[6];
1899 int i;
1900 struct udf_sb_info *sbi = UDF_SB(sb);
1901 int last_count = 0;
1902 int ret;
1903
1904 /* First try user provided anchor */
1905 if (sbi->s_anchor) {
1906 ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
1907 if (ret != -EAGAIN)
1908 return ret;
1909 }
1910 /*
1911 * according to spec, anchor is in either:
1912 * block 256
1913 * lastblock-256
1914 * lastblock
1915 * however, if the disc isn't closed, it could be 512.
1916 */
1917 ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
1918 if (ret != -EAGAIN)
1919 return ret;
1920 /*
1921 * The trouble is which block is the last one. Drives often misreport
1922 * this so we try various possibilities.
1923 */
1924 last[last_count++] = *lastblock;
1925 if (*lastblock >= 1)
1926 last[last_count++] = *lastblock - 1;
1927 last[last_count++] = *lastblock + 1;
1928 if (*lastblock >= 2)
1929 last[last_count++] = *lastblock - 2;
1930 if (*lastblock >= 150)
1931 last[last_count++] = *lastblock - 150;
1932 if (*lastblock >= 152)
1933 last[last_count++] = *lastblock - 152;
1934
1935 for (i = 0; i < last_count; i++) {
1936 if (last[i] >= i_size_read(sb->s_bdev->bd_inode) >>
1937 sb->s_blocksize_bits)
1938 continue;
1939 ret = udf_check_anchor_block(sb, last[i], fileset);
1940 if (ret != -EAGAIN) {
1941 if (!ret)
1942 *lastblock = last[i];
1943 return ret;
1944 }
1945 if (last[i] < 256)
1946 continue;
1947 ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
1948 if (ret != -EAGAIN) {
1949 if (!ret)
1950 *lastblock = last[i];
1951 return ret;
1952 }
1953 }
1954
1955 /* Finally try block 512 in case media is open */
1956 return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
1957}
1958
1959/*
1960 * Find an anchor volume descriptor and load Volume Descriptor Sequence from
1961 * area specified by it. The function expects sbi->s_lastblock to be the last
1962 * block on the media.
1963 *
1964 * Return <0 on error, 0 if anchor found. -EAGAIN is special meaning anchor
1965 * was not found.
1966 */
1967static int udf_find_anchor(struct super_block *sb,
1968 struct kernel_lb_addr *fileset)
1969{
1970 struct udf_sb_info *sbi = UDF_SB(sb);
1971 sector_t lastblock = sbi->s_last_block;
1972 int ret;
1973
1974 ret = udf_scan_anchors(sb, &lastblock, fileset);
1975 if (ret != -EAGAIN)
1976 goto out;
1977
1978 /* No anchor found? Try VARCONV conversion of block numbers */
1979 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
1980 lastblock = udf_variable_to_fixed(sbi->s_last_block);
1981 /* Firstly, we try to not convert number of the last block */
1982 ret = udf_scan_anchors(sb, &lastblock, fileset);
1983 if (ret != -EAGAIN)
1984 goto out;
1985
1986 lastblock = sbi->s_last_block;
1987 /* Secondly, we try with converted number of the last block */
1988 ret = udf_scan_anchors(sb, &lastblock, fileset);
1989 if (ret < 0) {
1990 /* VARCONV didn't help. Clear it. */
1991 UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
1992 }
1993out:
1994 if (ret == 0)
1995 sbi->s_last_block = lastblock;
1996 return ret;
1997}
1998
1999/*
2000 * Check Volume Structure Descriptor, find Anchor block and load Volume
2001 * Descriptor Sequence.
2002 *
2003 * Returns < 0 on error, 0 on success. -EAGAIN is special meaning anchor
2004 * block was not found.
2005 */
2006static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
2007 int silent, struct kernel_lb_addr *fileset)
2008{
2009 struct udf_sb_info *sbi = UDF_SB(sb);
2010 int nsr = 0;
2011 int ret;
2012
2013 if (!sb_set_blocksize(sb, uopt->blocksize)) {
2014 if (!silent)
2015 udf_warn(sb, "Bad block size\n");
2016 return -EINVAL;
2017 }
2018 sbi->s_last_block = uopt->lastblock;
2019 if (!uopt->novrs) {
2020 /* Check that it is NSR02 compliant */
2021 nsr = udf_check_vsd(sb);
2022 if (!nsr) {
2023 if (!silent)
2024 udf_warn(sb, "No VRS found\n");
2025 return -EINVAL;
2026 }
2027 if (nsr == -1)
2028 udf_debug("Failed to read sector at offset %d. "
2029 "Assuming open disc. Skipping validity "
2030 "check\n", VSD_FIRST_SECTOR_OFFSET);
2031 if (!sbi->s_last_block)
2032 sbi->s_last_block = udf_get_last_block(sb);
2033 } else {
2034 udf_debug("Validity check skipped because of novrs option\n");
2035 }
2036
2037 /* Look for anchor block and load Volume Descriptor Sequence */
2038 sbi->s_anchor = uopt->anchor;
2039 ret = udf_find_anchor(sb, fileset);
2040 if (ret < 0) {
2041 if (!silent && ret == -EAGAIN)
2042 udf_warn(sb, "No anchor found\n");
2043 return ret;
2044 }
2045 return 0;
2046}
2047
2048static void udf_finalize_lvid(struct logicalVolIntegrityDesc *lvid)
2049{
2050 struct timespec64 ts;
2051
2052 ktime_get_real_ts64(&ts);
2053 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
2054 lvid->descTag.descCRC = cpu_to_le16(
2055 crc_itu_t(0, (char *)lvid + sizeof(struct tag),
2056 le16_to_cpu(lvid->descTag.descCRCLength)));
2057 lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
2058}
2059
/*
 * Mark the filesystem as open (possibly dirty) in the Logical Volume
 * Integrity Descriptor and push the change to the media. If the LVID was
 * not previously closed cleanly, remember that via UDF_FLAG_INCONSISTENT
 * so udf_close_lvid() won't mark it clean later. No-op without an LVID.
 */
static void udf_open_lvid(struct super_block *sb)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = sbi->s_lvid_bh;
	struct logicalVolIntegrityDesc *lvid;
	struct logicalVolIntegrityDescImpUse *lvidiu;

	if (!bh)
		return;
	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
	lvidiu = udf_sb_lvidiu(sb);
	if (!lvidiu)
		return;

	mutex_lock(&sbi->s_alloc_mutex);
	/* Record that Linux touched the volume. */
	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
	if (le32_to_cpu(lvid->integrityType) == LVID_INTEGRITY_TYPE_CLOSE)
		lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
	else
		UDF_SET_FLAG(sb, UDF_FLAG_INCONSISTENT);

	udf_finalize_lvid(lvid);
	mark_buffer_dirty(bh);
	sbi->s_lvid_dirty = 0;
	mutex_unlock(&sbi->s_alloc_mutex);
	/* Make opening of filesystem visible on the media immediately */
	sync_dirty_buffer(bh);
}
2089
/*
 * Mark the filesystem as cleanly closed in the Logical Volume Integrity
 * Descriptor, update the recorded UDF revision limits, and push the change
 * to the media. The integrity type is only set to CLOSE if the volume was
 * consistent when we opened it. No-op without an LVID.
 */
static void udf_close_lvid(struct super_block *sb)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = sbi->s_lvid_bh;
	struct logicalVolIntegrityDesc *lvid;
	struct logicalVolIntegrityDescImpUse *lvidiu;

	if (!bh)
		return;
	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
	lvidiu = udf_sb_lvidiu(sb);
	if (!lvidiu)
		return;

	mutex_lock(&sbi->s_alloc_mutex);
	/* Record that Linux touched the volume. */
	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
	lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
	/* Raise the recorded revision limits, never lower them. */
	if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
		lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
		lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
	if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
		lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_INCONSISTENT))
		lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);

	/*
	 * We set buffer uptodate unconditionally here to avoid spurious
	 * warnings from mark_buffer_dirty() when previous EIO has marked
	 * the buffer as !uptodate
	 */
	set_buffer_uptodate(bh);
	udf_finalize_lvid(lvid);
	mark_buffer_dirty(bh);
	sbi->s_lvid_dirty = 0;
	mutex_unlock(&sbi->s_alloc_mutex);
	/* Make closing of filesystem visible on the media immediately */
	sync_dirty_buffer(bh);
}
2129
/*
 * Hand out the next unique object ID for the volume.
 *
 * The counter lives in the Logical Volume Header Descriptor embedded in
 * the LVID's logicalVolContentsUse area.  Returns the current value and
 * stores the advanced value back (via udf_updated_lvid() so it gets
 * written out), all under s_alloc_mutex.  Returns 0 when the volume has
 * no LVID buffer.
 */
u64 lvid_get_unique_id(struct super_block *sb)
{
	struct buffer_head *bh;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct logicalVolIntegrityDesc *lvid;
	struct logicalVolHeaderDesc *lvhd;
	u64 uniqueID;
	u64 ret;

	bh = sbi->s_lvid_bh;
	if (!bh)
		return 0;

	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
	lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;

	mutex_lock(&sbi->s_alloc_mutex);
	ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
	/*
	 * If the low 32 bits wrapped to 0, skip ahead by 16.  NOTE(review):
	 * presumably because the UDF spec reserves IDs with low 32 bits in
	 * 0..15 - confirm against the UDF unique-ID rules.
	 */
	if (!(++uniqueID & 0xFFFFFFFF))
		uniqueID += 16;
	lvhd->uniqueID = cpu_to_le64(uniqueID);
	udf_updated_lvid(sb);
	mutex_unlock(&sbi->s_alloc_mutex);

	return ret;
}
2156
/*
 * Fill in the VFS superblock for a UDF mount.
 *
 * Sequence: parse mount options, locate the volume recognition sequence
 * (probing block sizes if none was forced), validate the media's UDF
 * revision against what we support, check partition writability, find
 * the fileset and root directory, stamp the LVID open for RW mounts,
 * and finally instantiate the root inode/dentry.
 *
 * Returns 0 on success or a negative errno.  On failure everything that
 * was set up is torn down again (see the two error labels: sbi exists in
 * both cases, but parse_options_failure is taken before any media state
 * was read or written).
 */
static int udf_fill_super(struct super_block *sb, void *options, int silent)
{
	int ret = -EINVAL;
	struct inode *inode = NULL;
	struct udf_options uopt;
	struct kernel_lb_addr rootdir, fileset;
	struct udf_sb_info *sbi;
	bool lvid_open = false;	/* did we already stamp the LVID open? */

	/* Option defaults; udf_parse_options() overrides them below */
	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
	/* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
	uopt.uid = make_kuid(current_user_ns(), overflowuid);
	uopt.gid = make_kgid(current_user_ns(), overflowgid);
	uopt.umask = 0;
	uopt.fmode = UDF_INVALID_MODE;
	uopt.dmode = UDF_INVALID_MODE;
	uopt.nls_map = NULL;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;

	mutex_init(&sbi->s_alloc_mutex);

	if (!udf_parse_options((char *)options, &uopt, false))
		goto parse_options_failure;

	/* "Not located yet" sentinels for the fileset address */
	fileset.logicalBlockNum = 0xFFFFFFFF;
	fileset.partitionReferenceNum = 0xFFFF;

	/* Copy the parsed options into the in-core superblock info */
	sbi->s_flags = uopt.flags;
	sbi->s_uid = uopt.uid;
	sbi->s_gid = uopt.gid;
	sbi->s_umask = uopt.umask;
	sbi->s_fmode = uopt.fmode;
	sbi->s_dmode = uopt.dmode;
	sbi->s_nls_map = uopt.nls_map;
	rwlock_init(&sbi->s_cred_lock);

	/* 0xFFFFFFFF session means "autodetect the last session" */
	if (uopt.session == 0xFFFFFFFF)
		sbi->s_session = udf_get_last_session(sb);
	else
		sbi->s_session = uopt.session;

	udf_debug("Multi-session=%d\n", sbi->s_session);

	/* Fill in the rest of the superblock */
	sb->s_op = &udf_sb_ops;
	sb->s_export_op = &udf_export_ops;

	sb->s_magic = UDF_SUPER_MAGIC;
	sb->s_time_gran = 1000;

	/*
	 * Locate the volume recognition sequence.  With an explicit
	 * blocksize= mount option try only that size; otherwise probe
	 * sizes from the device's logical block size up to 4096.
	 */
	if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
		ret = udf_load_vrs(sb, &uopt, silent, &fileset);
	} else {
		uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
		while (uopt.blocksize <= 4096) {
			ret = udf_load_vrs(sb, &uopt, silent, &fileset);
			if (ret < 0) {
				if (!silent && ret != -EACCES) {
					pr_notice("Scanning with blocksize %u failed\n",
						  uopt.blocksize);
				}
				/* Drop any LVID from the failed attempt */
				brelse(sbi->s_lvid_bh);
				sbi->s_lvid_bh = NULL;
				/*
				 * EACCES is special - we want to propagate to
				 * upper layers that we cannot handle RW mount.
				 */
				if (ret == -EACCES)
					break;
			} else
				break;

			uopt.blocksize <<= 1;
		}
	}
	if (ret < 0) {
		if (ret == -EAGAIN) {
			udf_warn(sb, "No partition found (1)\n");
			ret = -EINVAL;
		}
		goto error_out;
	}

	udf_debug("Lastblock=%u\n", sbi->s_last_block);

	/* Check the media's UDF revision requirements against ours */
	if (sbi->s_lvid_bh) {
		struct logicalVolIntegrityDescImpUse *lvidiu =
							udf_sb_lvidiu(sb);
		uint16_t minUDFReadRev;
		uint16_t minUDFWriteRev;

		if (!lvidiu) {
			ret = -EINVAL;
			goto error_out;
		}
		minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
		minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
		if (minUDFReadRev > UDF_MAX_READ_VERSION) {
			/* Too new even to read: refuse the mount */
			udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
				minUDFReadRev,
				UDF_MAX_READ_VERSION);
			ret = -EINVAL;
			goto error_out;
		} else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) {
			/* Readable but not writable by us: RO only */
			if (!sb_rdonly(sb)) {
				ret = -EACCES;
				goto error_out;
			}
			UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
		}

		sbi->s_udfrev = minUDFWriteRev;

		if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
			UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
		if (minUDFReadRev >= UDF_VERS_USE_STREAMS)
			UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
	}

	if (!sbi->s_partitions) {
		udf_warn(sb, "No partition found (2)\n");
		ret = -EINVAL;
		goto error_out;
	}

	/* Read-only partitions may only be mounted read-only */
	if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
			UDF_PART_FLAG_READ_ONLY) {
		if (!sb_rdonly(sb)) {
			ret = -EACCES;
			goto error_out;
		}
		UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
	}

	ret = udf_find_fileset(sb, &fileset, &rootdir);
	if (ret < 0) {
		udf_warn(sb, "No fileset found\n");
		goto error_out;
	}

	if (!silent) {
		struct timestamp ts;
		udf_time_to_disk_stamp(&ts, sbi->s_record_time);
		udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
			 sbi->s_volume_ident,
			 le16_to_cpu(ts.year), ts.month, ts.day,
			 ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
	}
	/* Mark the volume open (in use) on the media for RW mounts */
	if (!sb_rdonly(sb)) {
		udf_open_lvid(sb);
		lvid_open = true;
	}

	/* Assign the root inode */
	/* assign inodes by physical block number */
	/* perhaps it's not extensible enough, but for now ... */
	inode = udf_iget(sb, &rootdir);
	if (IS_ERR(inode)) {
		udf_err(sb, "Error in udf_iget, block=%u, partition=%u\n",
			rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
		ret = PTR_ERR(inode);
		goto error_out;
	}

	/* Allocate a dentry for the root inode */
	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		udf_err(sb, "Couldn't allocate root dentry\n");
		ret = -ENOMEM;
		goto error_out;
	}
	sb->s_maxbytes = UDF_MAX_FILESIZE;
	sb->s_max_links = UDF_MAX_LINKS;
	return 0;

error_out:
	iput(sbi->s_vat_inode);
parse_options_failure:
	unload_nls(uopt.nls_map);
	/* Undo the on-media open stamp if we got far enough to write it */
	if (lvid_open)
		udf_close_lvid(sb);
	brelse(sbi->s_lvid_bh);
	udf_sb_free_partitions(sb);
	kfree(sbi);
	sb->s_fs_info = NULL;

	return ret;
}
2350
2351void _udf_err(struct super_block *sb, const char *function,
2352 const char *fmt, ...)
2353{
2354 struct va_format vaf;
2355 va_list args;
2356
2357 va_start(args, fmt);
2358
2359 vaf.fmt = fmt;
2360 vaf.va = &args;
2361
2362 pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf);
2363
2364 va_end(args);
2365}
2366
2367void _udf_warn(struct super_block *sb, const char *function,
2368 const char *fmt, ...)
2369{
2370 struct va_format vaf;
2371 va_list args;
2372
2373 va_start(args, fmt);
2374
2375 vaf.fmt = fmt;
2376 vaf.va = &args;
2377
2378 pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf);
2379
2380 va_end(args);
2381}
2382
/*
 * put_super superblock method: release everything udf_fill_super() set
 * up.  The teardown order mirrors the mount error path: drop the VAT
 * inode, free the NLS table, mark the volume closed on the media when
 * it was mounted read-write, then release the LVID buffer, partition
 * maps and finally the sb-info itself.
 */
static void udf_put_super(struct super_block *sb)
{
	struct udf_sb_info *sbi;

	sbi = UDF_SB(sb);

	iput(sbi->s_vat_inode);
	unload_nls(sbi->s_nls_map);
	/* RW mounts stamped the LVID open; stamp it closed again */
	if (!sb_rdonly(sb))
		udf_close_lvid(sb);
	brelse(sbi->s_lvid_bh);
	udf_sb_free_partitions(sb);
	mutex_destroy(&sbi->s_alloc_mutex);
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}
2399
2400static int udf_sync_fs(struct super_block *sb, int wait)
2401{
2402 struct udf_sb_info *sbi = UDF_SB(sb);
2403
2404 mutex_lock(&sbi->s_alloc_mutex);
2405 if (sbi->s_lvid_dirty) {
2406 struct buffer_head *bh = sbi->s_lvid_bh;
2407 struct logicalVolIntegrityDesc *lvid;
2408
2409 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2410 udf_finalize_lvid(lvid);
2411
2412 /*
2413 * Blockdevice will be synced later so we don't have to submit
2414 * the buffer for IO
2415 */
2416 mark_buffer_dirty(bh);
2417 sbi->s_lvid_dirty = 0;
2418 }
2419 mutex_unlock(&sbi->s_alloc_mutex);
2420
2421 return 0;
2422}
2423
2424static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2425{
2426 struct super_block *sb = dentry->d_sb;
2427 struct udf_sb_info *sbi = UDF_SB(sb);
2428 struct logicalVolIntegrityDescImpUse *lvidiu;
2429 u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
2430
2431 lvidiu = udf_sb_lvidiu(sb);
2432 buf->f_type = UDF_SUPER_MAGIC;
2433 buf->f_bsize = sb->s_blocksize;
2434 buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
2435 buf->f_bfree = udf_count_free(sb);
2436 buf->f_bavail = buf->f_bfree;
2437 buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
2438 le32_to_cpu(lvidiu->numDirs)) : 0)
2439 + buf->f_bfree;
2440 buf->f_ffree = buf->f_bfree;
2441 buf->f_namelen = UDF_NAME_LEN;
2442 buf->f_fsid.val[0] = (u32)id;
2443 buf->f_fsid.val[1] = (u32)(id >> 32);
2444
2445 return 0;
2446}
2447
/*
 * Count free blocks recorded in an on-disk space bitmap.
 *
 * Reads the Space Bitmap Descriptor starting at @bitmap->s_extPosition
 * in the current partition and sums the set bits over all blocks the
 * bitmap spans.  NOTE(review): assumes a set bit marks an available
 * block, as the ECMA-167 space bitmap defines - confirm.  Returns the
 * accumulated count so far (possibly 0) if a block cannot be read or
 * the descriptor tag is wrong.
 */
static unsigned int udf_count_free_bitmap(struct super_block *sb,
					  struct udf_bitmap *bitmap)
{
	struct buffer_head *bh = NULL;
	unsigned int accum = 0;
	int index;
	udf_pblk_t block = 0, newblock;
	struct kernel_lb_addr loc;
	uint32_t bytes;	/* bitmap bytes still to be counted */
	uint8_t *ptr;
	uint16_t ident;
	struct spaceBitmapDesc *bm;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
	bh = udf_read_ptagged(sb, &loc, 0, &ident);

	if (!bh) {
		udf_err(sb, "udf_count_free failed\n");
		goto out;
	} else if (ident != TAG_IDENT_SBD) {
		/* Not a Space Bitmap Descriptor - bail out */
		brelse(bh);
		udf_err(sb, "udf_count_free failed\n");
		goto out;
	}

	bm = (struct spaceBitmapDesc *)bh->b_data;
	bytes = le32_to_cpu(bm->numOfBytes);
	index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
	ptr = (uint8_t *)bh->b_data;

	while (bytes > 0) {
		/* Count the portion of the bitmap within this block */
		u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
		accum += bitmap_weight((const unsigned long *)(ptr + index),
					cur_bytes * 8);
		bytes -= cur_bytes;
		if (bytes) {
			/* Bitmap continues in the next physical block */
			brelse(bh);
			newblock = udf_get_lb_pblock(sb, &loc, ++block);
			bh = udf_tread(sb, newblock);
			if (!bh) {
				udf_debug("read failed\n");
				goto out;
			}
			index = 0;	/* no descriptor header after block 0 */
			ptr = (uint8_t *)bh->b_data;
		}
	}
	brelse(bh);
out:
	return accum;
}
2500
/*
 * Count free blocks recorded in an unallocated-space table inode.
 *
 * Walks the extent list of @table (skipping the unallocSpaceEntry
 * header) and sums the extent lengths converted to filesystem blocks.
 * s_alloc_mutex is held so the extent list cannot change while we
 * iterate.
 */
static unsigned int udf_count_free_table(struct super_block *sb,
					 struct inode *table)
{
	unsigned int accum = 0;
	uint32_t elen;
	struct kernel_lb_addr eloc;
	int8_t etype;
	struct extent_position epos;

	mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
	epos.block = UDF_I(table)->i_location;
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.bh = NULL;

	/* udf_next_aext() returns -1 when the extent list is exhausted */
	while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
		accum += (elen >> table->i_sb->s_blocksize_bits);

	brelse(epos.bh);
	mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);

	return accum;
}
2523
/*
 * Return the number of free blocks in the mounted partition.
 *
 * Fallback chain: the cached free-space value in the LVID's free space
 * table is preferred (0xFFFFFFFF there is treated as "not recorded");
 * if that yields nothing, count the unallocated-space bitmap, and
 * failing that, the unallocated-space table.  VAT (virtual) partitions
 * are append-only, so 0 is reported for them.
 */
static unsigned int udf_count_free(struct super_block *sb)
{
	unsigned int accum = 0;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	unsigned int part = sbi->s_partition;
	int ptype = sbi->s_partmaps[part].s_partition_type;

	if (ptype == UDF_METADATA_MAP25) {
		/* Free space is tracked in the underlying physical partition */
		part = sbi->s_partmaps[part].s_type_specific.s_metadata.
							s_phys_partition_ref;
	} else if (ptype == UDF_VIRTUAL_MAP15 || ptype == UDF_VIRTUAL_MAP20) {
		/*
		 * Filesystems with VAT are append-only and we cannot write to
		 * them. Let's just report 0 here.
		 */
		return 0;
	}

	if (sbi->s_lvid_bh) {
		struct logicalVolIntegrityDesc *lvid =
			(struct logicalVolIntegrityDesc *)
			sbi->s_lvid_bh->b_data;
		if (le32_to_cpu(lvid->numOfPartitions) > part) {
			accum = le32_to_cpu(
					lvid->freeSpaceTable[part]);
			if (accum == 0xFFFFFFFF)
				accum = 0;	/* "not recorded" sentinel */
		}
	}

	if (accum)
		return accum;

	map = &sbi->s_partmaps[part];
	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		accum += udf_count_free_bitmap(sb,
					       map->s_uspace.s_bitmap);
	}
	if (accum)
		return accum;

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		accum += udf_count_free_table(sb,
					      map->s_uspace.s_table);
	}
	return accum;
}
2572
/* Module metadata and init/exit entry points */
MODULE_AUTHOR("Ben Fennema");
MODULE_DESCRIPTION("Universal Disk Format Filesystem");
MODULE_LICENSE("GPL");
module_init(init_udf_fs)
module_exit(exit_udf_fs)