/*
 *  linux/fs/minix/bitmap.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * Modified for 680x0 by Hamish Macdonald
 * Fixed for 680x0 by Andreas Schwab
 */

/* bitmap.c contains the code that handles the inode and block bitmaps */

#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/bitops.h>
#include <linux/sched.h>

static DEFINE_SPINLOCK(bitmap_lock);

/*
 * bitmap consists of blocks filled with 16bit words
 * bit set == busy, bit clear == free
 * endianness is a mess, but for counting zero bits it really doesn't matter...
 */
static __u32 count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits)
{
	__u32 sum = 0;
	unsigned blocks = DIV_ROUND_UP(numbits, blocksize * 8);

	while (blocks--) {
		unsigned words = blocksize / 2;
		__u16 *p = (__u16 *)(*map++)->b_data;
		while (words--)
			sum += 16 - hweight16(*p++);
	}

	return sum;
}

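/*
 * minix_free_block - release a data zone back to the zone bitmap.
 * The zone number is turned into a (bitmap block, bit) pair and the bit
 * is cleared under bitmap_lock; freeing an already-free zone is only
 * reported, not treated as fatal.
 */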
void minix_free_block(struct inode *inode, unsigned long block)
{
	struct super_block *sb = inode->i_sb;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct buffer_head *bh;
	int k = sb->s_blocksize_bits + 3;
	unsigned long bit, zone;

	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
		printk("Trying to free block not in datazone\n");
		return;
	}
	zone = block - sbi->s_firstdatazone + 1;
	bit = zone & ((1<<k) - 1);
	zone >>= k;
	if (zone >= sbi->s_zmap_blocks) {
		printk("minix_free_block: nonexistent bitmap buffer\n");
		return;
	}
	bh = sbi->s_zmap[zone];
	spin_lock(&bitmap_lock);
	if (!minix_test_and_clear_bit(bit, bh->b_data))
		printk("minix_free_block (%s:%lu): bit already cleared\n",
		       sb->s_id, block);
	spin_unlock(&bitmap_lock);
	mark_buffer_dirty(bh);
	return;
}

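/*
 * minix_new_block - allocate a data zone for @inode.
 * Each zone-bitmap block is scanned for its first clear bit; the bit is
 * set under bitmap_lock and translated back into an absolute block
 * number.  Returns the new block number, or 0 if no free zone is found.
 */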
int minix_new_block(struct inode * inode)
{
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	int bits_per_zone = 8 * inode->i_sb->s_blocksize;
	int i;

	for (i = 0; i < sbi->s_zmap_blocks; i++) {
		struct buffer_head *bh = sbi->s_zmap[i];
		int j;

		spin_lock(&bitmap_lock);
		j = minix_find_first_zero_bit(bh->b_data, bits_per_zone);
		if (j < bits_per_zone) {
			minix_set_bit(j, bh->b_data);
			spin_unlock(&bitmap_lock);
			mark_buffer_dirty(bh);
			j += i * bits_per_zone + sbi->s_firstdatazone-1;
			if (j < sbi->s_firstdatazone || j >= sbi->s_nzones)
				break;
			return j;
		}
		spin_unlock(&bitmap_lock);
	}
	return 0;
}

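/*
 * minix_count_free_blocks - count free data blocks on the filesystem.
 * Free bits in the zone bitmap are counted and scaled by the zone size
 * (each zone is 1 << s_log_zone_size blocks).
 */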
unsigned long minix_count_free_blocks(struct super_block *sb)
{
	struct minix_sb_info *sbi = minix_sb(sb);
	u32 bits = sbi->s_nzones - sbi->s_firstdatazone + 1;

	return (count_free(sbi->s_zmap, sb->s_blocksize, bits)
		<< sbi->s_log_zone_size);
}

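/*
 * minix_V1_raw_inode - locate the on-disk V1 inode for @ino.
 * Reads the inode-table block containing the inode and returns a
 * pointer into the buffer; the buffer head is handed back in *bh for
 * the caller to release.
 */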
struct minix_inode *
minix_V1_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)
{
	int block;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct minix_inode *p;

	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %ld is out of range\n",
		       sb->s_id, (long)ino);
		return NULL;
	}
	ino--;
	block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
		 ino / MINIX_INODES_PER_BLOCK;
	*bh = sb_bread(sb, block);
	if (!*bh) {
		printk("Unable to read inode block\n");
		return NULL;
	}
	p = (void *)(*bh)->b_data;
	return p + ino % MINIX_INODES_PER_BLOCK;
}

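/*
 * minix_V2_raw_inode - locate the on-disk V2 inode for @ino.
 * Same as the V1 variant, but using the larger minix2_inode layout;
 * *bh is cleared up front so it is NULL on any failure path.
 */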
struct minix2_inode *
minix_V2_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)
{
	int block;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct minix2_inode *p;
	int minix2_inodes_per_block = sb->s_blocksize / sizeof(struct minix2_inode);

	*bh = NULL;
	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %ld is out of range\n",
		       sb->s_id, (long)ino);
		return NULL;
	}
	ino--;
	block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
		 ino / minix2_inodes_per_block;
	*bh = sb_bread(sb, block);
	if (!*bh) {
		printk("Unable to read inode block\n");
		return NULL;
	}
	p = (void *)(*bh)->b_data;
	return p + ino % minix2_inodes_per_block;
}

/* Clear the link count and mode of a deleted inode on disk. */

static void minix_clear_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;

	if (INODE_VERSION(inode) == MINIX_V1) {
		struct minix_inode *raw_inode;
		raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
		if (raw_inode) {
			raw_inode->i_nlinks = 0;
			raw_inode->i_mode = 0;
		}
	} else {
		struct minix2_inode *raw_inode;
		raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
		if (raw_inode) {
			raw_inode->i_nlinks = 0;
			raw_inode->i_mode = 0;
		}
	}
	if (bh) {
		mark_buffer_dirty(bh);
		brelse (bh);
	}
}

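/*
 * minix_free_inode - release an inode number back to the inode bitmap.
 * The on-disk inode is cleared first, then the corresponding bit in the
 * inode bitmap is cleared under bitmap_lock.
 */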
void minix_free_inode(struct inode * inode)
{
	struct super_block *sb = inode->i_sb;
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	struct buffer_head *bh;
	int k = sb->s_blocksize_bits + 3;
	unsigned long ino, bit;

	ino = inode->i_ino;
	if (ino < 1 || ino > sbi->s_ninodes) {
		printk("minix_free_inode: inode 0 or nonexistent inode\n");
		return;
	}
	bit = ino & ((1<<k) - 1);
	ino >>= k;
	if (ino >= sbi->s_imap_blocks) {
		printk("minix_free_inode: nonexistent imap in superblock\n");
		return;
	}

	minix_clear_inode(inode);	/* clear on-disk copy */

	bh = sbi->s_imap[ino];
	spin_lock(&bitmap_lock);
	if (!minix_test_and_clear_bit(bit, bh->b_data))
		printk("minix_free_inode: bit %lu already cleared\n", bit);
	spin_unlock(&bitmap_lock);
	mark_buffer_dirty(bh);
}

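/*
 * minix_new_inode - allocate a new in-core inode under @dir.
 * Finds and sets the first clear bit in the inode bitmap under
 * bitmap_lock, then initialises the new inode.  On failure NULL is
 * returned and *error is left set to -ENOMEM or -ENOSPC.
 */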
struct inode *minix_new_inode(const struct inode *dir, umode_t mode, int *error)
{
	struct super_block *sb = dir->i_sb;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct inode *inode = new_inode(sb);
	struct buffer_head * bh;
	int bits_per_zone = 8 * sb->s_blocksize;
	unsigned long j;
	int i;

	if (!inode) {
		*error = -ENOMEM;
		return NULL;
	}
	j = bits_per_zone;
	bh = NULL;
	*error = -ENOSPC;
	spin_lock(&bitmap_lock);
	for (i = 0; i < sbi->s_imap_blocks; i++) {
		bh = sbi->s_imap[i];
		j = minix_find_first_zero_bit(bh->b_data, bits_per_zone);
		if (j < bits_per_zone)
			break;
	}
	if (!bh || j >= bits_per_zone) {
		spin_unlock(&bitmap_lock);
		iput(inode);
		return NULL;
	}
	if (minix_test_and_set_bit(j, bh->b_data)) {	/* shouldn't happen */
		spin_unlock(&bitmap_lock);
		printk("minix_new_inode: bit already set\n");
		iput(inode);
		return NULL;
	}
	spin_unlock(&bitmap_lock);
	mark_buffer_dirty(bh);
	j += i * bits_per_zone;
	if (!j || j > sbi->s_ninodes) {
		iput(inode);
		return NULL;
	}
	inode_init_owner(inode, dir, mode);
	inode->i_ino = j;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	inode->i_blocks = 0;
	memset(&minix_i(inode)->u, 0, sizeof(minix_i(inode)->u));
	insert_inode_hash(inode);
	mark_inode_dirty(inode);

	*error = 0;
	return inode;
}

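/*
 * minix_count_free_inodes - count free slots in the inode bitmap.
 */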
unsigned long minix_count_free_inodes(struct super_block *sb)
{
	struct minix_sb_info *sbi = minix_sb(sb);
	u32 bits = sbi->s_ninodes + 1;

	return count_free(sbi->s_imap, sb->s_blocksize, bits);
}