// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfs/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/swap.h>

#include "btree.h"
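
/*
 * Copy "len" bytes at offset "off" within the node into "buf".
 * Offsets are relative to the start of the node; only the node's first
 * page is mapped, so the requested range is expected to fit within it.
 */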
void hfs_bnode_read(struct hfs_bnode *node, void *buf,
		int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memcpy(buf, kmap(page) + off, len);
	kunmap(page);
}

u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 1);
	return data;
}
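
/*
 * Read a record key.  Leaf keys and variable-size index keys store
 * their length in the first byte; fixed-size index keys use the tree's
 * max_key_len.  The copy includes the length byte itself.
 */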
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS)
		key_len = hfs_bnode_read_u8(node, off) + 1;
	else
		key_len = tree->max_key_len + 1;

	hfs_bnode_read(node, key, off, key_len);
}
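
/*
 * Copy "len" bytes from "buf" into the node at offset "off" and mark
 * the backing page dirty so the change reaches the media on writeback.
 */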
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memcpy(kmap(page) + off, buf, len);
	kunmap(page);
	set_page_dirty(page);
}

void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	// optimize later...
	hfs_bnode_write(node, &v, off, 2);
}

void hfs_bnode_write_u8(struct hfs_bnode *node, int off, u8 data)
{
	// optimize later...
	hfs_bnode_write(node, &data, off, 1);
}

void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memset(kmap(page) + off, 0, len);
	kunmap(page);
	set_page_dirty(page);
}
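
/*
 * hfs_bnode_copy() copies record data from one node to another;
 * hfs_bnode_move() shifts data within a single node and uses memmove()
 * because the source and destination ranges may overlap.
 */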
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		struct hfs_bnode *src_node, int src, int len)
{
	struct page *src_page, *dst_page;

	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page[0];
	dst_page = dst_node->page[0];

	memcpy(kmap(dst_page) + dst, kmap(src_page) + src, len);
	kunmap(src_page);
	kunmap(dst_page);
	set_page_dirty(dst_page);
}

void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page *page;
	void *ptr;

	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	page = node->page[0];
	ptr = kmap(page);
	memmove(ptr + dst, ptr + src, len);
	kunmap(page);
	set_page_dirty(page);
}
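
/*
 * Debugging helper: dump the node descriptor, then walk the record
 * offset table at the end of the node and print each record's offset
 * plus its key length (and, for index records, the child node's CNID).
 */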
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg_cont(BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
				tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1;
			else
				tmp = node->tree->max_key_len + 1;
			hfs_dbg_cont(BNODE_MOD, " (%d,%d",
				     tmp, hfs_bnode_read_u8(node, key_off));
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u8(node, key_off);
			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
		}
	}
	hfs_dbg_cont(BNODE_MOD, "\n");
}
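
/*
 * Unlink a node from its sibling chain: fix up the prev/next pointers
 * of the neighbouring nodes (or the tree's leaf_head/leaf_tail when the
 * node is a leaf) and mark the node deleted so it is released by the
 * final hfs_bnode_put().
 */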
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	// move down?
	if (!node->prev && !node->next) {
		printk(KERN_DEBUG "hfs_btree_del_level\n");
	}
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}
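
/*
 * Node cache: in-memory bnodes are kept in a small per-tree hash table
 * indexed by node number and protected by tree->hash_lock.
 */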
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash) {
		if (node->this == cnid) {
			return node;
		}
	}
	return NULL;
}
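
/*
 * Allocate an in-memory bnode, insert it into the node hash (or return
 * the already-cached node after it finishes initializing) and read the
 * node's backing pages from the tree inode's page cache.  On I/O
 * failure the node is returned with HFS_BNODE_ERROR set instead of NULL.
 */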
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid * tree->node_size;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; i++) {
		page = read_mapping_page(mapping, block++, NULL);
		if (IS_ERR(page))
			goto fail;
		if (PageError(page)) {
			put_page(page);
			goto fail;
		}
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}
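
/* Remove a node from the node hash; callers hold tree->hash_lock. */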
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}
/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap(node->page[0]);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u8(node, off) + 1;
		if (key_size >= entry_size /*|| key_size & 1*/)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}
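
/* Drop the node's page references and free the in-memory structure. */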
void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}
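
/*
 * Create the in-memory node for a freshly allocated node number: the
 * node must not already be cached, and its on-disk contents are zeroed
 * before it is returned.
 */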
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memset(kmap(*pagep) + node->page_offset, 0,
	       min((int)PAGE_SIZE, (int)tree->node_size));
	set_page_dirty(*pagep);
	kunmap(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memset(kmap(*++pagep), 0, PAGE_SIZE);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}
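
/* Take an additional reference on a node. */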
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}