/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dcache.c
 *
 * dentry cache handling code
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/namei.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "ocfs2_trace.h"

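/*
 * Stash the parent directory's lock generation in d_fsdata of a
 * still-negative dentry, so ocfs2_dentry_revalidate() can later tell
 * whether the parent directory has changed underneath this negative
 * entry.
 */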
void ocfs2_dentry_attach_gen(struct dentry *dentry)
{
	unsigned long gen =
		OCFS2_I(d_inode(dentry->d_parent))->ip_dir_lock_gen;
	BUG_ON(d_inode(dentry));
	dentry->d_fsdata = (void *)gen;
}


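/*
 * Decide whether a cached dentry can still be trusted. Negative
 * dentries are checked against the parent directory's lock
 * generation; positive dentries are invalidated if the inode was
 * deleted, orphaned (nlink == 0), or never received a dentry lock.
 * Returns 1 when the dentry is still valid, 0 otherwise, and
 * -ECHILD for RCU-walk lookups.
 */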
static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct inode *inode;
	int ret = 0;    /* if all else fails, just return false */
	struct ocfs2_super *osb;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	inode = d_inode(dentry);
	osb = OCFS2_SB(dentry->d_sb);

	trace_ocfs2_dentry_revalidate(dentry, dentry->d_name.len,
				      dentry->d_name.name);

	/* For a negative dentry -
	 * check the generation number of the parent and compare with the
	 * one stored in dentry->d_fsdata.
	 */
	if (inode == NULL) {
		unsigned long gen = (unsigned long) dentry->d_fsdata;
		unsigned long pgen;
		spin_lock(&dentry->d_lock);
		pgen = OCFS2_I(d_inode(dentry->d_parent))->ip_dir_lock_gen;
		spin_unlock(&dentry->d_lock);
		trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len,
						       dentry->d_name.name,
						       pgen, gen);
		if (gen != pgen)
			goto bail;
		goto valid;
	}

	BUG_ON(!osb);

	if (inode == osb->root_inode || is_bad_inode(inode))
		goto bail;

	spin_lock(&OCFS2_I(inode)->ip_lock);
	/* did we or someone else delete this inode? */
	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&OCFS2_I(inode)->ip_lock);
		trace_ocfs2_dentry_revalidate_delete(
				(unsigned long long)OCFS2_I(inode)->ip_blkno);
		goto bail;
	}
	spin_unlock(&OCFS2_I(inode)->ip_lock);

	/*
	 * We don't need a cluster lock to test this because once an
	 * inode nlink hits zero, it never goes back.
	 */
	if (inode->i_nlink == 0) {
		trace_ocfs2_dentry_revalidate_orphaned(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			S_ISDIR(inode->i_mode));
		goto bail;
	}

	/*
	 * If the last lookup failed to create a dentry lock, redo it now.
	 */
	if (!dentry->d_fsdata) {
		trace_ocfs2_dentry_revalidate_nofsdata(
			(unsigned long long)OCFS2_I(inode)->ip_blkno);
		goto bail;
	}

valid:
	ret = 1;

bail:
	trace_ocfs2_dentry_revalidate_ret(ret);
	return ret;
}

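/*
 * Helper for ocfs2_find_local_alias(): returns 1 if this alias has
 * lock data attached, (optionally) is still hashed, and actually
 * lives in the directory identified by parent_blkno.
 */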
static int ocfs2_match_dentry(struct dentry *dentry,
			      u64 parent_blkno,
			      int skip_unhashed)
{
	struct inode *parent;

	/*
	 * ocfs2_lookup() does a d_splice_alias() _before_ attaching
	 * to the lock data, so we skip those here, otherwise
	 * ocfs2_dentry_attach_lock() will get its original dentry
	 * back.
	 */
	if (!dentry->d_fsdata)
		return 0;

	if (!dentry->d_parent)
		return 0;

	if (skip_unhashed && d_unhashed(dentry))
		return 0;

	parent = d_inode(dentry->d_parent);
	/* Negative parent dentry? */
	if (!parent)
		return 0;

	/* Name is in a different directory. */
	if (OCFS2_I(parent)->ip_blkno != parent_blkno)
		return 0;

	return 1;
}

/*
 * Walk the inode alias list, and find a dentry which has a given
 * parent. ocfs2_dentry_attach_lock() wants to find _any_ alias as it
 * is looking for a dentry_lock reference. The downconvert thread is
 * looking to unhash aliases, so we allow it to skip any that already
 * have that property.
 */
struct dentry *ocfs2_find_local_alias(struct inode *inode,
				      u64 parent_blkno,
				      int skip_unhashed)
{
	struct dentry *dentry;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
			trace_ocfs2_find_local_alias(dentry->d_name.len,
						     dentry->d_name.name);

			dget_dlock(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&inode->i_lock);
			return dentry;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
	return NULL;
}

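/*
 * Serializes attaching and detaching dentry->d_fsdata (the dentry
 * lock) against asynchronous dput() from dcache pruning; see the
 * comment above ocfs2_dentry_attach_lock().
 */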
DEFINE_SPINLOCK(dentry_attach_lock);

/*
 * Attach this dentry to a cluster lock.
 *
 * Dentry locks cover all links in a given directory to a particular
 * inode. We do this so that ocfs2 can build a lock name which all
 * nodes in the cluster can agree on at all times. Shoving full names
 * in the cluster lock won't work due to size restrictions. Covering
 * links inside of a directory is a good compromise because it still
 * allows us to use the parent directory lock to synchronize
 * operations.
 *
 * Call this function with the parent dir semaphore and the parent dir
 * cluster lock held.
 *
 * The dir semaphore will protect us from having to worry about
 * concurrent processes on our node trying to attach a lock at the
 * same time.
 *
 * The dir cluster lock (held at either PR or EX mode) protects us
 * from unlink and rename on other nodes.
 *
 * A dput() can happen asynchronously due to pruning, so we cover
 * attaching and detaching the dentry lock with a
 * dentry_attach_lock.
 *
 * A node which has done lookup on a name retains a protected read
 * lock until final dput. If the user requests an unlink or rename,
 * the protected read is upgraded to an exclusive lock. Other nodes
 * who have seen the dentry will then be informed that they need to
 * downgrade their lock, which will involve d_delete on the
 * dentry. This happens in ocfs2_dentry_convert_worker().
 */
int ocfs2_dentry_attach_lock(struct dentry *dentry,
			     struct inode *inode,
			     u64 parent_blkno)
{
	int ret;
	struct dentry *alias;
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;

	trace_ocfs2_dentry_attach_lock(dentry->d_name.len, dentry->d_name.name,
				       (unsigned long long)parent_blkno, dl);

	/*
	 * Negative dentry. We ignore these for now.
	 *
	 * XXX: Could we improve ocfs2_dentry_revalidate() by
	 * tracking these?
	 */
	if (!inode)
		return 0;

	if (d_really_is_negative(dentry) && dentry->d_fsdata) {
		/* We are converting a negative dentry to a positive one,
		 * so clear out the stale dentry->d_fsdata. */
		dentry->d_fsdata = dl = NULL;
	}

	if (dl) {
		mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
				" \"%pd\": old parent: %llu, new: %llu\n",
				dentry,
				(unsigned long long)parent_blkno,
				(unsigned long long)dl->dl_parent_blkno);
		return 0;
	}

	alias = ocfs2_find_local_alias(inode, parent_blkno, 0);
	if (alias) {
		/*
		 * Great, an alias exists, which means we must have a
		 * dentry lock already. We can just grab the lock off
		 * the alias and add it to the list.
		 *
		 * We're depending here on the fact that this dentry
		 * was found and exists in the dcache and so must have
		 * a reference to the dentry_lock because we can't
		 * race creates. Final dput() cannot happen on it
		 * since we have it pinned, so our reference is safe.
		 */
		dl = alias->d_fsdata;
		mlog_bug_on_msg(!dl, "parent %llu, ino %llu\n",
				(unsigned long long)parent_blkno,
				(unsigned long long)OCFS2_I(inode)->ip_blkno);

		mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
				" \"%pd\": old parent: %llu, new: %llu\n",
				dentry,
				(unsigned long long)parent_blkno,
				(unsigned long long)dl->dl_parent_blkno);

		trace_ocfs2_dentry_attach_lock_found(dl->dl_lockres.l_name,
				(unsigned long long)parent_blkno,
				(unsigned long long)OCFS2_I(inode)->ip_blkno);

		goto out_attach;
	}

	/*
	 * There are no other aliases
	 */
	dl = kmalloc(sizeof(*dl), GFP_NOFS);
	if (!dl) {
		ret = -ENOMEM;
		mlog_errno(ret);
		return ret;
	}

	dl->dl_count = 0;
	/*
	 * Does this have to happen below, for all attaches, in case
	 * the struct inode gets blown away by the downconvert thread?
	 */
	dl->dl_inode = igrab(inode);
	dl->dl_parent_blkno = parent_blkno;
	ocfs2_dentry_lock_res_init(dl, parent_blkno, inode);

out_attach:
	spin_lock(&dentry_attach_lock);
	if (unlikely(dentry->d_fsdata && !alias)) {
		/* d_fsdata was set by a racing thread doing the same
		 * attach as this one. Let that thread win and bail out
		 * here, freeing the lock we just allocated.
		 */
		spin_unlock(&dentry_attach_lock);
		iput(dl->dl_inode);
		ocfs2_lock_res_free(&dl->dl_lockres);
		kfree(dl);
		return 0;
	}

	dentry->d_fsdata = dl;
	dl->dl_count++;
	spin_unlock(&dentry_attach_lock);

	/*
	 * This actually gets us our PRMODE level lock. From now on,
	 * we'll have a notification if one of these names is
	 * destroyed on another node.
	 */
	ret = ocfs2_dentry_lock(dentry, 0);
	if (!ret)
		ocfs2_dentry_unlock(dentry, 0);
	else
		mlog_errno(ret);

	/*
	 * In case of error, manually free the allocation and do the iput().
	 * We need to do this because error here means no d_instantiate(),
	 * which means iput() will not be called during dput(dentry).
	 */
	if (ret < 0 && !alias) {
		ocfs2_lock_res_free(&dl->dl_lockres);
		BUG_ON(dl->dl_count != 1);
		spin_lock(&dentry_attach_lock);
		dentry->d_fsdata = NULL;
		spin_unlock(&dentry_attach_lock);
		kfree(dl);
		iput(inode);
	}

	dput(alias);

	return ret;
}

/*
 * ocfs2_dentry_iput() and friends.
 *
 * At this point, our particular dentry is detached from the inode's
 * alias list, so there's no way that the locking code can find it.
 *
 * The interesting stuff happens when we determine that our lock needs
 * to go away because this is the last subdir alias in the
 * system. This function needs to handle a couple of things:
 *
 * 1) Synchronizing lock shutdown with the downconvert threads. This
 *    is already handled for us via the lockres release drop function
 *    called in ocfs2_release_dentry_lock()
 *
 * 2) A race may occur when we're doing our lock shutdown and
 *    another process wants to create a new dentry lock. Right now we
 *    let them race, which means that for a very short while, this
 *    node might have two locks on a lock resource. This shouldn't be
 *    a problem though because one of them is in the process of being
 *    thrown out.
 */
static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb,
				   struct ocfs2_dentry_lock *dl)
{
	iput(dl->dl_inode);
	ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
	ocfs2_lock_res_free(&dl->dl_lockres);
	kfree(dl);
}

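/*
 * Drop one reference on a dentry lock. The final put tears the lock
 * down via ocfs2_drop_dentry_lock(), which also releases the inode
 * reference taken at attach time.
 */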
void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
			   struct ocfs2_dentry_lock *dl)
{
	int unlock = 0;

	BUG_ON(dl->dl_count == 0);

	spin_lock(&dentry_attach_lock);
	dl->dl_count--;
	unlock = !dl->dl_count;
	spin_unlock(&dentry_attach_lock);

	if (unlock)
		ocfs2_drop_dentry_lock(osb, dl);
}

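/*
 * ->d_iput callback: release our dentry lock reference (if one was
 * attached) before dropping the inode. A hashed, connected dentry
 * without a cluster lock indicates a bug, so complain loudly.
 */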
static void ocfs2_dentry_iput(struct dentry *dentry, struct inode *inode)
{
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;

	if (!dl) {
		/*
		 * No dentry lock is ok if we're disconnected or
		 * unhashed.
		 */
		if (!(dentry->d_flags & DCACHE_DISCONNECTED) &&
		    !d_unhashed(dentry)) {
			unsigned long long ino = 0ULL;
			if (inode)
				ino = (unsigned long long)OCFS2_I(inode)->ip_blkno;
			mlog(ML_ERROR, "Dentry is missing cluster lock. "
			     "inode: %llu, d_flags: 0x%x, d_name: %pd\n",
			     ino, dentry->d_flags, dentry);
		}

		goto out;
	}

	mlog_bug_on_msg(dl->dl_count == 0, "dentry: %pd, count: %u\n",
			dentry, dl->dl_count);

	ocfs2_dentry_lock_put(OCFS2_SB(dentry->d_sb), dl);

out:
	iput(inode);
}

/*
 * d_move(), but keep the locks in sync.
 *
 * When we are done, "dentry" will have the parent dir and name of
 * "target", which will be thrown away.
 *
 * We manually update the lock of "dentry" if need be.
 *
 * "target" doesn't have its dentry lock touched - we allow the later
 * dput() to handle this for us.
 *
 * This is called during ocfs2_rename(), while holding parent
 * directory locks. The dentries have already been deleted on other
 * nodes via ocfs2_remote_dentry_delete().
 *
 * Normally, the VFS handles the d_move() for the file system, after
 * the ->rename() callback. OCFS2 wants to handle this internally, so
 * the new lock can be created atomically with respect to the cluster.
 */
void ocfs2_dentry_move(struct dentry *dentry, struct dentry *target,
		       struct inode *old_dir, struct inode *new_dir)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(old_dir->i_sb);
	struct inode *inode = d_inode(dentry);

	/*
	 * Move within the same directory, so the actual lock info won't
	 * change.
	 *
	 * XXX: Is there any advantage to dropping the lock here?
	 */
	if (old_dir == new_dir)
		goto out_move;

	ocfs2_dentry_lock_put(osb, dentry->d_fsdata);

	dentry->d_fsdata = NULL;
	ret = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(new_dir)->ip_blkno);
	if (ret)
		mlog_errno(ret);

out_move:
	d_move(dentry, target);
}

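/*
 * Dentry operations used for ocfs2 dentries (installed by the
 * superblock setup code, typically via sb->s_d_op), giving the VFS
 * cluster-aware revalidation and iput.
 */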
const struct dentry_operations ocfs2_dentry_ops = {
	.d_revalidate		= ocfs2_dentry_revalidate,
	.d_iput			= ocfs2_dentry_iput,
};