/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include <linux/sched.h>
#include <linux/parser.h>
#include "incore.h"
#include "util.h"

/* Options for hostdata parser */

enum {
	Opt_jid,
	Opt_id,
	Opt_first,
	Opt_nodir,
	Opt_err,
};
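
/*
 * Illustrative sketch (not the actual table, which lives in the lock
 * module, e.g. lock_dlm.c): a lock module maps these options onto the
 * mount hostdata string via the match_table_t it exports as lm_tokens:
 *
 *	static const match_table_t example_tokens = {
 *		{ Opt_jid,   "jid=%d" },
 *		{ Opt_id,    "id=%d" },
 *		{ Opt_first, "first=%d" },
 *		{ Opt_nodir, "nodir=%d" },
 *		{ Opt_err,   NULL },
 *	};
 */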

/*
 * lm_lockname types
 */

#define LM_TYPE_RESERVED	0x00
#define LM_TYPE_NONDISK		0x01
#define LM_TYPE_INODE		0x02
#define LM_TYPE_RGRP		0x03
#define LM_TYPE_META		0x04
#define LM_TYPE_IOPEN		0x05
#define LM_TYPE_FLOCK		0x06
#define LM_TYPE_PLOCK		0x07
#define LM_TYPE_QUOTA		0x08
#define LM_TYPE_JOURNAL		0x09

/*
 * lm_lock() states
 *
 * SHARED is compatible with SHARED, not with DEFERRED or EX.
 * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
 */

#define LM_ST_UNLOCKED		0
#define LM_ST_EXCLUSIVE		1
#define LM_ST_DEFERRED		2
#define LM_ST_SHARED		3
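
/*
 * A minimal sketch (illustrative only, not used by GFS2 itself) of the
 * compatibility relation described above: UNLOCKED is compatible with
 * everything; otherwise two granted states are compatible iff they are
 * equal and neither is EXCLUSIVE.
 */
static inline bool lm_state_compatible_example(unsigned int a, unsigned int b)
{
	if (a == LM_ST_UNLOCKED || b == LM_ST_UNLOCKED)
		return true;
	return a == b && a != LM_ST_EXCLUSIVE;
}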

/*
 * lm_lock() flags
 *
 * LM_FLAG_TRY
 * Don't wait to acquire the lock if it can't be granted immediately.
 *
 * LM_FLAG_TRY_1CB
 * Send one blocking callback if TRY is set and the lock is not granted.
 *
 * LM_FLAG_NOEXP
 * GFS sets this flag on lock requests it makes while doing journal recovery.
 * These special requests should not be blocked by the recovery the way
 * ordinary locks would be.
 *
 * LM_FLAG_ANY
 * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
 * also be granted in SHARED.  The preferred state is whichever is compatible
 * with other granted locks, or the specified state if no other locks exist.
 *
 * LM_FLAG_PRIORITY
 * Override fairness considerations.  Suppose a lock is held in a shared state
 * and there is a pending request for the deferred state.  A shared lock
 * request with the priority flag would be allowed to bypass the deferred
 * request and directly join the other shared lock.  A shared lock request
 * without the priority flag might be forced to wait until the deferred
 * request had acquired and released the lock.
 */

#define LM_FLAG_TRY		0x0001
#define LM_FLAG_TRY_1CB		0x0002
#define LM_FLAG_NOEXP		0x0004
#define LM_FLAG_ANY		0x0008
#define LM_FLAG_PRIORITY	0x0010
#define GL_ASYNC		0x0040
#define GL_EXACT		0x0080
#define GL_SKIP			0x0100
#define GL_NOCACHE		0x0400
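
/*
 * Example (illustrative): a non-blocking attempt on a glock using the
 * flags above.  With LM_FLAG_TRY set, gfs2_glock_nq() (declared below)
 * returns GLR_TRYFAILED instead of sleeping when the lock cannot be
 * granted immediately; "gl" and "gh" here are assumed to exist:
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, LM_FLAG_TRY, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (error == GLR_TRYFAILED) {
 *		gfs2_holder_uninit(&gh);
 *		... back off and retry later ...
 *	}
 */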

/*
 * lm_async_cb return flags
 *
 * LM_OUT_ST_MASK
 * Masks the lower two bits of lock state in the returned value.
 *
 * LM_OUT_CANCELED
 * The lock request was canceled.
 */

#define LM_OUT_ST_MASK		0x00000003
#define LM_OUT_CANCELED		0x00000008
#define LM_OUT_ERROR		0x00000004
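
/*
 * Sketch (illustrative): how a lock module's async reply might be
 * decoded using the masks above; "ret" is a hypothetical value handed
 * to the completion path:
 *
 *	if (ret & LM_OUT_ERROR)
 *		... the request failed ...
 *	else if (ret & LM_OUT_CANCELED)
 *		... the request was canceled ...
 *	else
 *		new_state = ret & LM_OUT_ST_MASK;
 */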

/*
 * lm_recovery_done() messages
 */

#define LM_RD_GAVEUP		308
#define LM_RD_SUCCESS		309

#define GLR_TRYFAILED		13

#define GL_GLOCK_MAX_HOLD        (long)(HZ / 5)
#define GL_GLOCK_DFT_HOLD        (long)(HZ / 5)
#define GL_GLOCK_MIN_HOLD        (long)(10)
#define GL_GLOCK_HOLD_INCR       (long)(HZ / 20)
#define GL_GLOCK_HOLD_DECR       (long)(HZ / 40)
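
/*
 * Note: these hold times are in jiffies, so the HZ-based values are
 * constant in wall-clock terms: HZ / 5 is 200 ms regardless of CONFIG_HZ,
 * HZ / 20 is 50 ms, and HZ / 40 is about 25 ms (subject to integer
 * division).  Only GL_GLOCK_MIN_HOLD, a raw count of 10 jiffies, varies
 * with the tick rate (10 ms at HZ=1000, 40 ms at HZ=250).
 */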

struct lm_lockops {
	const char *lm_proto_name;
	int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
	void (*lm_first_done) (struct gfs2_sbd *sdp);
	void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
				    unsigned int result);
	void (*lm_unmount) (struct gfs2_sbd *sdp);
	void (*lm_withdraw) (struct gfs2_sbd *sdp);
	void (*lm_put_lock) (struct gfs2_glock *gl);
	int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
			unsigned int flags);
	void (*lm_cancel) (struct gfs2_glock *gl);
	const match_table_t *lm_tokens;
};
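
/*
 * Illustrative sketch of a minimal lock-module definition (the real
 * in-tree implementation is gfs2_dlm_ops, declared below; the names
 * used here are hypothetical).  Callbacks left NULL are checked for
 * and skipped by the callers:
 *
 *	static const struct lm_lockops example_ops = {
 *		.lm_proto_name = "lock_example",
 *		.lm_put_lock   = gfs2_glock_free,
 *		.lm_tokens     = &example_tokens,
 *	};
 */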

extern struct workqueue_struct *gfs2_delete_workqueue;

static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	struct pid *pid;

	/* Look in glock's list of holders for one with current task as owner */
	spin_lock(&gl->gl_lockref.lock);
	pid = task_pid(current);
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		/* Granted holders sit at the head of the list, so stop
		   at the first waiter */
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			break;
		if (gh->gh_owner_pid == pid)
			goto out;
	}
	gh = NULL;
out:
	spin_unlock(&gl->gl_lockref.lock);

	return gh;
}

static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_EXCLUSIVE;
}

static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_DEFERRED;
}

static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_SHARED;
}

static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		return (struct address_space *)(gl + 1);
	return NULL;
}
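
/*
 * Layout note (for the cast above): glocks with GLOF_ASPACE set are
 * allocated from a slab whose objects carry a struct address_space
 * directly after the glock, so "gl + 1" points at that mapping:
 *
 *	[ struct gfs2_glock ][ struct address_space ]
 *	^ gl                 ^ gl + 1
 */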

extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
			  const struct gfs2_glock_operations *glops,
			  int create, struct gfs2_glock **glp);
extern void gfs2_glock_hold(struct gfs2_glock *gl);
extern void gfs2_glock_put(struct gfs2_glock *gl);
extern void gfs2_glock_queue_put(struct gfs2_glock *gl);
extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
			     u16 flags, struct gfs2_holder *gh);
extern void gfs2_holder_reinit(unsigned int state, u16 flags,
			       struct gfs2_holder *gh);
extern void gfs2_holder_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq(struct gfs2_holder *gh);
extern int gfs2_glock_poll(struct gfs2_holder *gh);
extern int gfs2_glock_wait(struct gfs2_holder *gh);
extern void gfs2_glock_dq(struct gfs2_holder *gh);
extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
			     const struct gfs2_glock_operations *glops,
			     unsigned int state, u16 flags,
			     struct gfs2_holder *gh);
extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { gfs2_dump_glock(NULL, gl); BUG(); } } while(0)
extern __printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);

/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
				     unsigned int state, u16 flags,
				     struct gfs2_holder *gh)
{
	int error;

	gfs2_holder_init(gl, state, flags, gh);

	error = gfs2_glock_nq(gh);
	if (error)
		gfs2_holder_uninit(gh);

	return error;
}
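
/*
 * Example (illustrative only, not part of the GFS2 API): the typical
 * acquire/use/release pattern built from gfs2_glock_nq_init() and
 * gfs2_glock_dq_uninit().  The callback and its argument here are
 * hypothetical stand-ins for whatever work needs the lock held.
 */
static inline int gfs2_glock_example_locked(struct gfs2_glock *gl,
					    int (*fn)(void *), void *data)
{
	struct gfs2_holder gh;
	int error;

	/* Take the glock in shared mode; on failure no holder remains */
	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
	if (error)
		return error;
	error = fn(data);
	/* Drop the lock and release the holder */
	gfs2_glock_dq_uninit(&gh);
	return error;
}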

extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
extern void gfs2_glock_free(struct gfs2_glock *gl);

extern int __init gfs2_glock_init(void);
extern void gfs2_glock_exit(void);

extern int gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
extern int gfs2_register_debugfs(void);
extern void gfs2_unregister_debugfs(void);

extern const struct lm_lockops gfs2_dlm_ops;

static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
{
	gh->gh_gl = NULL;
}

static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
{
	return gh->gh_gl;
}

/**
 * glock_set_object - set the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 */
static inline void glock_set_object(struct gfs2_glock *gl, void *object)
{
	spin_lock(&gl->gl_lockref.lock);
	if (gfs2_assert_warn(gl->gl_name.ln_sbd, gl->gl_object == NULL))
		gfs2_dump_glock(NULL, gl);
	gl->gl_object = object;
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * glock_clear_object - clear the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 *
 * I'd love to similarly add this:
 *	else if (gfs2_assert_warn(gl->gl_name.ln_sbd, gl->gl_object == object))
 *		gfs2_dump_glock(NULL, gl);
 * Unfortunately, that's not possible because as soon as gfs2_delete_inode
 * frees the block in the rgrp, another process can reassign it for an I_NEW
 * inode in gfs2_create_inode because that calls new_inode, not gfs2_iget.
 * That means gfs2_delete_inode may subsequently try to call this function
 * for a glock that's already pointing to a brand new inode.  If we clear the
 * new inode's gl_object, we'll introduce metadata corruption.  Function
 * gfs2_delete_inode calls clear_inode, which calls gfs2_clear_inode, which
 * also tries to clear gl_object, so it's more than just gfs2_delete_inode.
 */
static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
{
	spin_lock(&gl->gl_lockref.lock);
	if (gl->gl_object == object)
		gl->gl_object = NULL;
	spin_unlock(&gl->gl_lockref.lock);
}

#endif /* __GLOCK_DOT_H__ */