/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#ifndef BTRFS_TRANSACTION_H
#define BTRFS_TRANSACTION_H

#include <linux/refcount.h>
#include "btrfs_inode.h"
#include "delayed-ref.h"
#include "ctree.h"

14enum btrfs_trans_state {
15 TRANS_STATE_RUNNING = 0,
16 TRANS_STATE_BLOCKED = 1,
17 TRANS_STATE_COMMIT_START = 2,
18 TRANS_STATE_COMMIT_DOING = 3,
19 TRANS_STATE_UNBLOCKED = 4,
20 TRANS_STATE_COMPLETED = 5,
21 TRANS_STATE_MAX = 6,
22};
23
24#define BTRFS_TRANS_HAVE_FREE_BGS 0
25#define BTRFS_TRANS_DIRTY_BG_RUN 1
26#define BTRFS_TRANS_CACHE_ENOSPC 2
27
28struct btrfs_transaction {
29 u64 transid;
30 /*
31 * total external writers(USERSPACE/START/ATTACH) in this
32 * transaction, it must be zero before the transaction is
33 * being committed
34 */
35 atomic_t num_extwriters;
36 /*
37 * total writers in this transaction, it must be zero before the
38 * transaction can end
39 */
40 atomic_t num_writers;
41 refcount_t use_count;
42 atomic_t pending_ordered;
43
44 unsigned long flags;
45
46 /* Be protected by fs_info->trans_lock when we want to change it. */
47 enum btrfs_trans_state state;
48 int aborted;
49 struct list_head list;
50 struct extent_io_tree dirty_pages;
51 time64_t start_time;
52 wait_queue_head_t writer_wait;
53 wait_queue_head_t commit_wait;
54 wait_queue_head_t pending_wait;
55 struct list_head pending_snapshots;
56 struct list_head pending_chunks;
57 struct list_head switch_commits;
58 struct list_head dirty_bgs;
59
60 /*
61 * There is no explicit lock which protects io_bgs, rather its
62 * consistency is implied by the fact that all the sites which modify
63 * it do so under some form of transaction critical section, namely:
64 *
65 * - btrfs_start_dirty_block_groups - This function can only ever be
66 * run by one of the transaction committers. Refer to
67 * BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction
68 *
69 * - btrfs_write_dirty_blockgroups - this is called by
70 * commit_cowonly_roots from transaction critical section
71 * (TRANS_STATE_COMMIT_DOING)
72 *
73 * - btrfs_cleanup_dirty_bgs - called on transaction abort
74 */
75 struct list_head io_bgs;
76 struct list_head dropped_roots;
77
78 /*
79 * we need to make sure block group deletion doesn't race with
80 * free space cache writeout. This mutex keeps them from stomping
81 * on each other
82 */
83 struct mutex cache_write_mutex;
84 spinlock_t dirty_bgs_lock;
85 unsigned int num_dirty_bgs;
86 /* Protected by spin lock fs_info->unused_bgs_lock. */
87 struct list_head deleted_bgs;
88 spinlock_t dropped_roots_lock;
89 struct btrfs_delayed_ref_root delayed_refs;
90 struct btrfs_fs_info *fs_info;
91};
92
93#define __TRANS_FREEZABLE (1U << 0)
94
95#define __TRANS_START (1U << 9)
96#define __TRANS_ATTACH (1U << 10)
97#define __TRANS_JOIN (1U << 11)
98#define __TRANS_JOIN_NOLOCK (1U << 12)
99#define __TRANS_DUMMY (1U << 13)
100#define __TRANS_JOIN_NOSTART (1U << 14)
101
102#define TRANS_START (__TRANS_START | __TRANS_FREEZABLE)
103#define TRANS_ATTACH (__TRANS_ATTACH)
104#define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE)
105#define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK)
106#define TRANS_JOIN_NOSTART (__TRANS_JOIN_NOSTART)
107
108#define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH)
109
110#define BTRFS_SEND_TRANS_STUB ((void *)1)
111
112struct btrfs_trans_handle {
113 u64 transid;
114 u64 bytes_reserved;
115 u64 chunk_bytes_reserved;
116 unsigned long delayed_ref_updates;
117 struct btrfs_transaction *transaction;
118 struct btrfs_block_rsv *block_rsv;
119 struct btrfs_block_rsv *orig_rsv;
120 refcount_t use_count;
121 unsigned int type;
122 short aborted;
123 bool adding_csums;
124 bool allocating_chunk;
125 bool can_flush_pending_bgs;
126 bool reloc_reserved;
127 bool sync;
128 bool dirty;
129 struct btrfs_root *root;
130 struct btrfs_fs_info *fs_info;
131 struct list_head new_bgs;
132};
133
134struct btrfs_pending_snapshot {
135 struct dentry *dentry;
136 struct inode *dir;
137 struct btrfs_root *root;
138 struct btrfs_root_item *root_item;
139 struct btrfs_root *snap;
140 struct btrfs_qgroup_inherit *inherit;
141 struct btrfs_path *path;
142 /* block reservation for the operation */
143 struct btrfs_block_rsv block_rsv;
144 /* extra metadata reservation for relocation */
145 int error;
146 bool readonly;
147 struct list_head list;
148};
149
150static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
151 struct inode *inode)
152{
153 spin_lock(&BTRFS_I(inode)->lock);
154 BTRFS_I(inode)->last_trans = trans->transaction->transid;
155 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
156 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
157 spin_unlock(&BTRFS_I(inode)->lock);
158}
159
160/*
161 * Make qgroup codes to skip given qgroupid, means the old/new_roots for
162 * qgroup won't contain the qgroupid in it.
163 */
164static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
165 u64 qgroupid)
166{
167 struct btrfs_delayed_ref_root *delayed_refs;
168
169 delayed_refs = &trans->transaction->delayed_refs;
170 WARN_ON(delayed_refs->qgroup_to_skip);
171 delayed_refs->qgroup_to_skip = qgroupid;
172}
173
174static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
175{
176 struct btrfs_delayed_ref_root *delayed_refs;
177
178 delayed_refs = &trans->transaction->delayed_refs;
179 WARN_ON(!delayed_refs->qgroup_to_skip);
180 delayed_refs->qgroup_to_skip = 0;
181}
182
183int btrfs_end_transaction(struct btrfs_trans_handle *trans);
184struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
185 unsigned int num_items);
186struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
187 struct btrfs_root *root,
188 unsigned int num_items,
189 int min_factor);
190struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
191struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
192struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
193struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
194struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
195 struct btrfs_root *root);
196int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
197
198void btrfs_add_dead_root(struct btrfs_root *root);
199int btrfs_defrag_root(struct btrfs_root *root);
200int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
201int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
202int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
203 int wait_for_unblock);
204
205/*
206 * Try to commit transaction asynchronously, so this is safe to call
207 * even holding a spinlock.
208 *
209 * It's done by informing transaction_kthread to commit transaction without
210 * waiting for commit interval.
211 */
212static inline void btrfs_commit_transaction_locksafe(
213 struct btrfs_fs_info *fs_info)
214{
215 set_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
216 wake_up_process(fs_info->transaction_kthread);
217}
218int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
219int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
220void btrfs_throttle(struct btrfs_fs_info *fs_info);
221int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
222 struct btrfs_root *root);
223int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
224 struct extent_io_tree *dirty_pages, int mark);
225int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
226 struct extent_io_tree *dirty_pages);
227int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
228int btrfs_transaction_blocked(struct btrfs_fs_info *info);
229int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
230void btrfs_put_transaction(struct btrfs_transaction *transaction);
231void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info);
232void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
233 struct btrfs_root *root);

#endif /* BTRFS_TRANSACTION_H */