author    Josef Bacik <josef@toxicpanda.com>    2021-01-15 16:48:55 -0500
committer David Sterba <dsterba@suse.com>       2021-02-08 22:58:55 +0100
commit    2187374f35fe9cadbddaa9fcf0c4121365d914e8 (patch)
tree      b4b8e9440914db49bc0db5bf36712674e4228de4 /fs/btrfs/delayed-ref.c
parent    e9aa7c285d20a69ce1fb940ec846686780af9e56 (diff)
btrfs: handle space_info::total_bytes_pinned inside the delayed ref itself
Currently we pass things around to figure out if we may be freeing data based on the state of the delayed ref head. This makes the accounting sort of confusing and hard to follow, as it's distinctly separate from the delayed ref head stuff, but also depends on it entirely.

Fix this by explicitly adjusting space_info->total_bytes_pinned in the delayed refs code. We now have two places where we modify this counter: once where we create and destroy the delayed refs, and once when we pin and unpin the extents. This means there is a slight overlap between the delayed refs and the pin/unpin mechanisms, but the counter is only used by the ENOSPC infrastructure to decide whether to commit the transaction, so there is no adverse effect: at worst we commit thinking it will give us enough space when it might not.

CC: stable@vger.kernel.org # 5.10
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
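The core of the change is the zero-crossing check now done at the tail of update_existing_head_ref() (first hunk below): the bytes are counted as pinned only while the head's net ref mod is negative. A minimal userspace sketch of that logic, for illustration only: struct ref_head, apply_ref_mod() and the plain total_bytes_pinned variable are simplified stand-ins for btrfs_delayed_ref_head, update_existing_head_ref() and the percpu counter that btrfs_mod_total_bytes_pinned() adjusts (no locking, no per-space-info flags).

    #include <stdio.h>

    struct ref_head {
            long long total_ref_mod;        /* net effect of queued ref mods */
            unsigned long long num_bytes;   /* extent size this head covers */
    };

    static long long total_bytes_pinned;   /* stand-in for the space_info counter */

    /* Mirrors the tail of update_existing_head_ref(): apply a ref mod delta
     * and touch the pinned counter only when total_ref_mod crosses zero. */
    static void apply_ref_mod(struct ref_head *head, long long delta)
    {
            long long old_ref_mod = head->total_ref_mod;

            head->total_ref_mod += delta;

            if (head->total_ref_mod < 0 && old_ref_mod >= 0)
                    total_bytes_pinned += head->num_bytes;  /* may free space */
            else if (head->total_ref_mod >= 0 && old_ref_mod < 0)
                    total_bytes_pinned -= head->num_bytes;  /* no longer freeing */
    }

    int main(void)
    {
            struct ref_head head = { .total_ref_mod = 1, .num_bytes = 4096 };

            apply_ref_mod(&head, -2);       /* 1 -> -1: pins 4096 */
            apply_ref_mod(&head, -1);       /* -1 -> -2: already pinned, no change */
            apply_ref_mod(&head, 2);        /* -2 -> 0: unpins 4096 */

            printf("total_bytes_pinned = %lld\n", total_bytes_pinned); /* 0 */
            return 0;
    }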
Diffstat (limited to 'fs/btrfs/delayed-ref.c')
-rw-r--r--   fs/btrfs/delayed-ref.c   51
1 file changed, 30 insertions(+), 21 deletions(-)
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 88a1e27d2fc2..a540ace3e03a 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -651,12 +651,12 @@ inserted:
*/
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *existing,
- struct btrfs_delayed_ref_head *update,
- int *old_ref_mod_ret)
+ struct btrfs_delayed_ref_head *update)
{
struct btrfs_delayed_ref_root *delayed_refs =
&trans->transaction->delayed_refs;
struct btrfs_fs_info *fs_info = trans->fs_info;
+ u64 flags = btrfs_ref_head_to_space_flags(existing);
int old_ref_mod;
BUG_ON(existing->is_data != update->is_data);
@@ -704,8 +704,6 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
* currently, for refs we just added we know we're a-ok.
*/
old_ref_mod = existing->total_ref_mod;
- if (old_ref_mod_ret)
- *old_ref_mod_ret = old_ref_mod;
existing->ref_mod += update->ref_mod;
existing->total_ref_mod += update->ref_mod;
@@ -727,6 +725,22 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
trans->delayed_ref_updates += csum_leaves;
}
}
+
+ /*
+ * This handles the following conditions:
+ *
+ * 1. We had a ref mod of 0 or more and went negative, indicating that
+ * we may be freeing space, so add our space to the
+ * total_bytes_pinned counter.
+ * 2. We were negative and went to 0 or positive, so no longer can say
+ * that the space would be pinned, decrement our counter from the
+ * total_bytes_pinned counter.
+ */
+ if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
+ btrfs_mod_total_bytes_pinned(fs_info, flags, existing->num_bytes);
+ else if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
+ btrfs_mod_total_bytes_pinned(fs_info, flags, -existing->num_bytes);
+
spin_unlock(&existing->lock);
}
@@ -801,8 +815,7 @@ static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head_ref,
struct btrfs_qgroup_extent_record *qrecord,
- int action, int *qrecord_inserted_ret,
- int *old_ref_mod, int *new_ref_mod)
+ int action, int *qrecord_inserted_ret)
{
struct btrfs_delayed_ref_head *existing;
struct btrfs_delayed_ref_root *delayed_refs;
@@ -824,8 +837,7 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
existing = htree_insert(&delayed_refs->href_root,
&head_ref->href_node);
if (existing) {
- update_existing_head_ref(trans, existing, head_ref,
- old_ref_mod);
+ update_existing_head_ref(trans, existing, head_ref);
/*
* we've updated the existing ref, free the newly
* allocated ref
@@ -833,14 +845,17 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
head_ref = existing;
} else {
- if (old_ref_mod)
- *old_ref_mod = 0;
+ u64 flags = btrfs_ref_head_to_space_flags(head_ref);
+
if (head_ref->is_data && head_ref->ref_mod < 0) {
delayed_refs->pending_csums += head_ref->num_bytes;
trans->delayed_ref_updates +=
btrfs_csum_bytes_to_leaves(trans->fs_info,
head_ref->num_bytes);
}
+ if (head_ref->ref_mod < 0)
+ btrfs_mod_total_bytes_pinned(trans->fs_info, flags,
+ head_ref->num_bytes);
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
atomic_inc(&delayed_refs->num_entries);
@@ -848,8 +863,6 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
}
if (qrecord_inserted_ret)
*qrecord_inserted_ret = qrecord_inserted;
- if (new_ref_mod)
- *new_ref_mod = head_ref->total_ref_mod;
return head_ref;
}
@@ -912,8 +925,7 @@ static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
*/
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref,
- struct btrfs_delayed_extent_op *extent_op,
- int *old_ref_mod, int *new_ref_mod)
+ struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_tree_ref *ref;
@@ -980,8 +992,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
* the spin lock
*/
head_ref = add_delayed_ref_head(trans, head_ref, record,
- action, &qrecord_inserted,
- old_ref_mod, new_ref_mod);
+ action, &qrecord_inserted);
ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
spin_unlock(&delayed_refs->lock);
@@ -1009,8 +1020,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
*/
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref,
- u64 reserved, int *old_ref_mod,
- int *new_ref_mod)
+ u64 reserved)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_data_ref *ref;
@@ -1076,8 +1086,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
* the spin lock
*/
head_ref = add_delayed_ref_head(trans, head_ref, record,
- action, &qrecord_inserted,
- old_ref_mod, new_ref_mod);
+ action, &qrecord_inserted);
ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
spin_unlock(&delayed_refs->lock);
@@ -1120,7 +1129,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
spin_lock(&delayed_refs->lock);
add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
- NULL, NULL, NULL);
+ NULL);
spin_unlock(&delayed_refs->lock);
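The other half of the accounting added here is the new-head path in add_delayed_ref_head(): a head created with a negative ref_mod contributes its bytes to the counter up front. The matching decrement when a head is destroyed happens in extent-tree.c in the full patch (the diffstat above is limited to delayed-ref.c), so the teardown helper below is a hedged stand-in for that side, not code from this file. Continuing the earlier sketch with the same simplified types:

    #include <stdio.h>

    struct ref_head {
            long long total_ref_mod;
            unsigned long long num_bytes;
    };

    static long long total_bytes_pinned;

    /* Mirrors the new-head branch of add_delayed_ref_head(): a head created
     * with a negative ref_mod is counted as potentially freed space. */
    static void create_head(struct ref_head *head, long long ref_mod,
                            unsigned long long num_bytes)
    {
            head->total_ref_mod = ref_mod;
            head->num_bytes = num_bytes;
            if (ref_mod < 0)
                    total_bytes_pinned += num_bytes;
    }

    /* Stand-in for the teardown side of the accounting, which lives in
     * extent-tree.c in the full patch: undo the contribution, if any. */
    static void destroy_head(struct ref_head *head)
    {
            if (head->total_ref_mod < 0)
                    total_bytes_pinned -= head->num_bytes;
    }

    int main(void)
    {
            struct ref_head head;

            create_head(&head, -1, 16384);  /* pins 16384 */
            destroy_head(&head);            /* unpins it again */
            printf("total_bytes_pinned = %lld\n", total_bytes_pinned); /* 0 */
            return 0;
    }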