author	Boris Burkov <boris@bur.io>	2024-06-20 10:33:10 -0700
committer	David Sterba <dsterba@suse.com>	2024-07-11 15:33:26 +0200
commit	33336c1805d3a03240afda0bfb8c8d20395fb1d3 (patch)
tree	78fc6415f954e4e5297404af1ae93ae75b8a63a9 /fs/btrfs/ulist.c
parent	28cb13f29faf6290597b24b728dc3100c019356f (diff)
btrfs: preallocate ulist memory for qgroup rsv
When qgroups are enabled, data reservation unconditionally allocates the
ulist_nodes that track the exact reserved extents with GFP_ATOMIC. This is
unnecessary: we can follow the model already employed by the struct
extent_state we preallocate in the non-qgroups case, which should reduce
the risk of GFP_ATOMIC allocation failures.

Add a prealloc node to struct ulist, which ulist_add will grab when it is
present, and try to allocate it before taking the tree lock, while we can
still take advantage of a less strict gfp mask. Until it is used, the
node's lifetime belongs to the new prealloc field; once used, it belongs
to the ulist linked list.

Reviewed-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Boris Burkov <boris@bur.io>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
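To make the intended calling pattern concrete, here is a minimal sketch
(example_reserve() and its spinlock are hypothetical illustrations; only
ulist_prealloc() and ulist_add() come from this patch):

/*
 * Hypothetical caller illustrating the pattern this patch enables:
 * allocate the node up front with a sleeping-friendly gfp mask, then
 * add under the lock without depending on GFP_ATOMIC succeeding.
 */
static int example_reserve(struct ulist *reserved, spinlock_t *lock, u64 bytenr)
{
	int ret;

	/* May sleep: the lock is not held yet. */
	ulist_prealloc(reserved, GFP_KERNEL);

	spin_lock(lock);
	/* ulist_add() consumes the preallocated node if present and only
	 * falls back to kmalloc(GFP_ATOMIC) when it is missing. */
	ret = ulist_add(reserved, bytenr, 0, GFP_ATOMIC);
	spin_unlock(lock);

	return ret;
}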
Diffstat (limited to 'fs/btrfs/ulist.c')
-rw-r--r--	fs/btrfs/ulist.c	21
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 183863f4bfa4..fc59b57257d6 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -50,6 +50,7 @@ void ulist_init(struct ulist *ulist)
 	INIT_LIST_HEAD(&ulist->nodes);
 	ulist->root = RB_ROOT;
 	ulist->nnodes = 0;
+	ulist->prealloc = NULL;
 }
 
 /*
@@ -68,6 +69,8 @@ void ulist_release(struct ulist *ulist)
 	list_for_each_entry_safe(node, next, &ulist->nodes, list) {
 		kfree(node);
 	}
+	kfree(ulist->prealloc);
+	ulist->prealloc = NULL;
 	ulist->root = RB_ROOT;
 	INIT_LIST_HEAD(&ulist->nodes);
 }
@@ -105,6 +108,12 @@ struct ulist *ulist_alloc(gfp_t gfp_mask)
 	return ulist;
 }
 
+void ulist_prealloc(struct ulist *ulist, gfp_t gfp_mask)
+{
+	if (!ulist->prealloc)
+		ulist->prealloc = kzalloc(sizeof(*ulist->prealloc), gfp_mask);
+}
+
 /*
  * Free dynamically allocated ulist.
  *
@@ -206,9 +215,15 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
 			*old_aux = node->aux;
 		return 0;
 	}
-	node = kmalloc(sizeof(*node), gfp_mask);
-	if (!node)
-		return -ENOMEM;
+
+	if (ulist->prealloc) {
+		node = ulist->prealloc;
+		ulist->prealloc = NULL;
+	} else {
+		node = kmalloc(sizeof(*node), gfp_mask);
+		if (!node)
+			return -ENOMEM;
+	}
 
 	node->val = val;
 	node->aux = aux;
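The matching header change is not visible here because the diffstat is
limited to fs/btrfs/ulist.c, but for the code above to compile,
fs/btrfs/ulist.h must gain the new member and a declaration roughly as
sketched below (field placement and the surrounding struct layout are
assumptions):

struct ulist {
	unsigned long nnodes;
	struct list_head nodes;
	struct rb_root root;
	/* Preallocated node, consumed by the next ulist_add() so that it
	 * can avoid kmalloc(GFP_ATOMIC) under the tree lock. */
	struct ulist_node *prealloc;
};

void ulist_prealloc(struct ulist *ulist, gfp_t gfp_mask);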