| author | Sidhartha Kumar <sidhartha.kumar@oracle.com> | 2024-08-14 12:19:36 -0400 |
| --- | --- | --- |
| committer | Andrew Morton <akpm@linux-foundation.org> | 2024-09-01 20:26:16 -0700 |
| commit | 580fcbd67ce2cdf9d70c7a8eda0789b9eba1c3af (patch) | |
| tree | d3e569271e826ff26c3c4f5cc8c98617b3ed83e0 /lib | |
| parent | 23e217a848b3b9e1b0cb8c41d731d7968bcc77a5 (diff) | |
maple_tree: use store type in mas_wr_store_entry()
When storing an entry, we can read the store type that was set by a
previous partial walk of the tree. Now that the type of store is known,
select the correct write helper function to complete the store.
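As a rough illustration of that shape (not the maple tree code itself; the
enum values, struct, and helper names below such as wr_state and
store_entry() are invented for the example), completing the write amounts
to a single switch on a decision cached by the earlier walk:

```c
/* Illustrative sketch only -- hypothetical names, not the kernel API. */
#include <stdio.h>

enum store_type { WR_INVALID, WR_EXACT_FIT, WR_APPEND, WR_SPANNING_STORE };

struct wr_state {
	enum store_type type;	/* set by a previous partial walk */
	const char *entry;
};

static void exact_fit_store(struct wr_state *wr)
{
	printf("exact fit: %s\n", wr->entry);
}

static void append_store(struct wr_state *wr)
{
	printf("append: %s\n", wr->entry);
}

static void spanning_store(struct wr_state *wr)
{
	printf("spanning: %s\n", wr->entry);
}

/* The walk already decided the case, so the store is a plain dispatch. */
static void store_entry(struct wr_state *wr)
{
	switch (wr->type) {
	case WR_EXACT_FIT:
		exact_fit_store(wr);
		break;
	case WR_APPEND:
		append_store(wr);
		break;
	case WR_SPANNING_STORE:
		spanning_store(wr);
		break;
	case WR_INVALID:
	default:
		fprintf(stderr, "store type was never set\n");
		break;
	}
}

int main(void)
{
	struct wr_state wr = { .type = WR_APPEND, .entry = "value" };

	store_entry(&wr);
	return 0;
}
```

Caching the decision this way means the walk logic runs once and the write
completion no longer has to re-derive which case it is in.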
Also noinline mas_wr_spanning_store() to limit stack frame usage in
mas_wr_store_entry(), as it allocates a maple_big_node on the stack.
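The stack-frame point can be seen in a similar sketch; heavy_path() and
big_scratch below are made-up stand-ins for mas_wr_spanning_store() and its
on-stack maple_big_node, and the noinline attribute shown is the GCC/Clang
spelling:

```c
/* Illustrative sketch only -- hypothetical names, not the kernel code. */
#include <stdio.h>

__attribute__((noinline))
static void heavy_path(const char *entry)
{
	char big_scratch[1024];	/* stands in for the on-stack maple_big_node */

	snprintf(big_scratch, sizeof(big_scratch), "spanning store of %s", entry);
	puts(big_scratch);
}

static void dispatcher(const char *entry, int spanning)
{
	/*
	 * If heavy_path() were inlined, big_scratch would be charged to
	 * this frame even on calls that never take the spanning case.
	 */
	if (spanning)
		heavy_path(entry);
	else
		printf("fast path: %s\n", entry);
}

int main(void)
{
	dispatcher("value", 0);
	dispatcher("value", 1);
	return 0;
}
```

Keeping the rarely taken path out of line confines its large temporary to
the calls that actually need it.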
Link: https://lkml.kernel.org/r/20240814161944.55347-10-sidhartha.kumar@oracle.com
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'lib')
-rw-r--r-- | lib/maple_tree.c | 90 |
1 file changed, 52 insertions, 38 deletions
```diff
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index a1689fc6227b..2242e07a46dc 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -3780,7 +3780,7 @@ done:
  *
  * Return: 0 on error, positive on success.
  */
-static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
+static noinline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
 {
 	struct maple_subtree_state mast;
 	struct maple_big_node b_node;
@@ -4206,43 +4206,62 @@ slow_path:
 static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas)
 {
 	struct ma_state *mas = wr_mas->mas;
+	unsigned char new_end = mas_wr_new_end(wr_mas);
 
-	wr_mas->content = mas_start(mas);
-	if (mas_is_none(mas) || mas_is_ptr(mas)) {
-		mas_store_root(mas, wr_mas->entry);
+	switch (mas->store_type) {
+	case wr_invalid:
+		MT_BUG_ON(mas->tree, 1);
 		return;
-	}
-
-	if (unlikely(!mas_wr_walk(wr_mas))) {
+	case wr_new_root:
+		mas_new_root(mas, wr_mas->entry);
+		break;
+	case wr_store_root:
+		mas_store_root(mas, wr_mas->entry);
+		break;
+	case wr_exact_fit:
+		rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
+		if (!!wr_mas->entry ^ !!wr_mas->content)
+			mas_update_gap(mas);
+		break;
+	case wr_append:
+		mas_wr_append(wr_mas, new_end);
+		break;
+	case wr_slot_store:
+		mas_wr_slot_store(wr_mas);
+		break;
+	case wr_node_store:
+		mas_wr_node_store(wr_mas, new_end);
+		break;
+	case wr_spanning_store:
 		mas_wr_spanning_store(wr_mas);
-		return;
+		break;
+	case wr_split_store:
+	case wr_rebalance:
+		mas_wr_bnode(wr_mas);
+		break;
 	}
 
-	/* At this point, we are at the leaf node that needs to be altered. */
-	mas_wr_end_piv(wr_mas);
-	/* New root for a single pointer */
-	if (unlikely(!mas->index && mas->last == ULONG_MAX))
-		mas_new_root(mas, wr_mas->entry);
-	else
-		mas_wr_modify(wr_mas);
+	return;
 }
 
-static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
+static inline void mas_wr_prealloc_setup(struct ma_wr_state *wr_mas)
 {
-	if (!mas_is_active(wr_mas->mas)) {
-		if (mas_is_start(wr_mas->mas))
-			return;
+	struct ma_state *mas = wr_mas->mas;
+
+	if (!mas_is_active(mas)) {
+		if (mas_is_start(mas))
+			goto set_content;
 
-		if (unlikely(mas_is_paused(wr_mas->mas)))
+		if (unlikely(mas_is_paused(mas)))
 			goto reset;
 
-		if (unlikely(mas_is_none(wr_mas->mas)))
+		if (unlikely(mas_is_none(mas)))
 			goto reset;
 
-		if (unlikely(mas_is_overflow(wr_mas->mas)))
+		if (unlikely(mas_is_overflow(mas)))
 			goto reset;
 
-		if (unlikely(mas_is_underflow(wr_mas->mas)))
+		if (unlikely(mas_is_underflow(mas)))
 			goto reset;
 	}
@@ -4251,27 +4270,20 @@ static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
 	 * writes within this node. This is to stop partial walks in
 	 * mas_prealloc() from being reset.
 	 */
-	if (wr_mas->mas->last > wr_mas->mas->max)
+	if (mas->last > mas->max)
 		goto reset;
 
 	if (wr_mas->entry)
-		return;
+		goto set_content;
 
-	if (mte_is_leaf(wr_mas->mas->node) &&
-	    wr_mas->mas->last == wr_mas->mas->max)
+	if (mte_is_leaf(mas->node) && mas->last == mas->max)
 		goto reset;
 
-	return;
+	goto set_content;
 
 reset:
-	mas_reset(wr_mas->mas);
-}
-
-static inline void mas_wr_prealloc_setup(struct ma_wr_state *wr_mas)
-{
-	struct ma_state *mas = wr_mas->mas;
-
-	mas_wr_store_setup(wr_mas);
+	mas_reset(mas);
+set_content:
 	wr_mas->content = mas_start(mas);
 }
 
@@ -5582,7 +5594,8 @@ void *mas_store(struct ma_state *mas, void *entry)
 	 * want to examine what happens if a single store operation was to
 	 * overwrite multiple entries within a self-balancing B-Tree.
 	 */
-	mas_wr_store_setup(&wr_mas);
+	mas_wr_prealloc_setup(&wr_mas);
+	mas_wr_store_type(&wr_mas);
 	mas_wr_store_entry(&wr_mas);
 	return wr_mas.content;
 }
@@ -5634,7 +5647,8 @@ void mas_store_prealloc(struct ma_state *mas, void *entry)
 {
 	MA_WR_STATE(wr_mas, mas, entry);
 
-	mas_wr_store_setup(&wr_mas);
+	mas_wr_prealloc_setup(&wr_mas);
+	mas_wr_store_type(&wr_mas);
 	trace_ma_write(__func__, mas, 0, entry);
 	mas_wr_store_entry(&wr_mas);
 	MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
```