diff options
author | Kent Overstreet <kent.overstreet@gmail.com> | 2021-03-24 20:22:51 -0400 |
---|---|---|
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2023-10-22 17:08:57 -0400 |
commit | 0390ea8ad8f4079c25d47e8c249a2f621aaec3c0 (patch) | |
tree | 51600ed8ca29886ea526de23c47beb0a172a4f8a | |
parent | 7c8b166e584c85f9920d8f82778967eeee0e1b03 (diff) |
bcachefs: Drop bkey noops
Bkey noops were introduced to deal with trimming inline data extents in
place in the btree: if the u64s field of a bkey was 0, that u64 was a
noop and we'd start looking for the next bkey immediately after it.
But extent handling has been lifted above the btree - we no longer
modify existing extents in place in the btree, and the compatibility code
for old style extent btree nodes is gone, so we can completely drop this
code.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r-- | fs/bcachefs/bkey.h | 10 | ||||
-rw-r--r-- | fs/bcachefs/bkey_sort.c | 2 | ||||
-rw-r--r-- | fs/bcachefs/bset.c | 26 | ||||
-rw-r--r-- | fs/bcachefs/bset.h | 2 | ||||
-rw-r--r-- | fs/bcachefs/btree_gc.c | 2 | ||||
-rw-r--r-- | fs/bcachefs/btree_io.c | 14 | ||||
-rw-r--r-- | fs/bcachefs/btree_update_interior.c | 4 |
7 files changed, 22 insertions, 38 deletions
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h index 77d9d871adfb..51dc49b9ffba 100644 --- a/fs/bcachefs/bkey.h +++ b/fs/bcachefs/bkey.h @@ -41,16 +41,6 @@ struct bkey_s { #define bkey_next(_k) vstruct_next(_k) -static inline struct bkey_packed *bkey_next_skip_noops(struct bkey_packed *k, - struct bkey_packed *end) -{ - k = bkey_next(k); - - while (k != end && !k->u64s) - k = (void *) ((u64 *) k + 1); - return k; -} - #define bkey_val_u64s(_k) ((_k)->u64s - BKEY_U64s) static inline size_t bkey_val_bytes(const struct bkey *k) diff --git a/fs/bcachefs/bkey_sort.c b/fs/bcachefs/bkey_sort.c index f2507079ed11..537ab7919e88 100644 --- a/fs/bcachefs/bkey_sort.c +++ b/fs/bcachefs/bkey_sort.c @@ -45,7 +45,7 @@ static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp) BUG_ON(!iter->used); - i->k = bkey_next_skip_noops(i->k, i->end); + i->k = bkey_next(i->k); BUG_ON(i->k > i->end); diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c index c371f402eaa3..59f613560b65 100644 --- a/fs/bcachefs/bset.c +++ b/fs/bcachefs/bset.c @@ -66,7 +66,7 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b, for (_k = i->start; _k < vstruct_last(i); _k = _n) { - _n = bkey_next_skip_noops(_k, vstruct_last(i)); + _n = bkey_next(_k); k = bkey_disassemble(b, _k, &uk); if (c) @@ -532,7 +532,7 @@ start: rw_aux_tree(b, t)[j - 1].offset); } - k = bkey_next_skip_noops(k, btree_bkey_last(b, t)); + k = bkey_next(k); BUG_ON(k >= btree_bkey_last(b, t)); } } @@ -747,7 +747,7 @@ retry: /* First we figure out where the first key in each cacheline is */ eytzinger1_for_each(j, t->size) { while (bkey_to_cacheline(b, t, k) < cacheline) - prev = k, k = bkey_next_skip_noops(k, btree_bkey_last(b, t)); + prev = k, k = bkey_next(k); if (k >= btree_bkey_last(b, t)) { /* XXX: this path sucks */ @@ -764,7 +764,7 @@ retry: } while (k != btree_bkey_last(b, t)) - prev = k, k = bkey_next_skip_noops(k, btree_bkey_last(b, t)); + prev = k, k = bkey_next(k); t->max_key = bkey_unpack_pos(b, prev); @@ 
-899,7 +899,7 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b, struct bkey_packed *p, *i, *ret = NULL, *orig_k = k; while ((p = __bkey_prev(b, t, k)) && !ret) { - for (i = p; i != k; i = bkey_next_skip_noops(i, k)) + for (i = p; i != k; i = bkey_next(i)) if (i->type >= min_key_type) ret = i; @@ -910,10 +910,10 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b, BUG_ON(ret >= orig_k); for (i = ret - ? bkey_next_skip_noops(ret, orig_k) + ? bkey_next(ret) : btree_bkey_first(b, t); i != orig_k; - i = bkey_next_skip_noops(i, orig_k)) + i = bkey_next(i)) BUG_ON(i->type >= min_key_type); } @@ -948,7 +948,7 @@ static void ro_aux_tree_fix_invalidated_key(struct btree *b, /* signal to make_bfloat() that they're uninitialized: */ min_key.u64s = max_key.u64s = 0; - if (bkey_next_skip_noops(k, btree_bkey_last(b, t)) == btree_bkey_last(b, t)) { + if (bkey_next(k) == btree_bkey_last(b, t)) { t->max_key = bkey_unpack_pos(b, k); for (j = 1; j < t->size; j = j * 2 + 1) @@ -1072,7 +1072,7 @@ static void bch2_bset_fix_lookup_table(struct btree *b, struct bkey_packed *k = start; while (1) { - k = bkey_next_skip_noops(k, end); + k = bkey_next(k); if (k == end) break; @@ -1322,12 +1322,12 @@ struct bkey_packed *bch2_bset_search_linear(struct btree *b, while (m != btree_bkey_last(b, t) && bkey_iter_cmp_p_or_unp(b, m, lossy_packed_search, search) < 0) - m = bkey_next_skip_noops(m, btree_bkey_last(b, t)); + m = bkey_next(m); if (!packed_search) while (m != btree_bkey_last(b, t) && bkey_iter_pos_cmp(b, m, search) < 0) - m = bkey_next_skip_noops(m, btree_bkey_last(b, t)); + m = bkey_next(m); if (bch2_expensive_debug_checks) { struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m); @@ -1561,10 +1561,6 @@ static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter, EBUG_ON(iter->data->k > iter->data->end); - while (!__btree_node_iter_set_end(iter, 0) && - !__bch2_btree_node_iter_peek_all(iter, b)->u64s) - iter->data->k++; - if 
(unlikely(__btree_node_iter_set_end(iter, 0))) { bch2_btree_node_iter_set_drop(iter, iter->data); return; diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h index f19cd032cf70..8cf2301e510d 100644 --- a/fs/bcachefs/bset.h +++ b/fs/bcachefs/bset.h @@ -305,7 +305,7 @@ static inline struct bkey_s __bkey_disassemble(struct btree *b, #define bset_tree_for_each_key(_b, _t, _k) \ for (_k = btree_bkey_first(_b, _t); \ _k != btree_bkey_last(_b, _t); \ - _k = bkey_next_skip_noops(_k, btree_bkey_last(_b, _t))) + _k = bkey_next(_k)) static inline bool bset_has_ro_aux_tree(struct bset_tree *t) { diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c index 483360fbda18..36ed6df39768 100644 --- a/fs/bcachefs/btree_gc.c +++ b/fs/bcachefs/btree_gc.c @@ -1373,7 +1373,7 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter, k < vstruct_last(s2) && vstruct_blocks_plus(n1->data, c->block_bits, u64s + k->u64s) <= blocks; - k = bkey_next_skip_noops(k, vstruct_last(s2))) { + k = bkey_next(k)) { last = k; u64s += k->u64s; } diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index adeb4f9fb5fd..71860e1a3100 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -32,9 +32,9 @@ static void verify_no_dups(struct btree *b, if (start == end) return; - for (p = start, k = bkey_next_skip_noops(start, end); + for (p = start, k = bkey_next(start); k != end; - p = k, k = bkey_next_skip_noops(k, end)) { + p = k, k = bkey_next(k)) { struct bkey l = bkey_unpack_key(b, p); struct bkey r = bkey_unpack_key(b, k); @@ -47,9 +47,7 @@ static void set_needs_whiteout(struct bset *i, int v) { struct bkey_packed *k; - for (k = i->start; - k != vstruct_last(i); - k = bkey_next_skip_noops(k, vstruct_last(i))) + for (k = i->start; k != vstruct_last(i); k = bkey_next(k)) k->needs_whiteout = v; } @@ -213,7 +211,7 @@ static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode) out = i->start; for (k = start; k != end; k = n) { - n = 
bkey_next_skip_noops(k, end); + n = bkey_next(k); if (!bkey_deleted(k)) { bkey_copy(out, k); @@ -754,7 +752,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b, } prev = k; - k = bkey_next_skip_noops(k, vstruct_last(i)); + k = bkey_next(k); } fsck_err: return ret; @@ -947,7 +945,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, bp.v->mem_ptr = 0; } - k = bkey_next_skip_noops(k, vstruct_last(i)); + k = bkey_next(k); } bch2_bset_build_aux_tree(b, b->set, false); diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c index 2c202dd01766..c5e0516ff1fb 100644 --- a/fs/bcachefs/btree_update_interior.c +++ b/fs/bcachefs/btree_update_interior.c @@ -1119,7 +1119,7 @@ static struct btree *__btree_split_node(struct btree_update *as, */ k = set1->start; while (1) { - struct bkey_packed *n = bkey_next_skip_noops(k, vstruct_last(set1)); + struct bkey_packed *n = bkey_next(k); if (n == vstruct_last(set1)) break; @@ -1216,7 +1216,7 @@ static void btree_split_insert_keys(struct btree_update *as, struct btree *b, i = btree_bset_first(b); src = dst = i->start; while (src != vstruct_last(i)) { - n = bkey_next_skip_noops(src, vstruct_last(i)); + n = bkey_next(src); if (!bkey_deleted(src)) { memmove_u64s_down(dst, src, src->u64s); dst = bkey_next(dst); |