author	Kent Overstreet <kent.overstreet@linux.dev>	2022-10-28 17:08:41 -0400
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-22 17:09:45 -0400
commit	46fee692eebb850b8478531e185fb5a5f942d3ea (patch)
tree	332347a26a8887b98e5bcb40456aa69825413500 /fs/bcachefs/btree_io.h
parent	8852501fe570c4956c0e29246e1e5636f09b58fb (diff)
bcachefs: Improved btree write statistics
This replaces sysfs btree_avg_write_size with btree_write_stats, which
breaks out statistics by the source of the btree write. Btree writes
that are too small are a source of inefficiency and of excessive btree
resort overhead - this will let us see what's causing them.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
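As a rough sketch of what per-source write statistics involve (plain
userspace C, not the kernel code; the source names, struct layout, and
function names below are illustrative assumptions), it is enough to
keep a (count, bytes) pair per write source and derive the average
write size when reporting:

/*
 * Minimal sketch, not the kernel implementation: the write-source
 * names, the stats struct layout, and these function names are
 * assumptions for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

enum write_source {
	WR_initial,
	WR_cache_reclaim,
	WR_journal_reclaim,
	WR_NR,
};

static const char * const wr_names[WR_NR] = {
	"initial",
	"cache_reclaim",
	"journal_reclaim",
};

/* One (count, bytes) pair per source of btree writes: */
static struct {
	uint64_t nr;
	uint64_t bytes;
} write_stats[WR_NR];

static void record_write(enum write_source src, uint64_t bytes)
{
	write_stats[src].nr++;
	write_stats[src].bytes += bytes;
}

/* Report count and average write size per source: */
static void write_stats_to_text(void)
{
	for (int i = 0; i < WR_NR; i++) {
		uint64_t nr = write_stats[i].nr;

		printf("%-16s nr %6llu avg size %6llu\n",
		       wr_names[i],
		       (unsigned long long) nr,
		       (unsigned long long) (nr ? write_stats[i].bytes / nr : 0));
	}
}

int main(void)
{
	record_write(WR_cache_reclaim, 4096);
	record_write(WR_cache_reclaim, 8192);
	record_write(WR_initial, 16384);
	write_stats_to_text();
	return 0;
}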
Diffstat (limited to 'fs/bcachefs/btree_io.h')
-rw-r--r--	fs/bcachefs/btree_io.h	10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index 8af853642123..4b1810ad7d91 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -139,8 +139,12 @@ void bch2_btree_complete_write(struct bch_fs *, struct btree *,
 
 bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
 
-#define BTREE_WRITE_ONLY_IF_NEED	(1U << 0)
-#define BTREE_WRITE_ALREADY_STARTED	(1U << 1)
+enum btree_write_flags {
+	__BTREE_WRITE_ONLY_IF_NEED = BTREE_WRITE_TYPE_BITS,
+	__BTREE_WRITE_ALREADY_STARTED,
+};
+#define BTREE_WRITE_ONLY_IF_NEED	(1U << __BTREE_WRITE_ONLY_IF_NEED)
+#define BTREE_WRITE_ALREADY_STARTED	(1U << __BTREE_WRITE_ALREADY_STARTED)
 
 void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned);
 void bch2_btree_node_write(struct bch_fs *, struct btree *,
@@ -219,4 +223,6 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
 		bn->min_key = bpos_nosnap_successor(bn->min_key);
 }
 
+void bch2_btree_write_stats_to_text(struct printbuf *, struct bch_fs *);
+
 #endif /* _BCACHEFS_BTREE_IO_H */
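One consequence of the flags change above: the low BTREE_WRITE_TYPE_BITS
bits of the flags word now carry the write type (the "source" the new
statistics are keyed on), so the boolean flags begin above them. Below
is a standalone sketch of that bit packing; the value chosen for
BTREE_WRITE_TYPE_BITS and the write-type names are assumptions, since
neither is defined in this header:

/*
 * Standalone sketch: BTREE_WRITE_TYPE_BITS, the write-type names, and
 * their values are assumptions here; the real definitions are not part
 * of this header.
 */
#include <stdio.h>

enum btree_write_type {		/* hypothetical write sources */
	BTREE_WRITE_initial,
	BTREE_WRITE_cache_reclaim,
	BTREE_WRITE_TYPE_NR,
};

#define BTREE_WRITE_TYPE_BITS	2	/* bits reserved for the type */
#define BTREE_WRITE_TYPE_MASK	((1U << BTREE_WRITE_TYPE_BITS) - 1)

/* Flag bits start above the type field, as in the diff above: */
#define BTREE_WRITE_ONLY_IF_NEED	(1U << BTREE_WRITE_TYPE_BITS)

int main(void)
{
	/* A caller can OR a write type and flags into one word: */
	unsigned flags = BTREE_WRITE_cache_reclaim | BTREE_WRITE_ONLY_IF_NEED;

	printf("type=%u only_if_need=%d\n",
	       flags & BTREE_WRITE_TYPE_MASK,
	       !!(flags & BTREE_WRITE_ONLY_IF_NEED));
	return 0;
}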