path: root/fs/btrfs/subpage.h
author     Qu Wenruo <wqu@suse.com>          2024-02-17 16:59:49 +1030
committer  David Sterba <dsterba@suse.com>   2024-03-05 17:13:23 +0100
commit     8e7e9c672fd810a099dc2ac92a80e8e95cd5b0dc (patch)
tree       60a6f6d218ebc78fea0016aae795d72bffb40bbc /fs/btrfs/subpage.h
parent     621b9ff18c009ed6512df93b63fcf7dbac4cb4e0 (diff)
btrfs: subpage: make reader lock utilize bitmap
Currently btrfs_subpage utilizes its atomic member @readers to manage the reader counter. However it is only used to prevent the page from being released/unlocked while reads are still underway. In that use case, we don't really allow multiple readers on the same subpage sector.

So here we can introduce a new locked bitmap to represent exactly which subpage ranges are locked for read.

In theory we could remove btrfs_subpage::readers, as it is just the number of set bits in the new locked bitmap. But unfortunately bitmap doesn't provide such a handy API yet, so we still keep the reader counter.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
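To make the idea concrete, here is a minimal, hypothetical sketch (not the actual fs/btrfs/subpage.c code; the demo_* names and the fixed 16-sector geometry are assumptions) of how a per-sector locked bitmap can coexist with a reader counter: each read marks its sectors in the bitmap and bumps the counter, and the final unlock is detected when the counter drops back to zero.

/* Simplified illustration only, not the btrfs implementation. */
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/spinlock.h>

#define DEMO_SECTORS_PER_PAGE 16	/* e.g. 64K page / 4K sectorsize (assumed) */

struct demo_subpage {
	spinlock_t lock;
	/* Kept because bitmaps have no handy atomic "count set bits" API yet. */
	atomic_t readers;
	unsigned long locked[BITS_TO_LONGS(DEMO_SECTORS_PER_PAGE)];
};

/* Lock @nbits sectors starting at @start for read. */
static void demo_start_reader(struct demo_subpage *sp, unsigned int start,
			      unsigned int nbits)
{
	unsigned long flags;

	spin_lock_irqsave(&sp->lock, flags);
	bitmap_set(sp->locked, start, nbits);
	spin_unlock_irqrestore(&sp->lock, flags);
	atomic_add(nbits, &sp->readers);
}

/* Unlock the range; return true when the last reader has finished. */
static bool demo_end_reader(struct demo_subpage *sp, unsigned int start,
			    unsigned int nbits)
{
	unsigned long flags;

	spin_lock_irqsave(&sp->lock, flags);
	bitmap_clear(sp->locked, start, nbits);
	spin_unlock_irqrestore(&sp->lock, flags);
	return atomic_sub_and_test(nbits, &sp->readers);
}

In this sketch the counter still decides when the last reader is gone, while the bitmap records exactly which sectors are locked, mirroring the reasoning in the commit message above.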
Diffstat (limited to 'fs/btrfs/subpage.h')
-rw-r--r--  fs/btrfs/subpage.h  12
1 file changed, 11 insertions, 1 deletion
diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h
index 97ba2c100b0b..b6dc013b0fdc 100644
--- a/fs/btrfs/subpage.h
+++ b/fs/btrfs/subpage.h
@@ -33,7 +33,7 @@ struct btrfs_subpage_info {
unsigned int total_nr_bits;
/*
- * *_start indicates where the bitmap starts, the length is always
+ * *_offset indicates where the bitmap starts, the length is always
* @bitmap_size, which is calculated from PAGE_SIZE / sectorsize.
*/
unsigned int uptodate_offset;
@@ -41,6 +41,16 @@ struct btrfs_subpage_info {
unsigned int writeback_offset;
unsigned int ordered_offset;
unsigned int checked_offset;
+
+ /*
+ * For locked bitmaps, this is normally the subpage representation of the
+ * folio Locked flag, but metadata is different:
+ *
+ * - Metadata doesn't really lock the folio
+ *   It only exists to prevent page::private from being cleared before the
+ *   last end_page_read().
+ */
+ unsigned int locked_offset;
};
/*
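As a rough illustration of the *_offset layout described in the struct comment (hypothetical demo_* names; the real helpers in fs/btrfs/subpage.h differ), every per-sector flag lives in one shared bitmap, and a flag's offset plus the sector index within the page gives the bit number:

/* Illustration only; not the actual btrfs helpers. */
struct demo_subpage_info {
	unsigned int bitmap_nr_bits;	/* PAGE_SIZE / sectorsize */
	unsigned int uptodate_offset;	/* start of the uptodate bitmap */
	unsigned int locked_offset;	/* start of the locked bitmap */
};

/*
 * Bit number of the "locked" bit covering the sector that contains byte
 * @off within the page, assuming @sectorsize_bits == ilog2(sectorsize).
 */
static unsigned int demo_locked_bitnr(const struct demo_subpage_info *info,
				      unsigned int off,
				      unsigned int sectorsize_bits)
{
	return info->locked_offset + (off >> sectorsize_bits);
}

For example, with a 64K page and 4K sectors each per-flag bitmap is 16 bits long, so in this sketch the locked bitmap would simply start 16 bits after the previous flag's bitmap.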