author		Gao Xiang <hsiangkao@linux.alibaba.com>	2023-12-06 17:10:57 +0800
committer	Gao Xiang <hsiangkao@linux.alibaba.com>	2023-12-18 15:49:39 +0800
commit		0ee3a0d59e007320167a2e9f4b8bf1304ada7771 (patch)
tree		53248764dab476f4355824248d17195d8c1112b7 /fs/erofs
parent		1ca01520148af399899ed66af5c78330bb9ecaf2 (diff)
erofs: enable sub-page compressed block support
Let's just disable cached decompression and inplace I/Os for partial
pages as the first step towards initial sub-page block support. In other
words, it currently works primarily with temporary short-lived pages, so
don't expect too much in terms of performance.

Reviewed-by: Yue Hu <huyue2@coolpad.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20231206091057.87027-6-hsiangkao@linux.alibaba.com
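For context, "sub-page" here simply means that the block size recorded in the EROFS superblock is smaller than the kernel's PAGE_SIZE (for example, a 4KiB-block image mounted on a 64KiB-page kernel). A minimal sketch of that condition using generic VFS helpers; erofs_subpage_blocks() is a hypothetical name for illustration, not part of the patch:

	#include <linux/fs.h>	/* i_blocksize() */
	#include <linux/mm.h>	/* PAGE_SIZE */

	/* Hypothetical helper: true when one page spans several fs blocks. */
	static inline bool erofs_subpage_blocks(const struct inode *inode)
	{
		return i_blocksize(inode) < PAGE_SIZE;
	}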
Diffstat (limited to 'fs/erofs')
-rw-r--r--	fs/erofs/inode.c	6
-rw-r--r--	fs/erofs/zdata.c	6
2 files changed, 8 insertions, 4 deletions
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 14a79d3226ab..3d616dea55dc 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -259,8 +259,10 @@ static int erofs_fill_inode(struct inode *inode)
 	if (erofs_inode_is_data_compressed(vi->datalayout)) {
 #ifdef CONFIG_EROFS_FS_ZIP
-		if (!erofs_is_fscache_mode(inode->i_sb) &&
-		    inode->i_sb->s_blocksize_bits == PAGE_SHIFT) {
+		if (!erofs_is_fscache_mode(inode->i_sb)) {
+			DO_ONCE_LITE_IF(inode->i_sb->s_blocksize != PAGE_SIZE,
+					erofs_info, inode->i_sb,
+					"EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!");
 			inode->i_mapping->a_ops = &z_erofs_aops;
 			err = 0;
 			goto out_unlock;
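DO_ONCE_LITE_IF() (include/linux/once_lite.h) evaluates a condition and, at most once per call site, invokes the given function with the remaining arguments; here it prints the EXPERIMENTAL notice the first time a sub-page-block image is mounted. A simplified model of that behaviour, illustration only and not the kernel's actual implementation:

	/* Sketch of a "run func() once if cond holds" macro. */
	#define ONCE_IF_SKETCH(cond, func, ...)			\
	({							\
		static bool __done;				\
		bool __ret = false;				\
								\
		if ((cond) && !__done) {			\
			__done = true;				\
			func(__VA_ARGS__);			\
			__ret = true;				\
		}						\
		__ret;						\
	})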
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 5d5640173412..8264936b8612 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -563,6 +563,8 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 			__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
 	unsigned int i;
+	if (i_blocksize(fe->inode) != PAGE_SIZE)
+		return;
 	if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
 		return;
@@ -967,12 +969,12 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
 	struct inode *const inode = fe->inode;
 	struct erofs_map_blocks *const map = &fe->map;
 	const loff_t offset = page_offset(page);
+	const unsigned int bs = i_blocksize(inode);
 	bool tight = true, exclusive;
 	unsigned int cur, end, len, split;
 	int err = 0;
 	z_erofs_onlinepage_init(page);
-
 	split = 0;
 	end = PAGE_SIZE;
 repeat:
@@ -1021,7 +1023,7 @@ repeat:
 	 * for inplace I/O or bvpage (should be processed in a strict order.)
 	 */
 	tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
-	exclusive = (!cur && ((split <= 1) || tight));
+	exclusive = (!cur && ((split <= 1) || (tight && bs == PAGE_SIZE)));
 	if (cur)
 		tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
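The last hunk is where inplace I/O actually gets disabled for partial pages: a page is only treated as "exclusive" (eligible for reuse as an inplace I/O buffer) when the block size equals PAGE_SIZE. A stand-alone restatement of the updated predicate, with hypothetical parameter names rather than kernel code:

	#include <stdbool.h>

	/* Illustration only: mirrors the new "exclusive" expression. */
	static bool page_exclusive(unsigned int cur, unsigned int split,
				   bool tight, unsigned int bs,
				   unsigned int page_size)
	{
		/* cur != 0 means the page is only partially covered here */
		return !cur && (split <= 1 || (tight && bs == page_size));
	}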