author    Jaegeuk Kim <jaegeuk@kernel.org>  2016-06-16 16:41:49 -0700
committer Jaegeuk Kim <jaegeuk@kernel.org>  2016-07-06 10:44:08 -0700
commit    ad4edb83143fdeef9e6fdd9daaa735b59476565b (patch)
tree      c2691e320b927f9dce1ae9df62875f3123f908d9 /fs/f2fs/node.c
parent    52763a4b7a2112743745c5bbfe43fe6f54d4b39a (diff)
f2fs: produce more nids and reduce readahead nats
The readahead nat pages are more likely to be reclaimed quickly, so it'd be
better to gather more free nids in advance. And, let's keep as many free nids
cached as possible.

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
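For context, here is a minimal user-space sketch (an illustration, not the kernel
source) of the two thresholds this patch adjusts: build_free_nids() now keeps
scanning until at least NAT_ENTRY_PER_BLOCK free nids are cached, and
try_to_free_nids() returns early rather than shrink the cache below
MAX_FREE_NIDS. The constant values and the bare counter below are assumptions
made for the sketch only.

/*
 * Simplified model of the post-patch thresholds. The constants are
 * illustrative stand-ins; the real definitions live in fs/f2fs/f2fs.h.
 */
#include <stdio.h>

#define NAT_ENTRY_PER_BLOCK 455                /* assumed value for illustration */
#define FREE_NID_PAGES      8                  /* assumed value for illustration */
#define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)

/* After the patch: keep scanning until a full block's worth of nids is cached. */
static int need_more_free_nids(int fcnt)
{
	return fcnt < NAT_ENTRY_PER_BLOCK;
}

/* After the patch: never shrink the free nid cache below MAX_FREE_NIDS. */
static int try_to_free_nids(int *fcnt, int nr_shrink)
{
	int freed = 0;

	if (*fcnt <= MAX_FREE_NIDS)
		return 0;

	while (nr_shrink-- > 0 && *fcnt > MAX_FREE_NIDS) {
		(*fcnt)--;
		freed++;
	}
	return freed;
}

int main(void)
{
	int fcnt = 4000;

	printf("need more nids at %d? %d\n", fcnt, need_more_free_nids(fcnt));
	printf("freed %d, fcnt now %d\n", try_to_free_nids(&fcnt, 100), fcnt);
	return 0;
}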
Diffstat (limited to 'fs/f2fs/node.c')
-rw-r--r--  fs/f2fs/node.c  9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index b448c8fec7fc..729fb1eb86ce 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1765,7 +1765,7 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
}
}
-static void build_free_nids(struct f2fs_sb_info *sbi)
+void build_free_nids(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -1774,7 +1774,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
nid_t nid = nm_i->next_scan_nid;
/* Enough entries */
- if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
+ if (nm_i->fcnt >= NAT_ENTRY_PER_BLOCK)
return;
/* readahead nat pages to be scanned */
@@ -1912,12 +1912,15 @@ int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
struct free_nid *i, *next;
int nr = nr_shrink;
+ if (nm_i->fcnt <= MAX_FREE_NIDS)
+ return 0;
+
if (!mutex_trylock(&nm_i->build_lock))
return 0;
spin_lock(&nm_i->free_nid_list_lock);
list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
- if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
+ if (nr_shrink <= 0 || nm_i->fcnt <= MAX_FREE_NIDS)
break;
if (i->state == NID_ALLOC)
continue;