summaryrefslogtreecommitdiff
path: root/include/linux/mbcache2.h
diff options
context:
space:
mode:
authorJan Kara <jack@suse.cz>2016-02-22 18:23:47 -0500
committerTheodore Ts'o <tytso@mit.edu>2016-02-22 18:23:47 -0500
commitf0c8b46238db9d51ef9ea0858259958d0c601cec (patch)
tree67a5822eb2c25bf5c89e6e52a81e6fc8cde7d7ba /include/linux/mbcache2.h
parentc2f3140fe2eceb3a6c1615b2648b9471544881c6 (diff)
mbcache2: Use referenced bit instead of LRU
Currently we maintain perfect LRU list by moving entry to the tail of the list when it gets used. However these operations on cache-global list are relatively expensive. In this patch we switch to lazy updates of LRU list. Whenever entry gets used, we set a referenced bit in it. When reclaiming entries, we give referenced entries another round in the LRU. Since the list is not a real LRU anymore, rename it to just 'list'. In my testing this logic gives about 30% boost to workloads with mostly unique xattr blocks (e.g. xattr-bench with 10 files and 10000 unique xattr values). Signed-off-by: Jan Kara <jack@suse.cz> Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Diffstat (limited to 'include/linux/mbcache2.h')
-rw-r--r--include/linux/mbcache2.h11
1 file changed, 7 insertions, 4 deletions
diff --git a/include/linux/mbcache2.h b/include/linux/mbcache2.h
index b6f160ff2533..c934843a6a31 100644
--- a/include/linux/mbcache2.h
+++ b/include/linux/mbcache2.h
@@ -10,8 +10,8 @@
struct mb2_cache;
struct mb2_cache_entry {
- /* LRU list - protected by cache->c_lru_list_lock */
- struct list_head e_lru_list;
+ /* List of entries in cache - protected by cache->c_list_lock */
+ struct list_head e_list;
/* Hash table list - protected by bitlock in e_hash_list_head */
struct hlist_bl_node e_hash_list;
atomic_t e_refcnt;
@@ -19,8 +19,11 @@ struct mb2_cache_entry {
u32 e_key;
/* Block number of hashed block - stable during lifetime of the entry */
sector_t e_block;
- /* Head of hash list (for list bit lock) - stable */
- struct hlist_bl_head *e_hash_list_head;
+ /*
+ * Head of hash list (for list bit lock) - stable. Combined with
+ * referenced bit of entry
+ */
+ unsigned long _e_hash_list_head;
};
struct mb2_cache *mb2_cache_create(int bucket_bits);