Diffstat (limited to 'drivers/md')
45 files changed, 3060 insertions(+), 1363 deletions(-)
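Before the per-file diffs: the bcache portion of this series removes the separate "unused" bucket list and the on-disk generation tracking (disk_gen, need_save_prio), deriving reclaimability purely from the GC mark. Note that GC_MARK_RECLAIMABLE moves from 0 to 1, so an unmarked bucket (mark 0) now means GC found no pointers into it, and bch_can_invalidate_bucket() accepts both states. The following is a minimal userspace sketch of that reworked invalidation predicate, using simplified stand-in types rather than the kernel's struct bucket/struct cache; the constants and conditions mirror the hunks below:

/*
 * Standalone model of the invalidation check after this series, roughly
 * following bch_can_invalidate_bucket(). Types are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BUCKET_GC_GEN_MAX	96U

enum gc_mark {			/* 0 now means "GC saw no pointers" */
	GC_MARK_NONE		= 0,
	GC_MARK_RECLAIMABLE	= 1,
	GC_MARK_DIRTY		= 2,
	GC_MARK_METADATA	= 3,
};

struct bucket {
	uint8_t		gen;	/* current generation */
	uint8_t		last_gc;/* oldest gen of any btree pointer into it */
	int		pin;	/* stands in for atomic_t pin */
	enum gc_mark	mark;
};

/* gen - last_gc must stay bounded so generations can't wrap */
static bool can_inc_bucket_gen(const struct bucket *b)
{
	return (uint8_t) (b->gen - b->last_gc) < BUCKET_GC_GEN_MAX;
}

/*
 * A bucket may be invalidated if GC marked it reclaimable, or didn't
 * mark it at all, and nothing holds it pinned.
 */
static bool can_invalidate(const struct bucket *b)
{
	return (b->mark == GC_MARK_NONE ||
		b->mark == GC_MARK_RECLAIMABLE) &&
	       b->pin == 0 &&
	       can_inc_bucket_gen(b);
}

int main(void)
{
	struct bucket b = { .gen = 10, .last_gc = 8, .pin = 0,
			    .mark = GC_MARK_NONE };

	printf("invalidate? %s\n", can_invalidate(&b) ? "yes" : "no");
	b.mark = GC_MARK_METADATA;
	printf("invalidate? %s\n", can_invalidate(&b) ? "yes" : "no");
	return 0;
}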
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 95ad936e6048..5bdedf6df153 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -285,6 +285,17 @@ config DM_CACHE_CLEANER A simple cache policy that writes back all data to the origin. Used when decommissioning a dm-cache. +config DM_ERA + tristate "Era target (EXPERIMENTAL)" + depends on BLK_DEV_DM + default n + select DM_PERSISTENT_DATA + select DM_BIO_PRISON + ---help--- + dm-era tracks which parts of a block device are written to + over time. Useful for maintaining cache coherency when using + vendor snapshots. + config DM_MIRROR tristate "Mirror target" depends on BLK_DEV_DM diff --git a/drivers/md/Makefile b/drivers/md/Makefile index f26d83292579..a2da532b1c2b 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -14,6 +14,7 @@ dm-thin-pool-y += dm-thin.o dm-thin-metadata.o dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o dm-cache-mq-y += dm-cache-policy-mq.o dm-cache-cleaner-y += dm-cache-policy-cleaner.o +dm-era-y += dm-era-target.o md-mod-y += md.o bitmap.o raid456-y += raid5.o @@ -53,6 +54,7 @@ obj-$(CONFIG_DM_VERITY) += dm-verity.o obj-$(CONFIG_DM_CACHE) += dm-cache.o obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o +obj-$(CONFIG_DM_ERA) += dm-era.o ifeq ($(CONFIG_DM_UEVENT),y) dm-mod-objs += dm-uevent.o diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig index 2638417b19aa..4d200883c505 100644 --- a/drivers/md/bcache/Kconfig +++ b/drivers/md/bcache/Kconfig @@ -24,11 +24,3 @@ config BCACHE_CLOSURES_DEBUG Keeps all active closures in a linked list and provides a debugfs interface to list them, which makes it possible to see asynchronous operations that get stuck. - -# cgroup code needs to be updated: -# -#config CGROUP_BCACHE -# bool "Cgroup controls for bcache" -# depends on BCACHE && BLK_CGROUP -# ---help--- -# TODO diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index c0d37d082443..443d03fbac47 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -78,12 +78,6 @@ uint8_t bch_inc_gen(struct cache *ca, struct bucket *b) ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b)); WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX); - if (CACHE_SYNC(&ca->set->sb)) { - ca->need_save_prio = max(ca->need_save_prio, - bucket_disk_gen(b)); - WARN_ON_ONCE(ca->need_save_prio > BUCKET_DISK_GEN_MAX); - } - return ret; } @@ -120,51 +114,45 @@ void bch_rescale_priorities(struct cache_set *c, int sectors) mutex_unlock(&c->bucket_lock); } -/* Allocation */ +/* + * Background allocation thread: scans for buckets to be invalidated, + * invalidates them, rewrites prios/gens (marking them as invalidated on disk), + * then optionally issues discard commands to the newly free buckets, then puts + * them on the various freelists. 
+ */ static inline bool can_inc_bucket_gen(struct bucket *b) { - return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX && - bucket_disk_gen(b) < BUCKET_DISK_GEN_MAX; + return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX; } -bool bch_bucket_add_unused(struct cache *ca, struct bucket *b) +bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b) { - BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b)); - - if (CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO) { - unsigned i; - - for (i = 0; i < RESERVE_NONE; i++) - if (!fifo_full(&ca->free[i])) - goto add; + BUG_ON(!ca->set->gc_mark_valid); - return false; - } -add: - b->prio = 0; - - if (can_inc_bucket_gen(b) && - fifo_push(&ca->unused, b - ca->buckets)) { - atomic_inc(&b->pin); - return true; - } - - return false; -} - -static bool can_invalidate_bucket(struct cache *ca, struct bucket *b) -{ - return GC_MARK(b) == GC_MARK_RECLAIMABLE && + return (!GC_MARK(b) || + GC_MARK(b) == GC_MARK_RECLAIMABLE) && !atomic_read(&b->pin) && can_inc_bucket_gen(b); } -static void invalidate_one_bucket(struct cache *ca, struct bucket *b) +void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) { + lockdep_assert_held(&ca->set->bucket_lock); + BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE); + + if (GC_SECTORS_USED(b)) + trace_bcache_invalidate(ca, b - ca->buckets); + bch_inc_gen(ca, b); b->prio = INITIAL_PRIO; atomic_inc(&b->pin); +} + +static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) +{ + __bch_invalidate_one_bucket(ca, b); + fifo_push(&ca->free_inc, b - ca->buckets); } @@ -195,20 +183,7 @@ static void invalidate_buckets_lru(struct cache *ca) ca->heap.used = 0; for_each_bucket(b, ca) { - /* - * If we fill up the unused list, if we then return before - * adding anything to the free_inc list we'll skip writing - * prios/gens and just go back to allocating from the unused - * list: - */ - if (fifo_full(&ca->unused)) - return; - - if (!can_invalidate_bucket(ca, b)) - continue; - - if (!GC_SECTORS_USED(b) && - bch_bucket_add_unused(ca, b)) + if (!bch_can_invalidate_bucket(ca, b)) continue; if (!heap_full(&ca->heap)) @@ -233,7 +208,7 @@ static void invalidate_buckets_lru(struct cache *ca) return; } - invalidate_one_bucket(ca, b); + bch_invalidate_one_bucket(ca, b); } } @@ -249,8 +224,8 @@ static void invalidate_buckets_fifo(struct cache *ca) b = ca->buckets + ca->fifo_last_bucket++; - if (can_invalidate_bucket(ca, b)) - invalidate_one_bucket(ca, b); + if (bch_can_invalidate_bucket(ca, b)) + bch_invalidate_one_bucket(ca, b); if (++checked >= ca->sb.nbuckets) { ca->invalidate_needs_gc = 1; @@ -274,8 +249,8 @@ static void invalidate_buckets_random(struct cache *ca) b = ca->buckets + n; - if (can_invalidate_bucket(ca, b)) - invalidate_one_bucket(ca, b); + if (bch_can_invalidate_bucket(ca, b)) + bch_invalidate_one_bucket(ca, b); if (++checked >= ca->sb.nbuckets / 2) { ca->invalidate_needs_gc = 1; @@ -287,8 +262,7 @@ static void invalidate_buckets_random(struct cache *ca) static void invalidate_buckets(struct cache *ca) { - if (ca->invalidate_needs_gc) - return; + BUG_ON(ca->invalidate_needs_gc); switch (CACHE_REPLACEMENT(&ca->sb)) { case CACHE_REPLACEMENT_LRU: @@ -301,8 +275,6 @@ static void invalidate_buckets(struct cache *ca) invalidate_buckets_random(ca); break; } - - trace_bcache_alloc_invalidate(ca); } #define allocator_wait(ca, cond) \ @@ -350,17 +322,10 @@ static int bch_allocator_thread(void *arg) * possibly issue discards to them, then we add the bucket to * the free list: */ - while (1) { + while (!fifo_empty(&ca->free_inc)) { 
long bucket; - if ((!atomic_read(&ca->set->prio_blocked) || - !CACHE_SYNC(&ca->set->sb)) && - !fifo_empty(&ca->unused)) - fifo_pop(&ca->unused, bucket); - else if (!fifo_empty(&ca->free_inc)) - fifo_pop(&ca->free_inc, bucket); - else - break; + fifo_pop(&ca->free_inc, bucket); if (ca->discard) { mutex_unlock(&ca->set->bucket_lock); @@ -371,6 +336,7 @@ static int bch_allocator_thread(void *arg) } allocator_wait(ca, bch_allocator_push(ca, bucket)); + wake_up(&ca->set->btree_cache_wait); wake_up(&ca->set->bucket_wait); } @@ -380,9 +346,9 @@ static int bch_allocator_thread(void *arg) * them to the free_inc list: */ +retry_invalidate: allocator_wait(ca, ca->set->gc_mark_valid && - (ca->need_save_prio > 64 || - !ca->invalidate_needs_gc)); + !ca->invalidate_needs_gc); invalidate_buckets(ca); /* @@ -390,13 +356,28 @@ static int bch_allocator_thread(void *arg) * new stuff to them: */ allocator_wait(ca, !atomic_read(&ca->set->prio_blocked)); - if (CACHE_SYNC(&ca->set->sb) && - (!fifo_empty(&ca->free_inc) || - ca->need_save_prio > 64)) + if (CACHE_SYNC(&ca->set->sb)) { + /* + * This could deadlock if an allocation with a btree + * node locked ever blocked - having the btree node + * locked would block garbage collection, but here we're + * waiting on garbage collection before we invalidate + * and free anything. + * + * But this should be safe since the btree code always + * uses btree_check_reserve() before allocating now, and + * if it fails it blocks without btree nodes locked. + */ + if (!fifo_full(&ca->free_inc)) + goto retry_invalidate; + bch_prio_write(ca); + } } } +/* Allocation */ + long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait) { DEFINE_WAIT(w); @@ -408,8 +389,10 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait) fifo_pop(&ca->free[reserve], r)) goto out; - if (!wait) + if (!wait) { + trace_bcache_alloc_fail(ca, reserve); return -1; + } do { prepare_to_wait(&ca->set->bucket_wait, &w, @@ -425,6 +408,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait) out: wake_up_process(ca->alloc_thread); + trace_bcache_alloc(ca, reserve); + if (expensive_debug_checks(ca->set)) { size_t iter; long i; @@ -438,8 +423,6 @@ out: BUG_ON(i == r); fifo_for_each(i, &ca->free_inc, iter) BUG_ON(i == r); - fifo_for_each(i, &ca->unused, iter) - BUG_ON(i == r); } b = ca->buckets + r; @@ -461,17 +444,19 @@ out: return r; } +void __bch_bucket_free(struct cache *ca, struct bucket *b) +{ + SET_GC_MARK(b, 0); + SET_GC_SECTORS_USED(b, 0); +} + void bch_bucket_free(struct cache_set *c, struct bkey *k) { unsigned i; - for (i = 0; i < KEY_PTRS(k); i++) { - struct bucket *b = PTR_BUCKET(c, k, i); - - SET_GC_MARK(b, GC_MARK_RECLAIMABLE); - SET_GC_SECTORS_USED(b, 0); - bch_bucket_add_unused(PTR_CACHE(c, k, i), b); - } + for (i = 0; i < KEY_PTRS(k); i++) + __bch_bucket_free(PTR_CACHE(c, k, i), + PTR_BUCKET(c, k, i)); } int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, @@ -709,25 +694,3 @@ int bch_cache_allocator_start(struct cache *ca) ca->alloc_thread = k; return 0; } - -int bch_cache_allocator_init(struct cache *ca) -{ - /* - * Reserve: - * Prio/gen writes first - * Then 8 for btree allocations - * Then half for the moving garbage collector - */ -#if 0 - ca->watermark[WATERMARK_PRIO] = 0; - - ca->watermark[WATERMARK_METADATA] = prio_buckets(ca); - - ca->watermark[WATERMARK_MOVINGGC] = 8 + - ca->watermark[WATERMARK_METADATA]; - - ca->watermark[WATERMARK_NONE] = ca->free.size / 2 + - ca->watermark[WATERMARK_MOVINGGC]; -#endif - return 0; -} diff --git 
a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index a4c7306ff43d..82c9c5d35251 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -195,9 +195,7 @@ struct bucket { atomic_t pin; uint16_t prio; uint8_t gen; - uint8_t disk_gen; uint8_t last_gc; /* Most out of date gen in the btree */ - uint8_t gc_gen; uint16_t gc_mark; /* Bitfield used by GC. See below for field */ }; @@ -207,9 +205,9 @@ struct bucket { */ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2); -#define GC_MARK_RECLAIMABLE 0 -#define GC_MARK_DIRTY 1 -#define GC_MARK_METADATA 2 +#define GC_MARK_RECLAIMABLE 1 +#define GC_MARK_DIRTY 2 +#define GC_MARK_METADATA 3 #define GC_SECTORS_USED_SIZE 13 #define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE)) BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE); @@ -426,14 +424,9 @@ struct cache { * their new gen to disk. After prio_write() finishes writing the new * gens/prios, they'll be moved to the free list (and possibly discarded * in the process) - * - * unused: GC found nothing pointing into these buckets (possibly - * because all the data they contained was overwritten), so we only - * need to discard them before they can be moved to the free list. */ DECLARE_FIFO(long, free)[RESERVE_NR]; DECLARE_FIFO(long, free_inc); - DECLARE_FIFO(long, unused); size_t fifo_last_bucket; @@ -443,12 +436,6 @@ struct cache { DECLARE_HEAP(struct bucket *, heap); /* - * max(gen - disk_gen) for all buckets. When it gets too big we have to - * call prio_write() to keep gens from wrapping. - */ - uint8_t need_save_prio; - - /* * If nonzero, we know we aren't going to find any buckets to invalidate * until a gc finishes - otherwise we could pointlessly burn a ton of * cpu @@ -562,19 +549,16 @@ struct cache_set { struct list_head btree_cache_freed; /* Number of elements in btree_cache + btree_cache_freeable lists */ - unsigned bucket_cache_used; + unsigned btree_cache_used; /* * If we need to allocate memory for a new btree node and that * allocation fails, we can cannibalize another node in the btree cache - * to satisfy the allocation. However, only one thread can be doing this - * at a time, for obvious reasons - try_harder and try_wait are - * basically a lock for this that we can wait on asynchronously. The - * btree_root() macro releases the lock when it returns. + * to satisfy the allocation - lock to guarantee only one thread does + * this at a time: */ - struct task_struct *try_harder; - wait_queue_head_t try_wait; - uint64_t try_harder_start; + wait_queue_head_t btree_cache_wait; + struct task_struct *btree_cache_alloc_lock; /* * When we free a btree node, we increment the gen of the bucket the @@ -603,7 +587,7 @@ struct cache_set { uint16_t min_prio; /* - * max(gen - gc_gen) for all buckets. When it gets too big we have to gc + * max(gen - last_gc) for all buckets. When it gets too big we have to gc * to keep gens from wrapping around. 
*/ uint8_t need_gc; @@ -628,6 +612,8 @@ struct cache_set { /* Number of moving GC bios in flight */ struct semaphore moving_in_flight; + struct workqueue_struct *moving_gc_wq; + struct btree *root; #ifdef CONFIG_BCACHE_DEBUG @@ -667,7 +653,6 @@ struct cache_set { struct time_stats btree_gc_time; struct time_stats btree_split_time; struct time_stats btree_read_time; - struct time_stats try_harder_time; atomic_long_t cache_read_races; atomic_long_t writeback_keys_done; @@ -850,9 +835,6 @@ static inline bool cached_dev_get(struct cached_dev *dc) /* * bucket_gc_gen() returns the difference between the bucket's current gen and * the oldest gen of any pointer into that bucket in the btree (last_gc). - * - * bucket_disk_gen() returns the difference between the current gen and the gen - * on disk; they're both used to make sure gens don't wrap around. */ static inline uint8_t bucket_gc_gen(struct bucket *b) @@ -860,13 +842,7 @@ static inline uint8_t bucket_gc_gen(struct bucket *b) return b->gen - b->last_gc; } -static inline uint8_t bucket_disk_gen(struct bucket *b) -{ - return b->gen - b->disk_gen; -} - #define BUCKET_GC_GEN_MAX 96U -#define BUCKET_DISK_GEN_MAX 64U #define kobj_attribute_write(n, fn) \ static struct kobj_attribute ksysfs_##n = __ATTR(n, S_IWUSR, NULL, fn) @@ -899,11 +875,14 @@ void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned); uint8_t bch_inc_gen(struct cache *, struct bucket *); void bch_rescale_priorities(struct cache_set *, int); -bool bch_bucket_add_unused(struct cache *, struct bucket *); -long bch_bucket_alloc(struct cache *, unsigned, bool); +bool bch_can_invalidate_bucket(struct cache *, struct bucket *); +void __bch_invalidate_one_bucket(struct cache *, struct bucket *); + +void __bch_bucket_free(struct cache *, struct bucket *); void bch_bucket_free(struct cache_set *, struct bkey *); +long bch_bucket_alloc(struct cache *, unsigned, bool); int __bch_bucket_alloc_set(struct cache_set *, unsigned, struct bkey *, int, bool); int bch_bucket_alloc_set(struct cache_set *, unsigned, @@ -954,13 +933,10 @@ int bch_open_buckets_alloc(struct cache_set *); void bch_open_buckets_free(struct cache_set *); int bch_cache_allocator_start(struct cache *ca); -int bch_cache_allocator_init(struct cache *ca); void bch_debug_exit(void); int bch_debug_init(struct kobject *); void bch_request_exit(void); int bch_request_init(void); -void bch_btree_exit(void); -int bch_btree_init(void); #endif /* _BCACHE_H */ diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 3f74b4b0747b..545416415305 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -23,8 +23,8 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set) for (k = i->start; k < bset_bkey_last(i); k = next) { next = bkey_next(k); - printk(KERN_ERR "block %u key %li/%u: ", set, - (uint64_t *) k - i->d, i->keys); + printk(KERN_ERR "block %u key %u/%u: ", set, + (unsigned) ((u64 *) k - i->d), i->keys); if (b->ops->key_dump) b->ops->key_dump(b, k); diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h index 003260f4ddf6..5f6728d5d4dd 100644 --- a/drivers/md/bcache/bset.h +++ b/drivers/md/bcache/bset.h @@ -478,6 +478,12 @@ static inline void bch_keylist_init(struct keylist *l) l->top_p = l->keys_p = l->inline_keys; } +static inline void bch_keylist_init_single(struct keylist *l, struct bkey *k) +{ + l->keys = k; + l->top = bkey_next(k); +} + static inline void bch_keylist_push(struct keylist *l) { l->top = bkey_next(l->top); diff --git 
a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 5f9c2a665ca5..7347b6100961 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -68,15 +68,11 @@ * alloc_bucket() cannot fail. This should be true but is not completely * obvious. * - * Make sure all allocations get charged to the root cgroup - * * Plugging? * * If data write is less than hard sector size of ssd, round up offset in open * bucket to the next whole sector * - * Also lookup by cgroup in get_open_bucket() - * * Superblock needs to be fleshed out for multiple cache devices * * Add a sysfs tunable for the number of writeback IOs in flight @@ -97,8 +93,6 @@ #define PTR_HASH(c, k) \ (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0)) -static struct workqueue_struct *btree_io_wq; - #define insert_lock(s, b) ((b)->level <= (s)->lock) /* @@ -123,7 +117,7 @@ static struct workqueue_struct *btree_io_wq; ({ \ int _r, l = (b)->level - 1; \ bool _w = l <= (op)->lock; \ - struct btree *_child = bch_btree_node_get((b)->c, key, l, _w); \ + struct btree *_child = bch_btree_node_get((b)->c, op, key, l, _w);\ if (!IS_ERR(_child)) { \ _child->parent = (b); \ _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \ @@ -152,17 +146,12 @@ static struct workqueue_struct *btree_io_wq; _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \ } \ rw_unlock(_w, _b); \ + bch_cannibalize_unlock(c); \ if (_r == -EINTR) \ schedule(); \ - bch_cannibalize_unlock(c); \ - if (_r == -ENOSPC) { \ - wait_event((c)->try_wait, \ - !(c)->try_harder); \ - _r = -EINTR; \ - } \ } while (_r == -EINTR); \ \ - finish_wait(&(c)->bucket_wait, &(op)->wait); \ + finish_wait(&(c)->btree_cache_wait, &(op)->wait); \ _r; \ }) @@ -171,6 +160,20 @@ static inline struct bset *write_block(struct btree *b) return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c); } +static void bch_btree_init_next(struct btree *b) +{ + /* If not a leaf node, always sort */ + if (b->level && b->keys.nsets) + bch_btree_sort(&b->keys, &b->c->sort); + else + bch_btree_sort_lazy(&b->keys, &b->c->sort); + + if (b->written < btree_blocks(b)) + bch_bset_init_next(&b->keys, write_block(b), + bset_magic(&b->c->sb)); + +} + /* Btree key manipulation */ void bkey_put(struct cache_set *c, struct bkey *k) @@ -352,8 +355,7 @@ static void __btree_node_write_done(struct closure *cl) btree_complete_write(b, w); if (btree_node_dirty(b)) - queue_delayed_work(btree_io_wq, &b->work, - msecs_to_jiffies(30000)); + schedule_delayed_work(&b->work, 30 * HZ); closure_return_with_destructor(cl, btree_node_write_unlock); } @@ -442,10 +444,12 @@ static void do_btree_node_write(struct btree *b) } } -void bch_btree_node_write(struct btree *b, struct closure *parent) +void __bch_btree_node_write(struct btree *b, struct closure *parent) { struct bset *i = btree_bset_last(b); + lockdep_assert_held(&b->write_lock); + trace_bcache_btree_write(b); BUG_ON(current->bio_list); @@ -469,23 +473,24 @@ void bch_btree_node_write(struct btree *b, struct closure *parent) &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written); b->written += set_blocks(i, block_bytes(b->c)); +} - /* If not a leaf node, always sort */ - if (b->level && b->keys.nsets) - bch_btree_sort(&b->keys, &b->c->sort); - else - bch_btree_sort_lazy(&b->keys, &b->c->sort); +void bch_btree_node_write(struct btree *b, struct closure *parent) +{ + unsigned nsets = b->keys.nsets; + + lockdep_assert_held(&b->lock); + + __bch_btree_node_write(b, parent); /* * do verify if there was more than one set initially (i.e. 
we did a * sort) and we sorted down to a single set: */ - if (i != b->keys.set->data && !b->keys.nsets) + if (nsets && !b->keys.nsets) bch_btree_verify(b); - if (b->written < btree_blocks(b)) - bch_bset_init_next(&b->keys, write_block(b), - bset_magic(&b->c->sb)); + bch_btree_init_next(b); } static void bch_btree_node_write_sync(struct btree *b) @@ -493,7 +498,11 @@ static void bch_btree_node_write_sync(struct btree *b) struct closure cl; closure_init_stack(&cl); + + mutex_lock(&b->write_lock); bch_btree_node_write(b, &cl); + mutex_unlock(&b->write_lock); + closure_sync(&cl); } @@ -501,11 +510,10 @@ static void btree_node_write_work(struct work_struct *w) { struct btree *b = container_of(to_delayed_work(w), struct btree, work); - rw_lock(true, b, b->level); - + mutex_lock(&b->write_lock); if (btree_node_dirty(b)) - bch_btree_node_write(b, NULL); - rw_unlock(true, b); + __bch_btree_node_write(b, NULL); + mutex_unlock(&b->write_lock); } static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) @@ -513,11 +521,13 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) struct bset *i = btree_bset_last(b); struct btree_write *w = btree_current_write(b); + lockdep_assert_held(&b->write_lock); + BUG_ON(!b->written); BUG_ON(!i->keys); if (!btree_node_dirty(b)) - queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); + schedule_delayed_work(&b->work, 30 * HZ); set_btree_node_dirty(b); @@ -548,7 +558,7 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) #define mca_reserve(c) (((c->root && c->root->level) \ ? c->root->level : 1) * 8 + 16) #define mca_can_free(c) \ - max_t(int, 0, c->bucket_cache_used - mca_reserve(c)) + max_t(int, 0, c->btree_cache_used - mca_reserve(c)) static void mca_data_free(struct btree *b) { @@ -556,7 +566,7 @@ static void mca_data_free(struct btree *b) bch_btree_keys_free(&b->keys); - b->c->bucket_cache_used--; + b->c->btree_cache_used--; list_move(&b->list, &b->c->btree_cache_freed); } @@ -581,7 +591,7 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) ilog2(b->c->btree_pages), btree_order(k)), gfp)) { - b->c->bucket_cache_used++; + b->c->btree_cache_used++; list_move(&b->list, &b->c->btree_cache); } else { list_move(&b->list, &b->c->btree_cache_freed); @@ -597,6 +607,8 @@ static struct btree *mca_bucket_alloc(struct cache_set *c, init_rwsem(&b->lock); lockdep_set_novalidate_class(&b->lock); + mutex_init(&b->write_lock); + lockdep_set_novalidate_class(&b->write_lock); INIT_LIST_HEAD(&b->list); INIT_DELAYED_WORK(&b->work, btree_node_write_work); b->c = c; @@ -630,8 +642,12 @@ static int mca_reap(struct btree *b, unsigned min_order, bool flush) up(&b->io_mutex); } + mutex_lock(&b->write_lock); if (btree_node_dirty(b)) - bch_btree_node_write_sync(b); + __bch_btree_node_write(b, &cl); + mutex_unlock(&b->write_lock); + + closure_sync(&cl); /* wait for any in flight btree write */ down(&b->io_mutex); @@ -654,7 +670,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, if (c->shrinker_disabled) return SHRINK_STOP; - if (c->try_harder) + if (c->btree_cache_alloc_lock) return SHRINK_STOP; /* Return -1 if we can't do anything right now */ @@ -686,7 +702,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, } } - for (i = 0; (nr--) && i < c->bucket_cache_used; i++) { + for (i = 0; (nr--) && i < c->btree_cache_used; i++) { if (list_empty(&c->btree_cache)) goto out; @@ -715,7 +731,7 @@ static unsigned long bch_mca_count(struct shrinker *shrink, if (c->shrinker_disabled) return 0; - if 
(c->try_harder) + if (c->btree_cache_alloc_lock) return 0; return mca_can_free(c) * c->btree_pages; @@ -819,17 +835,30 @@ out: return b; } -static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k) +static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) +{ + struct task_struct *old; + + old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current); + if (old && old != current) { + if (op) + prepare_to_wait(&c->btree_cache_wait, &op->wait, + TASK_UNINTERRUPTIBLE); + return -EINTR; + } + + return 0; +} + +static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, + struct bkey *k) { struct btree *b; trace_bcache_btree_cache_cannibalize(c); - if (!c->try_harder) { - c->try_harder = current; - c->try_harder_start = local_clock(); - } else if (c->try_harder != current) - return ERR_PTR(-ENOSPC); + if (mca_cannibalize_lock(c, op)) + return ERR_PTR(-EINTR); list_for_each_entry_reverse(b, &c->btree_cache, list) if (!mca_reap(b, btree_order(k), false)) @@ -839,6 +868,7 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k) if (!mca_reap(b, btree_order(k), true)) return b; + WARN(1, "btree cache cannibalize failed\n"); return ERR_PTR(-ENOMEM); } @@ -850,14 +880,14 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k) */ static void bch_cannibalize_unlock(struct cache_set *c) { - if (c->try_harder == current) { - bch_time_stats_update(&c->try_harder_time, c->try_harder_start); - c->try_harder = NULL; - wake_up(&c->try_wait); + if (c->btree_cache_alloc_lock == current) { + c->btree_cache_alloc_lock = NULL; + wake_up(&c->btree_cache_wait); } } -static struct btree *mca_alloc(struct cache_set *c, struct bkey *k, int level) +static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, + struct bkey *k, int level) { struct btree *b; @@ -920,7 +950,7 @@ err: if (b) rw_unlock(true, b); - b = mca_cannibalize(c, k); + b = mca_cannibalize(c, op, k); if (!IS_ERR(b)) goto out; @@ -936,8 +966,8 @@ err: * The btree node will have either a read or a write lock held, depending on * level and op->lock. 
*/ -struct btree *bch_btree_node_get(struct cache_set *c, struct bkey *k, - int level, bool write) +struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, + struct bkey *k, int level, bool write) { int i = 0; struct btree *b; @@ -951,7 +981,7 @@ retry: return ERR_PTR(-EAGAIN); mutex_lock(&c->bucket_lock); - b = mca_alloc(c, k, level); + b = mca_alloc(c, op, k, level); mutex_unlock(&c->bucket_lock); if (!b) @@ -997,7 +1027,7 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level) struct btree *b; mutex_lock(&c->bucket_lock); - b = mca_alloc(c, k, level); + b = mca_alloc(c, NULL, k, level); mutex_unlock(&c->bucket_lock); if (!IS_ERR_OR_NULL(b)) { @@ -1010,46 +1040,41 @@ static void btree_node_prefetch(struct cache_set *c, struct bkey *k, int level) static void btree_node_free(struct btree *b) { - unsigned i; - trace_bcache_btree_node_free(b); BUG_ON(b == b->c->root); + mutex_lock(&b->write_lock); + if (btree_node_dirty(b)) btree_complete_write(b, btree_current_write(b)); clear_bit(BTREE_NODE_dirty, &b->flags); + mutex_unlock(&b->write_lock); + cancel_delayed_work(&b->work); mutex_lock(&b->c->bucket_lock); - - for (i = 0; i < KEY_PTRS(&b->key); i++) { - BUG_ON(atomic_read(&PTR_BUCKET(b->c, &b->key, i)->pin)); - - bch_inc_gen(PTR_CACHE(b->c, &b->key, i), - PTR_BUCKET(b->c, &b->key, i)); - } - bch_bucket_free(b->c, &b->key); mca_bucket_free(b); mutex_unlock(&b->c->bucket_lock); } -struct btree *bch_btree_node_alloc(struct cache_set *c, int level, bool wait) +struct btree *bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, + int level) { BKEY_PADDED(key) k; struct btree *b = ERR_PTR(-EAGAIN); mutex_lock(&c->bucket_lock); retry: - if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait)) + if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, op != NULL)) goto err; bkey_put(c, &k.key); SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS); - b = mca_alloc(c, &k.key, level); + b = mca_alloc(c, op, &k.key, level); if (IS_ERR(b)) goto err_free; @@ -1075,12 +1100,15 @@ err: return b; } -static struct btree *btree_node_alloc_replacement(struct btree *b, bool wait) +static struct btree *btree_node_alloc_replacement(struct btree *b, + struct btree_op *op) { - struct btree *n = bch_btree_node_alloc(b->c, b->level, wait); + struct btree *n = bch_btree_node_alloc(b->c, op, b->level); if (!IS_ERR_OR_NULL(n)) { + mutex_lock(&n->write_lock); bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); bkey_copy_key(&n->key, &b->key); + mutex_unlock(&n->write_lock); } return n; @@ -1090,43 +1118,47 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k) { unsigned i; + mutex_lock(&b->c->bucket_lock); + + atomic_inc(&b->c->prio_blocked); + bkey_copy(k, &b->key); bkey_copy_key(k, &ZERO_KEY); - for (i = 0; i < KEY_PTRS(k); i++) { - uint8_t g = PTR_BUCKET(b->c, k, i)->gen + 1; - - SET_PTR_GEN(k, i, g); - } + for (i = 0; i < KEY_PTRS(k); i++) + SET_PTR_GEN(k, i, + bch_inc_gen(PTR_CACHE(b->c, &b->key, i), + PTR_BUCKET(b->c, &b->key, i))); - atomic_inc(&b->c->prio_blocked); + mutex_unlock(&b->c->bucket_lock); } static int btree_check_reserve(struct btree *b, struct btree_op *op) { struct cache_set *c = b->c; struct cache *ca; - unsigned i, reserve = c->root->level * 2 + 1; - int ret = 0; + unsigned i, reserve = (c->root->level - b->level) * 2 + 1; mutex_lock(&c->bucket_lock); for_each_cache(ca, c, i) if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) { if (op) - prepare_to_wait(&c->bucket_wait, &op->wait, + prepare_to_wait(&c->btree_cache_wait, 
&op->wait, TASK_UNINTERRUPTIBLE); - ret = -EINTR; - break; + mutex_unlock(&c->bucket_lock); + return -EINTR; } mutex_unlock(&c->bucket_lock); - return ret; + + return mca_cannibalize_lock(b->c, op); } /* Garbage collection */ -uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k) +static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, + struct bkey *k) { uint8_t stale = 0; unsigned i; @@ -1146,8 +1178,8 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k) g = PTR_BUCKET(c, k, i); - if (gen_after(g->gc_gen, PTR_GEN(k, i))) - g->gc_gen = PTR_GEN(k, i); + if (gen_after(g->last_gc, PTR_GEN(k, i))) + g->last_gc = PTR_GEN(k, i); if (ptr_stale(c, k, i)) { stale = max(stale, ptr_stale(c, k, i)); @@ -1163,6 +1195,8 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k) SET_GC_MARK(g, GC_MARK_METADATA); else if (KEY_DIRTY(k)) SET_GC_MARK(g, GC_MARK_DIRTY); + else if (!GC_MARK(g)) + SET_GC_MARK(g, GC_MARK_RECLAIMABLE); /* guard against overflow */ SET_GC_SECTORS_USED(g, min_t(unsigned, @@ -1177,6 +1211,26 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k) #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k) +void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k) +{ + unsigned i; + + for (i = 0; i < KEY_PTRS(k); i++) + if (ptr_available(c, k, i) && + !ptr_stale(c, k, i)) { + struct bucket *b = PTR_BUCKET(c, k, i); + + b->gen = PTR_GEN(k, i); + + if (level && bkey_cmp(k, &ZERO_KEY)) + b->prio = BTREE_PRIO; + else if (!level && b->prio == BTREE_PRIO) + b->prio = INITIAL_PRIO; + } + + __bch_btree_mark_key(c, level, k); +} + static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) { uint8_t stale = 0; @@ -1230,14 +1284,19 @@ static int bch_btree_insert_node(struct btree *, struct btree_op *, struct keylist *, atomic_t *, struct bkey *); static int btree_gc_coalesce(struct btree *b, struct btree_op *op, - struct keylist *keylist, struct gc_stat *gc, - struct gc_merge_info *r) + struct gc_stat *gc, struct gc_merge_info *r) { unsigned i, nodes = 0, keys = 0, blocks; struct btree *new_nodes[GC_MERGE_NODES]; + struct keylist keylist; struct closure cl; struct bkey *k; + bch_keylist_init(&keylist); + + if (btree_check_reserve(b, NULL)) + return 0; + memset(new_nodes, 0, sizeof(new_nodes)); closure_init_stack(&cl); @@ -1252,11 +1311,23 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, return 0; for (i = 0; i < nodes; i++) { - new_nodes[i] = btree_node_alloc_replacement(r[i].b, false); + new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL); if (IS_ERR_OR_NULL(new_nodes[i])) goto out_nocoalesce; } + /* + * We have to check the reserve here, after we've allocated our new + * nodes, to make sure the insert below will succeed - we also check + * before as an optimization to potentially avoid a bunch of expensive + * allocs/sorts + */ + if (btree_check_reserve(b, NULL)) + goto out_nocoalesce; + + for (i = 0; i < nodes; i++) + mutex_lock(&new_nodes[i]->write_lock); + for (i = nodes - 1; i > 0; --i) { struct bset *n1 = btree_bset_first(new_nodes[i]); struct bset *n2 = btree_bset_first(new_nodes[i - 1]); @@ -1315,28 +1386,34 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, n2->keys -= keys; - if (__bch_keylist_realloc(keylist, + if (__bch_keylist_realloc(&keylist, bkey_u64s(&new_nodes[i]->key))) goto out_nocoalesce; bch_btree_node_write(new_nodes[i], &cl); - bch_keylist_add(keylist, &new_nodes[i]->key); + 
bch_keylist_add(&keylist, &new_nodes[i]->key); } - for (i = 0; i < nodes; i++) { - if (__bch_keylist_realloc(keylist, bkey_u64s(&r[i].b->key))) - goto out_nocoalesce; + for (i = 0; i < nodes; i++) + mutex_unlock(&new_nodes[i]->write_lock); - make_btree_freeing_key(r[i].b, keylist->top); - bch_keylist_push(keylist); - } + closure_sync(&cl); /* We emptied out this node */ BUG_ON(btree_bset_first(new_nodes[0])->keys); btree_node_free(new_nodes[0]); rw_unlock(true, new_nodes[0]); - closure_sync(&cl); + for (i = 0; i < nodes; i++) { + if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key))) + goto out_nocoalesce; + + make_btree_freeing_key(r[i].b, keylist.top); + bch_keylist_push(&keylist); + } + + bch_btree_insert_node(b, op, &keylist, NULL, NULL); + BUG_ON(!bch_keylist_empty(&keylist)); for (i = 0; i < nodes; i++) { btree_node_free(r[i].b); @@ -1345,22 +1422,22 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op, r[i].b = new_nodes[i]; } - bch_btree_insert_node(b, op, keylist, NULL, NULL); - BUG_ON(!bch_keylist_empty(keylist)); - memmove(r, r + 1, sizeof(r[0]) * (nodes - 1)); r[nodes - 1].b = ERR_PTR(-EINTR); trace_bcache_btree_gc_coalesce(nodes); gc->nodes--; + bch_keylist_free(&keylist); + /* Invalidated our iterator */ return -EINTR; out_nocoalesce: closure_sync(&cl); + bch_keylist_free(&keylist); - while ((k = bch_keylist_pop(keylist))) + while ((k = bch_keylist_pop(&keylist))) if (!bkey_cmp(k, &ZERO_KEY)) atomic_dec(&b->c->prio_blocked); @@ -1372,6 +1449,42 @@ out_nocoalesce: return 0; } +static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, + struct btree *replace) +{ + struct keylist keys; + struct btree *n; + + if (btree_check_reserve(b, NULL)) + return 0; + + n = btree_node_alloc_replacement(replace, NULL); + + /* recheck reserve after allocating replacement node */ + if (btree_check_reserve(b, NULL)) { + btree_node_free(n); + rw_unlock(true, n); + return 0; + } + + bch_btree_node_write_sync(n); + + bch_keylist_init(&keys); + bch_keylist_add(&keys, &n->key); + + make_btree_freeing_key(replace, keys.top); + bch_keylist_push(&keys); + + bch_btree_insert_node(b, op, &keys, NULL, NULL); + BUG_ON(!bch_keylist_empty(&keys)); + + btree_node_free(replace); + rw_unlock(true, n); + + /* Invalidated our iterator */ + return -EINTR; +} + static unsigned btree_gc_count_keys(struct btree *b) { struct bkey *k; @@ -1387,26 +1500,23 @@ static unsigned btree_gc_count_keys(struct btree *b) static int btree_gc_recurse(struct btree *b, struct btree_op *op, struct closure *writes, struct gc_stat *gc) { - unsigned i; int ret = 0; bool should_rewrite; - struct btree *n; struct bkey *k; - struct keylist keys; struct btree_iter iter; struct gc_merge_info r[GC_MERGE_NODES]; - struct gc_merge_info *last = r + GC_MERGE_NODES - 1; + struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1; - bch_keylist_init(&keys); bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done); - for (i = 0; i < GC_MERGE_NODES; i++) - r[i].b = ERR_PTR(-EINTR); + for (i = r; i < r + ARRAY_SIZE(r); i++) + i->b = ERR_PTR(-EINTR); while (1) { k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); if (k) { - r->b = bch_btree_node_get(b->c, k, b->level - 1, true); + r->b = bch_btree_node_get(b->c, op, k, b->level - 1, + true); if (IS_ERR(r->b)) { ret = PTR_ERR(r->b); break; @@ -1414,7 +1524,7 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op, r->keys = btree_gc_count_keys(r->b); - ret = btree_gc_coalesce(b, op, &keys, gc, r); + ret = btree_gc_coalesce(b, op, gc, r); if (ret) break; } 
@@ -1424,32 +1534,10 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op, if (!IS_ERR(last->b)) { should_rewrite = btree_gc_mark_node(last->b, gc); - if (should_rewrite && - !btree_check_reserve(b, NULL)) { - n = btree_node_alloc_replacement(last->b, - false); - - if (!IS_ERR_OR_NULL(n)) { - bch_btree_node_write_sync(n); - bch_keylist_add(&keys, &n->key); - - make_btree_freeing_key(last->b, - keys.top); - bch_keylist_push(&keys); - - btree_node_free(last->b); - - bch_btree_insert_node(b, op, &keys, - NULL, NULL); - BUG_ON(!bch_keylist_empty(&keys)); - - rw_unlock(true, last->b); - last->b = n; - - /* Invalidated our iterator */ - ret = -EINTR; + if (should_rewrite) { + ret = btree_gc_rewrite_node(b, op, last->b); + if (ret) break; - } } if (last->b->level) { @@ -1464,8 +1552,10 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op, * Must flush leaf nodes before gc ends, since replace * operations aren't journalled */ + mutex_lock(&last->b->write_lock); if (btree_node_dirty(last->b)) bch_btree_node_write(last->b, writes); + mutex_unlock(&last->b->write_lock); rw_unlock(true, last->b); } @@ -1478,15 +1568,15 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op, } } - for (i = 0; i < GC_MERGE_NODES; i++) - if (!IS_ERR_OR_NULL(r[i].b)) { - if (btree_node_dirty(r[i].b)) - bch_btree_node_write(r[i].b, writes); - rw_unlock(true, r[i].b); + for (i = r; i < r + ARRAY_SIZE(r); i++) + if (!IS_ERR_OR_NULL(i->b)) { + mutex_lock(&i->b->write_lock); + if (btree_node_dirty(i->b)) + bch_btree_node_write(i->b, writes); + mutex_unlock(&i->b->write_lock); + rw_unlock(true, i->b); } - bch_keylist_free(&keys); - return ret; } @@ -1499,10 +1589,11 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op, should_rewrite = btree_gc_mark_node(b, gc); if (should_rewrite) { - n = btree_node_alloc_replacement(b, false); + n = btree_node_alloc_replacement(b, NULL); if (!IS_ERR_OR_NULL(n)) { bch_btree_node_write_sync(n); + bch_btree_set_root(n); btree_node_free(b); rw_unlock(true, n); @@ -1511,6 +1602,8 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op, } } + __bch_btree_mark_key(b->c, b->level + 1, &b->key); + if (b->level) { ret = btree_gc_recurse(b, op, writes, gc); if (ret) @@ -1538,9 +1631,9 @@ static void btree_gc_start(struct cache_set *c) for_each_cache(ca, c, i) for_each_bucket(b, ca) { - b->gc_gen = b->gen; + b->last_gc = b->gen; if (!atomic_read(&b->pin)) { - SET_GC_MARK(b, GC_MARK_RECLAIMABLE); + SET_GC_MARK(b, 0); SET_GC_SECTORS_USED(b, 0); } } @@ -1548,7 +1641,7 @@ static void btree_gc_start(struct cache_set *c) mutex_unlock(&c->bucket_lock); } -size_t bch_btree_gc_finish(struct cache_set *c) +static size_t bch_btree_gc_finish(struct cache_set *c) { size_t available = 0; struct bucket *b; @@ -1561,11 +1654,6 @@ size_t bch_btree_gc_finish(struct cache_set *c) c->gc_mark_valid = 1; c->need_gc = 0; - if (c->root) - for (i = 0; i < KEY_PTRS(&c->root->key); i++) - SET_GC_MARK(PTR_BUCKET(c, &c->root->key, i), - GC_MARK_METADATA); - for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++) SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), GC_MARK_METADATA); @@ -1605,15 +1693,15 @@ size_t bch_btree_gc_finish(struct cache_set *c) SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA); for_each_bucket(b, ca) { - b->last_gc = b->gc_gen; c->need_gc = max(c->need_gc, bucket_gc_gen(b)); - if (!atomic_read(&b->pin) && - GC_MARK(b) == GC_MARK_RECLAIMABLE) { + if (atomic_read(&b->pin)) + continue; + + BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b)); + + if (!GC_MARK(b) || 
GC_MARK(b) == GC_MARK_RECLAIMABLE) available++; - if (!GC_SECTORS_USED(b)) - bch_bucket_add_unused(ca, b); - } } } @@ -1705,36 +1793,16 @@ int bch_gc_thread_start(struct cache_set *c) /* Initial partial gc */ -static int bch_btree_check_recurse(struct btree *b, struct btree_op *op, - unsigned long **seen) +static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) { int ret = 0; - unsigned i; struct bkey *k, *p = NULL; - struct bucket *g; struct btree_iter iter; - for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) { - for (i = 0; i < KEY_PTRS(k); i++) { - if (!ptr_available(b->c, k, i)) - continue; - - g = PTR_BUCKET(b->c, k, i); + for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) + bch_initial_mark_key(b->c, b->level, k); - if (!__test_and_set_bit(PTR_BUCKET_NR(b->c, k, i), - seen[PTR_DEV(k, i)]) || - !ptr_stale(b->c, k, i)) { - g->gen = PTR_GEN(k, i); - - if (b->level) - g->prio = BTREE_PRIO; - else if (g->prio == BTREE_PRIO) - g->prio = INITIAL_PRIO; - } - } - - btree_mark_key(b, k); - } + bch_initial_mark_key(b->c, b->level + 1, &b->key); if (b->level) { bch_btree_iter_init(&b->keys, &iter, NULL); @@ -1746,40 +1814,58 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op, btree_node_prefetch(b->c, k, b->level - 1); if (p) - ret = btree(check_recurse, p, b, op, seen); + ret = btree(check_recurse, p, b, op); p = k; } while (p && !ret); } - return 0; + return ret; } int bch_btree_check(struct cache_set *c) { - int ret = -ENOMEM; - unsigned i; - unsigned long *seen[MAX_CACHES_PER_SET]; struct btree_op op; - memset(seen, 0, sizeof(seen)); bch_btree_op_init(&op, SHRT_MAX); - for (i = 0; c->cache[i]; i++) { - size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8); - seen[i] = kmalloc(n, GFP_KERNEL); - if (!seen[i]) - goto err; + return btree_root(check_recurse, c, &op); +} + +void bch_initial_gc_finish(struct cache_set *c) +{ + struct cache *ca; + struct bucket *b; + unsigned i; + + bch_btree_gc_finish(c); - /* Disables the seen array until prio_read() uses it too */ - memset(seen[i], 0xFF, n); + mutex_lock(&c->bucket_lock); + + /* + * We need to put some unused buckets directly on the prio freelist in + * order to get the allocator thread started - it needs freed buckets in + * order to rewrite the prios and gens, and it needs to rewrite prios + * and gens in order to free buckets. + * + * This is only safe for buckets that have no live data in them, which + * there should always be some of. 
+ */ + for_each_cache(ca, c, i) { + for_each_bucket(b, ca) { + if (fifo_full(&ca->free[RESERVE_PRIO])) + break; + + if (bch_can_invalidate_bucket(ca, b) && + !GC_MARK(b)) { + __bch_invalidate_one_bucket(ca, b); + fifo_push(&ca->free[RESERVE_PRIO], + b - ca->buckets); + } + } } - ret = btree_root(check_recurse, c, &op, seen); -err: - for (i = 0; i < MAX_CACHES_PER_SET; i++) - kfree(seen[i]); - return ret; + mutex_unlock(&c->bucket_lock); } /* Btree insertion */ @@ -1871,11 +1957,14 @@ static int btree_split(struct btree *b, struct btree_op *op, closure_init_stack(&cl); bch_keylist_init(&parent_keys); - if (!b->level && - btree_check_reserve(b, op)) - return -EINTR; + if (btree_check_reserve(b, op)) { + if (!b->level) + return -EINTR; + else + WARN(1, "insufficient reserve for split\n"); + } - n1 = btree_node_alloc_replacement(b, true); + n1 = btree_node_alloc_replacement(b, op); if (IS_ERR(n1)) goto err; @@ -1887,16 +1976,19 @@ static int btree_split(struct btree *b, struct btree_op *op, trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); - n2 = bch_btree_node_alloc(b->c, b->level, true); + n2 = bch_btree_node_alloc(b->c, op, b->level); if (IS_ERR(n2)) goto err_free1; if (!b->parent) { - n3 = bch_btree_node_alloc(b->c, b->level + 1, true); + n3 = bch_btree_node_alloc(b->c, op, b->level + 1); if (IS_ERR(n3)) goto err_free2; } + mutex_lock(&n1->write_lock); + mutex_lock(&n2->write_lock); + bch_btree_insert_keys(n1, op, insert_keys, replace_key); /* @@ -1923,45 +2015,45 @@ static int btree_split(struct btree *b, struct btree_op *op, bch_keylist_add(&parent_keys, &n2->key); bch_btree_node_write(n2, &cl); + mutex_unlock(&n2->write_lock); rw_unlock(true, n2); } else { trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys); + mutex_lock(&n1->write_lock); bch_btree_insert_keys(n1, op, insert_keys, replace_key); } bch_keylist_add(&parent_keys, &n1->key); bch_btree_node_write(n1, &cl); + mutex_unlock(&n1->write_lock); if (n3) { /* Depth increases, make a new root */ + mutex_lock(&n3->write_lock); bkey_copy_key(&n3->key, &MAX_KEY); bch_btree_insert_keys(n3, op, &parent_keys, NULL); bch_btree_node_write(n3, &cl); + mutex_unlock(&n3->write_lock); closure_sync(&cl); bch_btree_set_root(n3); rw_unlock(true, n3); - - btree_node_free(b); } else if (!b->parent) { /* Root filled up but didn't need to be split */ closure_sync(&cl); bch_btree_set_root(n1); - - btree_node_free(b); } else { /* Split a non root node */ closure_sync(&cl); make_btree_freeing_key(b, parent_keys.top); bch_keylist_push(&parent_keys); - btree_node_free(b); - bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL); BUG_ON(!bch_keylist_empty(&parent_keys)); } + btree_node_free(b); rw_unlock(true, n1); bch_time_stats_update(&b->c->btree_split_time, start_time); @@ -1976,7 +2068,7 @@ err_free1: btree_node_free(n1); rw_unlock(true, n1); err: - WARN(1, "bcache: btree split failed"); + WARN(1, "bcache: btree split failed (level %u)", b->level); if (n3 == ERR_PTR(-EAGAIN) || n2 == ERR_PTR(-EAGAIN) || @@ -1991,33 +2083,54 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op, atomic_t *journal_ref, struct bkey *replace_key) { + struct closure cl; + BUG_ON(b->level && replace_key); + closure_init_stack(&cl); + + mutex_lock(&b->write_lock); + + if (write_block(b) != btree_bset_last(b) && + b->keys.last_set_unwritten) + bch_btree_init_next(b); /* just wrote a set */ + if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) { - if (current->bio_list) { - op->lock = b->c->root->level + 1; - return 
-EAGAIN; - } else if (op->lock <= b->c->root->level) { - op->lock = b->c->root->level + 1; - return -EINTR; - } else { - /* Invalidated all iterators */ - int ret = btree_split(b, op, insert_keys, replace_key); + mutex_unlock(&b->write_lock); + goto split; + } - return bch_keylist_empty(insert_keys) ? - 0 : ret ?: -EINTR; - } - } else { - BUG_ON(write_block(b) != btree_bset_last(b)); + BUG_ON(write_block(b) != btree_bset_last(b)); - if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) { - if (!b->level) - bch_btree_leaf_dirty(b, journal_ref); - else - bch_btree_node_write_sync(b); - } + if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) { + if (!b->level) + bch_btree_leaf_dirty(b, journal_ref); + else + bch_btree_node_write(b, &cl); + } - return 0; + mutex_unlock(&b->write_lock); + + /* wait for btree node write if necessary, after unlock */ + closure_sync(&cl); + + return 0; +split: + if (current->bio_list) { + op->lock = b->c->root->level + 1; + return -EAGAIN; + } else if (op->lock <= b->c->root->level) { + op->lock = b->c->root->level + 1; + return -EINTR; + } else { + /* Invalidated all iterators */ + int ret = btree_split(b, op, insert_keys, replace_key); + + if (bch_keylist_empty(insert_keys)) + return 0; + else if (!ret) + return -EINTR; + return ret; } } @@ -2403,18 +2516,3 @@ void bch_keybuf_init(struct keybuf *buf) spin_lock_init(&buf->lock); array_allocator_init(&buf->freelist); } - -void bch_btree_exit(void) -{ - if (btree_io_wq) - destroy_workqueue(btree_io_wq); -} - -int __init bch_btree_init(void) -{ - btree_io_wq = create_singlethread_workqueue("bch_btree_io"); - if (!btree_io_wq) - return -ENOMEM; - - return 0; -} diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index af065e97e55c..91dfa5e69685 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -127,6 +127,8 @@ struct btree { struct cache_set *c; struct btree *parent; + struct mutex write_lock; + unsigned long flags; uint16_t written; /* would be nice to kill */ uint8_t level; @@ -236,11 +238,13 @@ static inline void rw_unlock(bool w, struct btree *b) } void bch_btree_node_read_done(struct btree *); +void __bch_btree_node_write(struct btree *, struct closure *); void bch_btree_node_write(struct btree *, struct closure *); void bch_btree_set_root(struct btree *); -struct btree *bch_btree_node_alloc(struct cache_set *, int, bool); -struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool); +struct btree *bch_btree_node_alloc(struct cache_set *, struct btree_op *, int); +struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *, + struct bkey *, int, bool); int bch_btree_insert_check_key(struct btree *, struct btree_op *, struct bkey *); @@ -248,10 +252,10 @@ int bch_btree_insert(struct cache_set *, struct keylist *, atomic_t *, struct bkey *); int bch_gc_thread_start(struct cache_set *); -size_t bch_btree_gc_finish(struct cache_set *); +void bch_initial_gc_finish(struct cache_set *); void bch_moving_gc(struct cache_set *); int bch_btree_check(struct cache_set *); -uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *); +void bch_initial_mark_key(struct cache_set *, int, struct bkey *); static inline void wake_up_gc(struct cache_set *c) { diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c index 416d1a3e028e..3a0de4cf9771 100644 --- a/drivers/md/bcache/extents.c +++ b/drivers/md/bcache/extents.c @@ -194,9 +194,9 @@ err: mutex_unlock(&b->c->bucket_lock); bch_extent_to_text(buf, sizeof(buf), k); 
btree_bug(b, -"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i", +"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu", buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin), - g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen); + g->prio, g->gen, g->last_gc, GC_MARK(g)); return true; } @@ -308,6 +308,16 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter, return NULL; } +static void bch_subtract_dirty(struct bkey *k, + struct cache_set *c, + uint64_t offset, + int sectors) +{ + if (KEY_DIRTY(k)) + bcache_dev_sectors_dirty_add(c, KEY_INODE(k), + offset, -sectors); +} + static bool bch_extent_insert_fixup(struct btree_keys *b, struct bkey *insert, struct btree_iter *iter, @@ -315,13 +325,6 @@ static bool bch_extent_insert_fixup(struct btree_keys *b, { struct cache_set *c = container_of(b, struct btree, keys)->c; - void subtract_dirty(struct bkey *k, uint64_t offset, int sectors) - { - if (KEY_DIRTY(k)) - bcache_dev_sectors_dirty_add(c, KEY_INODE(k), - offset, -sectors); - } - uint64_t old_offset; unsigned old_size, sectors_found = 0; @@ -398,7 +401,8 @@ static bool bch_extent_insert_fixup(struct btree_keys *b, struct bkey *top; - subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert)); + bch_subtract_dirty(k, c, KEY_START(insert), + KEY_SIZE(insert)); if (bkey_written(b, k)) { /* @@ -448,7 +452,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b, } } - subtract_dirty(k, old_offset, old_size - KEY_SIZE(k)); + bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k)); } check_failed: @@ -499,9 +503,9 @@ static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k, if (mutex_trylock(&b->c->bucket_lock)) { if (b->c->gc_mark_valid && - ((GC_MARK(g) != GC_MARK_DIRTY && - KEY_DIRTY(k)) || - GC_MARK(g) == GC_MARK_METADATA)) + (!GC_MARK(g) || + GC_MARK(g) == GC_MARK_METADATA || + (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k)))) goto err; if (g->prio == BTREE_PRIO) @@ -515,9 +519,9 @@ err: mutex_unlock(&b->c->bucket_lock); bch_extent_to_text(buf, sizeof(buf), k); btree_bug(b, -"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i", +"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu", buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin), - g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen); + g->prio, g->gen, g->last_gc, GC_MARK(g)); return true; } diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 18039affc306..59e82021b5bb 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -237,8 +237,14 @@ bsearch: for (i = 0; i < ca->sb.njournal_buckets; i++) if (ja->seq[i] > seq) { seq = ja->seq[i]; - ja->cur_idx = ja->discard_idx = - ja->last_idx = i; + /* + * When journal_reclaim() goes to allocate for + * the first time, it'll use the bucket after + * ja->cur_idx + */ + ja->cur_idx = i; + ja->last_idx = ja->discard_idx = (i + 1) % + ca->sb.njournal_buckets; } } @@ -288,16 +294,11 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list) k = bkey_next(k)) { unsigned j; - for (j = 0; j < KEY_PTRS(k); j++) { - struct bucket *g = PTR_BUCKET(c, k, j); - atomic_inc(&g->pin); + for (j = 0; j < KEY_PTRS(k); j++) + if (ptr_available(c, k, j)) + atomic_inc(&PTR_BUCKET(c, k, j)->pin); - if (g->prio == BTREE_PRIO && - !ptr_stale(c, k, j)) - g->prio = INITIAL_PRIO; - } - - __bch_btree_mark_key(c, 0, k); + bch_initial_mark_key(c, 0, k); } } } @@ -312,8 
+313,6 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list) uint64_t start = i->j.last_seq, end = i->j.seq, n = start; struct keylist keylist; - bch_keylist_init(&keylist); - list_for_each_entry(i, list, list) { BUG_ON(i->pin && atomic_read(i->pin) != 1); @@ -326,8 +325,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list) k = bkey_next(k)) { trace_bcache_journal_replay_key(k); - bkey_copy(keylist.top, k); - bch_keylist_push(&keylist); + bch_keylist_init_single(&keylist, k); ret = bch_btree_insert(s, &keylist, i->pin, NULL); if (ret) @@ -383,16 +381,15 @@ retry: b = best; if (b) { - rw_lock(true, b, b->level); - + mutex_lock(&b->write_lock); if (!btree_current_write(b)->journal) { - rw_unlock(true, b); + mutex_unlock(&b->write_lock); /* We raced */ goto retry; } - bch_btree_node_write(b, NULL); - rw_unlock(true, b); + __bch_btree_node_write(b, NULL); + mutex_unlock(&b->write_lock); } } @@ -536,6 +533,7 @@ void bch_journal_next(struct journal *j) atomic_set(&fifo_back(&j->pin), 1); j->cur->data->seq = ++j->seq; + j->cur->dirty = false; j->cur->need_write = false; j->cur->data->keys = 0; @@ -731,7 +729,10 @@ static void journal_write_work(struct work_struct *work) struct cache_set, journal.work); spin_lock(&c->journal.lock); - journal_try_write(c); + if (c->journal.cur->dirty) + journal_try_write(c); + else + spin_unlock(&c->journal.lock); } /* @@ -761,7 +762,8 @@ atomic_t *bch_journal(struct cache_set *c, if (parent) { closure_wait(&w->wait, parent); journal_try_write(c); - } else if (!w->need_write) { + } else if (!w->dirty) { + w->dirty = true; schedule_delayed_work(&c->journal.work, msecs_to_jiffies(c->journal_delay_ms)); spin_unlock(&c->journal.lock); diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h index 9180c4465075..e3c39457afbb 100644 --- a/drivers/md/bcache/journal.h +++ b/drivers/md/bcache/journal.h @@ -95,6 +95,7 @@ struct journal_write { struct cache_set *c; struct closure_waitlist wait; + bool dirty; bool need_write; }; diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 9eb60d102de8..cd7490311e51 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -24,12 +24,10 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k) moving_gc_keys); unsigned i; - for (i = 0; i < KEY_PTRS(k); i++) { - struct bucket *g = PTR_BUCKET(c, k, i); - - if (GC_MOVE(g)) + for (i = 0; i < KEY_PTRS(k); i++) + if (ptr_available(c, k, i) && + GC_MOVE(PTR_BUCKET(c, k, i))) return true; - } return false; } @@ -115,7 +113,7 @@ static void write_moving(struct closure *cl) closure_call(&op->cl, bch_data_insert, NULL, cl); } - continue_at(cl, write_moving_finish, system_wq); + continue_at(cl, write_moving_finish, op->wq); } static void read_moving_submit(struct closure *cl) @@ -125,7 +123,7 @@ static void read_moving_submit(struct closure *cl) bch_submit_bbio(bio, io->op.c, &io->w->key, 0); - continue_at(cl, write_moving, system_wq); + continue_at(cl, write_moving, io->op.wq); } static void read_moving(struct cache_set *c) @@ -160,6 +158,7 @@ static void read_moving(struct cache_set *c) io->w = w; io->op.inode = KEY_INODE(&w->key); io->op.c = c; + io->op.wq = c->moving_gc_wq; moving_init(io); bio = &io->bio.bio; @@ -216,7 +215,10 @@ void bch_moving_gc(struct cache_set *c) ca->heap.used = 0; for_each_bucket(b, ca) { - if (!GC_SECTORS_USED(b)) + if (GC_MARK(b) == GC_MARK_METADATA || + !GC_SECTORS_USED(b) || + GC_SECTORS_USED(b) == ca->sb.bucket_size || + atomic_read(&b->pin)) continue; if 
(!heap_full(&ca->heap)) { diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 5d5d031cf381..15fff4f68a7c 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -12,11 +12,9 @@ #include "request.h" #include "writeback.h" -#include <linux/cgroup.h> #include <linux/module.h> #include <linux/hash.h> #include <linux/random.h> -#include "blk-cgroup.h" #include <trace/events/bcache.h> @@ -27,171 +25,13 @@ struct kmem_cache *bch_search_cache; static void bch_data_insert_start(struct closure *); -/* Cgroup interface */ - -#ifdef CONFIG_CGROUP_BCACHE -static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 }; - -static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup) -{ - struct cgroup_subsys_state *css; - return cgroup && - (css = cgroup_subsys_state(cgroup, bcache_subsys_id)) - ? container_of(css, struct bch_cgroup, css) - : &bcache_default_cgroup; -} - -struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio) -{ - struct cgroup_subsys_state *css = bio->bi_css - ? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id) - : task_subsys_state(current, bcache_subsys_id); - - return css - ? container_of(css, struct bch_cgroup, css) - : &bcache_default_cgroup; -} - -static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft, - struct file *file, - char __user *buf, size_t nbytes, loff_t *ppos) -{ - char tmp[1024]; - int len = bch_snprint_string_list(tmp, PAGE_SIZE, bch_cache_modes, - cgroup_to_bcache(cgrp)->cache_mode + 1); - - if (len < 0) - return len; - - return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); -} - -static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft, - const char *buf) -{ - int v = bch_read_string_list(buf, bch_cache_modes); - if (v < 0) - return v; - - cgroup_to_bcache(cgrp)->cache_mode = v - 1; - return 0; -} - -static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft) -{ - return cgroup_to_bcache(cgrp)->verify; -} - -static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val) -{ - cgroup_to_bcache(cgrp)->verify = val; - return 0; -} - -static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft) -{ - struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); - return atomic_read(&bcachecg->stats.cache_hits); -} - -static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft) -{ - struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); - return atomic_read(&bcachecg->stats.cache_misses); -} - -static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp, - struct cftype *cft) -{ - struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); - return atomic_read(&bcachecg->stats.cache_bypass_hits); -} - -static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp, - struct cftype *cft) -{ - struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp); - return atomic_read(&bcachecg->stats.cache_bypass_misses); -} - -static struct cftype bch_files[] = { - { - .name = "cache_mode", - .read = cache_mode_read, - .write_string = cache_mode_write, - }, - { - .name = "verify", - .read_u64 = bch_verify_read, - .write_u64 = bch_verify_write, - }, - { - .name = "cache_hits", - .read_u64 = bch_cache_hits_read, - }, - { - .name = "cache_misses", - .read_u64 = bch_cache_misses_read, - }, - { - .name = "cache_bypass_hits", - .read_u64 = bch_cache_bypass_hits_read, - }, - { - .name = "cache_bypass_misses", - .read_u64 = bch_cache_bypass_misses_read, - }, - { } /* terminate */ -}; - -static void init_bch_cgroup(struct bch_cgroup *cg) -{ - cg->cache_mode = -1; -} - 
-static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup) -{ - struct bch_cgroup *cg; - - cg = kzalloc(sizeof(*cg), GFP_KERNEL); - if (!cg) - return ERR_PTR(-ENOMEM); - init_bch_cgroup(cg); - return &cg->css; -} - -static void bcachecg_destroy(struct cgroup *cgroup) -{ - struct bch_cgroup *cg = cgroup_to_bcache(cgroup); - kfree(cg); -} - -struct cgroup_subsys bcache_subsys = { - .create = bcachecg_create, - .destroy = bcachecg_destroy, - .subsys_id = bcache_subsys_id, - .name = "bcache", - .module = THIS_MODULE, -}; -EXPORT_SYMBOL_GPL(bcache_subsys); -#endif - static unsigned cache_mode(struct cached_dev *dc, struct bio *bio) { -#ifdef CONFIG_CGROUP_BCACHE - int r = bch_bio_to_cgroup(bio)->cache_mode; - if (r >= 0) - return r; -#endif return BDEV_CACHE_MODE(&dc->sb); } static bool verify(struct cached_dev *dc, struct bio *bio) { -#ifdef CONFIG_CGROUP_BCACHE - if (bch_bio_to_cgroup(bio)->verify) - return true; -#endif return dc->verify; } @@ -248,7 +88,7 @@ static void bch_data_insert_keys(struct closure *cl) atomic_dec_bug(journal_ref); if (!op->insert_data_done) - continue_at(cl, bch_data_insert_start, bcache_wq); + continue_at(cl, bch_data_insert_start, op->wq); bch_keylist_free(&op->insert_keys); closure_return(cl); @@ -297,7 +137,7 @@ static void bch_data_invalidate(struct closure *cl) op->insert_data_done = true; bio_put(bio); out: - continue_at(cl, bch_data_insert_keys, bcache_wq); + continue_at(cl, bch_data_insert_keys, op->wq); } static void bch_data_insert_error(struct closure *cl) @@ -340,7 +180,7 @@ static void bch_data_insert_endio(struct bio *bio, int error) if (op->writeback) op->error = error; else if (!op->replace) - set_closure_fn(cl, bch_data_insert_error, bcache_wq); + set_closure_fn(cl, bch_data_insert_error, op->wq); else set_closure_fn(cl, NULL, NULL); } @@ -376,7 +216,7 @@ static void bch_data_insert_start(struct closure *cl) if (bch_keylist_realloc(&op->insert_keys, 3 + (op->csum ? 
1 : 0), op->c)) - continue_at(cl, bch_data_insert_keys, bcache_wq); + continue_at(cl, bch_data_insert_keys, op->wq); k = op->insert_keys.top; bkey_init(k); @@ -413,7 +253,7 @@ static void bch_data_insert_start(struct closure *cl) } while (n != bio); op->insert_data_done = true; - continue_at(cl, bch_data_insert_keys, bcache_wq); + continue_at(cl, bch_data_insert_keys, op->wq); err: /* bch_alloc_sectors() blocks if s->writeback = true */ BUG_ON(op->writeback); @@ -442,7 +282,7 @@ err: bio_put(bio); if (!bch_keylist_empty(&op->insert_keys)) - continue_at(cl, bch_data_insert_keys, bcache_wq); + continue_at(cl, bch_data_insert_keys, op->wq); else closure_return(cl); } @@ -824,6 +664,7 @@ static inline struct search *search_alloc(struct bio *bio, s->iop.error = 0; s->iop.flags = 0; s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0; + s->iop.wq = bcache_wq; return s; } @@ -1203,22 +1044,13 @@ void bch_cached_dev_request_init(struct cached_dev *dc) static int flash_dev_cache_miss(struct btree *b, struct search *s, struct bio *bio, unsigned sectors) { - struct bio_vec bv; - struct bvec_iter iter; - - /* Zero fill bio */ + unsigned bytes = min(sectors, bio_sectors(bio)) << 9; - bio_for_each_segment(bv, bio, iter) { - unsigned j = min(bv.bv_len >> 9, sectors); - - void *p = kmap(bv.bv_page); - memset(p + bv.bv_offset, 0, j << 9); - kunmap(bv.bv_page); - - sectors -= j; - } + swap(bio->bi_iter.bi_size, bytes); + zero_fill_bio(bio); + swap(bio->bi_iter.bi_size, bytes); - bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size)); + bio_advance(bio, bytes); if (!bio->bi_iter.bi_size) return MAP_DONE; @@ -1313,9 +1145,6 @@ void bch_flash_dev_request_init(struct bcache_device *d) void bch_request_exit(void) { -#ifdef CONFIG_CGROUP_BCACHE - cgroup_unload_subsys(&bcache_subsys); -#endif if (bch_search_cache) kmem_cache_destroy(bch_search_cache); } @@ -1326,11 +1155,5 @@ int __init bch_request_init(void) if (!bch_search_cache) return -ENOMEM; -#ifdef CONFIG_CGROUP_BCACHE - cgroup_load_subsys(&bcache_subsys); - init_bch_cgroup(&bcache_default_cgroup); - - cgroup_add_cftypes(&bcache_subsys, bch_files); -#endif return 0; } diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h index 39f21dbedc38..1ff36875c2b3 100644 --- a/drivers/md/bcache/request.h +++ b/drivers/md/bcache/request.h @@ -1,12 +1,11 @@ #ifndef _BCACHE_REQUEST_H_ #define _BCACHE_REQUEST_H_ -#include <linux/cgroup.h> - struct data_insert_op { struct closure cl; struct cache_set *c; struct bio *bio; + struct workqueue_struct *wq; unsigned inode; uint16_t write_point; @@ -41,20 +40,4 @@ void bch_flash_dev_request_init(struct bcache_device *d); extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache; -struct bch_cgroup { -#ifdef CONFIG_CGROUP_BCACHE - struct cgroup_subsys_state css; -#endif - /* - * We subtract one from the index into bch_cache_modes[], so that - * default == -1; this makes it so the rest match up with d->cache_mode, - * and we use d->cache_mode if cgrp->cache_mode < 0 - */ - short cache_mode; - bool verify; - struct cache_stat_collector stats; -}; - -struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio); - #endif /* _BCACHE_REQUEST_H_ */ diff --git a/drivers/md/bcache/stats.c b/drivers/md/bcache/stats.c index 84d0782f702e..0ca072c20d0d 100644 --- a/drivers/md/bcache/stats.c +++ b/drivers/md/bcache/stats.c @@ -201,9 +201,6 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, struct cached_dev *dc = container_of(d, struct cached_dev, disk); 
mark_cache_stats(&dc->accounting.collector, hit, bypass); mark_cache_stats(&c->accounting.collector, hit, bypass); -#ifdef CONFIG_CGROUP_BCACHE - mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass); -#endif } void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d) diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 24a3a1546caa..926ded8ccbf5 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -541,9 +541,6 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) closure_sync(cl); } -#define buckets_free(c) "free %zu, free_inc %zu, unused %zu", \ - fifo_used(&c->free), fifo_used(&c->free_inc), fifo_used(&c->unused) - void bch_prio_write(struct cache *ca) { int i; @@ -554,10 +551,6 @@ void bch_prio_write(struct cache *ca) lockdep_assert_held(&ca->set->bucket_lock); - for (b = ca->buckets; - b < ca->buckets + ca->sb.nbuckets; b++) - b->disk_gen = b->gen; - ca->disk_buckets->seq++; atomic_long_add(ca->sb.bucket_size * prio_buckets(ca), @@ -601,14 +594,17 @@ void bch_prio_write(struct cache *ca) mutex_lock(&ca->set->bucket_lock); - ca->need_save_prio = 0; - /* * Don't want the old priorities to get garbage collected until after we * finish writing the new ones, and they're journalled */ - for (i = 0; i < prio_buckets(ca); i++) + for (i = 0; i < prio_buckets(ca); i++) { + if (ca->prio_last_buckets[i]) + __bch_bucket_free(ca, + &ca->buckets[ca->prio_last_buckets[i]]); + ca->prio_last_buckets[i] = ca->prio_buckets[i]; + } } static void prio_read(struct cache *ca, uint64_t bucket) @@ -639,7 +635,7 @@ static void prio_read(struct cache *ca, uint64_t bucket) } b->prio = le16_to_cpu(d->prio); - b->gen = b->disk_gen = b->last_gc = b->gc_gen = d->gen; + b->gen = b->last_gc = d->gen; } } @@ -843,6 +839,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, q->limits.max_segment_size = UINT_MAX; q->limits.max_segments = BIO_MAX_PAGES; q->limits.max_discard_sectors = UINT_MAX; + q->limits.discard_granularity = 512; q->limits.io_min = block_size; q->limits.logical_block_size = block_size; q->limits.physical_block_size = block_size; @@ -1355,6 +1352,8 @@ static void cache_set_free(struct closure *cl) bch_bset_sort_state_free(&c->sort); free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c))); + if (c->moving_gc_wq) + destroy_workqueue(c->moving_gc_wq); if (c->bio_split) bioset_free(c->bio_split); if (c->fill_iter) @@ -1395,14 +1394,21 @@ static void cache_set_flush(struct closure *cl) list_add(&c->root->list, &c->btree_cache); /* Should skip this if we're unregistering because of an error */ - list_for_each_entry(b, &c->btree_cache, list) + list_for_each_entry(b, &c->btree_cache, list) { + mutex_lock(&b->write_lock); if (btree_node_dirty(b)) - bch_btree_node_write(b, NULL); + __bch_btree_node_write(b, NULL); + mutex_unlock(&b->write_lock); + } for_each_cache(ca, c, i) if (ca->alloc_thread) kthread_stop(ca->alloc_thread); + cancel_delayed_work_sync(&c->journal.work); + /* flush last journal entry if needed */ + c->journal.work.work.func(&c->journal.work.work); + closure_return(cl); } @@ -1485,14 +1491,13 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) sema_init(&c->sb_write_mutex, 1); mutex_init(&c->bucket_lock); - init_waitqueue_head(&c->try_wait); + init_waitqueue_head(&c->btree_cache_wait); init_waitqueue_head(&c->bucket_wait); sema_init(&c->uuid_write_mutex, 1); spin_lock_init(&c->btree_gc_time.lock); spin_lock_init(&c->btree_split_time.lock); 
spin_lock_init(&c->btree_read_time.lock); - spin_lock_init(&c->try_harder_time.lock); bch_moving_init_cache_set(c); @@ -1517,6 +1522,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) || !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) || + !(c->moving_gc_wq = create_workqueue("bcache_gc")) || bch_journal_alloc(c) || bch_btree_cache_alloc(c) || bch_open_buckets_alloc(c) || @@ -1580,7 +1586,7 @@ static void run_cache_set(struct cache_set *c) goto err; err = "error reading btree root"; - c->root = bch_btree_node_get(c, k, j->btree_level, true); + c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true); if (IS_ERR_OR_NULL(c->root)) goto err; @@ -1596,7 +1602,7 @@ static void run_cache_set(struct cache_set *c) goto err; bch_journal_mark(c, &journal); - bch_btree_gc_finish(c); + bch_initial_gc_finish(c); pr_debug("btree_check() done"); /* @@ -1638,7 +1644,7 @@ static void run_cache_set(struct cache_set *c) ca->sb.d[j] = ca->sb.first_bucket + j; } - bch_btree_gc_finish(c); + bch_initial_gc_finish(c); err = "error starting allocator thread"; for_each_cache(ca, c, i) @@ -1655,12 +1661,14 @@ static void run_cache_set(struct cache_set *c) goto err; err = "cannot allocate new btree root"; - c->root = bch_btree_node_alloc(c, 0, true); + c->root = bch_btree_node_alloc(c, NULL, 0); if (IS_ERR_OR_NULL(c->root)) goto err; + mutex_lock(&c->root->write_lock); bkey_copy_key(&c->root->key, &MAX_KEY); bch_btree_node_write(c->root, &cl); + mutex_unlock(&c->root->write_lock); bch_btree_set_root(c->root); rw_unlock(true, c->root); @@ -1782,7 +1790,6 @@ void bch_cache_release(struct kobject *kobj) vfree(ca->buckets); free_heap(&ca->heap); - free_fifo(&ca->unused); free_fifo(&ca->free_inc); for (i = 0; i < RESERVE_NR; i++) @@ -1819,7 +1826,6 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca) !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) || !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) || !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) || - !init_fifo(&ca->unused, free << 2, GFP_KERNEL) || !init_heap(&ca->heap, free << 3, GFP_KERNEL) || !(ca->buckets = vzalloc(sizeof(struct bucket) * ca->sb.nbuckets)) || @@ -1834,13 +1840,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca) for_each_bucket(b, ca) atomic_set(&b->pin, 0); - if (bch_cache_allocator_init(ca)) - goto err; - return 0; -err: - kobject_put(&ca->kobj); - return -ENOMEM; } static void register_cache(struct cache_sb *sb, struct page *sb_page, @@ -1869,7 +1869,10 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page, if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) goto err; + mutex_lock(&bch_register_lock); err = register_cache_set(ca); + mutex_unlock(&bch_register_lock); + if (err) goto err; @@ -1931,8 +1934,6 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (!try_module_get(THIS_MODULE)) return -EBUSY; - mutex_lock(&bch_register_lock); - if (!(path = kstrndup(buffer, size, GFP_KERNEL)) || !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL))) goto err; @@ -1965,7 +1966,9 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (!dc) goto err_close; + mutex_lock(&bch_register_lock); register_bdev(sb, sb_page, bdev, dc); + mutex_unlock(&bch_register_lock); } else { struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); if (!ca) @@ -1978,7 +1981,6 @@ out: 
put_page(sb_page); kfree(sb); kfree(path); - mutex_unlock(&bch_register_lock); module_put(THIS_MODULE); return ret; @@ -2057,7 +2059,6 @@ static void bcache_exit(void) { bch_debug_exit(); bch_request_exit(); - bch_btree_exit(); if (bcache_kobj) kobject_put(bcache_kobj); if (bcache_wq) @@ -2087,7 +2088,6 @@ static int __init bcache_init(void) if (!(bcache_wq = create_workqueue("bcache")) || !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || sysfs_create_files(bcache_kobj, files) || - bch_btree_init() || bch_request_init() || bch_debug_init(bcache_kobj)) goto err; diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index d8458d477a12..b3ff57d61dde 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -54,7 +54,6 @@ sysfs_time_stats_attribute(btree_gc, sec, ms); sysfs_time_stats_attribute(btree_split, sec, us); sysfs_time_stats_attribute(btree_sort, ms, us); sysfs_time_stats_attribute(btree_read, ms, us); -sysfs_time_stats_attribute(try_harder, ms, us); read_attribute(btree_nodes); read_attribute(btree_used_percent); @@ -406,7 +405,7 @@ struct bset_stats_op { struct bset_stats stats; }; -static int btree_bset_stats(struct btree_op *b_op, struct btree *b) +static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b) { struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op); @@ -424,7 +423,7 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf) memset(&op, 0, sizeof(op)); bch_btree_op_init(&op.op, -1); - ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, btree_bset_stats); + ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats); if (ret < 0) return ret; @@ -442,81 +441,81 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf) op.stats.floats, op.stats.failed); } -SHOW(__bch_cache_set) +static unsigned bch_root_usage(struct cache_set *c) { - unsigned root_usage(struct cache_set *c) - { - unsigned bytes = 0; - struct bkey *k; - struct btree *b; - struct btree_iter iter; + unsigned bytes = 0; + struct bkey *k; + struct btree *b; + struct btree_iter iter; - goto lock_root; + goto lock_root; - do { - rw_unlock(false, b); + do { + rw_unlock(false, b); lock_root: - b = c->root; - rw_lock(false, b, b->level); - } while (b != c->root); + b = c->root; + rw_lock(false, b, b->level); + } while (b != c->root); - for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) - bytes += bkey_bytes(k); + for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) + bytes += bkey_bytes(k); - rw_unlock(false, b); + rw_unlock(false, b); - return (bytes * 100) / btree_bytes(c); - } + return (bytes * 100) / btree_bytes(c); +} - size_t cache_size(struct cache_set *c) - { - size_t ret = 0; - struct btree *b; +static size_t bch_cache_size(struct cache_set *c) +{ + size_t ret = 0; + struct btree *b; - mutex_lock(&c->bucket_lock); - list_for_each_entry(b, &c->btree_cache, list) - ret += 1 << (b->keys.page_order + PAGE_SHIFT); + mutex_lock(&c->bucket_lock); + list_for_each_entry(b, &c->btree_cache, list) + ret += 1 << (b->keys.page_order + PAGE_SHIFT); - mutex_unlock(&c->bucket_lock); - return ret; - } - - unsigned cache_max_chain(struct cache_set *c) - { - unsigned ret = 0; - struct hlist_head *h; + mutex_unlock(&c->bucket_lock); + return ret; +} - mutex_lock(&c->bucket_lock); +static unsigned bch_cache_max_chain(struct cache_set *c) +{ + unsigned ret = 0; + struct hlist_head *h; - for (h = c->bucket_hash; - h < c->bucket_hash + (1 << BUCKET_HASH_BITS); - h++) { - unsigned i = 0; - struct hlist_node *p; + 
mutex_lock(&c->bucket_lock); - hlist_for_each(p, h) - i++; + for (h = c->bucket_hash; + h < c->bucket_hash + (1 << BUCKET_HASH_BITS); + h++) { + unsigned i = 0; + struct hlist_node *p; - ret = max(ret, i); - } + hlist_for_each(p, h) + i++; - mutex_unlock(&c->bucket_lock); - return ret; + ret = max(ret, i); } - unsigned btree_used(struct cache_set *c) - { - return div64_u64(c->gc_stats.key_bytes * 100, - (c->gc_stats.nodes ?: 1) * btree_bytes(c)); - } + mutex_unlock(&c->bucket_lock); + return ret; +} - unsigned average_key_size(struct cache_set *c) - { - return c->gc_stats.nkeys - ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys) - : 0; - } +static unsigned bch_btree_used(struct cache_set *c) +{ + return div64_u64(c->gc_stats.key_bytes * 100, + (c->gc_stats.nodes ?: 1) * btree_bytes(c)); +} +static unsigned bch_average_key_size(struct cache_set *c) +{ + return c->gc_stats.nkeys + ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys) + : 0; +} + +SHOW(__bch_cache_set) +{ struct cache_set *c = container_of(kobj, struct cache_set, kobj); sysfs_print(synchronous, CACHE_SYNC(&c->sb)); @@ -524,21 +523,20 @@ lock_root: sysfs_hprint(bucket_size, bucket_bytes(c)); sysfs_hprint(block_size, block_bytes(c)); sysfs_print(tree_depth, c->root->level); - sysfs_print(root_usage_percent, root_usage(c)); + sysfs_print(root_usage_percent, bch_root_usage(c)); - sysfs_hprint(btree_cache_size, cache_size(c)); - sysfs_print(btree_cache_max_chain, cache_max_chain(c)); + sysfs_hprint(btree_cache_size, bch_cache_size(c)); + sysfs_print(btree_cache_max_chain, bch_cache_max_chain(c)); sysfs_print(cache_available_percent, 100 - c->gc_stats.in_use); sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms); sysfs_print_time_stats(&c->btree_split_time, btree_split, sec, us); sysfs_print_time_stats(&c->sort.time, btree_sort, ms, us); sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us); - sysfs_print_time_stats(&c->try_harder_time, try_harder, ms, us); - sysfs_print(btree_used_percent, btree_used(c)); + sysfs_print(btree_used_percent, bch_btree_used(c)); sysfs_print(btree_nodes, c->gc_stats.nodes); - sysfs_hprint(average_key_size, average_key_size(c)); + sysfs_hprint(average_key_size, bch_average_key_size(c)); sysfs_print(cache_read_races, atomic_long_read(&c->cache_read_races)); @@ -709,7 +707,6 @@ static struct attribute *bch_cache_set_internal_files[] = { sysfs_time_stats_attribute_list(btree_split, sec, us) sysfs_time_stats_attribute_list(btree_sort, ms, us) sysfs_time_stats_attribute_list(btree_read, ms, us) - sysfs_time_stats_attribute_list(try_harder, ms, us) &sysfs_btree_nodes, &sysfs_btree_used_percent, @@ -761,7 +758,9 @@ SHOW(__bch_cache) int cmp(const void *l, const void *r) { return *((uint16_t *) r) - *((uint16_t *) l); } - size_t n = ca->sb.nbuckets, i, unused, btree; + struct bucket *b; + size_t n = ca->sb.nbuckets, i; + size_t unused = 0, available = 0, dirty = 0, meta = 0; uint64_t sum = 0; /* Compute 31 quantiles */ uint16_t q[31], *p, *cached; @@ -772,6 +771,17 @@ SHOW(__bch_cache) return -ENOMEM; mutex_lock(&ca->set->bucket_lock); + for_each_bucket(b, ca) { + if (!GC_SECTORS_USED(b)) + unused++; + if (GC_MARK(b) == GC_MARK_RECLAIMABLE) + available++; + if (GC_MARK(b) == GC_MARK_DIRTY) + dirty++; + if (GC_MARK(b) == GC_MARK_METADATA) + meta++; + } + for (i = ca->sb.first_bucket; i < n; i++) p[i] = ca->buckets[i].prio; mutex_unlock(&ca->set->bucket_lock); @@ -786,10 +796,7 @@ SHOW(__bch_cache) while (cached < p + n && *cached == BTREE_PRIO) - cached++; - - btree = cached - p; - n -= btree; + 
cached++, n--; for (i = 0; i < n; i++) sum += INITIAL_PRIO - cached[i]; @@ -805,12 +812,16 @@ SHOW(__bch_cache) ret = scnprintf(buf, PAGE_SIZE, "Unused: %zu%%\n" + "Clean: %zu%%\n" + "Dirty: %zu%%\n" "Metadata: %zu%%\n" "Average: %llu\n" "Sectors per Q: %zu\n" "Quantiles: [", unused * 100 / (size_t) ca->sb.nbuckets, - btree * 100 / (size_t) ca->sb.nbuckets, sum, + available * 100 / (size_t) ca->sb.nbuckets, + dirty * 100 / (size_t) ca->sb.nbuckets, + meta * 100 / (size_t) ca->sb.nbuckets, sum, n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1)); for (i = 0; i < ARRAY_SIZE(q); i++) diff --git a/drivers/md/bcache/trace.c b/drivers/md/bcache/trace.c index adbc3df17a80..b7820b0d2621 100644 --- a/drivers/md/bcache/trace.c +++ b/drivers/md/bcache/trace.c @@ -45,7 +45,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_split); EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_node_compact); EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_btree_set_root); -EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_invalidate); +EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_invalidate); EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_alloc_fail); EXPORT_TRACEPOINT_SYMBOL_GPL(bcache_writeback); diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 4195a01b1535..9a8e66ae04f5 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1988,7 +1988,6 @@ location_store(struct mddev *mddev, const char *buf, size_t len) if (mddev->bitmap_info.file) { struct file *f = mddev->bitmap_info.file; mddev->bitmap_info.file = NULL; - restore_bitmap_write_access(f); fput(f); } } else { diff --git a/drivers/md/dm-cache-block-types.h b/drivers/md/dm-cache-block-types.h index bed4ad4e1b7c..aac0e2df06be 100644 --- a/drivers/md/dm-cache-block-types.h +++ b/drivers/md/dm-cache-block-types.h @@ -19,7 +19,6 @@ typedef dm_block_t __bitwise__ dm_oblock_t; typedef uint32_t __bitwise__ dm_cblock_t; -typedef dm_block_t __bitwise__ dm_dblock_t; static inline dm_oblock_t to_oblock(dm_block_t b) { @@ -41,14 +40,4 @@ static inline uint32_t from_cblock(dm_cblock_t b) return (__force uint32_t) b; } -static inline dm_dblock_t to_dblock(dm_block_t b) -{ - return (__force dm_dblock_t) b; -} - -static inline dm_block_t from_dblock(dm_dblock_t b) -{ - return (__force dm_block_t) b; -} - #endif /* DM_CACHE_BLOCK_TYPES_H */ diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 9ef0752e8a08..4ead4ba60656 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -109,7 +109,7 @@ struct dm_cache_metadata { dm_block_t discard_root; sector_t discard_block_size; - dm_dblock_t discard_nr_blocks; + dm_oblock_t discard_nr_blocks; sector_t data_block_size; dm_cblock_t cache_blocks; @@ -120,6 +120,12 @@ struct dm_cache_metadata { unsigned policy_version[CACHE_POLICY_VERSION_SIZE]; size_t policy_hint_size; struct dm_cache_statistics stats; + + /* + * Reading the space map root can fail, so we read it into this + * buffer before the superblock is locked and updated. 
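+ * + * __save_sm_root() below reads the root into this buffer, and + * __copy_sm_root() copies it into the superblock when the superblock + * is written.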
+ */ + __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE]; }; /*------------------------------------------------------------------- @@ -260,11 +266,31 @@ static void __setup_mapping_info(struct dm_cache_metadata *cmd) } } +static int __save_sm_root(struct dm_cache_metadata *cmd) +{ + int r; + size_t metadata_len; + + r = dm_sm_root_size(cmd->metadata_sm, &metadata_len); + if (r < 0) + return r; + + return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root, + metadata_len); +} + +static void __copy_sm_root(struct dm_cache_metadata *cmd, + struct cache_disk_superblock *disk_super) +{ + memcpy(&disk_super->metadata_space_map_root, + &cmd->metadata_space_map_root, + sizeof(cmd->metadata_space_map_root)); +} + static int __write_initial_superblock(struct dm_cache_metadata *cmd) { int r; struct dm_block *sblock; - size_t metadata_len; struct cache_disk_superblock *disk_super; sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT; @@ -272,12 +298,16 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS) bdev_size = DM_CACHE_METADATA_MAX_SECTORS; - r = dm_sm_root_size(cmd->metadata_sm, &metadata_len); + r = dm_tm_pre_commit(cmd->tm); if (r < 0) return r; - r = dm_tm_pre_commit(cmd->tm); - if (r < 0) + /* + * dm_sm_copy_root() can fail. So we need to do it before we start + * updating the superblock. + */ + r = __save_sm_root(cmd); + if (r) return r; r = superblock_lock_zero(cmd, &sblock); @@ -293,16 +323,13 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version)); disk_super->policy_hint_size = 0; - r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, - metadata_len); - if (r < 0) - goto bad_locked; + __copy_sm_root(cmd, disk_super); disk_super->mapping_root = cpu_to_le64(cmd->root); disk_super->hint_root = cpu_to_le64(cmd->hint_root); disk_super->discard_root = cpu_to_le64(cmd->discard_root); disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size); - disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); + disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks)); disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); disk_super->data_block_size = cpu_to_le32(cmd->data_block_size); disk_super->cache_blocks = cpu_to_le32(0); @@ -313,10 +340,6 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) disk_super->write_misses = cpu_to_le32(0); return dm_tm_commit(cmd->tm, sblock); - -bad_locked: - dm_bm_unlock(sblock); - return r; } static int __format_metadata(struct dm_cache_metadata *cmd) @@ -496,7 +519,7 @@ static void read_superblock_fields(struct dm_cache_metadata *cmd, cmd->hint_root = le64_to_cpu(disk_super->hint_root); cmd->discard_root = le64_to_cpu(disk_super->discard_root); cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size); - cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks)); + cmd->discard_nr_blocks = to_oblock(le64_to_cpu(disk_super->discard_nr_blocks)); cmd->data_block_size = le32_to_cpu(disk_super->data_block_size); cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks)); strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name)); @@ -530,8 +553,9 @@ static int __begin_transaction_flags(struct dm_cache_metadata *cmd, disk_super = dm_block_data(sblock); update_flags(disk_super, mutator); 
read_superblock_fields(cmd, disk_super); + dm_bm_unlock(sblock); - return dm_bm_flush_and_unlock(cmd->bm, sblock); + return dm_bm_flush(cmd->bm); } static int __begin_transaction(struct dm_cache_metadata *cmd) @@ -559,7 +583,6 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, flags_mutator mutator) { int r; - size_t metadata_len; struct cache_disk_superblock *disk_super; struct dm_block *sblock; @@ -577,8 +600,8 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, if (r < 0) return r; - r = dm_sm_root_size(cmd->metadata_sm, &metadata_len); - if (r < 0) + r = __save_sm_root(cmd); + if (r) return r; r = superblock_lock(cmd, &sblock); @@ -594,7 +617,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, disk_super->hint_root = cpu_to_le64(cmd->hint_root); disk_super->discard_root = cpu_to_le64(cmd->discard_root); disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size); - disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); + disk_super->discard_nr_blocks = cpu_to_le64(from_oblock(cmd->discard_nr_blocks)); disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks)); strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name)); disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]); @@ -605,13 +628,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses); disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits); disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses); - - r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, - metadata_len); - if (r < 0) { - dm_bm_unlock(sblock); - return r; - } + __copy_sm_root(cmd, disk_super); return dm_tm_commit(cmd->tm, sblock); } @@ -771,15 +788,15 @@ out: int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd, sector_t discard_block_size, - dm_dblock_t new_nr_entries) + dm_oblock_t new_nr_entries) { int r; down_write(&cmd->root_lock); r = dm_bitset_resize(&cmd->discard_info, cmd->discard_root, - from_dblock(cmd->discard_nr_blocks), - from_dblock(new_nr_entries), + from_oblock(cmd->discard_nr_blocks), + from_oblock(new_nr_entries), false, &cmd->discard_root); if (!r) { cmd->discard_block_size = discard_block_size; @@ -792,28 +809,28 @@ int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd, return r; } -static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b) +static int __set_discard(struct dm_cache_metadata *cmd, dm_oblock_t b) { return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root, - from_dblock(b), &cmd->discard_root); + from_oblock(b), &cmd->discard_root); } -static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b) +static int __clear_discard(struct dm_cache_metadata *cmd, dm_oblock_t b) { return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root, - from_dblock(b), &cmd->discard_root); + from_oblock(b), &cmd->discard_root); } -static int __is_discarded(struct dm_cache_metadata *cmd, dm_dblock_t b, +static int __is_discarded(struct dm_cache_metadata *cmd, dm_oblock_t b, bool *is_discarded) { return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root, - from_dblock(b), &cmd->discard_root, + from_oblock(b), &cmd->discard_root, is_discarded); } static int __discard(struct dm_cache_metadata *cmd, - dm_dblock_t dblock, bool discard) + dm_oblock_t dblock, bool discard) { int r; @@ -826,7 +843,7 @@ static int __discard(struct dm_cache_metadata *cmd, } 
int dm_cache_set_discard(struct dm_cache_metadata *cmd, - dm_dblock_t dblock, bool discard) + dm_oblock_t dblock, bool discard) { int r; @@ -844,8 +861,8 @@ static int __load_discards(struct dm_cache_metadata *cmd, dm_block_t b; bool discard; - for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) { - dm_dblock_t dblock = to_dblock(b); + for (b = 0; b < from_oblock(cmd->discard_nr_blocks); b++) { + dm_oblock_t dblock = to_oblock(b); if (cmd->clean_when_opened) { r = __is_discarded(cmd, dblock, &discard); @@ -1228,22 +1245,12 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po return 0; } -int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) +static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, uint32_t hint) { + struct dm_cache_metadata *cmd = context; + __le32 value = cpu_to_le32(hint); int r; - down_write(&cmd->root_lock); - r = begin_hints(cmd, policy); - up_write(&cmd->root_lock); - - return r; -} - -static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock, - uint32_t hint) -{ - int r; - __le32 value = cpu_to_le32(hint); __dm_bless_for_disk(&value); r = dm_array_set_value(&cmd->hint_info, cmd->hint_root, @@ -1253,16 +1260,25 @@ static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock, return r; } -int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock, - uint32_t hint) +static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) { int r; - if (!hints_array_initialized(cmd)) - return 0; + r = begin_hints(cmd, policy); + if (r) { + DMERR("begin_hints failed"); + return r; + } + + return policy_walk_mappings(policy, save_hint, cmd); +} + +int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) +{ + int r; down_write(&cmd->root_lock); - r = save_hint(cmd, cblock, hint); + r = write_hints(cmd, policy); up_write(&cmd->root_lock); return r; diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h index cd906f14f98d..cd70a78623a3 100644 --- a/drivers/md/dm-cache-metadata.h +++ b/drivers/md/dm-cache-metadata.h @@ -72,14 +72,14 @@ dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd); int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd, sector_t discard_block_size, - dm_dblock_t new_nr_entries); + dm_oblock_t new_nr_entries); typedef int (*load_discard_fn)(void *context, sector_t discard_block_size, - dm_dblock_t dblock, bool discarded); + dm_oblock_t dblock, bool discarded); int dm_cache_load_discards(struct dm_cache_metadata *cmd, load_discard_fn fn, void *context); -int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard); +int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_oblock_t dblock, bool discard); int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock); int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock); @@ -128,14 +128,7 @@ void dm_cache_dump(struct dm_cache_metadata *cmd); * rather than querying the policy for each cblock, we let it walk its data * structures and fill in the hints in whatever order it wishes. */ - -int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p); - -/* - * requests hints for every cblock and stores in the metadata device. 
- */ -int dm_cache_save_hint(struct dm_cache_metadata *cmd, - dm_cblock_t cblock, uint32_t hint); +int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p); /* * Query method. Are all the blocks in the cache clean? diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 074b9c8e4cf0..5f054c44b485 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -237,9 +237,8 @@ struct cache { /* * origin_blocks entries, discarded if set. */ - dm_dblock_t discard_nr_blocks; + dm_oblock_t discard_nr_blocks; unsigned long *discard_bitset; - uint32_t discard_block_size; /* a power of 2 times sectors per block */ /* * Rather than reconstructing the table line for the status we just @@ -526,48 +525,33 @@ static dm_block_t block_div(dm_block_t b, uint32_t n) return b; } -static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) -{ - uint32_t discard_blocks = cache->discard_block_size; - dm_block_t b = from_oblock(oblock); - - if (!block_size_is_power_of_two(cache)) - discard_blocks = discard_blocks / cache->sectors_per_block; - else - discard_blocks >>= cache->sectors_per_block_shift; - - b = block_div(b, discard_blocks); - - return to_dblock(b); -} - -static void set_discard(struct cache *cache, dm_dblock_t b) +static void set_discard(struct cache *cache, dm_oblock_t b) { unsigned long flags; atomic_inc(&cache->stats.discard_count); spin_lock_irqsave(&cache->lock, flags); - set_bit(from_dblock(b), cache->discard_bitset); + set_bit(from_oblock(b), cache->discard_bitset); spin_unlock_irqrestore(&cache->lock, flags); } -static void clear_discard(struct cache *cache, dm_dblock_t b) +static void clear_discard(struct cache *cache, dm_oblock_t b) { unsigned long flags; spin_lock_irqsave(&cache->lock, flags); - clear_bit(from_dblock(b), cache->discard_bitset); + clear_bit(from_oblock(b), cache->discard_bitset); spin_unlock_irqrestore(&cache->lock, flags); } -static bool is_discarded(struct cache *cache, dm_dblock_t b) +static bool is_discarded(struct cache *cache, dm_oblock_t b) { int r; unsigned long flags; spin_lock_irqsave(&cache->lock, flags); - r = test_bit(from_dblock(b), cache->discard_bitset); + r = test_bit(from_oblock(b), cache->discard_bitset); spin_unlock_irqrestore(&cache->lock, flags); return r; @@ -579,8 +563,7 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b) unsigned long flags; spin_lock_irqsave(&cache->lock, flags); - r = test_bit(from_dblock(oblock_to_dblock(cache, b)), - cache->discard_bitset); + r = test_bit(from_oblock(b), cache->discard_bitset); spin_unlock_irqrestore(&cache->lock, flags); return r; @@ -705,7 +688,7 @@ static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, check_if_tick_bio_needed(cache, bio); remap_to_origin(cache, bio); if (bio_data_dir(bio) == WRITE) - clear_discard(cache, oblock_to_dblock(cache, oblock)); + clear_discard(cache, oblock); } static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, @@ -715,7 +698,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, remap_to_cache(cache, bio, cblock); if (bio_data_dir(bio) == WRITE) { set_dirty(cache, oblock, cblock); - clear_discard(cache, oblock_to_dblock(cache, oblock)); + clear_discard(cache, oblock); } } @@ -1288,14 +1271,14 @@ static void process_flush_bio(struct cache *cache, struct bio *bio) static void process_discard_bio(struct cache *cache, struct bio *bio) { dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector, - 
cache->discard_block_size); + cache->sectors_per_block); dm_block_t end_block = bio_end_sector(bio); dm_block_t b; - end_block = block_div(end_block, cache->discard_block_size); + end_block = block_div(end_block, cache->sectors_per_block); for (b = start_block; b < end_block; b++) - set_discard(cache, to_dblock(b)); + set_discard(cache, to_oblock(b)); bio_endio(bio, 0); } @@ -2171,35 +2154,6 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca, return 0; } -/* - * We want the discard block size to be a power of two, at least the size - * of the cache block size, and have no more than 2^14 discard blocks - * across the origin. - */ -#define MAX_DISCARD_BLOCKS (1 << 14) - -static bool too_many_discard_blocks(sector_t discard_block_size, - sector_t origin_size) -{ - (void) sector_div(origin_size, discard_block_size); - - return origin_size > MAX_DISCARD_BLOCKS; -} - -static sector_t calculate_discard_block_size(sector_t cache_block_size, - sector_t origin_size) -{ - sector_t discard_block_size; - - discard_block_size = roundup_pow_of_two(cache_block_size); - - if (origin_size) - while (too_many_discard_blocks(discard_block_size, origin_size)) - discard_block_size *= 2; - - return discard_block_size; -} - #define DEFAULT_MIGRATION_THRESHOLD 2048 static int cache_create(struct cache_args *ca, struct cache **result) @@ -2224,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result) ti->num_discard_bios = 1; ti->discards_supported = true; ti->discard_zeroes_data_unsupported = true; + /* Discard bios must be split on a block boundary */ + ti->split_discard_bios = true; cache->features = ca->features; ti->per_bio_data_size = get_per_bio_data_size(cache); @@ -2321,16 +2277,13 @@ static int cache_create(struct cache_args *ca, struct cache **result) } clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); - cache->discard_block_size = - calculate_discard_block_size(cache->sectors_per_block, - cache->origin_sectors); - cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks); - cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); + cache->discard_nr_blocks = cache->origin_blocks; + cache->discard_bitset = alloc_bitset(from_oblock(cache->discard_nr_blocks)); if (!cache->discard_bitset) { *error = "could not allocate discard bitset"; goto bad; } - clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); + clear_bitset(cache->discard_bitset, from_oblock(cache->discard_nr_blocks)); cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); if (IS_ERR(cache->copier)) { @@ -2537,6 +2490,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) } else { inc_hit_counter(cache, bio); + pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && !is_dirty(cache, lookup_result.cblock)) @@ -2614,16 +2568,16 @@ static int write_discard_bitset(struct cache *cache) { unsigned i, r; - r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, - cache->discard_nr_blocks); + r = dm_cache_discard_bitset_resize(cache->cmd, cache->sectors_per_block, + cache->origin_blocks); if (r) { DMERR("could not resize on-disk discard bitset"); return r; } - for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { - r = dm_cache_set_discard(cache->cmd, to_dblock(i), - is_discarded(cache, to_dblock(i))); + for (i = 0; i < from_oblock(cache->discard_nr_blocks); i++) { + r = dm_cache_set_discard(cache->cmd, 
to_oblock(i), + is_discarded(cache, to_oblock(i))); if (r) return r; } @@ -2631,30 +2585,6 @@ static int write_discard_bitset(struct cache *cache) return 0; } -static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock, - uint32_t hint) -{ - struct cache *cache = context; - return dm_cache_save_hint(cache->cmd, cblock, hint); -} - -static int write_hints(struct cache *cache) -{ - int r; - - r = dm_cache_begin_hints(cache->cmd, cache->policy); - if (r) { - DMERR("dm_cache_begin_hints failed"); - return r; - } - - r = policy_walk_mappings(cache->policy, save_hint, cache); - if (r) - DMERR("policy_walk_mappings failed"); - - return r; -} - /* * returns true on success */ @@ -2672,7 +2602,7 @@ static bool sync_metadata(struct cache *cache) save_stats(cache); - r3 = write_hints(cache); + r3 = dm_cache_write_hints(cache->cmd, cache->policy); if (r3) DMERR("could not write hints"); @@ -2720,16 +2650,14 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock, } static int load_discard(void *context, sector_t discard_block_size, - dm_dblock_t dblock, bool discard) + dm_oblock_t oblock, bool discard) { struct cache *cache = context; - /* FIXME: handle mis-matched block size */ - if (discard) - set_discard(cache, dblock); + set_discard(cache, oblock); else - clear_discard(cache, dblock); + clear_discard(cache, oblock); return 0; } @@ -3120,8 +3048,8 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits) /* * FIXME: these limits may be incompatible with the cache device */ - limits->max_discard_sectors = cache->discard_block_size * 1024; - limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; + limits->max_discard_sectors = cache->sectors_per_block; + limits->discard_granularity = cache->sectors_per_block << SECTOR_SHIFT; } static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) @@ -3145,7 +3073,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type cache_target = { .name = "cache", - .version = {1, 3, 0}, + .version = {1, 4, 0}, .module = THIS_MODULE, .ctr = cache_ctr, .dtr = cache_dtr, diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 784695d22fde..53b213226c01 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -19,7 +19,6 @@ #include <linux/crypto.h> #include <linux/workqueue.h> #include <linux/backing-dev.h> -#include <linux/percpu.h> #include <linux/atomic.h> #include <linux/scatterlist.h> #include <asm/page.h> @@ -43,6 +42,7 @@ struct convert_context { struct bvec_iter iter_out; sector_t cc_sector; atomic_t cc_pending; + struct ablkcipher_request *req; }; /* @@ -111,15 +111,7 @@ struct iv_tcw_private { enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; /* - * Duplicated per-CPU state for cipher. - */ -struct crypt_cpu { - struct ablkcipher_request *req; -}; - -/* - * The fields in here must be read only after initialization, - * changing state should be in crypt_cpu. + * The fields in here must be read only after initialization. */ struct crypt_config { struct dm_dev *dev; @@ -150,12 +142,6 @@ struct crypt_config { sector_t iv_offset; unsigned int iv_size; - /* - * Duplicated per cpu state. Access through - * per_cpu_ptr() only. 
- */ - struct crypt_cpu __percpu *cpu; - /* ESSIV: struct crypto_cipher *essiv_tfm */ void *iv_private; struct crypto_ablkcipher **tfms; @@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *); static void kcryptd_queue_crypt(struct dm_crypt_io *io); static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); -static struct crypt_cpu *this_crypt_config(struct crypt_config *cc) -{ - return this_cpu_ptr(cc->cpu); -} - /* * Use this to access cipher attributes that are the same for each CPU. */ @@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, static void crypt_alloc_req(struct crypt_config *cc, struct convert_context *ctx) { - struct crypt_cpu *this_cc = this_crypt_config(cc); unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); - if (!this_cc->req) - this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); + if (!ctx->req) + ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO); - ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]); - ablkcipher_request_set_callback(this_cc->req, + ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]); + ablkcipher_request_set_callback(ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - kcryptd_async_done, dmreq_of_req(cc, this_cc->req)); + kcryptd_async_done, dmreq_of_req(cc, ctx->req)); } /* @@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc, static int crypt_convert(struct crypt_config *cc, struct convert_context *ctx) { - struct crypt_cpu *this_cc = this_crypt_config(cc); int r; atomic_set(&ctx->cc_pending, 1); @@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc, atomic_inc(&ctx->cc_pending); - r = crypt_convert_block(cc, ctx, this_cc->req); + r = crypt_convert_block(cc, ctx, ctx->req); switch (r) { /* async */ @@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc, reinit_completion(&ctx->restart); /* fall through*/ case -EINPROGRESS: - this_cc->req = NULL; + ctx->req = NULL; ctx->cc_sector++; continue; @@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc, io->sector = sector; io->error = 0; io->base_io = NULL; + io->ctx.req = NULL; atomic_set(&io->io_pending, 0); return io; @@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io) if (!atomic_dec_and_test(&io->io_pending)) return; + if (io->ctx.req) + mempool_free(io->ctx.req, cc->req_pool); mempool_free(io, cc->io_pool); if (likely(!base_io)) @@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc) static void crypt_dtr(struct dm_target *ti) { struct crypt_config *cc = ti->private; - struct crypt_cpu *cpu_cc; - int cpu; ti->private = NULL; @@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti) if (cc->crypt_queue) destroy_workqueue(cc->crypt_queue); - if (cc->cpu) - for_each_possible_cpu(cpu) { - cpu_cc = per_cpu_ptr(cc->cpu, cpu); - if (cpu_cc->req) - mempool_free(cpu_cc->req, cc->req_pool); - } - crypt_free_tfms(cc); if (cc->bs) @@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti) if (cc->dev) dm_put_device(ti, cc->dev); - if (cc->cpu) - free_percpu(cc->cpu); - kzfree(cc->cipher); kzfree(cc->cipher_string); @@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti, if (tmp) DMWARN("Ignoring unexpected additional cipher options"); - cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)), - __alignof__(struct crypt_cpu)); - if (!cc->cpu) { - ti->error = "Cannot allocate per cpu state"; - goto bad_mem; - } - /* * For compatibility with the 
original dm-crypt mapping format, if * only the cipher name is supplied, use cbc-plain. diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c new file mode 100644 index 000000000000..414dad4cb49b --- /dev/null +++ b/drivers/md/dm-era-target.c @@ -0,0 +1,1746 @@ +#include "dm.h" +#include "persistent-data/dm-transaction-manager.h" +#include "persistent-data/dm-bitset.h" +#include "persistent-data/dm-space-map.h" + +#include <linux/dm-io.h> +#include <linux/dm-kcopyd.h> +#include <linux/init.h> +#include <linux/mempool.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> + +#define DM_MSG_PREFIX "era" + +#define SUPERBLOCK_LOCATION 0 +#define SUPERBLOCK_MAGIC 2126579579 +#define SUPERBLOCK_CSUM_XOR 146538381 +#define MIN_ERA_VERSION 1 +#define MAX_ERA_VERSION 1 +#define INVALID_WRITESET_ROOT SUPERBLOCK_LOCATION +#define MIN_BLOCK_SIZE 8 + +/*---------------------------------------------------------------- + * Writeset + *--------------------------------------------------------------*/ +struct writeset_metadata { + uint32_t nr_bits; + dm_block_t root; +}; + +struct writeset { + struct writeset_metadata md; + + /* + * An in-core copy of the bits, to save constantly doing lookups on + * disk. + */ + unsigned long *bits; +}; + +/* + * This does not free the on-disk bitset, as that will normally be done + * after digesting into the era array. + */ +static void writeset_free(struct writeset *ws) +{ + vfree(ws->bits); +} + +static int setup_on_disk_bitset(struct dm_disk_bitset *info, + unsigned nr_bits, dm_block_t *root) +{ + int r; + + r = dm_bitset_empty(info, root); + if (r) + return r; + + return dm_bitset_resize(info, *root, 0, nr_bits, false, root); +} + +static size_t bitset_size(unsigned nr_bits) +{ + return sizeof(unsigned long) * dm_div_up(nr_bits, BITS_PER_LONG); +} + +/* + * Allocates memory for the in-core bitset. + */ +static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks) +{ + ws->md.nr_bits = nr_blocks; + ws->md.root = INVALID_WRITESET_ROOT; + ws->bits = vzalloc(bitset_size(nr_blocks)); + if (!ws->bits) { + DMERR("%s: couldn't allocate in-memory bitset", __func__); + return -ENOMEM; + } + + return 0; +} + +/* + * Wipes the in-core bitset, and creates a new on-disk bitset. + */ +static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws) +{ + int r; + + memset(ws->bits, 0, bitset_size(ws->md.nr_bits)); + + r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root); + if (r) { + DMERR("%s: setup_on_disk_bitset failed", __func__); + return r; + } + + return 0; +} + +static bool writeset_marked(struct writeset *ws, dm_block_t block) +{ + return test_bit(block, ws->bits); +} + +static int writeset_marked_on_disk(struct dm_disk_bitset *info, + struct writeset_metadata *m, dm_block_t block, + bool *result) +{ + dm_block_t old = m->root; + + /* + * The bitset was flushed when it was archived, so we know there'll + * be no change to the root. + */ + int r = dm_bitset_test_bit(info, m->root, block, &m->root, result); + if (r) { + DMERR("%s: dm_bitset_test_bit failed", __func__); + return r; + } + + BUG_ON(m->root != old); + + return r; +} + +/* + * Returns < 0 on error, 0 if the bit wasn't previously set, 1 if it was.
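+ * + * An illustrative call pattern (this caller and 'md' are hypothetical; 'md' + * stands for the struct era_metadata defined later in this file): + * + * r = writeset_test_and_set(&md->bitset_info, md->current_writeset, blk); + * if (r < 0) + * return r; + * if (!r) + * blk has just seen its first write in the current era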
+ */ +static int writeset_test_and_set(struct dm_disk_bitset *info, + struct writeset *ws, uint32_t block) +{ + int r; + + if (!test_and_set_bit(block, ws->bits)) { + r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root); + if (r) { + /* FIXME: fail mode */ + return r; + } + + return 0; + } + + return 1; +} + +/*---------------------------------------------------------------- + * On disk metadata layout + *--------------------------------------------------------------*/ +#define SPACE_MAP_ROOT_SIZE 128 +#define UUID_LEN 16 + +struct writeset_disk { + __le32 nr_bits; + __le64 root; +} __packed; + +struct superblock_disk { + __le32 csum; + __le32 flags; + __le64 blocknr; + + __u8 uuid[UUID_LEN]; + __le64 magic; + __le32 version; + + __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE]; + + __le32 data_block_size; + __le32 metadata_block_size; + __le32 nr_blocks; + + __le32 current_era; + struct writeset_disk current_writeset; + + /* + * Only these two fields are valid within the metadata snapshot. + */ + __le64 writeset_tree_root; + __le64 era_array_root; + + __le64 metadata_snap; +} __packed; + +/*---------------------------------------------------------------- + * Superblock validation + *--------------------------------------------------------------*/ +static void sb_prepare_for_write(struct dm_block_validator *v, + struct dm_block *b, + size_t sb_block_size) +{ + struct superblock_disk *disk = dm_block_data(b); + + disk->blocknr = cpu_to_le64(dm_block_location(b)); + disk->csum = cpu_to_le32(dm_bm_checksum(&disk->flags, + sb_block_size - sizeof(__le32), + SUPERBLOCK_CSUM_XOR)); +} + +static int check_metadata_version(struct superblock_disk *disk) +{ + uint32_t metadata_version = le32_to_cpu(disk->version); + if (metadata_version < MIN_ERA_VERSION || metadata_version > MAX_ERA_VERSION) { + DMERR("Era metadata version %u found, but only versions between %u and %u supported.", + metadata_version, MIN_ERA_VERSION, MAX_ERA_VERSION); + return -EINVAL; + } + + return 0; +} + +static int sb_check(struct dm_block_validator *v, + struct dm_block *b, + size_t sb_block_size) +{ + struct superblock_disk *disk = dm_block_data(b); + __le32 csum_le; + + if (dm_block_location(b) != le64_to_cpu(disk->blocknr)) { + DMERR("sb_check failed: blocknr %llu: wanted %llu", + le64_to_cpu(disk->blocknr), + (unsigned long long)dm_block_location(b)); + return -ENOTBLK; + } + + if (le64_to_cpu(disk->magic) != SUPERBLOCK_MAGIC) { + DMERR("sb_check failed: magic %llu: wanted %llu", + le64_to_cpu(disk->magic), + (unsigned long long) SUPERBLOCK_MAGIC); + return -EILSEQ; + } + + csum_le = cpu_to_le32(dm_bm_checksum(&disk->flags, + sb_block_size - sizeof(__le32), + SUPERBLOCK_CSUM_XOR)); + if (csum_le != disk->csum) { + DMERR("sb_check failed: csum %u: wanted %u", + le32_to_cpu(csum_le), le32_to_cpu(disk->csum)); + return -EILSEQ; + } + + return check_metadata_version(disk); +} + +static struct dm_block_validator sb_validator = { + .name = "superblock", + .prepare_for_write = sb_prepare_for_write, + .check = sb_check +}; + +/*---------------------------------------------------------------- + * Low level metadata handling + *--------------------------------------------------------------*/ +#define DM_ERA_METADATA_BLOCK_SIZE 4096 +#define DM_ERA_METADATA_CACHE_SIZE 64 +#define ERA_MAX_CONCURRENT_LOCKS 5 + +struct era_metadata { + struct block_device *bdev; + struct dm_block_manager *bm; + struct dm_space_map *sm; + struct dm_transaction_manager *tm; + + dm_block_t block_size; + uint32_t nr_blocks; + + uint32_t 
current_era; + + /* + * We preallocate 2 writesets. When an era rolls over we + * switch between them. This means the allocation is done at + * preresume time, rather than on the io path. + */ + struct writeset writesets[2]; + struct writeset *current_writeset; + + dm_block_t writeset_tree_root; + dm_block_t era_array_root; + + struct dm_disk_bitset bitset_info; + struct dm_btree_info writeset_tree_info; + struct dm_array_info era_array_info; + + dm_block_t metadata_snap; + + /* + * A flag that is set whenever a writeset has been archived. + */ + bool archived_writesets; + + /* + * Reading the space map root can fail, so we read it into this + * buffer before the superblock is locked and updated. + */ + __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE]; +}; + +static int superblock_read_lock(struct era_metadata *md, + struct dm_block **sblock) +{ + return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION, + &sb_validator, sblock); +} + +static int superblock_lock_zero(struct era_metadata *md, + struct dm_block **sblock) +{ + return dm_bm_write_lock_zero(md->bm, SUPERBLOCK_LOCATION, + &sb_validator, sblock); +} + +static int superblock_lock(struct era_metadata *md, + struct dm_block **sblock) +{ + return dm_bm_write_lock(md->bm, SUPERBLOCK_LOCATION, + &sb_validator, sblock); +} + +/* FIXME: duplication with cache and thin */ +static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result) +{ + int r; + unsigned i; + struct dm_block *b; + __le64 *data_le, zero = cpu_to_le64(0); + unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64); + + /* + * We can't use a validator here - it may be all zeroes. + */ + r = dm_bm_read_lock(bm, SUPERBLOCK_LOCATION, NULL, &b); + if (r) + return r; + + data_le = dm_block_data(b); + *result = true; + for (i = 0; i < sb_block_size; i++) { + if (data_le[i] != zero) { + *result = false; + break; + } + } + + return dm_bm_unlock(b); +} + +/*----------------------------------------------------------------*/ + +static void ws_pack(const struct writeset_metadata *core, struct writeset_disk *disk) +{ + disk->nr_bits = cpu_to_le32(core->nr_bits); + disk->root = cpu_to_le64(core->root); +} + +static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata *core) +{ + core->nr_bits = le32_to_cpu(disk->nr_bits); + core->root = le64_to_cpu(disk->root); +} + +static void ws_inc(void *context, const void *value) +{ + struct era_metadata *md = context; + struct writeset_disk ws_d; + dm_block_t b; + + memcpy(&ws_d, value, sizeof(ws_d)); + b = le64_to_cpu(ws_d.root); + + dm_tm_inc(md->tm, b); +} + +static void ws_dec(void *context, const void *value) +{ + struct era_metadata *md = context; + struct writeset_disk ws_d; + dm_block_t b; + + memcpy(&ws_d, value, sizeof(ws_d)); + b = le64_to_cpu(ws_d.root); + + dm_bitset_del(&md->bitset_info, b); +} + +static int ws_eq(void *context, const void *value1, const void *value2) +{ + return !memcmp(value1, value2, sizeof(struct writeset_disk)); +} + +/*----------------------------------------------------------------*/ + +static void setup_writeset_tree_info(struct era_metadata *md) +{ + struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type; + md->writeset_tree_info.tm = md->tm; + md->writeset_tree_info.levels = 1; + vt->context = md; + vt->size = sizeof(struct writeset_disk); + vt->inc = ws_inc; + vt->dec = ws_dec; + vt->equal = ws_eq; +} + +static void setup_era_array_info(struct era_metadata *md) +{ + struct dm_btree_value_type vt; + vt.context = NULL; + vt.size = sizeof(__le32); +
+
+static void setup_writeset_tree_info(struct era_metadata *md)
+{
+	struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type;
+	md->writeset_tree_info.tm = md->tm;
+	md->writeset_tree_info.levels = 1;
+	vt->context = md;
+	vt->size = sizeof(struct writeset_disk);
+	vt->inc = ws_inc;
+	vt->dec = ws_dec;
+	vt->equal = ws_eq;
+}
+
+static void setup_era_array_info(struct era_metadata *md)
+{
+	struct dm_btree_value_type vt;
+	vt.context = NULL;
+	vt.size = sizeof(__le32);
+	vt.inc = NULL;
+	vt.dec = NULL;
+	vt.equal = NULL;
+
+	dm_array_info_init(&md->era_array_info, md->tm, &vt);
+}
+
+static void setup_infos(struct era_metadata *md)
+{
+	dm_disk_bitset_init(md->tm, &md->bitset_info);
+	setup_writeset_tree_info(md);
+	setup_era_array_info(md);
+}
+
+/*----------------------------------------------------------------*/
+
+static int create_fresh_metadata(struct era_metadata *md)
+{
+	int r;
+
+	r = dm_tm_create_with_sm(md->bm, SUPERBLOCK_LOCATION,
+				 &md->tm, &md->sm);
+	if (r < 0) {
+		DMERR("dm_tm_create_with_sm failed");
+		return r;
+	}
+
+	setup_infos(md);
+
+	r = dm_btree_empty(&md->writeset_tree_info, &md->writeset_tree_root);
+	if (r) {
+		DMERR("couldn't create new writeset tree");
+		goto bad;
+	}
+
+	r = dm_array_empty(&md->era_array_info, &md->era_array_root);
+	if (r) {
+		DMERR("couldn't create era array");
+		goto bad;
+	}
+
+	return 0;
+
+bad:
+	dm_sm_destroy(md->sm);
+	dm_tm_destroy(md->tm);
+
+	return r;
+}
+
+static int save_sm_root(struct era_metadata *md)
+{
+	int r;
+	size_t metadata_len;
+
+	r = dm_sm_root_size(md->sm, &metadata_len);
+	if (r < 0)
+		return r;
+
+	return dm_sm_copy_root(md->sm, &md->metadata_space_map_root,
+			       metadata_len);
+}
+
+static void copy_sm_root(struct era_metadata *md, struct superblock_disk *disk)
+{
+	memcpy(&disk->metadata_space_map_root,
+	       &md->metadata_space_map_root,
+	       sizeof(md->metadata_space_map_root));
+}
+
+/*
+ * Writes a superblock, including the static fields that don't get updated
+ * with every commit (possible optimisation here). 'md' should be fully
+ * constructed when this is called.
+ */
+static void prepare_superblock(struct era_metadata *md, struct superblock_disk *disk)
+{
+	disk->magic = cpu_to_le64(SUPERBLOCK_MAGIC);
+	disk->flags = cpu_to_le32(0ul);
+
+	/* FIXME: can't keep blanking the uuid (uuid is currently unused though) */
+	memset(disk->uuid, 0, sizeof(disk->uuid));
+	disk->version = cpu_to_le32(MAX_ERA_VERSION);
+
+	copy_sm_root(md, disk);
+
+	disk->data_block_size = cpu_to_le32(md->block_size);
+	disk->metadata_block_size = cpu_to_le32(DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+	disk->nr_blocks = cpu_to_le32(md->nr_blocks);
+	disk->current_era = cpu_to_le32(md->current_era);
+
+	ws_pack(&md->current_writeset->md, &disk->current_writeset);
+	disk->writeset_tree_root = cpu_to_le64(md->writeset_tree_root);
+	disk->era_array_root = cpu_to_le64(md->era_array_root);
+	disk->metadata_snap = cpu_to_le64(md->metadata_snap);
+}
+
+static int write_superblock(struct era_metadata *md)
+{
+	int r;
+	struct dm_block *sblock;
+	struct superblock_disk *disk;
+
+	r = save_sm_root(md);
+	if (r) {
+		DMERR("%s: save_sm_root failed", __func__);
+		return r;
+	}
+
+	r = superblock_lock_zero(md, &sblock);
+	if (r)
+		return r;
+
+	disk = dm_block_data(sblock);
+	prepare_superblock(md, disk);
+
+	return dm_tm_commit(md->tm, sblock);
+}
+
+/*
+ * Assumes block_size and the infos are set.
+ */
+static int format_metadata(struct era_metadata *md)
+{
+	int r;
+
+	r = create_fresh_metadata(md);
+	if (r)
+		return r;
+
+	r = write_superblock(md);
+	if (r) {
+		dm_sm_destroy(md->sm);
+		dm_tm_destroy(md->tm);
+		return r;
+	}
+
+	return 0;
+}
+
+static int open_metadata(struct era_metadata *md)
+{
+	int r;
+	struct dm_block *sblock;
+	struct superblock_disk *disk;
+
+	r = superblock_read_lock(md, &sblock);
+	if (r) {
+		DMERR("couldn't read_lock superblock");
+		return r;
+	}
+
+	disk = dm_block_data(sblock);
+	r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
+			       disk->metadata_space_map_root,
+			       sizeof(disk->metadata_space_map_root),
+			       &md->tm, &md->sm);
+	if (r) {
+		DMERR("dm_tm_open_with_sm failed");
+		goto bad;
+	}
+
+	setup_infos(md);
+
+	md->block_size = le32_to_cpu(disk->data_block_size);
+	md->nr_blocks = le32_to_cpu(disk->nr_blocks);
+	md->current_era = le32_to_cpu(disk->current_era);
+
+	md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
+	md->era_array_root = le64_to_cpu(disk->era_array_root);
+	md->metadata_snap = le64_to_cpu(disk->metadata_snap);
+	md->archived_writesets = true;
+
+	return dm_bm_unlock(sblock);
+
+bad:
+	dm_bm_unlock(sblock);
+	return r;
+}
+
+static int open_or_format_metadata(struct era_metadata *md,
+				   bool may_format)
+{
+	int r;
+	bool unformatted = false;
+
+	r = superblock_all_zeroes(md->bm, &unformatted);
+	if (r)
+		return r;
+
+	if (unformatted)
+		return may_format ? format_metadata(md) : -EPERM;
+
+	return open_metadata(md);
+}
+
+static int create_persistent_data_objects(struct era_metadata *md,
+					  bool may_format)
+{
+	int r;
+
+	md->bm = dm_block_manager_create(md->bdev, DM_ERA_METADATA_BLOCK_SIZE,
+					 DM_ERA_METADATA_CACHE_SIZE,
+					 ERA_MAX_CONCURRENT_LOCKS);
+	if (IS_ERR(md->bm)) {
+		DMERR("could not create block manager");
+		return PTR_ERR(md->bm);
+	}
+
+	r = open_or_format_metadata(md, may_format);
+	if (r)
+		dm_block_manager_destroy(md->bm);
+
+	return r;
+}
+
+static void destroy_persistent_data_objects(struct era_metadata *md)
+{
+	dm_sm_destroy(md->sm);
+	dm_tm_destroy(md->tm);
+	dm_block_manager_destroy(md->bm);
+}
+
+/*
+ * This waits until all era_map threads have picked up the new filter.
+ */
+static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset)
+{
+	rcu_assign_pointer(md->current_writeset, new_writeset);
+	synchronize_rcu();
+}
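swap_writeset() is the only writer of md->current_writeset; readers on the io path dereference it under rcu_read_lock() (see metadata_current_marked() further down). A rough userspace model of that publish/subscribe handshake, using C11 atomics in place of the RCU primitives and a thread join in place of the grace period; compile with cc -pthread rcu_swap_demo.c:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct writeset { int era; };

static struct writeset ws[2] = { { .era = 1 } };
static _Atomic(struct writeset *) current_ws = &ws[0];

static void *reader(void *arg)
{
	(void)arg;
	/* models metadata_current_marked(): rcu_dereference() the filter */
	struct writeset *w = atomic_load_explicit(&current_ws,
						  memory_order_acquire);
	printf("reader saw era %d\n", w->era);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	pthread_create(&tid, NULL, reader, NULL);

	ws[1].era = 2;	/* initialise the new writeset first... */
	/* ...then publish it: models rcu_assign_pointer() */
	atomic_store_explicit(&current_ws, &ws[1], memory_order_release);

	pthread_join(tid, NULL);	/* stands in for synchronize_rcu() */
	printf("swap complete; era %d is current\n",
	       atomic_load(&current_ws)->era);
	return 0;
}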
+/*----------------------------------------------------------------
+ * Writesets get 'digested' into the main era array.
+ *
+ * We're using a coroutine here so the worker thread can do the digestion,
+ * thus avoiding synchronisation of the metadata. Digesting a whole
+ * writeset in one go would cause too much latency.
+ *--------------------------------------------------------------*/
+struct digest {
+	uint32_t era;
+	unsigned nr_bits, current_bit;
+	struct writeset_metadata writeset;
+	__le32 value;
+	struct dm_disk_bitset info;
+
+	int (*step)(struct era_metadata *, struct digest *);
+};
+
+static int metadata_digest_lookup_writeset(struct era_metadata *md,
+					   struct digest *d);
+
+static int metadata_digest_remove_writeset(struct era_metadata *md,
+					   struct digest *d)
+{
+	int r;
+	uint64_t key = d->era;
+
+	r = dm_btree_remove(&md->writeset_tree_info, md->writeset_tree_root,
+			    &key, &md->writeset_tree_root);
+	if (r) {
+		DMERR("%s: dm_btree_remove failed", __func__);
+		return r;
+	}
+
+	d->step = metadata_digest_lookup_writeset;
+	return 0;
+}
+
+#define INSERTS_PER_STEP 100
+
+static int metadata_digest_transcribe_writeset(struct era_metadata *md,
+					       struct digest *d)
+{
+	int r;
+	bool marked;
+	unsigned b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);
+
+	for (b = d->current_bit; b < e; b++) {
+		r = writeset_marked_on_disk(&d->info, &d->writeset, b, &marked);
+		if (r) {
+			DMERR("%s: writeset_marked_on_disk failed", __func__);
+			return r;
+		}
+
+		if (!marked)
+			continue;
+
+		__dm_bless_for_disk(&d->value);
+		r = dm_array_set_value(&md->era_array_info, md->era_array_root,
+				       b, &d->value, &md->era_array_root);
+		if (r) {
+			DMERR("%s: dm_array_set_value failed", __func__);
+			return r;
+		}
+	}
+
+	if (b == d->nr_bits)
+		d->step = metadata_digest_remove_writeset;
+	else
+		d->current_bit = b;
+
+	return 0;
+}
+
+static int metadata_digest_lookup_writeset(struct era_metadata *md,
+					   struct digest *d)
+{
+	int r;
+	uint64_t key;
+	struct writeset_disk disk;
+
+	r = dm_btree_find_lowest_key(&md->writeset_tree_info,
+				     md->writeset_tree_root, &key);
+	if (r < 0)
+		return r;
+
+	d->era = key;
+
+	r = dm_btree_lookup(&md->writeset_tree_info,
+			    md->writeset_tree_root, &key, &disk);
+	if (r) {
+		if (r == -ENODATA) {
+			d->step = NULL;
+			return 0;
+		}
+
+		DMERR("%s: dm_btree_lookup failed", __func__);
+		return r;
+	}
+
+	ws_unpack(&disk, &d->writeset);
+	d->value = cpu_to_le32(key);
+
+	d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
+	d->current_bit = 0;
+	d->step = metadata_digest_transcribe_writeset;
+
+	return 0;
+}
+
+static int metadata_digest_start(struct era_metadata *md, struct digest *d)
+{
+	if (d->step)
+		return 0;
+
+	memset(d, 0, sizeof(*d));
+
+	/*
+	 * We initialise another bitset info to avoid any caching side
+	 * effects with the previous one.
+	 */
+	dm_disk_bitset_init(md->tm, &d->info);
+	d->step = metadata_digest_lookup_writeset;
+
+	return 0;
+}
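The three step functions above form the coroutine the comment describes: lookup picks the oldest archived writeset, transcribe copies at most INSERTS_PER_STEP bits per invocation, and remove deletes the finished writeset before looping back to lookup. A compact standalone model of that pattern, with printf() standing in for the btree and array updates; compile with cc digest_demo.c:

#include <stdio.h>

#define NR_BITS 10
#define INSERTS_PER_STEP 4	/* same batching idea as the real code */

struct digest {
	unsigned current_bit, nr_bits;
	int (*step)(struct digest *);
};

static int step_lookup(struct digest *d);

static int step_transcribe(struct digest *d)
{
	unsigned b, e = d->current_bit + INSERTS_PER_STEP;

	if (e > d->nr_bits)
		e = d->nr_bits;
	for (b = d->current_bit; b < e; b++)
		printf("transcribe bit %u\n", b);	/* dm_array_set_value() */

	if (b == d->nr_bits)
		d->step = step_lookup;	/* writeset done, find the next one */
	else
		d->current_bit = b;
	return 0;
}

static int step_lookup(struct digest *d)
{
	d->step = NULL;		/* no more archived writesets: -ENODATA */
	return 0;
}

int main(void)
{
	struct digest d = { .nr_bits = NR_BITS, .step = step_transcribe };

	/* models the worker: one bounded step per pass, rescheduling while work remains */
	while (d.step)
		d.step(&d);
	return 0;
}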
+/*----------------------------------------------------------------
+ * High level metadata interface. Target methods should use these, and not
+ * the lower level ones.
+ *--------------------------------------------------------------*/
+static struct era_metadata *metadata_open(struct block_device *bdev,
+					  sector_t block_size,
+					  bool may_format)
+{
+	int r;
+	struct era_metadata *md = kzalloc(sizeof(*md), GFP_KERNEL);
+
+	if (!md)
+		return ERR_PTR(-ENOMEM);
+
+	md->bdev = bdev;
+	md->block_size = block_size;
+
+	md->writesets[0].md.root = INVALID_WRITESET_ROOT;
+	md->writesets[1].md.root = INVALID_WRITESET_ROOT;
+	md->current_writeset = &md->writesets[0];
+
+	r = create_persistent_data_objects(md, may_format);
+	if (r) {
+		kfree(md);
+		return ERR_PTR(r);
+	}
+
+	return md;
+}
+
+static void metadata_close(struct era_metadata *md)
+{
+	destroy_persistent_data_objects(md);
+	kfree(md);
+}
+
+static bool valid_nr_blocks(dm_block_t n)
+{
+	/*
+	 * dm_bitset restricts us to 2^32. test_bit & co. restrict us
+	 * further to 2^31 - 1.
+	 */
+	return n < (1ull << 31);
+}
+
+static int metadata_resize(struct era_metadata *md, void *arg)
+{
+	int r;
+	dm_block_t *new_size = arg;
+	__le32 value;
+
+	if (!valid_nr_blocks(*new_size)) {
+		DMERR("Invalid number of origin blocks %llu",
+		      (unsigned long long) *new_size);
+		return -EINVAL;
+	}
+
+	writeset_free(&md->writesets[0]);
+	writeset_free(&md->writesets[1]);
+
+	r = writeset_alloc(&md->writesets[0], *new_size);
+	if (r) {
+		DMERR("%s: writeset_alloc failed for writeset 0", __func__);
+		return r;
+	}
+
+	r = writeset_alloc(&md->writesets[1], *new_size);
+	if (r) {
+		DMERR("%s: writeset_alloc failed for writeset 1", __func__);
+		return r;
+	}
+
+	value = cpu_to_le32(0u);
+	__dm_bless_for_disk(&value);
+	r = dm_array_resize(&md->era_array_info, md->era_array_root,
+			    md->nr_blocks, *new_size,
+			    &value, &md->era_array_root);
+	if (r) {
+		DMERR("%s: dm_array_resize failed", __func__);
+		return r;
+	}
+
+	md->nr_blocks = *new_size;
+	return 0;
+}
+
+static int metadata_era_archive(struct era_metadata *md)
+{
+	int r;
+	uint64_t keys[1];
+	struct writeset_disk value;
+
+	r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
+			    &md->current_writeset->md.root);
+	if (r) {
+		DMERR("%s: dm_bitset_flush failed", __func__);
+		return r;
+	}
+
+	ws_pack(&md->current_writeset->md, &value);
+	md->current_writeset->md.root = INVALID_WRITESET_ROOT;
+
+	keys[0] = md->current_era;
+	__dm_bless_for_disk(&value);
+	r = dm_btree_insert(&md->writeset_tree_info, md->writeset_tree_root,
+			    keys, &value, &md->writeset_tree_root);
+	if (r) {
+		DMERR("%s: couldn't insert writeset into btree", __func__);
+		/* FIXME: fail mode */
+		return r;
+	}
+
+	md->archived_writesets = true;
+
+	return 0;
+}
+
+static struct writeset *next_writeset(struct era_metadata *md)
+{
+	return (md->current_writeset == &md->writesets[0]) ?
+		&md->writesets[1] : &md->writesets[0];
+}
+
+static int metadata_new_era(struct era_metadata *md)
+{
+	int r;
+	struct writeset *new_writeset = next_writeset(md);
+
+	r = writeset_init(&md->bitset_info, new_writeset);
+	if (r) {
+		DMERR("%s: writeset_init failed", __func__);
+		return r;
+	}
+
+	swap_writeset(md, new_writeset);
+	md->current_era++;
+
+	return 0;
+}
+
+static int metadata_era_rollover(struct era_metadata *md)
+{
+	int r;
+
+	if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
+		r = metadata_era_archive(md);
+		if (r) {
+			DMERR("%s: metadata_era_archive failed", __func__);
+			/* FIXME: fail mode? */
+			return r;
+		}
+	}
+
+	r = metadata_new_era(md);
+	if (r) {
+		DMERR("%s: new era failed", __func__);
+		/* FIXME: fail mode */
+		return r;
+	}
+
+	return 0;
+}
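A toy model of the rollover sequence above, assuming (as the code does) that both writesets were preallocated at resize time: the outgoing writeset is archived under the era number that just ended, then the spare is zeroed and published. The archive map plays the role of the writeset tree; compile with cc rollover_demo.c:

#include <stdio.h>
#include <string.h>

#define NR_BLOCKS 8
#define MAX_ERAS 16

struct writeset { unsigned char bits[NR_BLOCKS]; };

static struct writeset writesets[2];		/* models md->writesets[2] */
static struct writeset *current_ws = &writesets[0];
static unsigned current_era = 1;

/* models the writeset tree: archived bitmaps keyed by era */
static struct writeset archive[MAX_ERAS];

static void era_rollover(void)
{
	struct writeset *next =
		(current_ws == &writesets[0]) ? &writesets[1] : &writesets[0];

	archive[current_era] = *current_ws;	   /* metadata_era_archive() */
	memset(next->bits, 0, sizeof(next->bits)); /* writeset_init() */
	current_ws = next;			   /* swap_writeset() */
	current_era++;
}

int main(void)
{
	current_ws->bits[3] = 1;		/* a write lands in era 1 */
	era_rollover();
	current_ws->bits[5] = 1;		/* a write lands in era 2 */

	printf("block 3 written in era 1? %d\n", archive[1].bits[3]);
	printf("block 5 marked in current era %u? %d\n",
	       current_era, current_ws->bits[5]);
	return 0;
}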
+static bool metadata_current_marked(struct era_metadata *md, dm_block_t block)
+{
+	bool r;
+	struct writeset *ws;
+
+	rcu_read_lock();
+	ws = rcu_dereference(md->current_writeset);
+	r = writeset_marked(ws, block);
+	rcu_read_unlock();
+
+	return r;
+}
+
+static int metadata_commit(struct era_metadata *md)
+{
+	int r;
+	struct dm_block *sblock;
+
+	if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) {
+		r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
+				    &md->current_writeset->md.root);
+		if (r) {
+			DMERR("%s: bitset flush failed", __func__);
+			return r;
+		}
+	}
+
+	r = save_sm_root(md);
+	if (r) {
+		DMERR("%s: save_sm_root failed", __func__);
+		return r;
+	}
+
+	r = dm_tm_pre_commit(md->tm);
+	if (r) {
+		DMERR("%s: pre commit failed", __func__);
+		return r;
+	}
+
+	r = superblock_lock(md, &sblock);
+	if (r) {
+		DMERR("%s: superblock lock failed", __func__);
+		return r;
+	}
+
+	prepare_superblock(md, dm_block_data(sblock));
+
+	return dm_tm_commit(md->tm, sblock);
+}
+
+static int metadata_checkpoint(struct era_metadata *md)
+{
+	/*
+	 * For now we just rollover, but later I want to put a check in to
+	 * avoid this if the filter is still pretty fresh.
+	 */
+	return metadata_era_rollover(md);
+}
+
+/*
+ * Metadata snapshots allow userland to access era data.
+ */
+static int metadata_take_snap(struct era_metadata *md)
+{
+	int r, inc;
+	struct dm_block *clone;
+
+	if (md->metadata_snap != SUPERBLOCK_LOCATION) {
+		DMERR("%s: metadata snapshot already exists", __func__);
+		return -EINVAL;
+	}
+
+	r = metadata_era_rollover(md);
+	if (r) {
+		DMERR("%s: era rollover failed", __func__);
+		return r;
+	}
+
+	r = metadata_commit(md);
+	if (r) {
+		DMERR("%s: commit failed", __func__);
+		return r;
+	}
+
+	r = dm_sm_inc_block(md->sm, SUPERBLOCK_LOCATION);
+	if (r) {
+		DMERR("%s: couldn't increment superblock", __func__);
+		return r;
+	}
+
+	r = dm_tm_shadow_block(md->tm, SUPERBLOCK_LOCATION,
+			       &sb_validator, &clone, &inc);
+	if (r) {
+		DMERR("%s: couldn't shadow superblock", __func__);
+		dm_sm_dec_block(md->sm, SUPERBLOCK_LOCATION);
+		return r;
+	}
+	BUG_ON(!inc);
+
+	r = dm_sm_inc_block(md->sm, md->writeset_tree_root);
+	if (r) {
+		DMERR("%s: couldn't inc writeset tree root", __func__);
+		dm_tm_unlock(md->tm, clone);
+		return r;
+	}
+
+	r = dm_sm_inc_block(md->sm, md->era_array_root);
+	if (r) {
+		DMERR("%s: couldn't inc era array root", __func__);
+		dm_sm_dec_block(md->sm, md->writeset_tree_root);
+		dm_tm_unlock(md->tm, clone);
+		return r;
+	}
+
+	md->metadata_snap = dm_block_location(clone);
+
+	r = dm_tm_unlock(md->tm, clone);
+	if (r) {
+		DMERR("%s: couldn't unlock clone", __func__);
+		md->metadata_snap = SUPERBLOCK_LOCATION;
+		return r;
+	}
+
+	return 0;
+}
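metadata_take_snap() pins the snapshot by taking extra references on the superblock and the two tree roots, so subsequent commits shadow blocks out rather than overwrite them, leaving the snapshot's view intact. A simplified reference-counting model of that copy-out behaviour (the block and refcount bookkeeping here is invented for illustration); compile with cc snap_refs_demo.c:

#include <assert.h>
#include <stdio.h>

#define NR_BLOCKS 16

static int ref[NR_BLOCKS];		/* models the metadata space map */
static int next_free = 1;		/* block 0 is the superblock */

static int alloc_block(void) { ref[next_free] = 1; return next_free++; }

/* models dm_tm_shadow_block(): copy out blocks a snapshot still holds */
static int shadow(int b)
{
	if (ref[b] == 1)
		return b;		/* sole owner, modify in place */
	ref[b]--;			/* snapshot keeps the old copy */
	return alloc_block();
}

int main(void)
{
	int era_array_root = alloc_block();

	/* take_metadata_snap: the snapshot takes its own reference */
	int snap_root = era_array_root;
	ref[era_array_root]++;

	/* the next commit shadows the root; the snapshot's copy is untouched */
	era_array_root = shadow(era_array_root);
	assert(era_array_root != snap_root && ref[snap_root] == 1);

	/* drop_metadata_snap: release the snapshot's reference */
	if (--ref[snap_root] == 0)
		printf("snapshot root %d freed\n", snap_root);

	printf("live root is now %d\n", era_array_root);
	return 0;
}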
+static int metadata_drop_snap(struct era_metadata *md)
+{
+	int r;
+	dm_block_t location;
+	struct dm_block *clone;
+	struct superblock_disk *disk;
+
+	if (md->metadata_snap == SUPERBLOCK_LOCATION) {
+		DMERR("%s: no snap to drop", __func__);
+		return -EINVAL;
+	}
+
+	r = dm_tm_read_lock(md->tm, md->metadata_snap, &sb_validator, &clone);
+	if (r) {
+		DMERR("%s: couldn't read lock superblock clone", __func__);
+		return r;
+	}
+
+	/*
+	 * Whatever happens now we'll commit with no record of the metadata
+	 * snap.
+	 */
+	md->metadata_snap = SUPERBLOCK_LOCATION;
+
+	disk = dm_block_data(clone);
+	r = dm_btree_del(&md->writeset_tree_info,
+			 le64_to_cpu(disk->writeset_tree_root));
+	if (r) {
+		DMERR("%s: error deleting writeset tree clone", __func__);
+		dm_tm_unlock(md->tm, clone);
+		return r;
+	}
+
+	r = dm_array_del(&md->era_array_info, le64_to_cpu(disk->era_array_root));
+	if (r) {
+		DMERR("%s: error deleting era array clone", __func__);
+		dm_tm_unlock(md->tm, clone);
+		return r;
+	}
+
+	location = dm_block_location(clone);
+	dm_tm_unlock(md->tm, clone);
+
+	return dm_sm_dec_block(md->sm, location);
+}
+
+struct metadata_stats {
+	dm_block_t used;
+	dm_block_t total;
+	dm_block_t snap;
+	uint32_t era;
+};
+
+static int metadata_get_stats(struct era_metadata *md, void *ptr)
+{
+	int r;
+	struct metadata_stats *s = ptr;
+	dm_block_t nr_free, nr_total;
+
+	r = dm_sm_get_nr_free(md->sm, &nr_free);
+	if (r) {
+		DMERR("dm_sm_get_nr_free returned %d", r);
+		return r;
+	}
+
+	r = dm_sm_get_nr_blocks(md->sm, &nr_total);
+	if (r) {
+		DMERR("dm_sm_get_nr_blocks returned %d", r);
+		return r;
+	}
+
+	s->used = nr_total - nr_free;
+	s->total = nr_total;
+	s->snap = md->metadata_snap;
+	s->era = md->current_era;
+
+	return 0;
+}
+
+/*----------------------------------------------------------------*/
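These stats surface through the target's status line; the exact format is documented above era_status() further down ("<metadata block size> <#used>/<#total> <current era> <held metadata root | '-'>"). A sketch of how a userspace tool might parse that STATUSTYPE_INFO output, using example values; compile with cc parse_status_demo.c:

#include <stdio.h>

int main(void)
{
	const char *line = "8 72/4096 42 -";	/* example status values */
	unsigned meta_block_size, era;
	unsigned long long used, total;
	char held[32];

	if (sscanf(line, "%u %llu/%llu %u %31s",
		   &meta_block_size, &used, &total, &era, held) != 5) {
		fprintf(stderr, "unexpected status line\n");
		return 1;
	}

	printf("metadata: %llu/%llu blocks of %u sectors, era %u\n",
	       used, total, meta_block_size, era);
	if (held[0] != '-')
		printf("metadata snapshot held at block %s\n", held);
	return 0;
}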
+struct era {
+	struct dm_target *ti;
+	struct dm_target_callbacks callbacks;
+
+	struct dm_dev *metadata_dev;
+	struct dm_dev *origin_dev;
+
+	dm_block_t nr_blocks;
+	uint32_t sectors_per_block;
+	int sectors_per_block_shift;
+	struct era_metadata *md;
+
+	struct workqueue_struct *wq;
+	struct work_struct worker;
+
+	spinlock_t deferred_lock;
+	struct bio_list deferred_bios;
+
+	spinlock_t rpc_lock;
+	struct list_head rpc_calls;
+
+	struct digest digest;
+	atomic_t suspended;
+};
+
+struct rpc {
+	struct list_head list;
+
+	int (*fn0)(struct era_metadata *);
+	int (*fn1)(struct era_metadata *, void *);
+	void *arg;
+	int result;
+
+	struct completion complete;
+};
+
+/*----------------------------------------------------------------
+ * Remapping.
+ *---------------------------------------------------------------*/
+static bool block_size_is_power_of_two(struct era *era)
+{
+	return era->sectors_per_block_shift >= 0;
+}
+
+static dm_block_t get_block(struct era *era, struct bio *bio)
+{
+	sector_t block_nr = bio->bi_iter.bi_sector;
+
+	if (!block_size_is_power_of_two(era))
+		(void) sector_div(block_nr, era->sectors_per_block);
+	else
+		block_nr >>= era->sectors_per_block_shift;
+
+	return block_nr;
+}
+
+static void remap_to_origin(struct era *era, struct bio *bio)
+{
+	bio->bi_bdev = era->origin_dev->bdev;
+}
+
+/*----------------------------------------------------------------
+ * Worker thread
+ *--------------------------------------------------------------*/
+static void wake_worker(struct era *era)
+{
+	if (!atomic_read(&era->suspended))
+		queue_work(era->wq, &era->worker);
+}
+
+static void process_old_eras(struct era *era)
+{
+	int r;
+
+	if (!era->digest.step)
+		return;
+
+	r = era->digest.step(era->md, &era->digest);
+	if (r < 0) {
+		DMERR("%s: digest step failed, stopping digestion", __func__);
+		era->digest.step = NULL;
+
+	} else if (era->digest.step)
+		wake_worker(era);
+}
+
+static void process_deferred_bios(struct era *era)
+{
+	int r;
+	struct bio_list deferred_bios, marked_bios;
+	struct bio *bio;
+	bool commit_needed = false;
+	bool failed = false;
+
+	bio_list_init(&deferred_bios);
+	bio_list_init(&marked_bios);
+
+	spin_lock(&era->deferred_lock);
+	bio_list_merge(&deferred_bios, &era->deferred_bios);
+	bio_list_init(&era->deferred_bios);
+	spin_unlock(&era->deferred_lock);
+
+	while ((bio = bio_list_pop(&deferred_bios))) {
+		r = writeset_test_and_set(&era->md->bitset_info,
+					  era->md->current_writeset,
+					  get_block(era, bio));
+		if (r < 0) {
+			/*
+			 * This is bad news, we need to rollback.
+			 * FIXME: finish.
+			 */
+			failed = true;
+
+		} else if (r == 0)
+			commit_needed = true;
+
+		bio_list_add(&marked_bios, bio);
+	}
+
+	if (commit_needed) {
+		r = metadata_commit(era->md);
+		if (r)
+			failed = true;
+	}
+
+	if (failed)
+		while ((bio = bio_list_pop(&marked_bios)))
+			bio_io_error(bio);
+	else
+		while ((bio = bio_list_pop(&marked_bios)))
+			generic_make_request(bio);
+}
+
+static void process_rpc_calls(struct era *era)
+{
+	int r;
+	bool need_commit = false;
+	struct list_head calls;
+	struct rpc *rpc, *tmp;
+
+	INIT_LIST_HEAD(&calls);
+	spin_lock(&era->rpc_lock);
+	list_splice_init(&era->rpc_calls, &calls);
+	spin_unlock(&era->rpc_lock);
+
+	list_for_each_entry_safe(rpc, tmp, &calls, list) {
+		rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg);
+		need_commit = true;
+	}
+
+	if (need_commit) {
+		r = metadata_commit(era->md);
+		if (r)
+			list_for_each_entry_safe(rpc, tmp, &calls, list)
+				rpc->result = r;
+	}
+
+	list_for_each_entry_safe(rpc, tmp, &calls, list)
+		complete(&rpc->complete);
+}
+
+static void kick_off_digest(struct era *era)
+{
+	if (era->md->archived_writesets) {
+		era->md->archived_writesets = false;
+		metadata_digest_start(era->md, &era->digest);
+	}
+}
+
+static void do_work(struct work_struct *ws)
+{
+	struct era *era = container_of(ws, struct era, worker);
+
+	kick_off_digest(era);
+	process_old_eras(era);
+	process_deferred_bios(era);
+	process_rpc_calls(era);
+}
+
+static void defer_bio(struct era *era, struct bio *bio)
+{
+	spin_lock(&era->deferred_lock);
+	bio_list_add(&era->deferred_bios, bio);
+	spin_unlock(&era->deferred_lock);
+
+	wake_worker(era);
+}
+
+/*
+ * Make an rpc call to the worker to change the metadata.
+ */
+static int perform_rpc(struct era *era, struct rpc *rpc)
+{
+	rpc->result = 0;
+	init_completion(&rpc->complete);
+
+	spin_lock(&era->rpc_lock);
+	list_add(&rpc->list, &era->rpc_calls);
+	spin_unlock(&era->rpc_lock);
+
+	wake_worker(era);
+	wait_for_completion(&rpc->complete);
+
+	return rpc->result;
+}
+
+static int in_worker0(struct era *era, int (*fn)(struct era_metadata *))
+{
+	struct rpc rpc;
+	rpc.fn0 = fn;
+	rpc.fn1 = NULL;
+
+	return perform_rpc(era, &rpc);
+}
+
+static int in_worker1(struct era *era,
+		      int (*fn)(struct era_metadata *, void *), void *arg)
+{
+	struct rpc rpc;
+	rpc.fn0 = NULL;
+	rpc.fn1 = fn;
+	rpc.arg = arg;
+
+	return perform_rpc(era, &rpc);
+}
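The rpc helpers above are how every control-path operation reaches the metadata: messages and suspend/resume hooks never touch struct era_metadata directly, they hand a function to the worker and sleep on a completion, so all metadata updates are serialised on one thread. A userspace approximation with pthreads, reduced to a one-slot queue for brevity; compile with cc -pthread rpc_demo.c:

#include <pthread.h>
#include <stdio.h>

struct rpc {
	int (*fn)(int *metadata);
	int result, done;
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static int metadata;			/* worker-private state */
static struct rpc *pending;		/* one-slot queue for brevity */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&queue_lock);
		struct rpc *rpc = pending;
		pending = NULL;
		pthread_mutex_unlock(&queue_lock);

		if (rpc) {
			rpc->result = rpc->fn(&metadata);
			pthread_mutex_lock(&rpc->lock);
			rpc->done = 1;		/* complete(&rpc->complete) */
			pthread_cond_signal(&rpc->cond);
			pthread_mutex_unlock(&rpc->lock);
			return NULL;		/* demo: one call, then exit */
		}
	}
}

static int bump_era(int *md) { return ++*md; }

int main(void)
{
	pthread_t tid;
	struct rpc rpc = { .fn = bump_era,
			   .lock = PTHREAD_MUTEX_INITIALIZER,
			   .cond = PTHREAD_COND_INITIALIZER };

	pthread_mutex_lock(&queue_lock);
	pending = &rpc;			/* list_add(&rpc->list, ...) */
	pthread_mutex_unlock(&queue_lock);
	pthread_create(&tid, NULL, worker, NULL);

	pthread_mutex_lock(&rpc.lock);	/* wait_for_completion() */
	while (!rpc.done)
		pthread_cond_wait(&rpc.cond, &rpc.lock);
	pthread_mutex_unlock(&rpc.lock);

	pthread_join(tid, NULL);
	printf("rpc result: %d\n", rpc.result);
	return 0;
}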
+static void start_worker(struct era *era)
+{
+	atomic_set(&era->suspended, 0);
+}
+
+static void stop_worker(struct era *era)
+{
+	atomic_set(&era->suspended, 1);
+	flush_workqueue(era->wq);
+}
+
+/*----------------------------------------------------------------
+ * Target methods
+ *--------------------------------------------------------------*/
+static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+	return bdi_congested(&q->backing_dev_info, bdi_bits);
+}
+
+static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
+{
+	struct era *era = container_of(cb, struct era, callbacks);
+	return dev_is_congested(era->origin_dev, bdi_bits);
+}
+
+static void era_destroy(struct era *era)
+{
+	if (era->md)
+		metadata_close(era->md);
+
+	if (era->wq)
+		destroy_workqueue(era->wq);
+
+	if (era->origin_dev)
+		dm_put_device(era->ti, era->origin_dev);
+
+	if (era->metadata_dev)
+		dm_put_device(era->ti, era->metadata_dev);
+
+	kfree(era);
+}
+
+static dm_block_t calc_nr_blocks(struct era *era)
+{
+	return dm_sector_div_up(era->ti->len, era->sectors_per_block);
+}
+
+static bool valid_block_size(dm_block_t block_size)
+{
+	bool greater_than_zero = block_size > 0;
+	bool multiple_of_min_block_size = (block_size & (MIN_BLOCK_SIZE - 1)) == 0;
+
+	return greater_than_zero && multiple_of_min_block_size;
+}
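era_ctr() below derives sectors_per_block_shift so that get_block() can translate sectors to blocks with a shift when the block size is a power of two, and fall back to division otherwise. A small self-checking model of that arithmetic, with ffs() standing in for the kernel's __ffs(); compile with cc block_math_demo.c:

#include <assert.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

static int shift_for(uint32_t sectors_per_block)
{
	if (sectors_per_block & (sectors_per_block - 1))
		return -1;			/* not a power of two */
	return ffs(sectors_per_block) - 1;	/* __ffs() is 0-based */
}

static uint64_t get_block(uint64_t sector, uint32_t spb, int shift)
{
	return shift >= 0 ? sector >> shift : sector / spb;
}

int main(void)
{
	assert(shift_for(128) == 7);		/* 64KiB blocks: shift path */
	assert(shift_for(96) == -1);		/* non-power-of-two: divide */
	assert(get_block(1000, 128, 7) == get_block(1000, 128, -1));
	assert(get_block(1000, 96, shift_for(96)) == 1000 / 96);
	return 0;
}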
+/*
+ * <metadata dev> <data dev> <data block size (sectors)>
+ */
+static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+	int r;
+	char dummy;
+	struct era *era;
+	struct era_metadata *md;
+
+	if (argc != 3) {
+		ti->error = "Invalid argument count";
+		return -EINVAL;
+	}
+
+	era = kzalloc(sizeof(*era), GFP_KERNEL);
+	if (!era) {
+		ti->error = "Error allocating era structure";
+		return -ENOMEM;
+	}
+
+	era->ti = ti;
+
+	r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev);
+	if (r) {
+		ti->error = "Error opening metadata device";
+		era_destroy(era);
+		return -EINVAL;
+	}
+
+	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev);
+	if (r) {
+		ti->error = "Error opening data device";
+		era_destroy(era);
+		return -EINVAL;
+	}
+
+	r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy);
+	if (r != 1) {
+		ti->error = "Error parsing block size";
+		era_destroy(era);
+		return -EINVAL;
+	}
+
+	r = dm_set_target_max_io_len(ti, era->sectors_per_block);
+	if (r) {
+		ti->error = "could not set max io len";
+		era_destroy(era);
+		return -EINVAL;
+	}
+
+	if (!valid_block_size(era->sectors_per_block)) {
+		ti->error = "Invalid block size";
+		era_destroy(era);
+		return -EINVAL;
+	}
+
+	if (era->sectors_per_block & (era->sectors_per_block - 1))
+		era->sectors_per_block_shift = -1;
+	else
+		era->sectors_per_block_shift = __ffs(era->sectors_per_block);
+
+	md = metadata_open(era->metadata_dev->bdev,
+			   era->sectors_per_block, true);
+	if (IS_ERR(md)) {
+		ti->error = "Error reading metadata";
+		era_destroy(era);
+		return PTR_ERR(md);
+	}
+	era->md = md;
+
+	era->nr_blocks = calc_nr_blocks(era);
+
+	r = metadata_resize(era->md, &era->nr_blocks);
+	if (r) {
+		ti->error = "couldn't resize metadata";
+		era_destroy(era);
+		return -ENOMEM;
+	}
+
+	era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
+	if (!era->wq) {
+		ti->error = "could not create workqueue for metadata object";
+		era_destroy(era);
+		return -ENOMEM;
+	}
+	INIT_WORK(&era->worker, do_work);
+
+	spin_lock_init(&era->deferred_lock);
+	bio_list_init(&era->deferred_bios);
+
+	spin_lock_init(&era->rpc_lock);
+	INIT_LIST_HEAD(&era->rpc_calls);
+
+	ti->private = era;
+	ti->num_flush_bios = 1;
+	ti->flush_supported = true;
+
+	ti->num_discard_bios = 1;
+	ti->discards_supported = true;
+	era->callbacks.congested_fn = era_is_congested;
+	dm_table_add_target_callbacks(ti->table, &era->callbacks);
+
+	return 0;
+}
+
+static void era_dtr(struct dm_target *ti)
+{
+	era_destroy(ti->private);
+}
+
+static int era_map(struct dm_target *ti, struct bio *bio)
+{
+	struct era *era = ti->private;
+	dm_block_t block = get_block(era, bio);
+
+	/*
+	 * All bios get remapped to the origin device. We do this now, but
+	 * it may not get issued until later, depending on whether the
+	 * block is marked in this era.
+	 */
+	remap_to_origin(era, bio);
+
+	/*
+	 * REQ_FLUSH bios carry no data, so we're not interested in them.
+	 */
+	if (!(bio->bi_rw & REQ_FLUSH) &&
+	    (bio_data_dir(bio) == WRITE) &&
+	    !metadata_current_marked(era->md, block)) {
+		defer_bio(era, bio);
+		return DM_MAPIO_SUBMITTED;
+	}
+
+	return DM_MAPIO_REMAPPED;
+}
+
+static void era_postsuspend(struct dm_target *ti)
+{
+	int r;
+	struct era *era = ti->private;
+
+	r = in_worker0(era, metadata_era_archive);
+	if (r) {
+		DMERR("%s: couldn't archive current era", __func__);
+		/* FIXME: fail mode */
+	}
+
+	stop_worker(era);
+}
+
+static int era_preresume(struct dm_target *ti)
+{
+	int r;
+	struct era *era = ti->private;
+	dm_block_t new_size = calc_nr_blocks(era);
+
+	if (era->nr_blocks != new_size) {
+		r = in_worker1(era, metadata_resize, &new_size);
+		if (r)
+			return r;
+
+		era->nr_blocks = new_size;
+	}
+
+	start_worker(era);
+
+	r = in_worker0(era, metadata_new_era);
+	if (r) {
+		DMERR("%s: metadata_new_era failed", __func__);
+		return r;
+	}
+
+	return 0;
+}
+
+/*
+ * Status format:
+ *
+ * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
+ * <current era> <held metadata root | '-'>
+ */
+static void era_status(struct dm_target *ti, status_type_t type,
+		       unsigned status_flags, char *result, unsigned maxlen)
+{
+	int r;
+	struct era *era = ti->private;
+	ssize_t sz = 0;
+	struct metadata_stats stats;
+	char buf[BDEVNAME_SIZE];
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		r = in_worker1(era, metadata_get_stats, &stats);
+		if (r)
+			goto err;
+
+		DMEMIT("%u %llu/%llu %u",
+		       (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
+		       (unsigned long long) stats.used,
+		       (unsigned long long) stats.total,
+		       (unsigned) stats.era);
+
+		if (stats.snap != SUPERBLOCK_LOCATION)
+			DMEMIT(" %llu", stats.snap);
+		else
+			DMEMIT(" -");
+		break;
+
+	case STATUSTYPE_TABLE:
+		format_dev_t(buf, era->metadata_dev->bdev->bd_dev);
+		DMEMIT("%s ", buf);
+		format_dev_t(buf, era->origin_dev->bdev->bd_dev);
+		DMEMIT("%s %u", buf, era->sectors_per_block);
+		break;
+	}
+
+	return;
+
+err:
+	DMEMIT("Error");
+}
+
+static int era_message(struct dm_target *ti, unsigned argc, char
**argv) +{ + struct era *era = ti->private; + + if (argc != 1) { + DMERR("incorrect number of message arguments"); + return -EINVAL; + } + + if (!strcasecmp(argv[0], "checkpoint")) + return in_worker0(era, metadata_checkpoint); + + if (!strcasecmp(argv[0], "take_metadata_snap")) + return in_worker0(era, metadata_take_snap); + + if (!strcasecmp(argv[0], "drop_metadata_snap")) + return in_worker0(era, metadata_drop_snap); + + DMERR("unsupported message '%s'", argv[0]); + return -EINVAL; +} + +static sector_t get_dev_size(struct dm_dev *dev) +{ + return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT; +} + +static int era_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data) +{ + struct era *era = ti->private; + return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data); +} + +static int era_merge(struct dm_target *ti, struct bvec_merge_data *bvm, + struct bio_vec *biovec, int max_size) +{ + struct era *era = ti->private; + struct request_queue *q = bdev_get_queue(era->origin_dev->bdev); + + if (!q->merge_bvec_fn) + return max_size; + + bvm->bi_bdev = era->origin_dev->bdev; + + return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); +} + +static void era_io_hints(struct dm_target *ti, struct queue_limits *limits) +{ + struct era *era = ti->private; + uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT; + + /* + * If the system-determined stacked limits are compatible with the + * era device's blocksize (io_opt is a factor) do not override them. + */ + if (io_opt_sectors < era->sectors_per_block || + do_div(io_opt_sectors, era->sectors_per_block)) { + blk_limits_io_min(limits, 0); + blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT); + } +} + +/*----------------------------------------------------------------*/ + +static struct target_type era_target = { + .name = "era", + .version = {1, 0, 0}, + .module = THIS_MODULE, + .ctr = era_ctr, + .dtr = era_dtr, + .map = era_map, + .postsuspend = era_postsuspend, + .preresume = era_preresume, + .status = era_status, + .message = era_message, + .iterate_devices = era_iterate_devices, + .merge = era_merge, + .io_hints = era_io_hints +}; + +static int __init dm_era_init(void) +{ + int r; + + r = dm_register_target(&era_target); + if (r) { + DMERR("era target registration failed: %d", r); + return r; + } + + return 0; +} + +static void __exit dm_era_exit(void) +{ + dm_unregister_target(&era_target); +} + +module_init(dm_era_init); +module_exit(dm_era_exit); + +MODULE_DESCRIPTION(DM_NAME " era target"); +MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c index 08d9a207259a..b428c0ae63d5 100644 --- a/drivers/md/dm-log-userspace-transfer.c +++ b/drivers/md/dm-log-userspace-transfer.c @@ -66,7 +66,7 @@ static int dm_ulog_sendto_server(struct dm_ulog_request *tfr) msg->seq = tfr->seq; msg->len = sizeof(struct dm_ulog_request) + tfr->data_size; - r = cn_netlink_send(msg, 0, gfp_any()); + r = cn_netlink_send(msg, 0, 0, gfp_any()); return r; } diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 422a9fdeb53e..ebfa411d1a7d 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -93,10 +93,6 @@ struct multipath { unsigned pg_init_count; /* Number of times pg_init called */ unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ - unsigned queue_size; - struct work_struct process_queued_ios; - struct list_head queued_ios; - struct work_struct 
trigger_event; /* @@ -121,9 +117,9 @@ typedef int (*action_fn) (struct pgpath *pgpath); static struct kmem_cache *_mpio_cache; static struct workqueue_struct *kmultipathd, *kmpath_handlerd; -static void process_queued_ios(struct work_struct *work); static void trigger_event(struct work_struct *work); static void activate_path(struct work_struct *work); +static int __pgpath_busy(struct pgpath *pgpath); /*----------------------------------------------- @@ -195,11 +191,9 @@ static struct multipath *alloc_multipath(struct dm_target *ti) m = kzalloc(sizeof(*m), GFP_KERNEL); if (m) { INIT_LIST_HEAD(&m->priority_groups); - INIT_LIST_HEAD(&m->queued_ios); spin_lock_init(&m->lock); m->queue_io = 1; m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; - INIT_WORK(&m->process_queued_ios, process_queued_ios); INIT_WORK(&m->trigger_event, trigger_event); init_waitqueue_head(&m->pg_init_wait); mutex_init(&m->work_mutex); @@ -256,13 +250,21 @@ static void clear_mapinfo(struct multipath *m, union map_info *info) * Path selection *-----------------------------------------------*/ -static void __pg_init_all_paths(struct multipath *m) +static int __pg_init_all_paths(struct multipath *m) { struct pgpath *pgpath; unsigned long pg_init_delay = 0; + if (m->pg_init_in_progress || m->pg_init_disabled) + return 0; + m->pg_init_count++; m->pg_init_required = 0; + + /* Check here to reset pg_init_required */ + if (!m->current_pg) + return 0; + if (m->pg_init_delay_retry) pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ? m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS); @@ -274,6 +276,7 @@ static void __pg_init_all_paths(struct multipath *m) pg_init_delay)) m->pg_init_in_progress++; } + return m->pg_init_in_progress; } static void __switch_pg(struct multipath *m, struct pgpath *pgpath) @@ -365,19 +368,26 @@ failed: */ static int __must_push_back(struct multipath *m) { - return (m->queue_if_no_path != m->saved_queue_if_no_path && - dm_noflush_suspending(m->ti)); + return (m->queue_if_no_path || + (m->queue_if_no_path != m->saved_queue_if_no_path && + dm_noflush_suspending(m->ti))); } -static int map_io(struct multipath *m, struct request *clone, - union map_info *map_context, unsigned was_queued) +#define pg_ready(m) (!(m)->queue_io && !(m)->pg_init_required) + +/* + * Map cloned requests + */ +static int multipath_map(struct dm_target *ti, struct request *clone, + union map_info *map_context) { - int r = DM_MAPIO_REMAPPED; + struct multipath *m = (struct multipath *) ti->private; + int r = DM_MAPIO_REQUEUE; size_t nr_bytes = blk_rq_bytes(clone); unsigned long flags; struct pgpath *pgpath; struct block_device *bdev; - struct dm_mpath_io *mpio = map_context->ptr; + struct dm_mpath_io *mpio; spin_lock_irqsave(&m->lock, flags); @@ -388,38 +398,33 @@ static int map_io(struct multipath *m, struct request *clone, pgpath = m->current_pgpath; - if (was_queued) - m->queue_size--; - - if (m->pg_init_required) { - if (!m->pg_init_in_progress) - queue_work(kmultipathd, &m->process_queued_ios); - r = DM_MAPIO_REQUEUE; - } else if ((pgpath && m->queue_io) || - (!pgpath && m->queue_if_no_path)) { - /* Queue for the daemon to resubmit */ - list_add_tail(&clone->queuelist, &m->queued_ios); - m->queue_size++; - if (!m->queue_io) - queue_work(kmultipathd, &m->process_queued_ios); - pgpath = NULL; - r = DM_MAPIO_SUBMITTED; - } else if (pgpath) { - bdev = pgpath->path.dev->bdev; - clone->q = bdev_get_queue(bdev); - clone->rq_disk = bdev->bd_disk; - } else if (__must_push_back(m)) - r = DM_MAPIO_REQUEUE; - 
else - r = -EIO; /* Failed */ + if (!pgpath) { + if (!__must_push_back(m)) + r = -EIO; /* Failed */ + goto out_unlock; + } + if (!pg_ready(m)) { + __pg_init_all_paths(m); + goto out_unlock; + } + if (set_mapinfo(m, map_context) < 0) + /* ENOMEM, requeue */ + goto out_unlock; + bdev = pgpath->path.dev->bdev; + clone->q = bdev_get_queue(bdev); + clone->rq_disk = bdev->bd_disk; + clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; + mpio = map_context->ptr; mpio->pgpath = pgpath; mpio->nr_bytes = nr_bytes; - - if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io) - pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path, + if (pgpath->pg->ps.type->start_io) + pgpath->pg->ps.type->start_io(&pgpath->pg->ps, + &pgpath->path, nr_bytes); + r = DM_MAPIO_REMAPPED; +out_unlock: spin_unlock_irqrestore(&m->lock, flags); return r; @@ -440,74 +445,12 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path, else m->saved_queue_if_no_path = queue_if_no_path; m->queue_if_no_path = queue_if_no_path; - if (!m->queue_if_no_path && m->queue_size) - queue_work(kmultipathd, &m->process_queued_ios); - - spin_unlock_irqrestore(&m->lock, flags); - - return 0; -} - -/*----------------------------------------------------------------- - * The multipath daemon is responsible for resubmitting queued ios. - *---------------------------------------------------------------*/ - -static void dispatch_queued_ios(struct multipath *m) -{ - int r; - unsigned long flags; - union map_info *info; - struct request *clone, *n; - LIST_HEAD(cl); - - spin_lock_irqsave(&m->lock, flags); - list_splice_init(&m->queued_ios, &cl); spin_unlock_irqrestore(&m->lock, flags); - list_for_each_entry_safe(clone, n, &cl, queuelist) { - list_del_init(&clone->queuelist); - - info = dm_get_rq_mapinfo(clone); - - r = map_io(m, clone, info, 1); - if (r < 0) { - clear_mapinfo(m, info); - dm_kill_unmapped_request(clone, r); - } else if (r == DM_MAPIO_REMAPPED) - dm_dispatch_request(clone); - else if (r == DM_MAPIO_REQUEUE) { - clear_mapinfo(m, info); - dm_requeue_unmapped_request(clone); - } - } -} - -static void process_queued_ios(struct work_struct *work) -{ - struct multipath *m = - container_of(work, struct multipath, process_queued_ios); - struct pgpath *pgpath = NULL; - unsigned must_queue = 1; - unsigned long flags; - - spin_lock_irqsave(&m->lock, flags); - - if (!m->current_pgpath) - __choose_pgpath(m, 0); - - pgpath = m->current_pgpath; + if (!queue_if_no_path) + dm_table_run_md_queue_async(m->ti->table); - if ((pgpath && !m->queue_io) || - (!pgpath && !m->queue_if_no_path)) - must_queue = 0; - - if (m->pg_init_required && !m->pg_init_in_progress && pgpath && - !m->pg_init_disabled) - __pg_init_all_paths(m); - - spin_unlock_irqrestore(&m->lock, flags); - if (!must_queue) - dispatch_queued_ios(m); + return 0; } /* @@ -972,27 +915,6 @@ static void multipath_dtr(struct dm_target *ti) } /* - * Map cloned requests - */ -static int multipath_map(struct dm_target *ti, struct request *clone, - union map_info *map_context) -{ - int r; - struct multipath *m = (struct multipath *) ti->private; - - if (set_mapinfo(m, map_context) < 0) - /* ENOMEM, requeue */ - return DM_MAPIO_REQUEUE; - - clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; - r = map_io(m, clone, map_context, 0); - if (r < 0 || r == DM_MAPIO_REQUEUE) - clear_mapinfo(m, map_context); - - return r; -} - -/* * Take a path out of use. 
*/ static int fail_path(struct pgpath *pgpath) @@ -1032,7 +954,7 @@ out: */ static int reinstate_path(struct pgpath *pgpath) { - int r = 0; + int r = 0, run_queue = 0; unsigned long flags; struct multipath *m = pgpath->pg->m; @@ -1054,9 +976,9 @@ static int reinstate_path(struct pgpath *pgpath) pgpath->is_active = 1; - if (!m->nr_valid_paths++ && m->queue_size) { + if (!m->nr_valid_paths++) { m->current_pgpath = NULL; - queue_work(kmultipathd, &m->process_queued_ios); + run_queue = 1; } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) m->pg_init_in_progress++; @@ -1069,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath) out: spin_unlock_irqrestore(&m->lock, flags); + if (run_queue) + dm_table_run_md_queue_async(m->ti->table); return r; } @@ -1252,11 +1176,12 @@ static void pg_init_done(void *data, int errors) /* Activations of other paths are still on going */ goto out; - if (!m->pg_init_required) - m->queue_io = 0; - - m->pg_init_delay_retry = delay_retry; - queue_work(kmultipathd, &m->process_queued_ios); + if (m->pg_init_required) { + m->pg_init_delay_retry = delay_retry; + if (__pg_init_all_paths(m)) + goto out; + } + m->queue_io = 0; /* * Wake up any thread waiting to suspend. @@ -1272,8 +1197,11 @@ static void activate_path(struct work_struct *work) struct pgpath *pgpath = container_of(work, struct pgpath, activate_path.work); - scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), - pg_init_done, pgpath); + if (pgpath->is_active) + scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), + pg_init_done, pgpath); + else + pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED); } static int noretry_error(int error) @@ -1433,7 +1361,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type, /* Features */ if (type == STATUSTYPE_INFO) - DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count); + DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count); else { DMEMIT("%u ", m->queue_if_no_path + (m->pg_init_retries > 0) * 2 + @@ -1552,7 +1480,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) } if (argc != 2) { - DMWARN("Unrecognised multipath message received."); + DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc); goto out; } @@ -1570,7 +1498,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) else if (!strcasecmp(argv[0], "fail_path")) action = fail_path; else { - DMWARN("Unrecognised multipath message received."); + DMWARN("Unrecognised multipath message received: %s", argv[0]); goto out; } @@ -1632,8 +1560,17 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, r = err; } - if (r == -ENOTCONN && !fatal_signal_pending(current)) - queue_work(kmultipathd, &m->process_queued_ios); + if (r == -ENOTCONN && !fatal_signal_pending(current)) { + spin_lock_irqsave(&m->lock, flags); + if (!m->current_pg) { + /* Path status changed, redo selection */ + __choose_pgpath(m, 0); + } + if (m->pg_init_required) + __pg_init_all_paths(m); + spin_unlock_irqrestore(&m->lock, flags); + dm_table_run_md_queue_async(m->ti->table); + } return r ? 
: __blkdev_driver_ioctl(bdev, mode, cmd, arg); } @@ -1684,7 +1621,7 @@ static int multipath_busy(struct dm_target *ti) spin_lock_irqsave(&m->lock, flags); /* pg_init in progress, requeue until done */ - if (m->pg_init_in_progress) { + if (!pg_ready(m)) { busy = 1; goto out; } @@ -1737,7 +1674,7 @@ out: *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", - .version = {1, 6, 0}, + .version = {1, 7, 0}, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 6a7f2b83a126..50601ec7017a 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -945,7 +945,7 @@ bool dm_table_request_based(struct dm_table *t) return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED; } -int dm_table_alloc_md_mempools(struct dm_table *t) +static int dm_table_alloc_md_mempools(struct dm_table *t) { unsigned type = dm_table_get_type(t); unsigned per_bio_data_size = 0; @@ -1618,6 +1618,25 @@ struct mapped_device *dm_table_get_md(struct dm_table *t) } EXPORT_SYMBOL(dm_table_get_md); +void dm_table_run_md_queue_async(struct dm_table *t) +{ + struct mapped_device *md; + struct request_queue *queue; + unsigned long flags; + + if (!dm_table_request_based(t)) + return; + + md = dm_table_get_md(t); + queue = dm_get_md_queue(md); + if (queue) { + spin_lock_irqsave(queue->queue_lock, flags); + blk_run_queue_async(queue); + spin_unlock_irqrestore(queue->queue_lock, flags); + } +} +EXPORT_SYMBOL(dm_table_run_md_queue_async); + static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index fb9efc829182..b086a945edcb 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -192,6 +192,13 @@ struct dm_pool_metadata { * operation possible in this state is the closing of the device. */ bool fail_io:1; + + /* + * Reading the space map roots can fail, so we read it into these + * buffers before the superblock is locked and updated. 
+ */ + __u8 data_space_map_root[SPACE_MAP_ROOT_SIZE]; + __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE]; }; struct dm_thin_device { @@ -431,26 +438,53 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd) pmd->details_info.value_type.equal = NULL; } +static int save_sm_roots(struct dm_pool_metadata *pmd) +{ + int r; + size_t len; + + r = dm_sm_root_size(pmd->metadata_sm, &len); + if (r < 0) + return r; + + r = dm_sm_copy_root(pmd->metadata_sm, &pmd->metadata_space_map_root, len); + if (r < 0) + return r; + + r = dm_sm_root_size(pmd->data_sm, &len); + if (r < 0) + return r; + + return dm_sm_copy_root(pmd->data_sm, &pmd->data_space_map_root, len); +} + +static void copy_sm_roots(struct dm_pool_metadata *pmd, + struct thin_disk_superblock *disk) +{ + memcpy(&disk->metadata_space_map_root, + &pmd->metadata_space_map_root, + sizeof(pmd->metadata_space_map_root)); + + memcpy(&disk->data_space_map_root, + &pmd->data_space_map_root, + sizeof(pmd->data_space_map_root)); +} + static int __write_initial_superblock(struct dm_pool_metadata *pmd) { int r; struct dm_block *sblock; - size_t metadata_len, data_len; struct thin_disk_superblock *disk_super; sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT; if (bdev_size > THIN_METADATA_MAX_SECTORS) bdev_size = THIN_METADATA_MAX_SECTORS; - r = dm_sm_root_size(pmd->metadata_sm, &metadata_len); - if (r < 0) - return r; - - r = dm_sm_root_size(pmd->data_sm, &data_len); + r = dm_sm_commit(pmd->data_sm); if (r < 0) return r; - r = dm_sm_commit(pmd->data_sm); + r = save_sm_roots(pmd); if (r < 0) return r; @@ -471,15 +505,7 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd) disk_super->trans_id = 0; disk_super->held_root = 0; - r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root, - metadata_len); - if (r < 0) - goto bad_locked; - - r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root, - data_len); - if (r < 0) - goto bad_locked; + copy_sm_roots(pmd, disk_super); disk_super->data_mapping_root = cpu_to_le64(pmd->root); disk_super->device_details_root = cpu_to_le64(pmd->details_root); @@ -488,10 +514,6 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd) disk_super->data_block_size = cpu_to_le32(pmd->data_block_size); return dm_tm_commit(pmd->tm, sblock); - -bad_locked: - dm_bm_unlock(sblock); - return r; } static int __format_metadata(struct dm_pool_metadata *pmd) @@ -769,6 +791,10 @@ static int __commit_transaction(struct dm_pool_metadata *pmd) if (r < 0) return r; + r = save_sm_roots(pmd); + if (r < 0) + return r; + r = superblock_lock(pmd, &sblock); if (r) return r; @@ -780,21 +806,9 @@ static int __commit_transaction(struct dm_pool_metadata *pmd) disk_super->trans_id = cpu_to_le64(pmd->trans_id); disk_super->flags = cpu_to_le32(pmd->flags); - r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root, - metadata_len); - if (r < 0) - goto out_locked; - - r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root, - data_len); - if (r < 0) - goto out_locked; + copy_sm_roots(pmd, disk_super); return dm_tm_commit(pmd->tm, sblock); - -out_locked: - dm_bm_unlock(sblock); - return r; } struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index be70d38745f7..242ac2ea5f29 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -12,9 +12,11 @@ #include <linux/dm-io.h> #include <linux/dm-kcopyd.h> #include <linux/list.h> +#include <linux/rculist.h> #include 
<linux/init.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/rbtree.h> #define DM_MSG_PREFIX "thin" @@ -25,6 +27,9 @@ #define MAPPING_POOL_SIZE 1024 #define PRISON_CELLS 1024 #define COMMIT_PERIOD HZ +#define NO_SPACE_TIMEOUT_SECS 60 + +static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS; DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, "A percentage of time allocated for copy on write"); @@ -173,17 +178,16 @@ struct pool { struct workqueue_struct *wq; struct work_struct worker; struct delayed_work waker; + struct delayed_work no_space_timeout; unsigned long last_commit_jiffies; unsigned ref_count; spinlock_t lock; - struct bio_list deferred_bios; struct bio_list deferred_flush_bios; struct list_head prepared_mappings; struct list_head prepared_discards; - - struct bio_list retry_on_resume_list; + struct list_head active_thins; struct dm_deferred_set *shared_read_ds; struct dm_deferred_set *all_io_ds; @@ -220,6 +224,7 @@ struct pool_c { * Target context for a thin. */ struct thin_c { + struct list_head list; struct dm_dev *pool_dev; struct dm_dev *origin_dev; dm_thin_id dev_id; @@ -227,6 +232,17 @@ struct thin_c { struct pool *pool; struct dm_thin_device *td; bool requeue_mode:1; + spinlock_t lock; + struct bio_list deferred_bio_list; + struct bio_list retry_on_resume_list; + struct rb_root sort_bio_list; /* sorted list of deferred bios */ + + /* + * Ensures the thin is not destroyed until the worker has finished + * iterating the active_thins list. + */ + atomic_t refcount; + struct completion can_destroy; }; /*----------------------------------------------------------------*/ @@ -287,9 +303,9 @@ static void cell_defer_no_holder_no_free(struct thin_c *tc, struct pool *pool = tc->pool; unsigned long flags; - spin_lock_irqsave(&pool->lock, flags); - dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios); - spin_unlock_irqrestore(&pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); + dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list); + spin_unlock_irqrestore(&tc->lock, flags); wake_worker(pool); } @@ -368,6 +384,7 @@ struct dm_thin_endio_hook { struct dm_deferred_entry *shared_read_entry; struct dm_deferred_entry *all_io_entry; struct dm_thin_new_mapping *overwrite_mapping; + struct rb_node rb_node; }; static void requeue_bio_list(struct thin_c *tc, struct bio_list *master) @@ -378,30 +395,22 @@ static void requeue_bio_list(struct thin_c *tc, struct bio_list *master) bio_list_init(&bios); - spin_lock_irqsave(&tc->pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); bio_list_merge(&bios, master); bio_list_init(master); - spin_unlock_irqrestore(&tc->pool->lock, flags); + spin_unlock_irqrestore(&tc->lock, flags); - while ((bio = bio_list_pop(&bios))) { - struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); - - if (h->tc == tc) - bio_endio(bio, DM_ENDIO_REQUEUE); - else - bio_list_add(master, bio); - } + while ((bio = bio_list_pop(&bios))) + bio_endio(bio, DM_ENDIO_REQUEUE); } static void requeue_io(struct thin_c *tc) { - struct pool *pool = tc->pool; - - requeue_bio_list(tc, &pool->deferred_bios); - requeue_bio_list(tc, &pool->retry_on_resume_list); + requeue_bio_list(tc, &tc->deferred_bio_list); + requeue_bio_list(tc, &tc->retry_on_resume_list); } -static void error_retry_list(struct pool *pool) +static void error_thin_retry_list(struct thin_c *tc) { struct bio *bio; unsigned long flags; @@ -409,15 +418,25 @@ static void error_retry_list(struct pool *pool) 
bio_list_init(&bios); - spin_lock_irqsave(&pool->lock, flags); - bio_list_merge(&bios, &pool->retry_on_resume_list); - bio_list_init(&pool->retry_on_resume_list); - spin_unlock_irqrestore(&pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); + bio_list_merge(&bios, &tc->retry_on_resume_list); + bio_list_init(&tc->retry_on_resume_list); + spin_unlock_irqrestore(&tc->lock, flags); while ((bio = bio_list_pop(&bios))) bio_io_error(bio); } +static void error_retry_list(struct pool *pool) +{ + struct thin_c *tc; + + rcu_read_lock(); + list_for_each_entry_rcu(tc, &pool->active_thins, list) + error_thin_retry_list(tc); + rcu_read_unlock(); +} + /* * This section of code contains the logic for processing a thin device's IO. * Much of the code depends on pool object resources (lists, workqueues, etc) @@ -608,9 +627,9 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell) struct pool *pool = tc->pool; unsigned long flags; - spin_lock_irqsave(&pool->lock, flags); - cell_release(pool, cell, &pool->deferred_bios); - spin_unlock_irqrestore(&tc->pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); + cell_release(pool, cell, &tc->deferred_bio_list); + spin_unlock_irqrestore(&tc->lock, flags); wake_worker(pool); } @@ -623,9 +642,9 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c struct pool *pool = tc->pool; unsigned long flags; - spin_lock_irqsave(&pool->lock, flags); - cell_release_no_holder(pool, cell, &pool->deferred_bios); - spin_unlock_irqrestore(&pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); + cell_release_no_holder(pool, cell, &tc->deferred_bio_list); + spin_unlock_irqrestore(&tc->lock, flags); wake_worker(pool); } @@ -920,7 +939,7 @@ static int commit(struct pool *pool) { int r; - if (get_pool_mode(pool) != PM_WRITE) + if (get_pool_mode(pool) >= PM_READ_ONLY) return -EINVAL; r = dm_pool_commit_metadata(pool->pmd); @@ -1001,12 +1020,11 @@ static void retry_on_resume(struct bio *bio) { struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); struct thin_c *tc = h->tc; - struct pool *pool = tc->pool; unsigned long flags; - spin_lock_irqsave(&pool->lock, flags); - bio_list_add(&pool->retry_on_resume_list, bio); - spin_unlock_irqrestore(&pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); + bio_list_add(&tc->retry_on_resume_list, bio); + spin_unlock_irqrestore(&tc->lock, flags); } static bool should_error_unserviceable_bio(struct pool *pool) @@ -1363,38 +1381,111 @@ static int need_commit_due_to_time(struct pool *pool) jiffies > pool->last_commit_jiffies + COMMIT_PERIOD; } -static void process_deferred_bios(struct pool *pool) +#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node) +#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook)) + +static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio) +{ + struct rb_node **rbp, *parent; + struct dm_thin_endio_hook *pbd; + sector_t bi_sector = bio->bi_iter.bi_sector; + + rbp = &tc->sort_bio_list.rb_node; + parent = NULL; + while (*rbp) { + parent = *rbp; + pbd = thin_pbd(parent); + + if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector) + rbp = &(*rbp)->rb_left; + else + rbp = &(*rbp)->rb_right; + } + + pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); + rb_link_node(&pbd->rb_node, parent, rbp); + rb_insert_color(&pbd->rb_node, &tc->sort_bio_list); +} + +static void __extract_sorted_bios(struct thin_c *tc) +{ + struct rb_node *node; + struct dm_thin_endio_hook *pbd; + 
struct bio *bio; + + for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) { + pbd = thin_pbd(node); + bio = thin_bio(pbd); + + bio_list_add(&tc->deferred_bio_list, bio); + rb_erase(&pbd->rb_node, &tc->sort_bio_list); + } + + WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list)); +} + +static void __sort_thin_deferred_bios(struct thin_c *tc) { + struct bio *bio; + struct bio_list bios; + + bio_list_init(&bios); + bio_list_merge(&bios, &tc->deferred_bio_list); + bio_list_init(&tc->deferred_bio_list); + + /* Sort deferred_bio_list using rb-tree */ + while ((bio = bio_list_pop(&bios))) + __thin_bio_rb_add(tc, bio); + + /* + * Transfer the sorted bios in sort_bio_list back to + * deferred_bio_list to allow lockless submission of + * all bios. + */ + __extract_sorted_bios(tc); +} + +static void process_thin_deferred_bios(struct thin_c *tc) +{ + struct pool *pool = tc->pool; unsigned long flags; struct bio *bio; struct bio_list bios; + struct blk_plug plug; + + if (tc->requeue_mode) { + requeue_bio_list(tc, &tc->deferred_bio_list); + return; + } bio_list_init(&bios); - spin_lock_irqsave(&pool->lock, flags); - bio_list_merge(&bios, &pool->deferred_bios); - bio_list_init(&pool->deferred_bios); - spin_unlock_irqrestore(&pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); - while ((bio = bio_list_pop(&bios))) { - struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); - struct thin_c *tc = h->tc; + if (bio_list_empty(&tc->deferred_bio_list)) { + spin_unlock_irqrestore(&tc->lock, flags); + return; + } - if (tc->requeue_mode) { - bio_endio(bio, DM_ENDIO_REQUEUE); - continue; - } + __sort_thin_deferred_bios(tc); + + bio_list_merge(&bios, &tc->deferred_bio_list); + bio_list_init(&tc->deferred_bio_list); + + spin_unlock_irqrestore(&tc->lock, flags); + blk_start_plug(&plug); + while ((bio = bio_list_pop(&bios))) { /* * If we've got no free new_mapping structs, and processing * this bio might require one, we pause until there are some * prepared mappings to process. */ if (ensure_next_mapping(pool)) { - spin_lock_irqsave(&pool->lock, flags); - bio_list_merge(&pool->deferred_bios, &bios); - spin_unlock_irqrestore(&pool->lock, flags); - + spin_lock_irqsave(&tc->lock, flags); + bio_list_add(&tc->deferred_bio_list, bio); + bio_list_merge(&tc->deferred_bio_list, &bios); + spin_unlock_irqrestore(&tc->lock, flags); break; } @@ -1403,6 +1494,60 @@ static void process_deferred_bios(struct pool *pool) else pool->process_bio(tc, bio); } + blk_finish_plug(&plug); +} + +static void thin_get(struct thin_c *tc); +static void thin_put(struct thin_c *tc); + +/* + * We can't hold rcu_read_lock() around code that can block. So we + * find a thin with the rcu lock held; bump a refcount; then drop + * the lock. 
+ */ +static struct thin_c *get_first_thin(struct pool *pool) +{ + struct thin_c *tc = NULL; + + rcu_read_lock(); + if (!list_empty(&pool->active_thins)) { + tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); + thin_get(tc); + } + rcu_read_unlock(); + + return tc; +} + +static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc) +{ + struct thin_c *old_tc = tc; + + rcu_read_lock(); + list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { + thin_get(tc); + thin_put(old_tc); + rcu_read_unlock(); + return tc; + } + thin_put(old_tc); + rcu_read_unlock(); + + return NULL; +} + +static void process_deferred_bios(struct pool *pool) +{ + unsigned long flags; + struct bio *bio; + struct bio_list bios; + struct thin_c *tc; + + tc = get_first_thin(pool); + while (tc) { + process_thin_deferred_bios(tc); + tc = get_next_thin(pool, tc); + } /* * If there are any deferred flush bios, we must commit @@ -1449,6 +1594,20 @@ static void do_waker(struct work_struct *ws) queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); } +/* + * We're holding onto IO to allow userland time to react. After the + * timeout either the pool will have been resized (and thus back in + * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO. + */ +static void do_no_space_timeout(struct work_struct *ws) +{ + struct pool *pool = container_of(to_delayed_work(ws), struct pool, + no_space_timeout); + + if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) + set_pool_mode(pool, PM_READ_ONLY); +} + /*----------------------------------------------------------------*/ struct noflush_work { @@ -1484,7 +1643,7 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) { struct noflush_work w; - INIT_WORK(&w.worker, fn); + INIT_WORK_ONSTACK(&w.worker, fn); w.tc = tc; atomic_set(&w.complete, 0); init_waitqueue_head(&w.wait); @@ -1513,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) struct pool_c *pt = pool->ti->private; bool needs_check = dm_pool_metadata_needs_check(pool->pmd); enum pool_mode old_mode = get_pool_mode(pool); + unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ; /* * Never allow the pool to transition to PM_WRITE mode if user @@ -1574,6 +1734,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) pool->process_discard = process_discard; pool->process_prepared_mapping = process_prepared_mapping; pool->process_prepared_discard = process_prepared_discard_passdown; + + if (!pool->pf.error_if_no_space && no_space_timeout) + queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout); break; case PM_WRITE: @@ -1634,9 +1797,9 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio) unsigned long flags; struct pool *pool = tc->pool; - spin_lock_irqsave(&pool->lock, flags); - bio_list_add(&pool->deferred_bios, bio); - spin_unlock_irqrestore(&pool->lock, flags); + spin_lock_irqsave(&tc->lock, flags); + bio_list_add(&tc->deferred_bio_list, bio); + spin_unlock_irqrestore(&tc->lock, flags); wake_worker(pool); } @@ -1757,26 +1920,29 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits) { - int r; - unsigned long flags; struct pool_c *pt = container_of(cb, struct pool_c, callbacks); + struct request_queue *q; - spin_lock_irqsave(&pt->pool->lock, flags); - r = !bio_list_empty(&pt->pool->retry_on_resume_list); - spin_unlock_irqrestore(&pt->pool->lock, 
flags); - - if (!r) { - struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); - r = bdi_congested(&q->backing_dev_info, bdi_bits); - } + if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE) + return 1; - return r; + q = bdev_get_queue(pt->data_dev->bdev); + return bdi_congested(&q->backing_dev_info, bdi_bits); } -static void __requeue_bios(struct pool *pool) +static void requeue_bios(struct pool *pool) { - bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list); - bio_list_init(&pool->retry_on_resume_list); + unsigned long flags; + struct thin_c *tc; + + rcu_read_lock(); + list_for_each_entry_rcu(tc, &pool->active_thins, list) { + spin_lock_irqsave(&tc->lock, flags); + bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list); + bio_list_init(&tc->retry_on_resume_list); + spin_unlock_irqrestore(&tc->lock, flags); + } + rcu_read_unlock(); } /*---------------------------------------------------------------- @@ -1956,13 +2122,13 @@ static struct pool *pool_create(struct mapped_device *pool_md, INIT_WORK(&pool->worker, do_worker); INIT_DELAYED_WORK(&pool->waker, do_waker); + INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); spin_lock_init(&pool->lock); - bio_list_init(&pool->deferred_bios); bio_list_init(&pool->deferred_flush_bios); INIT_LIST_HEAD(&pool->prepared_mappings); INIT_LIST_HEAD(&pool->prepared_discards); + INIT_LIST_HEAD(&pool->active_thins); pool->low_water_triggered = false; - bio_list_init(&pool->retry_on_resume_list); pool->shared_read_ds = dm_deferred_set_create(); if (!pool->shared_read_ds) { @@ -2507,8 +2673,8 @@ static void pool_resume(struct dm_target *ti) spin_lock_irqsave(&pool->lock, flags); pool->low_water_triggered = false; - __requeue_bios(pool); spin_unlock_irqrestore(&pool->lock, flags); + requeue_bios(pool); do_waker(&pool->waker.work); } @@ -2519,6 +2685,7 @@ static void pool_postsuspend(struct dm_target *ti) struct pool *pool = pt->pool; cancel_delayed_work(&pool->waker); + cancel_delayed_work(&pool->no_space_timeout); flush_workqueue(pool->wq); (void) commit(pool); } @@ -2947,7 +3114,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 11, 0}, + .version = {1, 12, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -2965,9 +3132,29 @@ static struct target_type pool_target = { /*---------------------------------------------------------------- * Thin target methods *--------------------------------------------------------------*/ +static void thin_get(struct thin_c *tc) +{ + atomic_inc(&tc->refcount); +} + +static void thin_put(struct thin_c *tc) +{ + if (atomic_dec_and_test(&tc->refcount)) + complete(&tc->can_destroy); +} + static void thin_dtr(struct dm_target *ti) { struct thin_c *tc = ti->private; + unsigned long flags; + + thin_put(tc); + wait_for_completion(&tc->can_destroy); + + spin_lock_irqsave(&tc->pool->lock, flags); + list_del_rcu(&tc->list); + spin_unlock_irqrestore(&tc->pool->lock, flags); + synchronize_rcu(); mutex_lock(&dm_thin_pool_table.mutex); @@ -2999,6 +3186,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) struct thin_c *tc; struct dm_dev *pool_dev, *origin_dev; struct mapped_device *pool_md; + unsigned long flags; mutex_lock(&dm_thin_pool_table.mutex); @@ -3014,6 +3202,10 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) r = -ENOMEM; goto out_unlock; } + spin_lock_init(&tc->lock); + bio_list_init(&tc->deferred_bio_list); + 
bio_list_init(&tc->retry_on_resume_list); + tc->sort_bio_list = RB_ROOT; if (argc == 3) { r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev); @@ -3085,6 +3277,20 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) mutex_unlock(&dm_thin_pool_table.mutex); + atomic_set(&tc->refcount, 1); + init_completion(&tc->can_destroy); + + spin_lock_irqsave(&tc->pool->lock, flags); + list_add_tail_rcu(&tc->list, &tc->pool->active_thins); + spin_unlock_irqrestore(&tc->pool->lock, flags); + /* + * This synchronize_rcu() call is needed here otherwise we risk a + * wake_worker() call finding no bios to process (because the newly + * added tc isn't yet visible). So this reduces latency since we + * aren't then dependent on the periodic commit to wake_worker(). + */ + synchronize_rcu(); + return 0; bad_target_max_io_len: @@ -3250,7 +3456,7 @@ static int thin_iterate_devices(struct dm_target *ti, static struct target_type thin_target = { .name = "thin", - .version = {1, 11, 0}, + .version = {1, 12, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, @@ -3305,6 +3511,9 @@ static void dm_thin_exit(void) module_init(dm_thin_init); module_exit(dm_thin_exit); +module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds"); + MODULE_DESCRIPTION(DM_NAME " thin provisioning target"); MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 796007a5e0e1..7a7bab8947ae 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c @@ -330,15 +330,17 @@ test_block_hash: return r; } } - todo = 1 << v->data_dev_block_bits; - while (io->iter.bi_size) { + do { u8 *page; + unsigned len; struct bio_vec bv = bio_iter_iovec(bio, io->iter); page = kmap_atomic(bv.bv_page); - r = crypto_shash_update(desc, page + bv.bv_offset, - bv.bv_len); + len = bv.bv_len; + if (likely(len >= todo)) + len = todo; + r = crypto_shash_update(desc, page + bv.bv_offset, len); kunmap_atomic(page); if (r < 0) { @@ -346,8 +348,9 @@ test_block_hash: return r; } - bio_advance_iter(bio, &io->iter, bv.bv_len); - } + bio_advance_iter(bio, &io->iter, len); + todo -= len; + } while (todo); if (!v->version) { r = crypto_shash_update(desc, v->salt, v->salt_size); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 8c53b09b9a2c..455e64916498 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -94,13 +94,6 @@ struct dm_rq_clone_bio_info { struct bio clone; }; -union map_info *dm_get_mapinfo(struct bio *bio) -{ - if (bio && bio->bi_private) - return &((struct dm_target_io *)bio->bi_private)->info; - return NULL; -} - union map_info *dm_get_rq_mapinfo(struct request *rq) { if (rq && rq->end_io_data) @@ -475,6 +468,11 @@ sector_t dm_get_size(struct mapped_device *md) return get_capacity(md->disk); } +struct request_queue *dm_get_md_queue(struct mapped_device *md) +{ + return md->queue; +} + struct dm_stats *dm_get_stats(struct mapped_device *md) { return &md->stats; @@ -760,7 +758,7 @@ static void dec_pending(struct dm_io *io, int error) static void clone_endio(struct bio *bio, int error) { int r = 0; - struct dm_target_io *tio = bio->bi_private; + struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); struct dm_io *io = tio->io; struct mapped_device *md = tio->io->md; dm_endio_fn endio = tio->ti->type->end_io; @@ -794,7 +792,8 @@ static void clone_endio(struct bio *bio, int error) */ static void end_clone_bio(struct bio 
*clone, int error) { - struct dm_rq_clone_bio_info *info = clone->bi_private; + struct dm_rq_clone_bio_info *info = + container_of(clone, struct dm_rq_clone_bio_info, clone); struct dm_rq_target_io *tio = info->tio; struct bio *bio = info->orig; unsigned int nr_bytes = info->orig->bi_iter.bi_size; @@ -1120,7 +1119,6 @@ static void __map_bio(struct dm_target_io *tio) struct dm_target *ti = tio->ti; clone->bi_end_io = clone_endio; - clone->bi_private = tio; /* * Map the clone. If r == 0 we don't need to do @@ -1195,7 +1193,6 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci, tio->io = ci->io; tio->ti = ti; - memset(&tio->info, 0, sizeof(tio->info)); tio->target_bio_nr = target_bio_nr; return tio; @@ -1530,7 +1527,6 @@ static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, info->orig = bio_orig; info->tio = tio; bio->bi_end_io = end_clone_bio; - bio->bi_private = info; return 0; } @@ -2172,7 +2168,7 @@ static struct dm_table *__unbind(struct mapped_device *md) return NULL; dm_table_event_callback(map, NULL, NULL); - rcu_assign_pointer(md->map, NULL); + RCU_INIT_POINTER(md->map, NULL); dm_sync_table(md); return map; @@ -2873,8 +2869,6 @@ static const struct block_device_operations dm_blk_dops = { .owner = THIS_MODULE }; -EXPORT_SYMBOL(dm_get_mapinfo); - /* * module hooks */ diff --git a/drivers/md/dm.h b/drivers/md/dm.h index c4569f02f50f..ed76126aac54 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -73,7 +73,6 @@ unsigned dm_table_get_type(struct dm_table *t); struct target_type *dm_table_get_immutable_target_type(struct dm_table *t); bool dm_table_request_based(struct dm_table *t); bool dm_table_supports_discards(struct dm_table *t); -int dm_table_alloc_md_mempools(struct dm_table *t); void dm_table_free_md_mempools(struct dm_table *t); struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); @@ -189,6 +188,7 @@ int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only int dm_cancel_deferred_remove(struct mapped_device *md); int dm_request_based(struct mapped_device *md); sector_t dm_get_size(struct mapped_device *md); +struct request_queue *dm_get_md_queue(struct mapped_device *md); struct dm_stats *dm_get_stats(struct mapped_device *md); int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, diff --git a/drivers/md/md.c b/drivers/md/md.c index 4ad5cc4e63e8..2382cfc9bb3f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5181,32 +5181,6 @@ static int restart_array(struct mddev *mddev) return 0; } -/* similar to deny_write_access, but accounts for our holding a reference - * to the file ourselves */ -static int deny_bitmap_write_access(struct file * file) -{ - struct inode *inode = file->f_mapping->host; - - spin_lock(&inode->i_lock); - if (atomic_read(&inode->i_writecount) > 1) { - spin_unlock(&inode->i_lock); - return -ETXTBSY; - } - atomic_set(&inode->i_writecount, -1); - spin_unlock(&inode->i_lock); - - return 0; -} - -void restore_bitmap_write_access(struct file *file) -{ - struct inode *inode = file->f_mapping->host; - - spin_lock(&inode->i_lock); - atomic_set(&inode->i_writecount, 1); - spin_unlock(&inode->i_lock); -} - static void md_clean(struct mddev *mddev) { mddev->array_sectors = 0; @@ -5427,7 +5401,6 @@ static int do_md_stop(struct mddev * mddev, int mode, bitmap_destroy(mddev); if (mddev->bitmap_info.file) { - restore_bitmap_write_access(mddev->bitmap_info.file); fput(mddev->bitmap_info.file); mddev->bitmap_info.file = NULL; } @@ -5979,7 +5952,7 @@ abort_export: static int 
set_bitmap_file(struct mddev *mddev, int fd) { - int err; + int err = 0; if (mddev->pers) { if (!mddev->pers->quiesce) @@ -5991,6 +5964,7 @@ static int set_bitmap_file(struct mddev *mddev, int fd) if (fd >= 0) { + struct inode *inode; if (mddev->bitmap) return -EEXIST; /* cannot add when bitmap is present */ mddev->bitmap_info.file = fget(fd); @@ -6001,10 +5975,21 @@ return -EBADF; } - err = deny_bitmap_write_access(mddev->bitmap_info.file); - if (err) { + inode = mddev->bitmap_info.file->f_mapping->host; + if (!S_ISREG(inode->i_mode)) { + printk(KERN_ERR "%s: error: bitmap file must be a regular file\n", + mdname(mddev)); + err = -EBADF; + } else if (!(mddev->bitmap_info.file->f_mode & FMODE_WRITE)) { + printk(KERN_ERR "%s: error: bitmap file must be opened for write\n", + mdname(mddev)); + err = -EBADF; + } else if (atomic_read(&inode->i_writecount) != 1) { printk(KERN_ERR "%s: error: bitmap file is already in use\n", mdname(mddev)); + err = -EBUSY; + } + if (err) { fput(mddev->bitmap_info.file); mddev->bitmap_info.file = NULL; return err; @@ -6027,10 +6012,8 @@ mddev->pers->quiesce(mddev, 0); } if (fd < 0) { - if (mddev->bitmap_info.file) { - restore_bitmap_write_access(mddev->bitmap_info.file); + if (mddev->bitmap_info.file) fput(mddev->bitmap_info.file); - } mddev->bitmap_info.file = NULL; } @@ -7182,11 +7165,14 @@ static int md_seq_open(struct inode *inode, struct file *file) return error; } +static int md_unloading; static unsigned int mdstat_poll(struct file *filp, poll_table *wait) { struct seq_file *seq = filp->private_data; int mask; + if (md_unloading) + return POLLIN|POLLRDNORM|POLLERR|POLLPRI; poll_wait(filp, &md_event_waiters, wait); /* always allow read */ @@ -7395,8 +7381,10 @@ void md_do_sync(struct md_thread *thread) /* just in case thread restarts... 
*/ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) return; - if (mddev->ro) /* never try to sync a read-only array */ + if (mddev->ro) {/* never try to sync a read-only array */ + set_bit(MD_RECOVERY_INTR, &mddev->recovery); return; + } if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { @@ -7838,6 +7826,7 @@ void md_check_recovery(struct mddev *mddev) /* There is no thread, but we need to call * ->spare_active and clear saved_raid_disk */ + set_bit(MD_RECOVERY_INTR, &mddev->recovery); md_reap_sync_thread(mddev); clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); goto unlock; @@ -8530,7 +8519,8 @@ static int md_notify_reboot(struct notifier_block *this, if (mddev_trylock(mddev)) { if (mddev->pers) __md_stop_writes(mddev); - mddev->safemode = 2; + if (mddev->persistent) + mddev->safemode = 2; mddev_unlock(mddev); } need_delay = 1; @@ -8672,6 +8662,7 @@ static __exit void md_exit(void) { struct mddev *mddev; struct list_head *tmp; + int delay = 1; blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS); blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS); @@ -8680,7 +8671,19 @@ static __exit void md_exit(void) unregister_blkdev(mdp_major, "mdp"); unregister_reboot_notifier(&md_notifier); unregister_sysctl_table(raid_table_header); + + /* We cannot unload the modules while some process is + * waiting for us in select() or poll() - wake them up + */ + md_unloading = 1; + while (waitqueue_active(&md_event_waiters)) { + /* not safe to leave yet */ + wake_up(&md_event_waiters); + msleep(delay); + delay += delay; + } remove_proc_entry("mdstat", NULL); + for_each_mddev(mddev, tmp) { export_array(mddev); mddev->hold_active = 0; diff --git a/drivers/md/md.h b/drivers/md/md.h index 07bba96de260..a49d991f3fe1 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -605,7 +605,6 @@ extern int md_check_no_bitmap(struct mddev *mddev); extern int md_integrity_register(struct mddev *mddev); extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev); extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale); -extern void restore_bitmap_write_access(struct file *file); extern void mddev_init(struct mddev *mddev); extern int md_run(struct mddev *mddev); diff --git a/drivers/md/persistent-data/dm-bitset.c b/drivers/md/persistent-data/dm-bitset.c index cd9a86d4cdf0..36f7cc2c7109 100644 --- a/drivers/md/persistent-data/dm-bitset.c +++ b/drivers/md/persistent-data/dm-bitset.c @@ -65,7 +65,7 @@ int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root, int r; __le64 value; - if (!info->current_index_set) + if (!info->current_index_set || !info->dirty) return 0; value = cpu_to_le64(info->current_bits); @@ -77,6 +77,8 @@ int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root, return r; info->current_index_set = false; + info->dirty = false; + return 0; } EXPORT_SYMBOL_GPL(dm_bitset_flush); @@ -94,6 +96,8 @@ static int read_bits(struct dm_disk_bitset *info, dm_block_t root, info->current_bits = le64_to_cpu(value); info->current_index_set = true; info->current_index = array_index; + info->dirty = false; + return 0; } @@ -126,6 +130,8 @@ int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root, return r; set_bit(b, (unsigned long *) &info->current_bits); + info->dirty = true; + return 0; } EXPORT_SYMBOL_GPL(dm_bitset_set_bit); @@ -141,6 +147,8 @@ int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root, return r; clear_bit(b, (unsigned long *) &info->current_bits); + info->dirty = 
true; + return 0; } EXPORT_SYMBOL_GPL(dm_bitset_clear_bit); diff --git a/drivers/md/persistent-data/dm-bitset.h b/drivers/md/persistent-data/dm-bitset.h index e1b9bea14aa1..c2287d672ef5 100644 --- a/drivers/md/persistent-data/dm-bitset.h +++ b/drivers/md/persistent-data/dm-bitset.h @@ -71,6 +71,7 @@ struct dm_disk_bitset { uint64_t current_bits; bool current_index_set:1; + bool dirty:1; }; /* diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index 455f79279a16..087411c95ffc 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -595,25 +595,14 @@ int dm_bm_unlock(struct dm_block *b) } EXPORT_SYMBOL_GPL(dm_bm_unlock); -int dm_bm_flush_and_unlock(struct dm_block_manager *bm, - struct dm_block *superblock) +int dm_bm_flush(struct dm_block_manager *bm) { - int r; - if (bm->read_only) return -EPERM; - r = dm_bufio_write_dirty_buffers(bm->bufio); - if (unlikely(r)) { - dm_bm_unlock(superblock); - return r; - } - - dm_bm_unlock(superblock); - return dm_bufio_write_dirty_buffers(bm->bufio); } -EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock); +EXPORT_SYMBOL_GPL(dm_bm_flush); void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b) { diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h index 13cd58e1fe69..1b95dfc17786 100644 --- a/drivers/md/persistent-data/dm-block-manager.h +++ b/drivers/md/persistent-data/dm-block-manager.h @@ -105,8 +105,7 @@ int dm_bm_unlock(struct dm_block *b); * * This method always blocks. */ -int dm_bm_flush_and_unlock(struct dm_block_manager *bm, - struct dm_block *superblock); +int dm_bm_flush(struct dm_block_manager *bm); /* * Request data is prefetched into the cache. diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c index 81da1a26042e..3bc30a0ae3d6 100644 --- a/drivers/md/persistent-data/dm-transaction-manager.c +++ b/drivers/md/persistent-data/dm-transaction-manager.c @@ -154,7 +154,7 @@ int dm_tm_pre_commit(struct dm_transaction_manager *tm) if (r < 0) return r; - return 0; + return dm_bm_flush(tm->bm); } EXPORT_SYMBOL_GPL(dm_tm_pre_commit); @@ -164,8 +164,9 @@ int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root) return -EWOULDBLOCK; wipe_shadow_table(tm); + dm_bm_unlock(root); - return dm_bm_flush_and_unlock(tm->bm, root); + return dm_bm_flush(tm->bm); } EXPORT_SYMBOL_GPL(dm_tm_commit); diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h index b5b139076ca5..2772ed2a781a 100644 --- a/drivers/md/persistent-data/dm-transaction-manager.h +++ b/drivers/md/persistent-data/dm-transaction-manager.h @@ -38,18 +38,17 @@ struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transac /* * We use a 2-phase commit here. * - * i) In the first phase the block manager is told to start flushing, and - * the changes to the space map are written to disk. You should interrogate - * your particular space map to get detail of its root node etc. to be - * included in your superblock. + * i) Make all changes for the transaction *except* for the superblock. + * Then call dm_tm_pre_commit() to flush them to disk. * - * ii) @root will be committed last. You shouldn't use more than the - * first 512 bytes of @root if you wish the transaction to survive a power - * failure. You *must* have a write lock held on @root for both stage (i) - * and (ii). 
The commit will drop the write lock. + * ii) Lock your superblock. Update it. Then call dm_tm_commit() which will + * unlock the superblock and flush it. No other blocks should be updated + * during this period. Care should be taken to never unlock a partially + * updated superblock; perform any operations that could fail *before* you + * take the superblock lock. */ int dm_tm_pre_commit(struct dm_transaction_manager *tm); -int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root); +int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *superblock); /* * These methods are the only way to get hold of a writeable block. diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4a6ca1cb2e78..56e24c072b62 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -97,6 +97,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) struct pool_info *pi = data; struct r1bio *r1_bio; struct bio *bio; + int need_pages; int i, j; r1_bio = r1bio_pool_alloc(gfp_flags, pi); @@ -119,15 +120,15 @@ * RESYNC_PAGES for each bio. */ if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) - j = pi->raid_disks; + need_pages = pi->raid_disks; else - j = 1; - while(j--) { + need_pages = 1; + for (j = 0; j < need_pages; j++) { bio = r1_bio->bios[j]; bio->bi_vcnt = RESYNC_PAGES; if (bio_alloc_pages(bio, gfp_flags)) - goto out_free_bio; + goto out_free_pages; } /* If not user-requested, copy the page pointers to all bios */ if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) { @@ -141,6 +142,14 @@ return r1_bio; +out_free_pages: + while (--j >= 0) { + struct bio_vec *bv; + + bio_for_each_segment_all(bv, r1_bio->bios[j], i) + __free_page(bv->bv_page); + } + out_free_bio: while (++j < pi->raid_disks) bio_put(r1_bio->bios[j]); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 33fc408e5eac..cb882aae9e20 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1172,6 +1172,13 @@ static void __make_request(struct mddev *mddev, struct bio *bio) int max_sectors; int sectors; + /* + * Register the new request and wait if the reconstruction + * thread has put up a bar for new requests. + * Continue immediately if no resync is active currently. + */ + wait_barrier(conf); + sectors = bio_sectors(bio); while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && bio->bi_iter.bi_sector < conf->reshape_progress && @@ -1552,12 +1559,6 @@ static void make_request(struct mddev *mddev, struct bio *bio) md_write_start(mddev, bio); - /* - * Register the new request and wait if the reconstruction - * thread has put up a bar for new requests. - * Continue immediately if no resync is active currently. 
- */ - wait_barrier(conf); do { diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 16f5c21963db..ad1b9bea446e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -679,14 +679,9 @@ get_active_stripe(struct r5conf *conf, sector_t sector, init_stripe(sh, sector, previous); atomic_inc(&sh->count); } - } else { + } else if (!atomic_inc_not_zero(&sh->count)) { spin_lock(&conf->device_lock); - if (atomic_read(&sh->count)) { - BUG_ON(!list_empty(&sh->lru) - && !test_bit(STRIPE_EXPANDING, &sh->state) - && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state) - ); - } else { + if (!atomic_read(&sh->count)) { if (!test_bit(STRIPE_HANDLE, &sh->state)) atomic_inc(&conf->active_stripes); BUG_ON(list_empty(&sh->lru) && @@ -4375,8 +4370,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) sh->group = NULL; } list_del_init(&sh->lru); - atomic_inc(&sh->count); - BUG_ON(atomic_read(&sh->count) != 1); + BUG_ON(atomic_inc_return(&sh->count) != 1); return sh; } @@ -4552,6 +4546,8 @@ static void make_request(struct mddev *mddev, struct bio * bi) struct stripe_head *sh; const int rw = bio_data_dir(bi); int remaining; + DEFINE_WAIT(w); + bool do_prepare; if (unlikely(bi->bi_rw & REQ_FLUSH)) { md_flush_request(mddev, bi); @@ -4575,15 +4571,18 @@ static void make_request(struct mddev *mddev, struct bio * bi) bi->bi_next = NULL; bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ + prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { - DEFINE_WAIT(w); int previous; int seq; + do_prepare = false; retry: seq = read_seqcount_begin(&conf->gen_lock); previous = 0; - prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); + if (do_prepare) + prepare_to_wait(&conf->wait_for_overlap, &w, + TASK_UNINTERRUPTIBLE); if (unlikely(conf->reshape_progress != MaxSector)) { /* spinlock is needed as reshape_progress may be * 64bit on a 32bit platform, and so it might be @@ -4604,6 +4603,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) : logical_sector >= conf->reshape_safe) { spin_unlock_irq(&conf->device_lock); schedule(); + do_prepare = true; goto retry; } } @@ -4640,6 +4640,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) if (must_retry) { release_stripe(sh); schedule(); + do_prepare = true; goto retry; } } @@ -4663,8 +4664,10 @@ static void make_request(struct mddev *mddev, struct bio * bi) prepare_to_wait(&conf->wait_for_overlap, &w, TASK_INTERRUPTIBLE); if (logical_sector >= mddev->suspend_lo && - logical_sector < mddev->suspend_hi) + logical_sector < mddev->suspend_hi) { schedule(); + do_prepare = true; + } goto retry; } @@ -4677,9 +4680,9 @@ static void make_request(struct mddev *mddev, struct bio * bi) md_wakeup_thread(mddev->thread); release_stripe(sh); schedule(); + do_prepare = true; goto retry; } - finish_wait(&conf->wait_for_overlap, &w); set_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); if ((bi->bi_rw & REQ_SYNC) && @@ -4689,10 +4692,10 @@ static void make_request(struct mddev *mddev, struct bio * bi) } else { /* cannot get stripe for read-ahead, just give up */ clear_bit(BIO_UPTODATE, &bi->bi_flags); - finish_wait(&conf->wait_for_overlap, &w); break; } } + finish_wait(&conf->wait_for_overlap, &w); remaining = raid5_dec_bi_active_stripes(bi); if (remaining == 0) {
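A note on the dm-thin restructuring above: deferred and retry_on_resume bios now live on per-device lists protected by tc->lock rather than on the pool, and the worker walks pool->active_thins under RCU. Since per-thin processing can block, the walker cannot stay inside rcu_read_lock(); each thin_c is instead pinned with a refcount while it is worked on. The resulting iteration pattern, restated from the hunks above as a sketch:

	struct thin_c *tc = get_first_thin(pool);	/* takes a reference under RCU */
	while (tc) {
		process_thin_deferred_bios(tc);		/* may block; no RCU read lock held */
		tc = get_next_thin(pool, tc);		/* references the next thin, drops the old one */
	}

The matching thin_put() in thin_dtr(), paired with wait_for_completion(&tc->can_destroy), ensures a device cannot be torn down while an iterator still holds it: the refcount starts at 1 in thin_ctr(), so the completion fires only after both the constructor's reference and any in-flight iterator references are dropped.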
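Because no_space_timeout is declared with S_IRUGO | S_IWUSR, it should be adjustable at runtime through sysfs; given the dm-thin-pool module name from the Makefile, the node would presumably be /sys/module/dm_thin_pool/parameters/no_space_timeout (path inferred, not shown in this patch). Note from set_pool_mode() above that a value of 0 disables the timeout entirely, since the delayed work is only queued when no_space_timeout is non-zero, and that pool_postsuspend() cancels any pending timeout so a suspended pool is not demoted to PM_READ_ONLY behind userspace's back.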
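The dm-verity change above fixes the per-block hashing loop: the old loop keyed on io->iter.bi_size, so when a single bio covered several data blocks, the entire remainder of the bio was fed into the first block's hash. The new do/while consumes exactly todo = 1 << v->data_dev_block_bits bytes per verification, clamping each bio_vec segment to len = min(bv.bv_len, todo) and advancing the iterator by len rather than by bv.bv_len.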
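The dm-bitset dirty flag above makes dm_bitset_flush() a no-op whenever the cached 64-bit word is unchanged. A minimal caller sketch, assuming the usual dm-bitset signatures; example_mark_block() is an illustrative name, not part of this patch:

	static int example_mark_block(struct dm_disk_bitset *info, dm_block_t root,
				      uint32_t index, dm_block_t *new_root)
	{
		int r;

		r = dm_bitset_set_bit(info, root, index, new_root);	/* sets info->dirty */
		if (r)
			return r;

		/* Only issues a write if set/clear actually dirtied the word. */
		return dm_bitset_flush(info, *new_root, new_root);
	}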
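The revised transaction-manager contract separates flushing from superblock handling: dm_tm_pre_commit() now performs the first flush itself, and dm_tm_commit() unlocks the superblock before issuing the final flush. A sketch of the intended calling sequence; SUPERBLOCK_LOCATION and sb_validator stand in for a client's own definitions and are hypothetical here:

	static int example_commit(struct dm_block_manager *bm,
				  struct dm_transaction_manager *tm)
	{
		int r;
		struct dm_block *sblock;

		/* Phase i: flush every change except the superblock. */
		r = dm_tm_pre_commit(tm);
		if (r < 0)
			return r;

		/*
		 * Phase ii: lock and update the superblock last.  Anything
		 * that can fail should already have happened by now.
		 */
		r = dm_bm_write_lock(bm, SUPERBLOCK_LOCATION, &sb_validator, &sblock);
		if (r < 0)
			return r;

		/* ... copy the new space map roots etc. into dm_block_data(sblock) ... */

		return dm_tm_commit(tm, sblock);	/* unlocks sblock, then flushes */
	}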
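Finally, the raid5 make_request() rework moves DEFINE_WAIT(w) out of the per-stripe loop and arms it once up front; every path that sleeps via schedule() sets do_prepare so the wait entry is re-armed on the retry, and a single finish_wait() now runs after the loop. This avoids a prepare_to_wait()/finish_wait() round trip on conf->wait_for_overlap for every stripe when nothing actually sleeps.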