-rw-r--r--  drivers/md/dm-cache-policy-cleaner.c    3
-rw-r--r--  drivers/md/dm-cache-policy-internal.h   5
-rw-r--r--  drivers/md/dm-cache-policy-mq.c        41
-rw-r--r--  drivers/md/dm-cache-policy.h           15
-rw-r--r--  drivers/md/dm-cache-target.c           58
5 files changed, 85 insertions(+), 37 deletions(-)
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
index b04d1f904d07..004e463c9423 100644
--- a/drivers/md/dm-cache-policy-cleaner.c
+++ b/drivers/md/dm-cache-policy-cleaner.c
@@ -171,7 +171,8 @@ static void remove_cache_hash_entry(struct wb_cache_entry *e)
/* Public interface (see dm-cache-policy.h */
static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
bool can_block, bool can_migrate, bool discarded_oblock,
- struct bio *bio, struct policy_result *result)
+ struct bio *bio, struct policy_locker *locker,
+ struct policy_result *result)
{
struct policy *p = to_policy(pe);
struct wb_cache_entry *e;
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
index 2256a1f24f73..c198e6defb9c 100644
--- a/drivers/md/dm-cache-policy-internal.h
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -16,9 +16,10 @@
*/
static inline int policy_map(struct dm_cache_policy *p, dm_oblock_t oblock,
bool can_block, bool can_migrate, bool discarded_oblock,
- struct bio *bio, struct policy_result *result)
+ struct bio *bio, struct policy_locker *locker,
+ struct policy_result *result)
{
- return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, result);
+ return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, locker, result);
}
static inline int policy_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 3ddd1162334d..515d44bf24d3 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -693,9 +693,10 @@ static void requeue(struct mq_policy *mq, struct entry *e)
* - set the hit count to a hard coded value other than 1, eg, is it better
* if it goes in at level 2?
*/
-static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
+static int demote_cblock(struct mq_policy *mq,
+ struct policy_locker *locker, dm_oblock_t *oblock)
{
- struct entry *demoted = pop(mq, &mq->cache_clean);
+ struct entry *demoted = peek(&mq->cache_clean);
if (!demoted)
/*
@@ -707,6 +708,13 @@ static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
*/
return -ENOSPC;
+ if (locker->fn(locker, demoted->oblock))
+ /*
+ * We couldn't lock the demoted block.
+ */
+ return -EBUSY;
+
+ del(mq, demoted);
*oblock = demoted->oblock;
free_entry(&mq->cache_pool, demoted);
@@ -795,6 +803,7 @@ static int cache_entry_found(struct mq_policy *mq,
* finding which cache block to use.
*/
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
+ struct policy_locker *locker,
struct policy_result *result)
{
int r;
@@ -803,11 +812,12 @@ static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
/* Ensure there's a free cblock in the cache */
if (epool_empty(&mq->cache_pool)) {
result->op = POLICY_REPLACE;
- r = demote_cblock(mq, &result->old_oblock);
+ r = demote_cblock(mq, locker, &result->old_oblock);
if (r) {
result->op = POLICY_MISS;
return 0;
}
+
} else
result->op = POLICY_NEW;
@@ -829,7 +839,8 @@ static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
bool can_migrate, bool discarded_oblock,
- int data_dir, struct policy_result *result)
+ int data_dir, struct policy_locker *locker,
+ struct policy_result *result)
{
int r = 0;
@@ -842,7 +853,7 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
else {
requeue(mq, e);
- r = pre_cache_to_cache(mq, e, result);
+ r = pre_cache_to_cache(mq, e, locker, result);
}
return r;
@@ -872,6 +883,7 @@ static void insert_in_pre_cache(struct mq_policy *mq,
}
static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
+ struct policy_locker *locker,
struct policy_result *result)
{
int r;
@@ -879,7 +891,7 @@ static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
if (epool_empty(&mq->cache_pool)) {
result->op = POLICY_REPLACE;
- r = demote_cblock(mq, &result->old_oblock);
+ r = demote_cblock(mq, locker, &result->old_oblock);
if (unlikely(r)) {
result->op = POLICY_MISS;
insert_in_pre_cache(mq, oblock);
@@ -907,11 +919,12 @@ static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
bool can_migrate, bool discarded_oblock,
- int data_dir, struct policy_result *result)
+ int data_dir, struct policy_locker *locker,
+ struct policy_result *result)
{
if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
if (can_migrate)
- insert_in_cache(mq, oblock, result);
+ insert_in_cache(mq, oblock, locker, result);
else
return -EWOULDBLOCK;
} else {
@@ -928,7 +941,8 @@ static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
*/
static int map(struct mq_policy *mq, dm_oblock_t oblock,
bool can_migrate, bool discarded_oblock,
- int data_dir, struct policy_result *result)
+ int data_dir, struct policy_locker *locker,
+ struct policy_result *result)
{
int r = 0;
struct entry *e = hash_lookup(mq, oblock);
@@ -942,11 +956,11 @@ static int map(struct mq_policy *mq, dm_oblock_t oblock,
else if (e)
r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
- data_dir, result);
+ data_dir, locker, result);
else
r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
- data_dir, result);
+ data_dir, locker, result);
if (r == -EWOULDBLOCK)
result->op = POLICY_MISS;
@@ -1012,7 +1026,8 @@ static void copy_tick(struct mq_policy *mq)
static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
bool can_block, bool can_migrate, bool discarded_oblock,
- struct bio *bio, struct policy_result *result)
+ struct bio *bio, struct policy_locker *locker,
+ struct policy_result *result)
{
int r;
struct mq_policy *mq = to_mq_policy(p);
@@ -1028,7 +1043,7 @@ static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
iot_examine_bio(&mq->tracker, bio);
r = map(mq, oblock, can_migrate, discarded_oblock,
- bio_data_dir(bio), result);
+ bio_data_dir(bio), locker, result);
mutex_unlock(&mq->lock);
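
The behavioural change in dm-cache-policy-mq.c is the ordering of the demotion path: the clean-queue entry is now only removed once the locker callback has succeeded, which is why pop() becomes peek() followed by an explicit del(). An illustrative sketch of the resulting flow (not literal patch text):

	demoted = peek(&mq->cache_clean);          /* inspect the head, but do not remove it yet */
	if (!demoted)
		return -ENOSPC;                    /* no clean block available to demote */

	if (locker->fn(locker, demoted->oblock))
		return -EBUSY;                     /* couldn't lock it; the clean queue is left untouched */

	del(mq, demoted);                          /* safe to remove now that the block is locked */
	*oblock = demoted->oblock;
	free_entry(&mq->cache_pool, demoted);
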
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index f50fe360c546..5524e21e4836 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -70,6 +70,18 @@ enum policy_operation {
};
/*
+ * When issuing a POLICY_REPLACE the policy needs to make a callback to
+ * lock the block being demoted. This doesn't need to occur during a
+ * writeback operation since the block remains in the cache.
+ */
+struct policy_locker;
+typedef int (*policy_lock_fn)(struct policy_locker *l, dm_oblock_t oblock);
+
+struct policy_locker {
+ policy_lock_fn fn;
+};
+
+/*
* This is the instruction passed back to the core target.
*/
struct policy_result {
@@ -122,7 +134,8 @@ struct dm_cache_policy {
*/
int (*map)(struct dm_cache_policy *p, dm_oblock_t oblock,
bool can_block, bool can_migrate, bool discarded_oblock,
- struct bio *bio, struct policy_result *result);
+ struct bio *bio, struct policy_locker *locker,
+ struct policy_result *result);
/*
* Sometimes we want to see if a block is in the cache, without
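
To make the new interface concrete, here is a hedged sketch of how a caller of policy_map() can supply a policy_locker by embedding it in its own state. The struct name, the state field and the try_lock_block() helper are hypothetical and only for illustration; the patch's real implementation is the old_oblock_lock/cell_locker pair in dm-cache-target.c further down.

	struct my_locker {
		struct policy_locker locker;   /* embedded so container_of() can recover the wrapper */
		void *state;                   /* hypothetical private state used by the callback */
	};

	static int my_lock_fn(struct policy_locker *l, dm_oblock_t oblock)
	{
		struct my_locker *ml = container_of(l, struct my_locker, locker);

		/*
		 * Try to take an exclusive lock on the block about to be demoted.
		 * Return 0 on success; non-zero makes the policy back off
		 * (demote_cblock() returns -EBUSY and the lookup degrades to a miss).
		 */
		return try_lock_block(ml->state, oblock);   /* hypothetical helper */
	}

	/* ... in the caller, before invoking the policy: */
	struct my_locker ml = { .locker = { .fn = my_lock_fn }, .state = my_state };

	r = policy_map(policy, oblock, true, can_migrate, discarded,
		       bio, &ml.locker, &result);
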
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 41b2594a80c6..d5982480630b 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1439,16 +1439,43 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
&cache->stats.read_miss : &cache->stats.write_miss);
}
+/*----------------------------------------------------------------*/
+
+struct old_oblock_lock {
+ struct policy_locker locker;
+ struct cache *cache;
+ struct prealloc *structs;
+ struct dm_bio_prison_cell *cell;
+};
+
+static int null_locker(struct policy_locker *locker, dm_oblock_t b)
+{
+ /* This should never be called */
+ BUG();
+ return 0;
+}
+
+static int cell_locker(struct policy_locker *locker, dm_oblock_t b)
+{
+ struct old_oblock_lock *l = container_of(locker, struct old_oblock_lock, locker);
+ struct dm_bio_prison_cell *cell_prealloc = prealloc_get_cell(l->structs);
+
+ return bio_detain(l->cache, b, NULL, cell_prealloc,
+ (cell_free_fn) prealloc_put_cell,
+ l->structs, &l->cell);
+}
+
static void process_bio(struct cache *cache, struct prealloc *structs,
struct bio *bio)
{
int r;
bool release_cell = true;
dm_oblock_t block = get_bio_block(cache, bio);
- struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
+ struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
struct policy_result lookup_result;
bool passthrough = passthrough_mode(&cache->features);
bool discarded_block, can_migrate;
+ struct old_oblock_lock ool;
/*
* Check to see if that block is currently migrating.
@@ -1463,8 +1490,12 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
discarded_block = is_discarded_oblock(cache, block);
can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
+ ool.locker.fn = cell_locker;
+ ool.cache = cache;
+ ool.structs = structs;
+ ool.cell = NULL;
r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
- bio, &lookup_result);
+ bio, &ool.locker, &lookup_result);
if (r == -EWOULDBLOCK)
/* migration has been denied */
@@ -1521,27 +1552,11 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
break;
case POLICY_REPLACE:
- cell_prealloc = prealloc_get_cell(structs);
- r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
- (cell_free_fn) prealloc_put_cell,
- structs, &old_ocell);
- if (r > 0) {
- /*
- * We have to be careful to avoid lock inversion of
- * the cells. So we back off, and wait for the
- * old_ocell to become free.
- */
- policy_force_mapping(cache->policy, block,
- lookup_result.old_oblock);
- atomic_inc(&cache->stats.cache_cell_clash);
- break;
- }
atomic_inc(&cache->stats.demotion);
atomic_inc(&cache->stats.promotion);
-
demote_then_promote(cache, structs, lookup_result.old_oblock,
block, lookup_result.cblock,
- old_ocell, new_ocell);
+ ool.cell, new_ocell);
release_cell = false;
break;
@@ -2589,6 +2604,9 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
bool discarded_block;
struct policy_result lookup_result;
struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
+ struct old_oblock_lock ool;
+
+ ool.locker.fn = null_locker;
if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
/*
@@ -2627,7 +2645,7 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
discarded_block = is_discarded_oblock(cache, block);
r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
- bio, &lookup_result);
+ bio, &ool.locker, &lookup_result);
if (r == -EWOULDBLOCK) {
cell_defer(cache, *cell, true);
return DM_MAPIO_SUBMITTED;