author    Mikulas Patocka <mpatocka@redhat.com>    2008-07-21 12:00:35 +0100
committer Alasdair G Kergon <agk@redhat.com>       2008-07-21 12:00:35 +0100
commit    92e868122edf08b9fc06b112e7e0c80ab94c1f93 (patch)
tree      9cd2e5e2f15adcaf1b8d3f8f55d5a676ad40bfd6 /drivers/md/dm-snap.c
parent    a8d41b59f3f5a7ac19452ef442a7fc1b5fa17366 (diff)
dm snapshot: use per device mempools
Change snapshot per-module mempool to per-device mempool. Per-module
mempools could cause a deadlock if multiple snapshot devices are
stacked above each other.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Diffstat (limited to 'drivers/md/dm-snap.c')
-rw-r--r--  drivers/md/dm-snap.c  40
1 file changed, 22 insertions(+), 18 deletions(-)
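
For context, the per-device mempool pattern the commit message describes looks roughly
like the sketch below. This is illustrative only and assumes placeholder names: the
pex_* identifiers and the reserve size of 16 are made up for the example and are not
the dm-snap definitions used in the actual diff that follows.

	#include <linux/mempool.h>
	#include <linux/slab.h>
	#include <linux/gfp.h>
	#include <linux/errno.h>

	/* Illustrative per-device structure; stands in for struct dm_snapshot. */
	struct pex_dev {
		mempool_t *pending_pool;	/* reserve owned by this device only */
	};

	/* The slab cache can stay module-wide; only the reserve is per device. */
	static struct kmem_cache *pex_cache;

	static int pex_dev_create(struct pex_dev *dev)
	{
		/*
		 * Each stacked device gets its own guaranteed reserve, so a
		 * GFP_NOIO allocation on one device never waits for objects
		 * held by another snapshot stacked above or below it.
		 */
		dev->pending_pool = mempool_create_slab_pool(16, pex_cache);
		if (!dev->pending_pool)
			return -ENOMEM;
		return 0;
	}

	static void pex_dev_destroy(struct pex_dev *dev)
	{
		mempool_destroy(dev->pending_pool);
	}

	static void *pex_alloc(struct pex_dev *dev)
	{
		return mempool_alloc(dev->pending_pool, GFP_NOIO);
	}

	static void pex_free(struct pex_dev *dev, void *obj)
	{
		mempool_free(obj, dev->pending_pool);
	}

In the patch itself the same split shows up as the new s->pending_pool allocation in
snapshot_ctr() and its teardown in snapshot_dtr(), while pending_cache remains a
module-level slab cache and the module-level pending_pool is removed from
dm_snapshot_init()/dm_snapshot_exit().
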
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index f4fd0cee9c3d..6e5528aecc98 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -96,7 +96,6 @@ struct dm_snap_pending_exception {
*/
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
-static mempool_t *pending_pool;
struct dm_snap_tracked_chunk {
struct hlist_node node;
@@ -364,14 +363,19 @@ static void free_exception(struct dm_snap_exception *e)
kmem_cache_free(exception_cache, e);
}
-static struct dm_snap_pending_exception *alloc_pending_exception(void)
+static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
- return mempool_alloc(pending_pool, GFP_NOIO);
+ struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
+ GFP_NOIO);
+
+ pe->snap = s;
+
+ return pe;
}
static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
- mempool_free(pe, pending_pool);
+ mempool_free(pe, pe->snap->pending_pool);
}
static void insert_completed_exception(struct dm_snapshot *s,
@@ -627,12 +631,18 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad5;
}
+ s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
+ if (!s->pending_pool) {
+ ti->error = "Could not allocate mempool for pending exceptions";
+ goto bad6;
+ }
+
s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
tracked_chunk_cache);
if (!s->tracked_chunk_pool) {
ti->error = "Could not allocate tracked_chunk mempool for "
"tracking reads";
- goto bad6;
+ goto bad_tracked_chunk_pool;
}
for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
@@ -669,6 +679,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
bad_load_and_register:
mempool_destroy(s->tracked_chunk_pool);
+ bad_tracked_chunk_pool:
+ mempool_destroy(s->pending_pool);
+
bad6:
dm_kcopyd_client_destroy(s->kcopyd_client);
@@ -723,6 +736,8 @@ static void snapshot_dtr(struct dm_target *ti)
__free_exceptions(s);
+ mempool_destroy(s->pending_pool);
+
dm_put_device(ti, s->origin);
dm_put_device(ti, s->cow);
@@ -969,7 +984,7 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
* to hold the lock while we do this.
*/
up_write(&s->lock);
- pe = alloc_pending_exception();
+ pe = alloc_pending_exception(s);
down_write(&s->lock);
if (!s->valid) {
@@ -989,7 +1004,6 @@ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
bio_list_init(&pe->snapshot_bios);
pe->primary_pe = NULL;
atomic_set(&pe->ref_count, 0);
- pe->snap = s;
pe->started = 0;
if (s->store.prepare_exception(&s->store, &pe->e)) {
@@ -1418,24 +1432,15 @@ static int __init dm_snapshot_init(void)
goto bad5;
}
- pending_pool = mempool_create_slab_pool(128, pending_cache);
- if (!pending_pool) {
- DMERR("Couldn't create pending pool.");
- r = -ENOMEM;
- goto bad_pending_pool;
- }
-
ksnapd = create_singlethread_workqueue("ksnapd");
if (!ksnapd) {
DMERR("Failed to create ksnapd workqueue.");
r = -ENOMEM;
- goto bad6;
+ goto bad_pending_pool;
}
return 0;
- bad6:
- mempool_destroy(pending_pool);
bad_pending_pool:
kmem_cache_destroy(tracked_chunk_cache);
bad5:
@@ -1466,7 +1471,6 @@ static void __exit dm_snapshot_exit(void)
DMERR("origin unregister failed %d", r);
exit_origin_hash();
- mempool_destroy(pending_pool);
kmem_cache_destroy(pending_cache);
kmem_cache_destroy(exception_cache);
kmem_cache_destroy(tracked_chunk_cache);