summaryrefslogtreecommitdiff
path: root/mm
diff options
context:
space:
mode:
authorKrzysztof Kozlowski <k.kozlowski@samsung.com>2014-01-10 12:41:31 +1100
committerStephen Rothwell <sfr@canb.auug.org.au>2014-01-10 12:41:31 +1100
commitb5748268a90f21032b86691374d5af60e8b21f94 (patch)
tree8a173cad119d9b88257b36ca88b7d455e5265c7f /mm
parent098b7679f871fae31dfc9ac1cb54ef1410c4ff92 (diff)
swap: fix setting PAGE_SIZE blocksize during swapoff/swapon race
Fix race between swapoff and swapon resulting in setting blocksize of PAGE_SIZE for block devices during swapoff. The swapon modifies swap_info->old_block_size before acquiring swapon_mutex. It reads block_size of bdev, stores it under swap_info->old_block_size and sets new block_size to PAGE_SIZE. On the other hand the swapoff sets the device's block_size to old_block_size after releasing swapon_mutex. This patch locks the swapon_mutex much earlier during swapon. It also releases the swapon_mutex later during swapoff. The effect of race can be triggered by following scenario: - One block swap device with block size of 512 - thread 1: Swapon is called, swap is activated, p->old_block_size = block_size(p->bdev); /512/ block_size(p->bdev) = PAGE_SIZE; Thread ends. - thread 2: Swapoff is called and it goes just after releasing the swapon_mutex. The swap is now fully disabled except of setting the block size to old value. The p->bdev->block_size is still equal to PAGE_SIZE. - thread 3: New swapon is called. This swap is disabled so without acquiring the swapon_mutex: - p->old_block_size = block_size(p->bdev); /PAGE_SIZE (!!!)/ - block_size(p->bdev) = PAGE_SIZE; Swap is activated and thread ends. - thread 2: resumes work and sets blocksize to old value: - set_blocksize(bdev, p->old_block_size) But now the p->old_block_size is equal to PAGE_SIZE. The patch swap-fix-set_blocksize-race-during-swapon-swapoff does not fix this particular issue. It reduces the possibility of races as the swapon must overwrite p->old_block_size before acquiring swapon_mutex in swapoff. Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com> Cc: Weijie Yang <weijie.yang.kh@gmail.com> Cc: Bob Liu <bob.liu@oracle.com> Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Cc: Shaohua Li <shli@fusionio.com> Cc: Minchan Kim <minchan@kernel.org> Cc: Hugh Dickins <hughd@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/swapfile.c | 20
1 file changed, 11 insertions, 9 deletions
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d443dea95c27..f8289fc0cd56 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1928,7 +1928,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
spin_unlock(&swap_lock);
frontswap_invalidate_area(type);
frontswap_map_set(p, NULL);
- mutex_unlock(&swapon_mutex);
free_percpu(p->percpu_cluster);
p->percpu_cluster = NULL;
vfree(swap_map);
@@ -1948,6 +1947,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
mutex_unlock(&inode->i_mutex);
}
filp_close(swap_file, NULL);
+ mutex_unlock(&swapon_mutex);
err = 0;
atomic_inc(&proc_poll_event);
wake_up_interruptible(&proc_poll_wait);
@@ -2404,37 +2404,38 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
}
}
+ mutex_lock(&swapon_mutex);
inode = mapping->host;
/* If S_ISREG(inode->i_mode) will do mutex_lock(&inode->i_mutex); */
error = claim_swapfile(p, inode);
if (unlikely(error))
- goto bad_swap;
+ goto bad_swap_wmutex;
/*
* Read the swap header.
*/
if (!mapping->a_ops->readpage) {
error = -EINVAL;
- goto bad_swap;
+ goto bad_swap_wmutex;
}
page = read_mapping_page(mapping, 0, swap_file);
if (IS_ERR(page)) {
error = PTR_ERR(page);
- goto bad_swap;
+ goto bad_swap_wmutex;
}
swap_header = kmap(page);
maxpages = read_swap_header(p, swap_header, inode);
if (unlikely(!maxpages)) {
error = -EINVAL;
- goto bad_swap;
+ goto bad_swap_wmutex;
}
/* OK, set up the swap map and apply the bad block list */
swap_map = vzalloc(maxpages);
if (!swap_map) {
error = -ENOMEM;
- goto bad_swap;
+ goto bad_swap_wmutex;
}
if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
p->flags |= SWP_SOLIDSTATE;
@@ -2464,13 +2465,13 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = swap_cgroup_swapon(p->type, maxpages);
if (error)
- goto bad_swap;
+ goto bad_swap_wmutex;
nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
cluster_info, maxpages, &span);
if (unlikely(nr_extents < 0)) {
error = nr_extents;
- goto bad_swap;
+ goto bad_swap_wmutex;
}
/* frontswap enabled? set up bit-per-page map for frontswap */
if (frontswap_enabled)
@@ -2506,7 +2507,6 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
}
}
- mutex_lock(&swapon_mutex);
prio = -1;
if (swap_flags & SWAP_FLAG_PREFER)
prio =
@@ -2531,6 +2531,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
inode->i_flags |= S_SWAPFILE;
error = 0;
goto out;
+bad_swap_wmutex:
+ mutex_unlock(&swapon_mutex);
bad_swap:
free_percpu(p->percpu_cluster);
p->percpu_cluster = NULL;