author    | Linus Torvalds <torvalds@linux-foundation.org> | 2023-02-22 13:21:31 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-02-22 13:21:31 -0800
commit    | f0b2769a0185ccf84842a795b5587afc37274c3d (patch)
tree      | 144ff7280bd7699168ebf635b80625b17fd5194a /drivers/md/dm-crypt.c
parent    | 23064dfe088e0926e3fc0922f118866dc1564405 (diff)
parent    | d695e44157c8da8d298295d1905428fb2495bc8b (diff)
Merge tag 'for-6.3/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:
- Fix DM cache target to free background tracker work items, otherwise
a slab BUG will occur when kmem_cache_destroy() is called.
- Improve 2 of DM's shrinker names to reflect their use.
- Fix the DM flakey target to not corrupt the zero page. Fix dm-flakey
on 32-bit highmem systems by using bvec_kmap_local instead of
page_address (see the first sketch after this list). Also, fix the
logic used when imposing the "corrupt_bio_byte" feature.
- Stop using WQ_UNBOUND for DM verity target's verify_wq because it
causes significant Android latencies on ARM64 (and doesn't show real
benefit on other architectures). A sketch of the flag change follows
this list.
- Add negative check to catch simple case of a DM table referencing
itself. More complex scenarios that use intermediate devices to
self-reference still need to be avoided/handled in userspace.
- Fix DM core's resize to only send one uevent instead of two. This
fixes a race with udev that, if udev wins, causes udev to miss uevents
(which caused premature unmount attempts by systemd).
- Add cond_resched() to workqueue functions in DM core, dm-thin and
dm-cache so that their loops aren't the cause of unintended CPU
scheduling fairness issues (see the cond_resched() sketch after this
list).
- Fix all of DM's checkpatch errors and warnings (famous last words).
- Various other small cleanups.
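For the dm-flakey highmem item above, a minimal sketch of the bvec_kmap_local() pattern; the helper name corrupt_first_byte() and the per-segment loop are illustrative only, not the actual dm-flakey code:

#include <linux/bio.h>
#include <linux/highmem.h>

/*
 * Illustrative only: touch the first byte of each segment of a bio.
 * page_address() only works for lowmem pages, so on a 32-bit highmem
 * system it cannot be used on arbitrary bio pages. bvec_kmap_local()
 * creates a temporary, CPU-local mapping and already accounts for
 * bv_offset; it is paired with kunmap_local().
 */
static void corrupt_first_byte(struct bio *bio, unsigned char value)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter) {
		char *data = bvec_kmap_local(&bvec);

		data[0] = value;
		flush_dcache_page(bvec.bv_page);
		kunmap_local(data);
	}
}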
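For the verify_wq item, a sketch of the flag change only; the helper name and the exact flag set are assumptions, the point is dropping WQ_UNBOUND:

#include <linux/workqueue.h>

/*
 * Hypothetical helper; the real dm-verity allocation and flag set may
 * differ. Without WQ_UNBOUND, work items run on the CPU that queued
 * them instead of being handed to the scheduler's unbound pool, which
 * is what avoided the large verification latencies seen on ARM64.
 */
static struct workqueue_struct *alloc_verify_wq(void)
{
	/* before this change the flags also included WQ_UNBOUND */
	return alloc_workqueue("kverityd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
}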
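For the cond_resched() item, a sketch of the loop pattern; the pending_io type and the list names are made up for illustration:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Hypothetical work item type, for illustration only. */
struct pending_io {
	struct list_head list;
};

static LIST_HEAD(requeue_list);
static DEFINE_SPINLOCK(requeue_lock);

/*
 * A workqueue function that drains a potentially long list. Inserting
 * cond_resched() once per item gives the scheduler an explicit
 * preemption point, so a large backlog no longer monopolizes the CPU
 * the worker happens to run on.
 */
static void requeue_work_fn(struct work_struct *work)
{
	struct pending_io *io, *tmp;
	LIST_HEAD(local);

	spin_lock_irq(&requeue_lock);
	list_splice_init(&requeue_list, &local);
	spin_unlock_irq(&requeue_lock);

	list_for_each_entry_safe(io, tmp, &local, list) {
		list_del(&io->list);
		/* ... resubmit or complete the io here ... */
		cond_resched();
	}
}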
* tag 'for-6.3/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (62 commits)
dm: remove unnecessary (void*) conversion in event_callback()
dm ioctl: remove unnecessary check when using dm_get_mdptr()
dm ioctl: assert _hash_lock is held in __hash_remove
dm cache: add cond_resched() to various workqueue loops
dm thin: add cond_resched() to various workqueue loops
dm: add cond_resched() to dm_wq_requeue_work()
dm: add cond_resched() to dm_wq_work()
dm sysfs: make kobj_type structure constant
dm: update targets using system workqueues to use a local workqueue
dm: remove flush_scheduled_work() during local_exit()
dm clone: prefer kvmalloc_array()
dm: declare variables static when sensible
dm: fix suspect indent whitespace
dm ioctl: prefer strscpy() instead of strlcpy()
dm: avoid void function return statements
dm integrity: change macros min/max() -> min_t/max_t where appropriate
dm: fix use of sizeof() macro
dm: avoid 'do {} while(0)' loop in single statement macros
dm log: avoid multiple line dereference
dm log: avoid trailing semicolon in macro
...
Diffstat (limited to 'drivers/md/dm-crypt.c')
-rw-r--r-- | drivers/md/dm-crypt.c | 117
1 file changed, 62 insertions, 55 deletions
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 3aeeb8f2802f..40cb1719ae4d 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2003 Jana Saout <jana@saout.de> * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> @@ -171,14 +172,14 @@ struct crypt_config { } iv_gen_private; u64 iv_offset; unsigned int iv_size; - unsigned short int sector_size; + unsigned short sector_size; unsigned char sector_shift; union { struct crypto_skcipher **tfms; struct crypto_aead **tfms_aead; } cipher_tfm; - unsigned tfms_count; + unsigned int tfms_count; unsigned long cipher_flags; /* @@ -212,7 +213,7 @@ struct crypt_config { * pool for per bio private data, crypto requests, * encryption requeusts/buffer pages and integrity tags */ - unsigned tag_pool_max_sectors; + unsigned int tag_pool_max_sectors; mempool_t tag_pool; mempool_t req_pool; mempool_t page_pool; @@ -229,7 +230,7 @@ struct crypt_config { #define POOL_ENTRY_SIZE 512 static DEFINE_SPINLOCK(dm_crypt_clients_lock); -static unsigned dm_crypt_clients_n = 0; +static unsigned int dm_crypt_clients_n; static volatile unsigned long dm_crypt_pages_per_client; #define DM_CRYPT_MEMORY_PERCENT 2 #define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16) @@ -354,7 +355,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, const char *opts) { - unsigned bs; + unsigned int bs; int log; if (crypt_integrity_aead(cc)) @@ -363,9 +364,10 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti, bs = crypto_skcipher_blocksize(any_tfm(cc)); log = ilog2(bs); - /* we need to calculate how far we must shift the sector count - * to get the cipher block count, we use this shift in _gen */ - + /* + * We need to calculate how far we must shift the sector count + * to get the cipher block count, we use this shift in _gen. 
+ */ if (1 << log != bs) { ti->error = "cypher blocksize is not a power of 2"; return -EINVAL; @@ -531,9 +533,9 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { sg = crypt_get_sg_data(cc, dmreq->sg_in); - src = kmap_atomic(sg_page(sg)); + src = kmap_local_page(sg_page(sg)); r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset); - kunmap_atomic(src); + kunmap_local(src); } else memset(iv, 0, cc->iv_size); @@ -551,14 +553,14 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, return 0; sg = crypt_get_sg_data(cc, dmreq->sg_out); - dst = kmap_atomic(sg_page(sg)); + dst = kmap_local_page(sg_page(sg)); r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset); /* Tweak the first block of plaintext sector */ if (!r) crypto_xor(dst + sg->offset, iv, cc->iv_size); - kunmap_atomic(dst); + kunmap_local(dst); return r; } @@ -681,9 +683,9 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv, /* Remove whitening from ciphertext */ if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { sg = crypt_get_sg_data(cc, dmreq->sg_in); - src = kmap_atomic(sg_page(sg)); + src = kmap_local_page(sg_page(sg)); r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset); - kunmap_atomic(src); + kunmap_local(src); } /* Calculate IV */ @@ -707,9 +709,9 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv, /* Apply whitening on ciphertext */ sg = crypt_get_sg_data(cc, dmreq->sg_out); - dst = kmap_atomic(sg_page(sg)); + dst = kmap_local_page(sg_page(sg)); r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset); - kunmap_atomic(dst); + kunmap_local(dst); return r; } @@ -731,8 +733,7 @@ static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti, } if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) { - ti->error = "Block size of EBOIV cipher does " - "not match IV size of block cipher"; + ti->error = "Block size of EBOIV cipher does not match IV size of block cipher"; return -EINVAL; } @@ -974,35 +975,35 @@ static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *d goto out; sg = crypt_get_sg_data(cc, dmreq->sg_out); - data = kmap_atomic(sg_page(sg)); + data = kmap_local_page(sg_page(sg)); data_offset = data + sg->offset; /* Cannot modify original bio, copy to sg_out and apply Elephant to it */ if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { sg2 = crypt_get_sg_data(cc, dmreq->sg_in); - data2 = kmap_atomic(sg_page(sg2)); + data2 = kmap_local_page(sg_page(sg2)); memcpy(data_offset, data2 + sg2->offset, cc->sector_size); - kunmap_atomic(data2); + kunmap_local(data2); } if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) { - diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32)); - diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); - diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); - diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32)); + diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32)); + diffuser_b_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32)); + diffuser_a_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32)); + diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32)); } for (i = 0; i < (cc->sector_size / 32); i++) crypto_xor(data_offset + i * 32, ks, 32); if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { - diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32)); - diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); - 
diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32)); - diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32)); + diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32)); + diffuser_a_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32)); + diffuser_b_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32)); + diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32)); } - kunmap_atomic(data); + kunmap_local(data); out: kfree_sensitive(ks); kfree_sensitive(es); @@ -1255,6 +1256,7 @@ static __le64 *org_sector_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq) { u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size; + return (__le64 *) ptr; } @@ -1263,7 +1265,8 @@ static unsigned int *org_tag_of_dmreq(struct crypt_config *cc, { u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size + sizeof(uint64_t); - return (unsigned int*)ptr; + + return (unsigned int *)ptr; } static void *tag_from_dmreq(struct crypt_config *cc, @@ -1463,7 +1466,7 @@ static void kcryptd_async_done(void *async_req, int error); static int crypt_alloc_req_skcipher(struct crypt_config *cc, struct convert_context *ctx) { - unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); + unsigned int key_index = ctx->cc_sector & (cc->tfms_count - 1); if (!ctx->r.req) { ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO); @@ -1657,13 +1660,13 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone); * non-blocking allocations without a mutex first but on failure we fallback * to blocking allocations with a mutex. */ -static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) +static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size) { struct crypt_config *cc = io->cc; struct bio *clone; unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM; - unsigned i, len, remaining_size; + unsigned int i, len, remaining_size; struct page *page; retry: @@ -1738,6 +1741,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io) static void kcryptd_io_bio_endio(struct work_struct *work) { struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); + bio_endio(io->base_bio); } @@ -1803,7 +1807,7 @@ static void crypt_endio(struct bio *clone) { struct dm_crypt_io *io = clone->bi_private; struct crypt_config *cc = io->cc; - unsigned rw = bio_data_dir(clone); + unsigned int rw = bio_data_dir(clone); blk_status_t error; /* @@ -2255,7 +2259,7 @@ static void crypt_free_tfms_aead(struct crypt_config *cc) static void crypt_free_tfms_skcipher(struct crypt_config *cc) { - unsigned i; + unsigned int i; if (!cc->cipher_tfm.tfms) return; @@ -2280,7 +2284,7 @@ static void crypt_free_tfms(struct crypt_config *cc) static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode) { - unsigned i; + unsigned int i; int err; cc->cipher_tfm.tfms = kcalloc(cc->tfms_count, @@ -2338,12 +2342,12 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) return crypt_alloc_tfms_skcipher(cc, ciphermode); } -static unsigned crypt_subkey_size(struct crypt_config *cc) +static unsigned int crypt_subkey_size(struct crypt_config *cc) { return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count); } -static unsigned crypt_authenckey_size(struct crypt_config *cc) +static unsigned int crypt_authenckey_size(struct crypt_config *cc) { return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct 
crypto_authenc_key_param)); } @@ -2354,7 +2358,7 @@ static unsigned crypt_authenckey_size(struct crypt_config *cc) * This funcion converts cc->key to this special format. */ static void crypt_copy_authenckey(char *p, const void *key, - unsigned enckeylen, unsigned authkeylen) + unsigned int enckeylen, unsigned int authkeylen) { struct crypto_authenc_key_param *param; struct rtattr *rta; @@ -2372,7 +2376,7 @@ static void crypt_copy_authenckey(char *p, const void *key, static int crypt_setkey(struct crypt_config *cc) { - unsigned subkey_size; + unsigned int subkey_size; int err = 0, i, r; /* Ignore extra keys (which are used for IV etc) */ @@ -2485,7 +2489,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string } /* look for next ':' separating key_type from key_description */ - key_desc = strpbrk(key_string, ":"); + key_desc = strchr(key_string, ':'); if (!key_desc || key_desc == key_string || !strlen(key_desc + 1)) return -EINVAL; @@ -2500,7 +2504,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string type = &key_type_encrypted; set_key = set_key_encrypted; } else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) && - !strncmp(key_string, "trusted:", key_desc - key_string + 1)) { + !strncmp(key_string, "trusted:", key_desc - key_string + 1)) { type = &key_type_trusted; set_key = set_key_trusted; } else { @@ -3411,11 +3415,14 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); if (cc->on_disk_tag_size) { - unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift); + unsigned int tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift); + + if (unlikely(tag_len > KMALLOC_MAX_SIZE)) + io->integrity_metadata = NULL; + else + io->integrity_metadata = kmalloc(tag_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); - if (unlikely(tag_len > KMALLOC_MAX_SIZE) || - unlikely(!(io->integrity_metadata = kmalloc(tag_len, - GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) { + if (unlikely(!io->integrity_metadata)) { if (bio_sectors(bio) > cc->tag_pool_max_sectors) dm_accept_partial_bio(bio, cc->tag_pool_max_sectors); io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO); @@ -3439,14 +3446,14 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) static char hex2asc(unsigned char c) { - return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27); + return c + '0' + ((unsigned int)(9 - c) >> 4 & 0x27); } static void crypt_status(struct dm_target *ti, status_type_t type, - unsigned status_flags, char *result, unsigned maxlen) + unsigned int status_flags, char *result, unsigned int maxlen) { struct crypt_config *cc = ti->private; - unsigned i, sz = 0; + unsigned int i, sz = 0; int num_feature_args = 0; switch (type) { @@ -3562,8 +3569,8 @@ static void crypt_resume(struct dm_target *ti) * key set <key> * key wipe */ -static int crypt_message(struct dm_target *ti, unsigned argc, char **argv, - char *result, unsigned maxlen) +static int crypt_message(struct dm_target *ti, unsigned int argc, char **argv, + char *result, unsigned int maxlen) { struct crypt_config *cc = ti->private; int key_size, ret = -EINVAL; @@ -3624,10 +3631,10 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) limits->max_segment_size = PAGE_SIZE; limits->logical_block_size = - max_t(unsigned, limits->logical_block_size, cc->sector_size); + max_t(unsigned int, limits->logical_block_size, cc->sector_size); 
limits->physical_block_size = - max_t(unsigned, limits->physical_block_size, cc->sector_size); - limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size); + max_t(unsigned int, limits->physical_block_size, cc->sector_size); + limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size); limits->dma_alignment = limits->logical_block_size - 1; }
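Most of the mechanical churn in the diff above is the kmap_atomic()/kunmap_atomic() pairs becoming kmap_local_page()/kunmap_local(). A minimal sketch of that pattern; xor_iv_into_sg() is a hypothetical helper standing in for dm-crypt's IV post-processing, not code from the patch:

#include <crypto/algapi.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

/*
 * kmap_local_page() mappings are CPU-local and stack like kmap_atomic()
 * mappings, but they do not disable preemption or pagefaults; the
 * mapped region is simply released with kunmap_local() when done.
 */
static void xor_iv_into_sg(struct scatterlist *sg, const u8 *iv,
			   unsigned int iv_size)
{
	u8 *dst;

	dst = kmap_local_page(sg_page(sg));	/* was kmap_atomic(sg_page(sg)) */
	crypto_xor(dst + sg->offset, iv, iv_size);
	kunmap_local(dst);			/* was kunmap_atomic(dst) */
}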