author     Ingo Molnar <mingo@kernel.org>   2020-11-27 11:09:57 +0100
committer  Ingo Molnar <mingo@kernel.org>   2020-11-27 11:10:50 +0100
commit     a787bdaff83a085288b6fc607afb4bb648da3cc9 (patch)
tree       ec389c1494ef4790ea90f65c4f86e523caf325d0 /block
parent     2914b0ba61a9d253535e51af16c7122a8148995d (diff)
parent     85a2c56cb4454c73f56d3099d96942e7919b292f (diff)
Merge branch 'linus' into sched/core, to resolve semantic conflict
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'block')
-rw-r--r--   block/bio.c          11
-rw-r--r--   block/blk-cgroup.c   16
-rw-r--r--   block/blk-flush.c     6
-rw-r--r--   block/genhd.c         5
4 files changed, 31 insertions(+), 7 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index e6e26d7a1ffb..fa01bef35bb1 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1044,6 +1044,7 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
 	ssize_t size, left;
 	unsigned len, i;
 	size_t offset;
+	int ret = 0;
 
 	if (WARN_ON_ONCE(!max_append_sectors))
 		return 0;
@@ -1066,15 +1067,17 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
 
 		len = min_t(size_t, PAGE_SIZE - offset, left);
 		if (bio_add_hw_page(q, bio, page, len, offset,
-				max_append_sectors, &same_page) != len)
-			return -EINVAL;
+				max_append_sectors, &same_page) != len) {
+			ret = -EINVAL;
+			break;
+		}
 		if (same_page)
 			put_page(page);
 		offset = 0;
 	}
 
-	iov_iter_advance(iter, size);
-	return 0;
+	iov_iter_advance(iter, size - left);
+	return ret;
 }
 
 /**
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index f9b55614d67d..54fbe1e80cc4 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -657,13 +657,20 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 			goto fail;
 		}
 
+		if (radix_tree_preload(GFP_KERNEL)) {
+			blkg_free(new_blkg);
+			ret = -ENOMEM;
+			goto fail;
+		}
+
 		rcu_read_lock();
 		spin_lock_irq(&q->queue_lock);
 
 		blkg = blkg_lookup_check(pos, pol, q);
 		if (IS_ERR(blkg)) {
 			ret = PTR_ERR(blkg);
-			goto fail_unlock;
+			blkg_free(new_blkg);
+			goto fail_preloaded;
 		}
 
 		if (blkg) {
@@ -672,10 +679,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 			blkg = blkg_create(pos, q, new_blkg);
 			if (IS_ERR(blkg)) {
 				ret = PTR_ERR(blkg);
-				goto fail_unlock;
+				goto fail_preloaded;
 			}
 		}
 
+		radix_tree_preload_end();
+
 		if (pos == blkcg)
 			goto success;
 	}
@@ -685,6 +694,8 @@ success:
 	ctx->body = input;
 	return 0;
 
+fail_preloaded:
+	radix_tree_preload_end();
 fail_unlock:
 	spin_unlock_irq(&q->queue_lock);
 	rcu_read_unlock();
@@ -838,6 +849,7 @@ static void blkcg_fill_root_iostats(void)
 			blkg_iostat_set(&blkg->iostat.cur, &tmp);
 			u64_stats_update_end(&blkg->iostat.sync);
 		}
+		disk_put_part(part);
 	}
 }
 
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 53abb5c73d99..fd5cee9f1a3b 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -231,6 +231,12 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 		return;
 	}
 
+	/*
+	 * Flush request has to be marked as IDLE when it is really ended
+	 * because its .end_io() is called from timeout code path too for
+	 * avoiding use-after-free.
+	 */
+	WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
 	if (fq->rq_status != BLK_STS_OK)
 		error = fq->rq_status;
 
diff --git a/block/genhd.c b/block/genhd.c
index 0a273211fec2..9387f050c248 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -49,7 +49,7 @@ static void disk_release_events(struct gendisk *disk);
  * Set disk capacity and notify if the size is not currently
  * zero and will not be set to zero
  */
-void set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size,
+bool set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size,
 					bool update_bdev)
 {
 	sector_t capacity = get_capacity(disk);
@@ -62,7 +62,10 @@ void set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size,
 		char *envp[] = { "RESIZE=1", NULL };
 
 		kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
+		return true;
 	}
+
+	return false;
 }
 EXPORT_SYMBOL_GPL(set_capacity_revalidate_and_notify);
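A note on the bio.c hunks: __bio_iov_append_get_pages() previously returned -EINVAL as soon as bio_add_hw_page() refused a page, and on success always advanced the iterator by the full size, so a mid-loop failure lost track of the pages that had already been added. The fix records the error, breaks out, and advances the iterator only by the bytes actually consumed (size - left). Below is a minimal stand-alone sketch of that partial-progress pattern; try_add_chunk(), add_all(), and the 10-byte "queue" are hypothetical stand-ins for bio_add_hw_page() and the hardware limit, not kernel code.

#include <stdio.h>
#include <string.h>

/*
 * Hypothetical stand-in for bio_add_hw_page(): accepts chunks into a
 * fixed 10-byte "queue" and rejects a chunk that does not fit, the way
 * a hardware limit such as max_append_sectors would.
 */
static size_t queue_used;

static size_t try_add_chunk(const char *buf, size_t len)
{
        (void)buf;
        if (len > 10 - queue_used)
                return 0;               /* rejected: result != len, as in the patch */
        queue_used += len;
        return len;
}

/*
 * The fixed loop shape: on a mid-loop failure, record the error and
 * break instead of returning, then account only for the bytes actually
 * added (size - left), mirroring iov_iter_advance(iter, size - left).
 */
static int add_all(const char *buf, size_t size, size_t *consumed)
{
        size_t left, len;
        int ret = 0;

        for (left = size; left > 0; left -= len, buf += len) {
                len = left < 4 ? left : 4;      /* 4-byte "pages" */
                if (try_add_chunk(buf, len) != len) {
                        ret = -1;
                        break;
                }
        }
        *consumed = size - left;
        return ret;
}

int main(void)
{
        const char *msg = "hello world!";       /* 12 bytes */
        size_t done;
        int ret = add_all(msg, strlen(msg), &done);

        /* Two 4-byte chunks fit, the third does not: ret=-1, done=8. */
        printf("ret=%d consumed=%zu\n", ret, done);
        return 0;
}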
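A note on the blk-cgroup.c hunks: the blkg insertion happens under q->queue_lock, where sleeping GFP_KERNEL allocation is not allowed, so blkg_conf_prep() now reserves radix tree nodes up front with radix_tree_preload() and pairs it with radix_tree_preload_end() on every exit path (hence the new fail_preloaded label). A minimal sketch of that preload pattern, assuming illustrative tree/lock/key/item names rather than the blkg code itself:

#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

/*
 * radix_tree_preload(GFP_KERNEL) may sleep and, on success, returns 0
 * with preemption disabled and per-CPU nodes reserved, so the
 * radix_tree_insert() done under the spinlock cannot fail for lack of
 * memory. Every path out of the preloaded section must call
 * radix_tree_preload_end().
 */
static int insert_item(struct radix_tree_root *tree, spinlock_t *lock,
                       unsigned long key, void *item)
{
        int err;

        err = radix_tree_preload(GFP_KERNEL);
        if (err)
                return err;             /* -ENOMEM, nothing held */

        spin_lock(lock);
        err = radix_tree_insert(tree, key, item);       /* 0 or -EEXIST here */
        spin_unlock(lock);

        radix_tree_preload_end();       /* re-enables preemption */
        return err;
}

The blkcg_fill_root_iostats() hunk is independent of this: it releases the hd_struct reference taken by disk_get_part() at the top of the loop, which was previously leaked on every iteration.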
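A note on the blk-flush.c hunk: as the added comment says, a flush request's .end_io() can also be invoked from the timeout path, so leaving the request looking in-flight after it has really ended risks a use-after-free if a timeout fires concurrently. A simplified sketch of the reader side this WRITE_ONCE() pairs with; rq_may_expire() is a hypothetical condensation of the blk-mq timeout check, which in-tree also compares deadlines:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/*
 * The timeout path reads rq->state with READ_ONCE() and only treats a
 * request as expirable while it is still MQ_RQ_IN_FLIGHT. Publishing
 * MQ_RQ_IDLE from flush_end_io() therefore makes a concurrent timeout
 * back off from a flush request that has already completed.
 */
static bool rq_may_expire(struct request *rq)
{
        return READ_ONCE(rq->state) == MQ_RQ_IN_FLIGHT;
}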
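A note on the genhd.c hunks: set_capacity_revalidate_and_notify() now reports whether the capacity really changed and a RESIZE uevent was emitted, returning true only in that case. A hypothetical caller sketch, assuming my_disk_resize() and my_driver_note_resize() as illustrative names that are not in-tree:

#include <linux/genhd.h>

/* Illustrative only: stands in for whatever bookkeeping a driver does. */
static void my_driver_note_resize(struct gendisk *disk);

static void my_disk_resize(struct gendisk *disk, sector_t new_sectors)
{
        /* true only when capacity changed and the RESIZE uevent went out */
        if (set_capacity_revalidate_and_notify(disk, new_sectors, true))
                my_driver_note_resize(disk);
}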