Diffstat (limited to 'block/blk-zoned.c')
-rw-r--r-- | block/blk-zoned.c | 82
1 file changed, 44 insertions, 38 deletions
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index af19296fa50d..263e28b72053 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -18,7 +18,7 @@
 #include <linux/vmalloc.h>
 #include <linux/sched/mm.h>
 #include <linux/spinlock.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
 #include <linux/mempool.h>
 
 #include "blk.h"
@@ -64,7 +64,7 @@ static const char *const zone_cond_name[] = {
 struct blk_zone_wplug {
 	struct hlist_node	node;
 	struct list_head	link;
-	atomic_t		ref;
+	refcount_t		ref;
 	spinlock_t		lock;
 	unsigned int		flags;
 	unsigned int		zone_no;
@@ -348,13 +348,6 @@ fail:
 	return ret;
 }
 
-static inline bool disk_zone_is_conv(struct gendisk *disk, sector_t sector)
-{
-	if (!disk->conv_zones_bitmap)
-		return false;
-	return test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
-}
-
 static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)
 {
 	return zone->start + zone->len >= get_capacity(disk);
@@ -411,7 +404,7 @@ static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk,
 
 	hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[idx], node) {
 		if (zwplug->zone_no == zno &&
-		    atomic_inc_not_zero(&zwplug->ref)) {
+		    refcount_inc_not_zero(&zwplug->ref)) {
 			rcu_read_unlock();
 			return zwplug;
 		}
@@ -432,7 +425,7 @@ static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head)
 
 static inline void disk_put_zone_wplug(struct blk_zone_wplug *zwplug)
 {
-	if (atomic_dec_and_test(&zwplug->ref)) {
+	if (refcount_dec_and_test(&zwplug->ref)) {
 		WARN_ON_ONCE(!bio_list_empty(&zwplug->bio_list));
 		WARN_ON_ONCE(!list_empty(&zwplug->link));
 		WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_UNHASHED));
@@ -463,7 +456,7 @@ static inline bool disk_should_remove_zone_wplug(struct gendisk *disk,
 	 * taken when the plug was allocated and another reference taken by the
	 * caller context).
 	 */
-	if (atomic_read(&zwplug->ref) > 2)
+	if (refcount_read(&zwplug->ref) > 2)
 		return false;
 
 	/* We can remove zone write plugs for zones that are empty or full. */
@@ -533,7 +526,7 @@ again:
 
 	INIT_HLIST_NODE(&zwplug->node);
 	INIT_LIST_HEAD(&zwplug->link);
-	atomic_set(&zwplug->ref, 2);
+	refcount_set(&zwplug->ref, 2);
 	spin_lock_init(&zwplug->lock);
 	zwplug->flags = 0;
 	zwplug->zone_no = zno;
@@ -624,7 +617,7 @@ static inline void disk_zone_wplug_set_error(struct gendisk *disk,
 	 * finished.
 	 */
 	zwplug->flags |= BLK_ZONE_WPLUG_ERROR;
-	atomic_inc(&zwplug->ref);
+	refcount_inc(&zwplug->ref);
 
 	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
 	list_add_tail(&zwplug->link, &disk->zone_wplugs_err_list);
@@ -709,7 +702,7 @@ static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio,
 	struct blk_zone_wplug *zwplug;
 
 	/* Conventional zones cannot be reset nor finished. */
-	if (disk_zone_is_conv(disk, sector)) {
+	if (!bdev_zone_is_seq(bio->bi_bdev, sector)) {
 		bio_io_error(bio);
 		return true;
 	}
@@ -963,7 +956,7 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs)
 	}
 
 	/* Conventional zones do not need write plugging. */
-	if (disk_zone_is_conv(disk, sector)) {
+	if (!bdev_zone_is_seq(bio->bi_bdev, sector)) {
 		/* Zone append to conventional zones is not allowed. */
 		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
 			bio_io_error(bio);
@@ -1099,7 +1092,7 @@ static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk,
 	 * reference we take here.
 	 */
 	WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED));
-	atomic_inc(&zwplug->ref);
+	refcount_inc(&zwplug->ref);
 	queue_work(disk->zone_wplugs_wq, &zwplug->bio_work);
 }
 
@@ -1444,7 +1437,7 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
 		while (!hlist_empty(&disk->zone_wplugs_hash[i])) {
 			zwplug = hlist_entry(disk->zone_wplugs_hash[i].first,
 					     struct blk_zone_wplug, node);
-			atomic_inc(&zwplug->ref);
+			refcount_inc(&zwplug->ref);
 			disk_remove_zone_wplug(disk, zwplug);
 			disk_put_zone_wplug(zwplug);
 		}
@@ -1455,6 +1448,24 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
 	disk->zone_wplugs_hash_bits = 0;
 }
 
+static unsigned int disk_set_conv_zones_bitmap(struct gendisk *disk,
+					       unsigned long *bitmap)
+{
+	unsigned int nr_conv_zones = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&disk->zone_wplugs_lock, flags);
+	if (bitmap)
+		nr_conv_zones = bitmap_weight(bitmap, disk->nr_zones);
+	bitmap = rcu_replace_pointer(disk->conv_zones_bitmap, bitmap,
+				     lockdep_is_held(&disk->zone_wplugs_lock));
+	spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags);
+
+	kfree_rcu_mightsleep(bitmap);
+
+	return nr_conv_zones;
+}
+
 void disk_free_zone_resources(struct gendisk *disk)
 {
 	if (!disk->zone_wplugs_pool)
@@ -1478,8 +1489,7 @@ void disk_free_zone_resources(struct gendisk *disk)
 	mempool_destroy(disk->zone_wplugs_pool);
 	disk->zone_wplugs_pool = NULL;
 
-	bitmap_free(disk->conv_zones_bitmap);
-	disk->conv_zones_bitmap = NULL;
+	disk_set_conv_zones_bitmap(disk, NULL);
 	disk->zone_capacity = 0;
 	disk->last_zone_capacity = 0;
 	disk->nr_zones = 0;
@@ -1538,17 +1548,16 @@ static int disk_update_zone_resources(struct gendisk *disk,
 				      struct blk_revalidate_zone_args *args)
 {
 	struct request_queue *q = disk->queue;
-	unsigned int nr_seq_zones, nr_conv_zones = 0;
+	unsigned int nr_seq_zones, nr_conv_zones;
 	unsigned int pool_size;
 	struct queue_limits lim;
+	int ret;
 
 	disk->nr_zones = args->nr_zones;
 	disk->zone_capacity = args->zone_capacity;
 	disk->last_zone_capacity = args->last_zone_capacity;
-	swap(disk->conv_zones_bitmap, args->conv_zones_bitmap);
-	if (disk->conv_zones_bitmap)
-		nr_conv_zones = bitmap_weight(disk->conv_zones_bitmap,
-					      disk->nr_zones);
+	nr_conv_zones =
+		disk_set_conv_zones_bitmap(disk, args->conv_zones_bitmap);
 	if (nr_conv_zones >= disk->nr_zones) {
 		pr_warn("%s: Invalid number of conventional zones %u / %u\n",
 			disk->disk_name, nr_conv_zones, disk->nr_zones);
@@ -1593,7 +1602,11 @@ static int disk_update_zone_resources(struct gendisk *disk,
 	}
 
 commit:
-	return queue_limits_commit_update(q, &lim);
+	blk_mq_freeze_queue(q);
+	ret = queue_limits_commit_update(q, &lim);
+	blk_mq_unfreeze_queue(q);
+
+	return ret;
 }
 
 static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
@@ -1774,12 +1787,6 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
 		return -ENODEV;
 	}
 
-	if (!queue_max_zone_append_sectors(q)) {
-		pr_warn("%s: Invalid 0 maximum zone append limit\n",
-			disk->disk_name);
-		return -ENODEV;
-	}
-
 	/*
 	 * Ensure that all memory allocations in this context are done as if
 	 * GFP_NOIO was specified.
@@ -1814,16 +1821,15 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
 	 * Set the new disk zone parameters only once the queue is frozen and
 	 * all I/Os are completed.
 	 */
-	blk_mq_freeze_queue(q);
 	if (ret > 0)
 		ret = disk_update_zone_resources(disk, &args);
 	else
 		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
-	if (ret)
+	if (ret) {
+		blk_mq_freeze_queue(q);
 		disk_free_zone_resources(disk);
-	blk_mq_unfreeze_queue(q);
-
-	kfree(args.conv_zones_bitmap);
+		blk_mq_unfreeze_queue(q);
+	}
 
 	return ret;
 }
@@ -1851,7 +1857,7 @@ int queue_zone_wplugs_show(void *data, struct seq_file *m)
 		spin_lock_irqsave(&zwplug->lock, flags);
 		zwp_zone_no = zwplug->zone_no;
 		zwp_flags = zwplug->flags;
-		zwp_ref = atomic_read(&zwplug->ref);
+		zwp_ref = refcount_read(&zwplug->ref);
 		zwp_wp_offset = zwplug->wp_offset;
 		zwp_bio_list_size = bio_list_size(&zwplug->bio_list);
 		spin_unlock_irqrestore(&zwplug->lock, flags);
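Note on the refcount_t conversion: the zone write plug reference count moves from atomic_t to refcount_t, keeping the same acquire/release pattern while gaining saturation and underflow checking. The sketch below is not taken from the patch; it is a minimal, hypothetical illustration (the demo_node/demo_table/demo_* names are invented) of the lookup-under-RCU pattern that disk_get_zone_wplug() and disk_put_zone_wplug() rely on. An allocation path would initialize the count with refcount_set(), as the patch does with refcount_set(&zwplug->ref, 2) for the hash-table reference plus the caller's reference.

/*
 * Minimal sketch of the refcount_t + RCU lookup pattern used by the zone
 * write plugs. Hypothetical example, not part of the patch.
 */
#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct demo_node {
	struct hlist_node	node;
	refcount_t		ref;	/* was atomic_t before the conversion */
	unsigned int		key;
	struct rcu_head		rcu_head;
};

static struct hlist_head demo_table[16];

/* Lookup: only take a reference if the object is still live. */
static struct demo_node *demo_get(unsigned int key)
{
	struct demo_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(n, &demo_table[key % 16], node) {
		if (n->key == key && refcount_inc_not_zero(&n->ref)) {
			rcu_read_unlock();
			return n;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static void demo_free_rcu(struct rcu_head *rcu_head)
{
	kfree(container_of(rcu_head, struct demo_node, rcu_head));
}

/* Put: free through RCU once the last reference is dropped. */
static void demo_put(struct demo_node *n)
{
	if (refcount_dec_and_test(&n->ref))
		call_rcu(&n->rcu_head, demo_free_rcu);
}

Because refcount_inc_not_zero() fails once the count has reached zero, a concurrent lookup can never resurrect an object that the put path has already handed to RCU for freeing.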
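Note on disk_set_conv_zones_bitmap(): instead of freeing the conventional-zone bitmap directly, the new helper swaps the pointer under disk->zone_wplugs_lock with rcu_replace_pointer() and defers the free with kfree_rcu_mightsleep(). Presumably this is what lets the bdev_zone_is_seq() checks introduced above read the bitmap without taking the spinlock. Below is a hedged sketch of the same replace-then-defer-free idiom, with invented demo_* names standing in for the gendisk fields; the reader side is an assumption about how such a lockless check would look, not code shown in this diff.

/*
 * Sketch of the rcu_replace_pointer() + deferred-free idiom. Illustrative
 * only; the demo_* names are made up.
 */
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct demo_disk {
	spinlock_t		lock;
	unsigned int		nr_zones;
	unsigned long __rcu	*conv_bitmap;
};

/* Writer: publish a new bitmap, free the old one after a grace period. */
static unsigned int demo_set_bitmap(struct demo_disk *d, unsigned long *bitmap)
{
	unsigned int nr_conv = 0;
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	if (bitmap)
		nr_conv = bitmap_weight(bitmap, d->nr_zones);
	/* Swap in the new bitmap and get back the old pointer. */
	bitmap = rcu_replace_pointer(d->conv_bitmap, bitmap,
				     lockdep_is_held(&d->lock));
	spin_unlock_irqrestore(&d->lock, flags);

	/* May sleep; waits for existing readers before freeing. */
	kfree_rcu_mightsleep(bitmap);

	return nr_conv;
}

/* Reader sketch (assumed): check the bitmap without taking the lock. */
static bool demo_zone_is_conv(struct demo_disk *d, unsigned int zno)
{
	unsigned long *bitmap;
	bool ret;

	rcu_read_lock();
	bitmap = rcu_dereference(d->conv_bitmap);
	ret = bitmap && test_bit(zno, bitmap);
	rcu_read_unlock();

	return ret;
}

Deferring the free is the design choice that keeps the hot-path readers lockless; the same helper also serves the teardown path, where disk_free_zone_resources() simply installs a NULL bitmap.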