author     Linus Torvalds <torvalds@linux-foundation.org>  2024-09-25 14:56:40 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-09-25 14:56:40 -0700
commit     11a299a7933e03c83818b431e6a1c53ad387423d (patch)
tree       b9758050b496000f25746fa4e8adf29f7b638baf /block
parent     fe29393877be63363247510b99ae9a8068cacb31 (diff)
parent     a045553362b53fb8f34bb1c3e5de5e020af79550 (diff)
Merge tag 'for-6.12/block-20240925' of git://git.kernel.dk/linux
Pull more block updates from Jens Axboe:
- Improve blk-integrity segment counting and merging (Keith)
- NVMe pull request via Keith:
    - Multipath fixes (Hannes)
    - Sysfs attribute list NULL terminate fix (Shin'ichiro)
    - Remove problematic read-back (Keith)
- Fix for a regression from 6.11 in IO scheduler switching and queue
  freezing (Damien)
- Use a raw spinlock for sbitmap, as it may get called from
  preempt-disabled context (Ming); a minimal sketch of the locking
  pattern follows the commit list below
- Cleanup for bd_claiming waiting, using var_waitqueue() rather than
  the bit waitqueues, as that more accurately describes what it does
  (Neil); see the sketch right after this list
- Various cleanups (Kanchan, Qiu-ji, David)
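
For background on the bd_claiming cleanup above, here is a minimal sketch
of the var_waitqueue pattern, with a stand-in pointer named 'claiming'
(hypothetical, not the bdev.c code); the real change in the diff below
open-codes the wait with prepare_to_wait() so it can drop a lock before
sleeping:

    #include <linux/wait_bit.h>

    /* Stand-in for bd_claiming; any variable's address keys the waitqueue. */
    static void *claiming;

    static void wait_for_claim(void)
    {
            /* Sleep until a waker clears 'claiming' and calls wake_up_var(). */
            wait_var_event(&claiming, !READ_ONCE(claiming));
    }

    static void finish_claim(void)
    {
            WRITE_ONCE(claiming, NULL);
            /* Wake all waiters keyed on the address of 'claiming'. */
            wake_up_var(&claiming);
    }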
* tag 'for-6.12/block-20240925' of git://git.kernel.dk/linux:
nvme: remove CC register read-back during enabling
nvme: null terminate nvme_tls_attrs
nvme-multipath: avoid hang on inaccessible namespaces
nvme-multipath: system fails to create generic nvme device
lib/sbitmap: define swap_lock as raw_spinlock_t
block: Remove unused blk_limits_io_{min,opt}
drbd: Fix atomicity violation in drbd_uuid_set_bm()
block: Fix elv_iosched_local_module handling of "none" scheduler
block: remove bogus union
block: change wait on bd_claiming to use a var_waitqueue
blk-integrity: improved sg segment mapping
block: unexport blk_rq_count_integrity_sg
nvme-rdma: use request to get integrity segments
scsi: use request to get integrity segments
block: provide a request helper for user integrity segments
blk-integrity: consider entire bio list for merging
blk-integrity: properly account for segments
blk-mq: set the nr_integrity_segments from bio
blk-mq: unconditional nr_integrity_segments
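
The sbitmap commit above boils down to using raw_spinlock_t, which keeps
spinning even on PREEMPT_RT, where a plain spinlock_t becomes a sleeping
lock. A minimal sketch of the pattern, with illustrative names rather
than the actual sbitmap fields:

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(swap_lock);  /* raw: never sleeps, even on RT */
    static unsigned long deferred_bits;

    static void clear_deferred(unsigned long mask)
    {
            unsigned long flags;

            /*
             * Safe from preempt-disabled callers: raw_spin_lock_irqsave()
             * spins rather than sleeping, unlike spinlock_t on PREEMPT_RT.
             */
            raw_spin_lock_irqsave(&swap_lock, flags);
            deferred_bits &= ~mask;
            raw_spin_unlock_irqrestore(&swap_lock, flags);
    }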
Diffstat (limited to 'block')
-rw-r--r--  block/bdev.c          |  4
-rw-r--r--  block/bio-integrity.c |  1
-rw-r--r--  block/blk-integrity.c | 36
-rw-r--r--  block/blk-merge.c     |  4
-rw-r--r--  block/blk-mq.c        |  5
-rw-r--r--  block/blk-settings.c  | 42
-rw-r--r--  block/elevator.c      |  4
7 files changed, 37 insertions, 59 deletions
diff --git a/block/bdev.c b/block/bdev.c
index 33f9c4605e3a..738e3c8457e7 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -555,7 +555,7 @@ retry:
 
 	/* if claiming is already in progress, wait for it to finish */
 	if (whole->bd_claiming) {
-		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
+		wait_queue_head_t *wq = __var_waitqueue(&whole->bd_claiming);
 		DEFINE_WAIT(wait);
 
 		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
@@ -578,7 +578,7 @@ static void bd_clear_claiming(struct block_device *whole, void *holder)
 	/* tell others that we're done */
 	BUG_ON(whole->bd_claiming != holder);
 	whole->bd_claiming = NULL;
-	wake_up_bit(&whole->bd_claiming, 0);
+	wake_up_var(&whole->bd_claiming);
 }
 
 /**
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 96a2653905ae..88e3ad73c385 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -367,7 +367,6 @@ free_bvec:
 	kfree(bvec);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(bio_integrity_map_user);
 
 /**
  * bio_integrity_prep - Prepare bio for integrity I/O
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 010decc892ea..0a2b1c5d0ebf 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -53,7 +53,6 @@ new_segment:
 
 	return segments;
 }
-EXPORT_SYMBOL(blk_rq_count_integrity_sg);
 
 /**
  * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
@@ -63,19 +62,20 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg);
  *
  * Description: Map the integrity vectors in request into a
  *		scatterlist.  The scatterlist must be big enough to hold all
- *		elements.  I.e. sized using blk_rq_count_integrity_sg().
+ *		elements.  I.e. sized using blk_rq_count_integrity_sg() or
+ *		rq->nr_integrity_segments.
  */
-int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
-			    struct scatterlist *sglist)
+int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
 {
 	struct bio_vec iv, ivprv = { NULL };
+	struct request_queue *q = rq->q;
 	struct scatterlist *sg = NULL;
+	struct bio *bio = rq->bio;
 	unsigned int segments = 0;
 	struct bvec_iter iter;
 	int prev = 0;
 
 	bio_for_each_integrity_vec(iv, bio, iter) {
-
 		if (prev) {
 			if (!biovec_phys_mergeable(q, &ivprv, &iv))
 				goto new_segment;
@@ -103,10 +103,30 @@ new_segment:
 	if (sg)
 		sg_mark_end(sg);
 
+	/*
+	 * Something must have been wrong if the figured number of segment
+	 * is bigger than number of req's physical integrity segments
+	 */
+	BUG_ON(segments > rq->nr_integrity_segments);
+	BUG_ON(segments > queue_max_integrity_segments(q));
 	return segments;
 }
 EXPORT_SYMBOL(blk_rq_map_integrity_sg);
 
+int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
+			      ssize_t bytes, u32 seed)
+{
+	int ret = bio_integrity_map_user(rq->bio, ubuf, bytes, seed);
+
+	if (ret)
+		return ret;
+
+	rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
+	rq->cmd_flags |= REQ_INTEGRITY;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
+
 bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
 			    struct request *next)
 {
@@ -134,7 +154,6 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
 			     struct bio *bio)
 {
 	int nr_integrity_segs;
-	struct bio *next = bio->bi_next;
 
 	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
 		return true;
@@ -145,16 +164,11 @@ bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
 	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
 		return false;
 
-	bio->bi_next = NULL;
 	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
-	bio->bi_next = next;
-
 	if (req->nr_integrity_segments + nr_integrity_segs >
 	    q->limits.max_integrity_segments)
 		return false;
 
-	req->nr_integrity_segments += nr_integrity_segs;
-
 	return true;
 }
 
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 56769c4bcd79..ad763ec313b6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -639,6 +639,9 @@ static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
 	 * counters.
 	 */
 	req->nr_phys_segments += nr_phys_segs;
+	if (bio_integrity(bio))
+		req->nr_integrity_segments += blk_rq_count_integrity_sg(req->q,
+									bio);
 	return 1;
 
 no_merge:
@@ -731,6 +734,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 
 	/* Merge is OK... */
 	req->nr_phys_segments = total_phys_segments;
+	req->nr_integrity_segments += next->nr_integrity_segments;
 	return 1;
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 831c5cf5d874..4b2c8e940f59 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -376,9 +376,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	rq->io_start_time_ns = 0;
 	rq->stats_sectors = 0;
 	rq->nr_phys_segments = 0;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
 	rq->nr_integrity_segments = 0;
-#endif
 	rq->end_io = NULL;
 	rq->end_io_data = NULL;
 
@@ -2546,6 +2544,9 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
 	rq->__sector = bio->bi_iter.bi_sector;
 	rq->write_hint = bio->bi_write_hint;
 	blk_rq_bio_prep(rq, bio, nr_segs);
+	if (bio_integrity(bio))
+		rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q,
+								      bio);
 
 	/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
 	err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index cd8a8eabc9a5..a446654ddee5 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -437,48 +437,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
 }
 EXPORT_SYMBOL_GPL(queue_limits_set);
 
-/**
- * blk_limits_io_min - set minimum request size for a device
- * @limits: the queue limits
- * @min:  smallest I/O size in bytes
- *
- * Description:
- *   Some devices have an internal block size bigger than the reported
- *   hardware sector size.  This function can be used to signal the
- *   smallest I/O the device can perform without incurring a performance
- *   penalty.
- */
-void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
-{
-	limits->io_min = min;
-
-	if (limits->io_min < limits->logical_block_size)
-		limits->io_min = limits->logical_block_size;
-
-	if (limits->io_min < limits->physical_block_size)
-		limits->io_min = limits->physical_block_size;
-}
-EXPORT_SYMBOL(blk_limits_io_min);
-
-/**
- * blk_limits_io_opt - set optimal request size for a device
- * @limits: the queue limits
- * @opt:  smallest I/O size in bytes
- *
- * Description:
- *   Storage devices may report an optimal I/O size, which is the
- *   device's preferred unit for sustained I/O.  This is rarely reported
- *   for disk drives.  For RAID arrays it is usually the stripe width or
- *   the internal track size.  A properly aligned multiple of
- *   optimal_io_size is the preferred request size for workloads where
- *   sustained throughput is desired.
- */
-void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
-{
-	limits->io_opt = opt;
-}
-EXPORT_SYMBOL(blk_limits_io_opt);
-
 static int queue_limit_alignment_offset(const struct queue_limits *lim,
 		sector_t sector)
 {
diff --git a/block/elevator.c b/block/elevator.c
index c355b55d0107..4122026b11f1 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -715,7 +715,9 @@ int elv_iosched_load_module(struct gendisk *disk, const char *buf,
 
 	strscpy(elevator_name, buf, sizeof(elevator_name));
 
-	return request_module("%s-iosched", strstrip(elevator_name));
+	request_module("%s-iosched", strstrip(elevator_name));
+
+	return 0;
 }
 
 ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
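
The blk-integrity changes above move integrity segment accounting into
the request itself, so a driver can size and fill its metadata
scatterlist straight from rq->nr_integrity_segments instead of doing a
fresh blk_rq_count_integrity_sg() walk. A driver-side sketch of the new
calling convention, assuming a pre-allocated 'sgl' with enough entries
(map_metadata is a hypothetical helper, not a block-layer API):

    #include <linux/blk-integrity.h>
    #include <linux/scatterlist.h>

    static int map_metadata(struct request *rq, struct scatterlist *sgl)
    {
            int nr_sg;

            if (!blk_integrity_rq(rq))
                    return 0;       /* no integrity payload on this request */

            /* sgl must hold at least rq->nr_integrity_segments entries */
            sg_init_table(sgl, rq->nr_integrity_segments);

            /* New signature: the bio and queue are taken from the request. */
            nr_sg = blk_rq_map_integrity_sg(rq, sgl);
            return nr_sg;
    }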