author		Linus Torvalds <torvalds@linux-foundation.org>	2014-06-11 08:41:17 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-11 08:41:17 -0700
commit		23d4ed53b7342bf5999b3ea227d9f69e75e5a625
tree		86229fb558235c2f742b35c0c66d5d98003f5f6e /include
parent		e413a19a8ef49ae3b76310bb569dabe66b22f5a3
parent		a2d445d440003f2d70ee4cd4970ea82ace616fee
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block layer fixes from Jens Axboe:
"Final small batch of fixes to be included before -rc1. Some general
cleanups in here as well, but some of the blk-mq fixes we need for the
NVMe conversion and/or scsi-mq. The pull request contains:
- Support for not merging across a specified "chunk size", if set by
the driver. Some NVMe devices perform poorly for IO that crosses
such a chunk, so we need to support it generically as part of
request merging, to avoid having to do complicated split logic. From
me. (A sketch of the boundary arithmetic follows this message.)
- Bump max tag depth to 10Ki tags. Some scsi devices have a huge
shared tag space. Previously we failed with EINVAL if too large a tag
depth was specified; now we truncate it and pass back the actual
value. From me. (A sketch of the truncation behaviour also follows
this message.)
- Various blk-mq rq init fixes from me and others.
- A fix for enter on a dying queue for blk-mq from Keith. This is
needed to prevent oopsing on hot device removal.
- Fixup for blk-mq timer addition from Ming Lei.
- Small round of performance fixes for mtip32xx from Sam Bradshaw.
- Minor stack leak fix from Rickard Strandqvist.
- Two __init annotations from Fabian Frederick"
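
The chunk-size logic mentioned above comes down to simple mask arithmetic.
The following standalone C sketch is an illustration only, not the kernel
code: the helper name is made up, and the power-of-two assumption is taken
from the mask used by the new blk_max_size_offset() helper in the diff
below.

#include <stdio.h>

/*
 * Illustrative only: mirrors the arithmetic of blk_max_size_offset().
 * chunk_sectors is assumed to be a power of two; 0 means "no chunk limit".
 */
static unsigned int max_size_at_offset(unsigned int chunk_sectors,
                                       unsigned long long offset)
{
	if (!chunk_sectors)
		return ~0u;	/* no chunk limit configured */

	/* sectors remaining before the next chunk boundary */
	return chunk_sectors - (unsigned int)(offset & (chunk_sectors - 1));
}

int main(void)
{
	/* 128-sector (64 KiB) chunks: a request starting at sector 120
	 * may only cover 8 more sectors before spanning a boundary. */
	printf("%u\n", max_size_at_offset(128, 120));	/* prints 8 */
	return 0;
}

In the patch itself the same quantity is combined with the usual per-queue
limits via min() in blk_rq_get_max_sectors(), so merging simply stops at the
boundary instead of needing a later split.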
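The tag-depth change is likewise a clamp rather than an error. A minimal
sketch, assuming only the BLK_MQ_MAX_DEPTH value visible in the diff below;
the function and its name are hypothetical, not the actual blk-mq code:

#define EXAMPLE_BLK_MQ_MAX_DEPTH	10240	/* BLK_MQ_MAX_DEPTH after this merge */

/* Truncate an oversized tag depth instead of rejecting it with -EINVAL;
 * the caller sees the depth that was actually used. */
static unsigned int clamp_tag_depth(unsigned int requested)
{
	return requested > EXAMPLE_BLK_MQ_MAX_DEPTH ?
		EXAMPLE_BLK_MQ_MAX_DEPTH : requested;
}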
* 'for-linus' of git://git.kernel.dk/linux-block:
block: add __init to blkcg_policy_register
block: add __init to elv_register
block: ensure that bio_add_page() always accepts a page for an empty bio
blk-mq: add timer in blk_mq_start_request
blk-mq: always initialize request->start_time
block: blk-exec.c: Cleaning up local variable address returnd
mtip32xx: minor performance enhancements
blk-mq: ->timeout should be cleared in blk_mq_rq_ctx_init()
blk-mq: don't allow queue entering for a dying queue
blk-mq: bump max tag depth to 10K tags
block: add blk_rq_set_block_pc()
block: add notion of a chunk size for request merging
Diffstat (limited to 'include')
-rw-r--r--	include/linux/blk-mq.h		2
-rw-r--r--	include/linux/blkdev.h		23
-rw-r--r--	include/linux/elevator.h	2
3 files changed, 24 insertions, 3 deletions
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 0feedebfde48..a002cf191427 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -135,7 +135,7 @@ enum {
 	BLK_MQ_S_STOPPED	= 0,
 	BLK_MQ_S_TAG_ACTIVE	= 1,
 
-	BLK_MQ_MAX_DEPTH	= 2048,
+	BLK_MQ_MAX_DEPTH	= 10240,
 
 	BLK_MQ_CPU_WORK_BATCH	= 8,
 };
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3cd426e971db..31e11051f1ba 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -280,6 +280,7 @@ struct queue_limits {
 	unsigned long		seg_boundary_mask;
 
 	unsigned int		max_hw_sectors;
+	unsigned int		chunk_sectors;
 	unsigned int		max_sectors;
 	unsigned int		max_segment_size;
 	unsigned int		physical_block_size;
@@ -795,6 +796,7 @@ extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern struct request *blk_make_request(struct request_queue *, struct bio *,
 					gfp_t);
+extern void blk_rq_set_block_pc(struct request *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
 		unsigned int len);
@@ -910,6 +912,20 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 	return q->limits.max_sectors;
 }
 
+/*
+ * Return maximum size of a request at given offset. Only valid for
+ * file system requests.
+ */
+static inline unsigned int blk_max_size_offset(struct request_queue *q,
+					       sector_t offset)
+{
+	if (!q->limits.chunk_sectors)
+		return q->limits.max_hw_sectors;
+
+	return q->limits.chunk_sectors -
+			(offset & (q->limits.chunk_sectors - 1));
+}
+
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 {
 	struct request_queue *q = rq->q;
@@ -917,7 +933,11 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
 		return q->limits.max_hw_sectors;
 
-	return blk_queue_get_max_sectors(q, rq->cmd_flags);
+	if (!q->limits.chunk_sectors)
+		return blk_queue_get_max_sectors(q, rq->cmd_flags);
+
+	return min(blk_max_size_offset(q, blk_rq_pos(rq)),
+			blk_queue_get_max_sectors(q, rq->cmd_flags));
 }
 
 static inline unsigned int blk_rq_count_bios(struct request *rq)
@@ -983,6 +1003,7 @@ extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index df63bd3a8cf1..4ff262e2bf37 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -144,7 +144,7 @@ extern void elv_drain_elevator(struct request_queue *);
  * io scheduler registration
  */
 extern void __init load_default_elevator_module(void);
-extern int elv_register(struct elevator_type *);
+extern int __init elv_register(struct elevator_type *);
 extern void elv_unregister(struct elevator_type *);
 
 /*
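As a usage note, a driver opts into the new limit through the
blk_queue_chunk_sectors() setter declared above. The sketch below is
hypothetical (the function name and the chosen values are illustrative,
not taken from any real driver); only the two setters come from the
headers in this diff.

#include <linux/blkdev.h>

/* Hypothetical queue setup in a driver: cap merges at 64 KiB chunks.
 * The mask arithmetic in blk_max_size_offset() implies the chunk size
 * must be a power of two. */
static void example_setup_queue_limits(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 1024);	/* 512 KiB max per request */
	blk_queue_chunk_sectors(q, 128);	/* 64 KiB chunk boundary */
}

With chunk_sectors set, blk_rq_get_max_sectors() returns the smaller of the
distance to the next chunk boundary and the normal per-queue limits for
file-system requests, so merged IO never crosses a chunk.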