author     Dmitry Torokhov <dmitry.torokhov@gmail.com>   2014-05-14 16:49:19 -0700
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>   2014-05-14 16:49:19 -0700
commit     879f99ef2c4c05d9a7f0a67a05f1415663119825 (patch)
tree       0399664761f675eef741f8b08b92635069586151 /block
parent     70a26071f8552a0f193cf442f424decfe0f2e569 (diff)
parent     d6d211db37e75de2ddc3a4f979038c40df7cc79c (diff)
Merge tag 'v3.15-rc5' into next
Merge with Linux 3.15-rc5 to sync up Wacom and other changes.
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c           13
-rw-r--r--  block/blk-cgroup.h           22
-rw-r--r--  block/blk-core.c             31
-rw-r--r--  block/blk-exec.c              2
-rw-r--r--  block/blk-flush.c            15
-rw-r--r--  block/blk-ioc.c               2
-rw-r--r--  block/blk-iopoll.c            3
-rw-r--r--  block/blk-map.c               4
-rw-r--r--  block/blk-mq-cpu.c           14
-rw-r--r--  block/blk-mq-cpumap.c        10
-rw-r--r--  block/blk-mq-sysfs.c         31
-rw-r--r--  block/blk-mq.c              180
-rw-r--r--  block/blk-mq.h                3
-rw-r--r--  block/blk-softirq.c          14
-rw-r--r--  block/blk-throttle.c          8
-rw-r--r--  block/blk.h                   2
-rw-r--r--  block/cfq-iosched.c          15
-rw-r--r--  block/deadline-iosched.c      8
-rw-r--r--  block/elevator.c              2
-rw-r--r--  block/partitions/atari.h      4
-rw-r--r--  block/partitions/efi.h        9
-rw-r--r--  block/partitions/karma.c      3
22 files changed, 202 insertions(+), 193 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4e491d9b5292..e4a4145926f6 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -336,7 +336,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
* under queue_lock. If it's not pointing to @blkg now, it never
* will. Hint assignment itself can race safely.
*/
- if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
+ if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
rcu_assign_pointer(blkcg->blkg_hint, NULL);
/*
@@ -894,7 +894,7 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css,
int ret = 0;
/* task_lock() is needed to avoid races with exit_io_context() */
- cgroup_taskset_for_each(task, css, tset) {
+ cgroup_taskset_for_each(task, tset) {
task_lock(task);
ioc = task->io_context;
if (ioc && atomic_read(&ioc->nr_tasks) > 1)
@@ -906,17 +906,14 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css,
return ret;
}
-struct cgroup_subsys blkio_subsys = {
- .name = "blkio",
+struct cgroup_subsys blkio_cgrp_subsys = {
.css_alloc = blkcg_css_alloc,
.css_offline = blkcg_css_offline,
.css_free = blkcg_css_free,
.can_attach = blkcg_can_attach,
- .subsys_id = blkio_subsys_id,
.base_cftypes = blkcg_files,
- .module = THIS_MODULE,
};
-EXPORT_SYMBOL_GPL(blkio_subsys);
+EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
/**
* blkcg_activate_policy - activate a blkcg policy on a request_queue
@@ -1106,7 +1103,7 @@ int blkcg_policy_register(struct blkcg_policy *pol)
/* everything is in place, add intf files for the new policy */
if (pol->cftypes)
- WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
+ WARN_ON(cgroup_add_cftypes(&blkio_cgrp_subsys, pol->cftypes));
ret = 0;
out_unlock:
mutex_unlock(&blkcg_pol_mutex);
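The blkg_hint hunk above swaps rcu_dereference_raw() for rcu_access_pointer(): the hint is only compared against a known pointer and never dereferenced, and rcu_access_pointer() documents exactly that intent. A minimal sketch of the pattern, with illustrative (not kernel) names:

    #include <linux/rcupdate.h>

    struct item;

    struct cache {
            struct item __rcu *hint;        /* RCU-protected lookup hint */
    };

    /* Called with the lock protecting hint assignment held. */
    static void cache_forget_hint(struct cache *c, struct item *victim)
    {
            /*
             * Only the pointer value is compared, never dereferenced,
             * so rcu_access_pointer() is sufficient here.
             */
            if (rcu_access_pointer(c->hint) == victim)
                    rcu_assign_pointer(c->hint, NULL);
    }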
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 86154eab9523..371fe8e92ab5 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -186,7 +186,7 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
- return css_to_blkcg(task_css(tsk, blkio_subsys_id));
+ return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}
static inline struct blkcg *bio_blkcg(struct bio *bio)
@@ -241,12 +241,16 @@ static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
*/
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
- int ret;
+ char *p;
- ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
- if (ret)
+ p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
+ if (!p) {
strncpy(buf, "<unavailable>", buflen);
- return ret;
+ return -ENAMETOOLONG;
+ }
+
+ memmove(buf, p, buf + buflen - p);
+ return 0;
}
/**
@@ -435,9 +439,9 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
uint64_t v;
do {
- start = u64_stats_fetch_begin_bh(&stat->syncp);
+ start = u64_stats_fetch_begin_irq(&stat->syncp);
v = stat->cnt;
- } while (u64_stats_fetch_retry_bh(&stat->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&stat->syncp, start));
return v;
}
@@ -508,9 +512,9 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
struct blkg_rwstat tmp;
do {
- start = u64_stats_fetch_begin_bh(&rwstat->syncp);
+ start = u64_stats_fetch_begin_irq(&rwstat->syncp);
tmp = *rwstat;
- } while (u64_stats_fetch_retry_bh(&rwstat->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));
return tmp;
}
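Both fetch loops above move from the _bh to the _irq u64_stats helpers, following the 3.15 rename of that API; the reader retries until it observes a consistent 64-bit snapshot, which only costs anything on 32-bit SMP where the helpers expand to a seqcount. A sketch of the writer/reader pairing, with invented names:

    #include <linux/u64_stats_sync.h>

    struct byte_counter {
            struct u64_stats_sync syncp;
            u64 bytes;
    };

    static void counter_add(struct byte_counter *c, u64 n)
    {
            u64_stats_update_begin(&c->syncp);
            c->bytes += n;
            u64_stats_update_end(&c->syncp);
    }

    static u64 counter_read(struct byte_counter *c)
    {
            unsigned int start;
            u64 v;

            do {
                    start = u64_stats_fetch_begin_irq(&c->syncp);
                    v = c->bytes;
            } while (u64_stats_fetch_retry_irq(&c->syncp, start));

            return v;
    }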
diff --git a/block/blk-core.c b/block/blk-core.c
index 853f92749202..a0e3096c4bb5 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -693,20 +693,11 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!uninit_q)
return NULL;
- uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
- if (!uninit_q->flush_rq)
- goto out_cleanup_queue;
-
q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
- goto out_free_flush_rq;
- return q;
+ blk_cleanup_queue(uninit_q);
-out_free_flush_rq:
- kfree(uninit_q->flush_rq);
-out_cleanup_queue:
- blk_cleanup_queue(uninit_q);
- return NULL;
+ return q;
}
EXPORT_SYMBOL(blk_init_queue_node);
@@ -717,9 +708,13 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
if (!q)
return NULL;
- if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+ q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+ if (!q->flush_rq)
return NULL;
+ if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+ goto fail;
+
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unprep_rq_fn = NULL;
@@ -742,12 +737,16 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
/* init elevator */
if (elevator_init(q, NULL)) {
mutex_unlock(&q->sysfs_lock);
- return NULL;
+ goto fail;
}
mutex_unlock(&q->sysfs_lock);
return q;
+
+fail:
+ kfree(q->flush_rq);
+ return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1308,7 +1307,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
struct request_list *rl = blk_rq_rl(req);
BUG_ON(!list_empty(&req->queuelist));
- BUG_ON(!hlist_unhashed(&req->hash));
+ BUG_ON(ELV_ON_HASH(req));
blk_free_request(rl, req);
freed_request(rl, flags);
@@ -1929,7 +1928,7 @@ EXPORT_SYMBOL(submit_bio);
* in some cases below, so export this function.
* Request stacking drivers like request-based dm may change the queue
* limits while requests are in the queue (e.g. dm's table swapping).
- * Such request stacking drivers should check those requests agaist
+ * Such request stacking drivers should check those requests against
* the new queue limits again when they dispatch those requests,
* although such checkings are also done against the old queue limits
* when submitting requests.
@@ -2354,7 +2353,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
if (!req->bio)
return false;
- trace_block_rq_complete(req->q, req);
+ trace_block_rq_complete(req->q, req, nr_bytes);
/*
* For fs requests, rq is just carrier of independent bio's
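The blk-core.c hunks move the flush_rq allocation into blk_init_allocated_queue() and route every later failure through a single fail label, so the request is freed exactly once on any error path. Reduced to a sketch (the struct and helper names below are hypothetical):

    #include <linux/slab.h>

    struct widget { int dummy; };
    static int widget_init_pools(struct widget *w);         /* hypothetical */
    static int widget_init_elevator(struct widget *w);      /* hypothetical */

    static struct widget *widget_setup(void)
    {
            struct widget *w;

            w = kzalloc(sizeof(*w), GFP_KERNEL);
            if (!w)
                    return NULL;

            if (widget_init_pools(w))
                    goto fail;

            if (widget_init_elevator(w))
                    goto fail;

            return w;

    fail:
            kfree(w);       /* single unwind point frees the allocation once */
            return NULL;
    }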
diff --git a/block/blk-exec.c b/block/blk-exec.c
index c68613bb4c79..dbf4502b1d67 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
* be resued after dying flag is set
*/
if (q->mq_ops) {
- blk_mq_insert_request(q, rq, at_head, true);
+ blk_mq_insert_request(rq, at_head, true, false);
return;
}
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 66e2b697f5db..43e6b4755e9a 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -137,17 +137,20 @@ static void mq_flush_run(struct work_struct *work)
rq = container_of(work, struct request, mq_flush_work);
memset(&rq->csd, 0, sizeof(rq->csd));
- blk_mq_run_request(rq, true, false);
+ blk_mq_insert_request(rq, false, true, false);
}
-static bool blk_flush_queue_rq(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
if (rq->q->mq_ops) {
INIT_WORK(&rq->mq_flush_work, mq_flush_run);
kblockd_schedule_work(rq->q, &rq->mq_flush_work);
return false;
} else {
- list_add_tail(&rq->queuelist, &rq->q->queue_head);
+ if (add_front)
+ list_add(&rq->queuelist, &rq->q->queue_head);
+ else
+ list_add_tail(&rq->queuelist, &rq->q->queue_head);
return true;
}
}
@@ -193,7 +196,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
- queued = blk_flush_queue_rq(rq);
+ queued = blk_flush_queue_rq(rq, true);
break;
case REQ_FSEQ_DONE:
@@ -326,7 +329,7 @@ static bool blk_kick_flush(struct request_queue *q)
q->flush_rq->rq_disk = first_rq->rq_disk;
q->flush_rq->end_io = flush_end_io;
- return blk_flush_queue_rq(q->flush_rq);
+ return blk_flush_queue_rq(q->flush_rq, false);
}
static void flush_data_end_io(struct request *rq, int error)
@@ -411,7 +414,7 @@ void blk_insert_flush(struct request *rq)
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
if (q->mq_ops) {
- blk_mq_run_request(rq, false, true);
+ blk_mq_insert_request(rq, false, false, true);
} else
list_add_tail(&rq->queuelist, &q->queue_head);
return;
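blk_flush_queue_rq() above gains an add_front flag so a requeued flush data request can be placed at the queue head for immediate dispatch rather than at the tail. The two list primitives differ only in insertion point; a tiny sketch:

    #include <linux/types.h>
    #include <linux/list.h>

    /* Insert entry at the head (dispatch next) or the tail (dispatch last). */
    static void queue_entry(struct list_head *entry, struct list_head *queue,
                            bool add_front)
    {
            if (add_front)
                    list_add(entry, queue);         /* right after the head */
            else
                    list_add_tail(entry, queue);    /* just before the head */
    }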
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 242df01413f6..1a27f45ec776 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -68,7 +68,7 @@ static void ioc_destroy_icq(struct io_cq *icq)
* under queue_lock. If it's not pointing to @icq now, it never
* will. Hint assignment itself can race safely.
*/
- if (rcu_dereference_raw(ioc->icq_hint) == icq)
+ if (rcu_access_pointer(ioc->icq_hint) == icq)
rcu_assign_pointer(ioc->icq_hint, NULL);
ioc_exit_icq(icq);
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 1855bf51edb0..c11d24e379e2 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -14,9 +14,6 @@
#include "blk.h"
-int blk_iopoll_enabled = 1;
-EXPORT_SYMBOL(blk_iopoll_enabled);
-
static unsigned int blk_iopoll_budget __read_mostly = 256;
static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
diff --git a/block/blk-map.c b/block/blk-map.c
index ae4ae1047fd9..f7b22bc21518 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -188,7 +188,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
* unmapping.
*/
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
- struct rq_map_data *map_data, struct sg_iovec *iov,
+ struct rq_map_data *map_data, const struct sg_iovec *iov,
int iov_count, unsigned int len, gfp_t gfp_mask)
{
struct bio *bio;
@@ -285,7 +285,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
*
* Description:
* Data will be mapped directly if possible. Otherwise a bounce
- * buffer is used. Can be called multple times to append multple
+ * buffer is used. Can be called multiple times to append multiple
* buffers.
*/
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 3146befb56aa..136ef8643bba 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -11,7 +11,7 @@
#include "blk-mq.h"
static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
static int blk_mq_main_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
@@ -19,12 +19,12 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
unsigned int cpu = (unsigned long) hcpu;
struct blk_mq_cpu_notifier *notify;
- spin_lock(&blk_mq_cpu_notify_lock);
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
notify->notify(notify->data, action, cpu);
- spin_unlock(&blk_mq_cpu_notify_lock);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
return NOTIFY_OK;
}
@@ -32,16 +32,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
BUG_ON(!notifier->notify);
- spin_lock(&blk_mq_cpu_notify_lock);
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
- spin_unlock(&blk_mq_cpu_notify_lock);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
- spin_lock(&blk_mq_cpu_notify_lock);
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
list_del(&notifier->list);
- spin_unlock(&blk_mq_cpu_notify_lock);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
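The notifier lock in blk-mq-cpu.c becomes a raw spinlock: CPU-hotplug notifier callbacks may run where sleeping is forbidden, and on PREEMPT_RT a plain spinlock turns into a sleeping lock while a raw one never does. The pattern, sketched:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    static LIST_HEAD(notify_list);
    static DEFINE_RAW_SPINLOCK(notify_lock);   /* never sleeps, even on RT */

    static void notify_register(struct list_head *entry)
    {
            raw_spin_lock(&notify_lock);
            list_add_tail(entry, &notify_list);
            raw_spin_unlock(&notify_lock);
    }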
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index f8721278601c..097921329619 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -9,15 +9,6 @@
#include "blk.h"
#include "blk-mq.h"
-static void show_map(unsigned int *map, unsigned int nr)
-{
- int i;
-
- pr_info("blk-mq: CPU -> queue map\n");
- for_each_online_cpu(i)
- pr_info(" CPU%2u -> Queue %u\n", i, map[i]);
-}
-
static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
const int cpu)
{
@@ -85,7 +76,6 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
map[i] = map[first_sibling];
}
- show_map(map, nr_cpus);
free_cpumask_var(cpus);
return 0;
}
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index b91ce75bd35d..b0ba264b0522 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -244,6 +244,32 @@ static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
return blk_mq_tag_sysfs_show(hctx->tags, page);
}
+static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+ unsigned int i, queue_num, first = 1;
+ ssize_t ret = 0;
+
+ blk_mq_disable_hotplug();
+
+ for_each_online_cpu(i) {
+ queue_num = hctx->queue->mq_map[i];
+ if (queue_num != hctx->queue_num)
+ continue;
+
+ if (first)
+ ret += sprintf(ret + page, "%u", i);
+ else
+ ret += sprintf(ret + page, ", %u", i);
+
+ first = 0;
+ }
+
+ blk_mq_enable_hotplug();
+
+ ret += sprintf(ret + page, "\n");
+ return ret;
+}
+
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
.attr = {.name = "dispatched", .mode = S_IRUGO },
.show = blk_mq_sysfs_dispatched_show,
@@ -294,6 +320,10 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
.attr = {.name = "tags", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_tags_show,
};
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
+ .attr = {.name = "cpu_list", .mode = S_IRUGO },
+ .show = blk_mq_hw_sysfs_cpus_show,
+};
static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_queued.attr,
@@ -302,6 +332,7 @@ static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_pending.attr,
&blk_mq_hw_sysfs_ipi.attr,
&blk_mq_hw_sysfs_tags.attr,
+ &blk_mq_hw_sysfs_cpus.attr,
NULL,
};
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1fa9dd153fde..1d2a9bdbee57 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -73,8 +73,8 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
set_bit(ctx->index_hw, hctx->ctx_map);
}
-static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
- bool reserved)
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+ gfp_t gfp, bool reserved)
{
struct request *rq;
unsigned int tag;
@@ -193,12 +193,6 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
- gfp_t gfp, bool reserved)
-{
- return blk_mq_alloc_rq(hctx, gfp, reserved);
-}
-
static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
int rw, gfp_t gfp,
bool reserved)
@@ -289,38 +283,10 @@ void blk_mq_free_request(struct request *rq)
__blk_mq_free_request(hctx, ctx, rq);
}
-static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
+bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
{
- if (error)
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
- error = -EIO;
-
- if (unlikely(rq->cmd_flags & REQ_QUIET))
- set_bit(BIO_QUIET, &bio->bi_flags);
-
- /* don't actually finish bio if it's part of flush sequence */
- if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
- bio_endio(bio, error);
-}
-
-void blk_mq_end_io(struct request *rq, int error)
-{
- struct bio *bio = rq->bio;
- unsigned int bytes = 0;
-
- trace_block_rq_complete(rq->q, rq);
-
- while (bio) {
- struct bio *next = bio->bi_next;
-
- bio->bi_next = NULL;
- bytes += bio->bi_iter.bi_size;
- blk_mq_bio_endio(rq, bio, error);
- bio = next;
- }
-
- blk_account_io_completion(rq, bytes);
+ if (blk_update_request(rq, error, blk_rq_bytes(rq)))
+ return true;
blk_account_io_done(rq);
@@ -328,8 +294,9 @@ void blk_mq_end_io(struct request *rq, int error)
rq->end_io(rq, error);
else
blk_mq_free_request(rq);
+ return false;
}
-EXPORT_SYMBOL(blk_mq_end_io);
+EXPORT_SYMBOL(blk_mq_end_io_partial);
static void __blk_mq_complete_request_remote(void *data)
{
@@ -353,7 +320,7 @@ void __blk_mq_complete_request(struct request *rq)
rq->csd.func = __blk_mq_complete_request_remote;
rq->csd.info = rq;
rq->csd.flags = 0;
- __smp_call_function_single(ctx->cpu, &rq->csd, 0);
+ smp_call_function_single_async(ctx->cpu, &rq->csd);
} else {
rq->q->softirq_done_fn(rq);
}
@@ -547,7 +514,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
LIST_HEAD(rq_list);
int bit, queued;
- if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags)))
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
return;
hctx->run++;
@@ -636,7 +603,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
- if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags)))
+ if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
return;
if (!async)
@@ -656,7 +623,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
queue_for_each_hw_ctx(q, hctx, i) {
if ((!blk_mq_hctx_has_pending(hctx) &&
list_empty_careful(&hctx->dispatch)) ||
- test_bit(BLK_MQ_S_STOPPED, &hctx->flags))
+ test_bit(BLK_MQ_S_STOPPED, &hctx->state))
continue;
blk_mq_run_hw_queue(hctx, async);
@@ -730,61 +697,28 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
blk_mq_add_timer(rq);
}
-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
- bool at_head, bool run_queue)
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+ bool async)
{
+ struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx, *current_ctx;
+ struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
+
+ current_ctx = blk_mq_get_ctx(q);
+ if (!cpu_online(ctx->cpu))
+ rq->mq_ctx = ctx = current_ctx;
- ctx = rq->mq_ctx;
hctx = q->mq_ops->map_queue(q, ctx->cpu);
- if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+ if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+ !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
blk_insert_flush(rq);
} else {
- current_ctx = blk_mq_get_ctx(q);
-
- if (!cpu_online(ctx->cpu)) {
- ctx = current_ctx;
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
- rq->mq_ctx = ctx;
- }
spin_lock(&ctx->lock);
__blk_mq_insert_request(hctx, rq, at_head);
spin_unlock(&ctx->lock);
-
- blk_mq_put_ctx(current_ctx);
}
- if (run_queue)
- __blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
-{
- struct request_queue *q = rq->q;
- struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx, *current_ctx;
-
- current_ctx = blk_mq_get_ctx(q);
-
- ctx = rq->mq_ctx;
- if (!cpu_online(ctx->cpu)) {
- ctx = current_ctx;
- rq->mq_ctx = ctx;
- }
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
- /* ctx->cpu might be offline */
- spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq, false);
- spin_unlock(&ctx->lock);
-
blk_mq_put_ctx(current_ctx);
if (run_queue)
@@ -926,6 +860,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ if (is_sync)
+ rw |= REQ_SYNC;
trace_block_getrq(q, bio, rw);
rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
if (likely(rq))
@@ -1020,6 +956,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
unsigned int cpu)
{
struct blk_mq_hw_ctx *hctx = data;
+ struct request_queue *q = hctx->queue;
struct blk_mq_ctx *ctx;
LIST_HEAD(tmp);
@@ -1029,7 +966,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
/*
* Move ctx entries to new CPU, if this one is going away.
*/
- ctx = __blk_mq_get_ctx(hctx->queue, cpu);
+ ctx = __blk_mq_get_ctx(q, cpu);
spin_lock(&ctx->lock);
if (!list_empty(&ctx->rq_list)) {
@@ -1041,7 +978,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
if (list_empty(&tmp))
return;
- ctx = blk_mq_get_ctx(hctx->queue);
+ ctx = blk_mq_get_ctx(q);
spin_lock(&ctx->lock);
while (!list_empty(&tmp)) {
@@ -1052,14 +989,55 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
list_move_tail(&rq->queuelist, &ctx->rq_list);
}
+ hctx = q->mq_ops->map_queue(q, ctx->cpu);
blk_mq_hctx_mark_pending(hctx, ctx);
spin_unlock(&ctx->lock);
blk_mq_put_ctx(ctx);
+
+ blk_mq_run_hw_queue(hctx, true);
+}
+
+static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
+ int (*init)(void *, struct blk_mq_hw_ctx *,
+ struct request *, unsigned int),
+ void *data)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < hctx->queue_depth; i++) {
+ struct request *rq = hctx->rqs[i];
+
+ ret = init(data, hctx, rq, i);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+int blk_mq_init_commands(struct request_queue *q,
+ int (*init)(void *, struct blk_mq_hw_ctx *,
+ struct request *, unsigned int),
+ void *data)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+ int ret = 0;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ ret = blk_mq_init_hw_commands(hctx, init, data);
+ if (ret)
+ break;
+ }
+
+ return ret;
}
+EXPORT_SYMBOL(blk_mq_init_commands);
-static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
- void (*init)(void *, struct blk_mq_hw_ctx *,
+static void blk_mq_free_hw_commands(struct blk_mq_hw_ctx *hctx,
+ void (*free)(void *, struct blk_mq_hw_ctx *,
struct request *, unsigned int),
void *data)
{
@@ -1068,12 +1046,12 @@ static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
for (i = 0; i < hctx->queue_depth; i++) {
struct request *rq = hctx->rqs[i];
- init(data, hctx, rq, i);
+ free(data, hctx, rq, i);
}
}
-void blk_mq_init_commands(struct request_queue *q,
- void (*init)(void *, struct blk_mq_hw_ctx *,
+void blk_mq_free_commands(struct request_queue *q,
+ void (*free)(void *, struct blk_mq_hw_ctx *,
struct request *, unsigned int),
void *data)
{
@@ -1081,9 +1059,9 @@ void blk_mq_init_commands(struct request_queue *q,
unsigned int i;
queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_init_hw_commands(hctx, init, data);
+ blk_mq_free_hw_commands(hctx, free, data);
}
-EXPORT_SYMBOL(blk_mq_init_commands);
+EXPORT_SYMBOL(blk_mq_free_commands);
static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
{
@@ -1494,6 +1472,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
return NOTIFY_OK;
}
+void blk_mq_disable_hotplug(void)
+{
+ mutex_lock(&all_q_mutex);
+}
+
+void blk_mq_enable_hotplug(void)
+{
+ mutex_unlock(&all_q_mutex);
+}
+
static int __init blk_mq_init(void)
{
blk_mq_cpu_init();
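Among the blk-mq.c changes, blk_mq_init_commands() now takes an int-returning callback and aborts on the first failure, with a new blk_mq_free_commands() walking the same requests for teardown. The early-exit iteration reduces to this shape (plain-C sketch, illustrative names):

    /* Walk items, stop at the first callback failure and report it. */
    static int for_each_item(void **items, unsigned int n,
                             int (*fn)(void *item, unsigned int idx))
    {
            unsigned int i;
            int ret = 0;

            for (i = 0; i < n; i++) {
                    ret = fn(items[i], i);
                    if (ret)
                            break;  /* propagate first error to the caller */
            }

            return ret;
    }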
diff --git a/block/blk-mq.h b/block/blk-mq.h
index ed0035cd458e..ebbe6bac9d61 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -23,7 +23,6 @@ struct blk_mq_ctx {
};
void __blk_mq_complete_request(struct request *rq);
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
void blk_mq_drain_queue(struct request_queue *q);
@@ -40,6 +39,8 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_cpu_init(void);
+void blk_mq_enable_hotplug(void);
+void blk_mq_disable_hotplug(void);
/*
* CPU -> queue mappings
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 57790c1a97eb..53b1737e978d 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -30,8 +30,8 @@ static void blk_done_softirq(struct softirq_action *h)
while (!list_empty(&local_list)) {
struct request *rq;
- rq = list_entry(local_list.next, struct request, csd.list);
- list_del_init(&rq->csd.list);
+ rq = list_entry(local_list.next, struct request, ipi_list);
+ list_del_init(&rq->ipi_list);
rq->q->softirq_done_fn(rq);
}
}
@@ -45,9 +45,9 @@ static void trigger_softirq(void *data)
local_irq_save(flags);
list = this_cpu_ptr(&blk_cpu_done);
- list_add_tail(&rq->csd.list, list);
+ list_add_tail(&rq->ipi_list, list);
- if (list->next == &rq->csd.list)
+ if (list->next == &rq->ipi_list)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_restore(flags);
@@ -65,7 +65,7 @@ static int raise_blk_irq(int cpu, struct request *rq)
data->info = rq;
data->flags = 0;
- __smp_call_function_single(cpu, data, 0);
+ smp_call_function_single_async(cpu, data);
return 0;
}
@@ -136,7 +136,7 @@ void __blk_complete_request(struct request *req)
struct list_head *list;
do_local:
list = this_cpu_ptr(&blk_cpu_done);
- list_add_tail(&req->csd.list, list);
+ list_add_tail(&req->ipi_list, list);
/*
* if the list only contains our just added request,
@@ -144,7 +144,7 @@ do_local:
* entries there, someone already raised the irq but it
* hasn't run yet.
*/
- if (list->next == &req->csd.list)
+ if (list->next == &req->ipi_list)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
} else if (raise_blk_irq(ccpu, req))
goto do_local;
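blk-softirq.c stops reusing csd.list and chains requests on a dedicated ipi_list field, since smp_call_function_single_async() owns the csd while the IPI is in flight. The async cross-CPU call itself follows the shape already visible in the hunks above; a condensed sketch:

    #include <linux/smp.h>

    static void remote_done(void *info)
    {
            /* runs on the target CPU in IPI context */
    }

    /* One csd per in-flight call; it must not be reused until func has run. */
    static void kick_cpu(int cpu, struct call_single_data *csd, void *info)
    {
            csd->func = remote_done;
            csd->info = info;
            csd->flags = 0;
            smp_call_function_single_async(cpu, csd);  /* returns immediately */
    }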
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 1474c3ab7e72..033745cd7fba 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1408,13 +1408,13 @@ static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
}
static int tg_set_conf_u64(struct cgroup_subsys_state *css, struct cftype *cft,
- const char *buf)
+ char *buf)
{
return tg_set_conf(css, cft, buf, true);
}
static int tg_set_conf_uint(struct cgroup_subsys_state *css, struct cftype *cft,
- const char *buf)
+ char *buf)
{
return tg_set_conf(css, cft, buf, false);
}
@@ -1425,28 +1425,24 @@ static struct cftype throtl_files[] = {
.private = offsetof(struct throtl_grp, bps[READ]),
.seq_show = tg_print_conf_u64,
.write_string = tg_set_conf_u64,
- .max_write_len = 256,
},
{
.name = "throttle.write_bps_device",
.private = offsetof(struct throtl_grp, bps[WRITE]),
.seq_show = tg_print_conf_u64,
.write_string = tg_set_conf_u64,
- .max_write_len = 256,
},
{
.name = "throttle.read_iops_device",
.private = offsetof(struct throtl_grp, iops[READ]),
.seq_show = tg_print_conf_uint,
.write_string = tg_set_conf_uint,
- .max_write_len = 256,
},
{
.name = "throttle.write_iops_device",
.private = offsetof(struct throtl_grp, iops[WRITE]),
.seq_show = tg_print_conf_uint,
.write_string = tg_set_conf_uint,
- .max_write_len = 256,
},
{
.name = "throttle.io_service_bytes",
diff --git a/block/blk.h b/block/blk.h
index d23b415b8a28..1d880f1f957f 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -78,7 +78,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
/*
* Internal elevator interface
*/
-#define ELV_ON_HASH(rq) hash_hashed(&(rq)->hash)
+#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
void blk_insert_flush(struct request *rq);
void blk_abort_flushes(struct request_queue *q);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 744833b630c6..e0985f1955e7 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1701,13 +1701,13 @@ static int __cfqg_set_weight_device(struct cgroup_subsys_state *css,
}
static int cfqg_set_weight_device(struct cgroup_subsys_state *css,
- struct cftype *cft, const char *buf)
+ struct cftype *cft, char *buf)
{
return __cfqg_set_weight_device(css, cft, buf, false);
}
static int cfqg_set_leaf_weight_device(struct cgroup_subsys_state *css,
- struct cftype *cft, const char *buf)
+ struct cftype *cft, char *buf)
{
return __cfqg_set_weight_device(css, cft, buf, true);
}
@@ -1838,7 +1838,6 @@ static struct cftype cfq_blkcg_files[] = {
.flags = CFTYPE_ONLY_ON_ROOT,
.seq_show = cfqg_print_leaf_weight_device,
.write_string = cfqg_set_leaf_weight_device,
- .max_write_len = 256,
},
{
.name = "weight",
@@ -1853,7 +1852,6 @@ static struct cftype cfq_blkcg_files[] = {
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = cfqg_print_weight_device,
.write_string = cfqg_set_weight_device,
- .max_write_len = 256,
},
{
.name = "weight",
@@ -1866,7 +1864,6 @@ static struct cftype cfq_blkcg_files[] = {
.name = "leaf_weight_device",
.seq_show = cfqg_print_leaf_weight_device,
.write_string = cfqg_set_leaf_weight_device,
- .max_write_len = 256,
},
{
.name = "leaf_weight",
@@ -2367,10 +2364,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
* reposition in fifo if next is older than rq
*/
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
- time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
+ time_before(next->fifo_time, rq->fifo_time) &&
cfqq == RQ_CFQQ(next)) {
list_move(&rq->queuelist, &next->queuelist);
- rq_set_fifo_time(rq, rq_fifo_time(next));
+ rq->fifo_time = next->fifo_time;
}
if (cfqq->next_rq == next)
@@ -2814,7 +2811,7 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
return NULL;
rq = rq_entry_fifo(cfqq->fifo.next);
- if (time_before(jiffies, rq_fifo_time(rq)))
+ if (time_before(jiffies, rq->fifo_time))
rq = NULL;
cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
@@ -3927,7 +3924,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
cfq_log_cfqq(cfqd, cfqq, "insert_request");
cfq_init_prio_data(cfqq, RQ_CIC(rq));
- rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
+ rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 9ef66406c625..a753df2b3fc2 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
/*
* set expire time and add to fifo list
*/
- rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
+ rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}
@@ -174,9 +174,9 @@ deadline_merged_requests(struct request_queue *q, struct request *req,
* and move into next position (next will be deleted) in fifo
*/
if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
- if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+ if (time_before(next->fifo_time, req->fifo_time)) {
list_move(&req->queuelist, &next->queuelist);
- rq_set_fifo_time(req, rq_fifo_time(next));
+ req->fifo_time = next->fifo_time;
}
}
@@ -230,7 +230,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
/*
* rq is expired!
*/
- if (time_after_eq(jiffies, rq_fifo_time(rq)))
+ if (time_after_eq(jiffies, rq->fifo_time))
return 1;
return 0;
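deadline-iosched.c now stores expiry directly in rq->fifo_time, still comparing with the wrap-safe jiffies macros rather than a plain less-than. A short sketch of that idiom:

    #include <linux/types.h>
    #include <linux/jiffies.h>

    /* True once deadline has passed; correct across jiffies wraparound. */
    static bool deadline_expired(unsigned long deadline)
    {
            return time_after_eq(jiffies, deadline);
    }

    /* Example: arm a 100ms deadline. */
    static unsigned long arm_deadline(void)
    {
            return jiffies + msecs_to_jiffies(100);
    }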
diff --git a/block/elevator.c b/block/elevator.c
index 42c45a7d6714..1e01b66a0b92 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -247,6 +247,7 @@ EXPORT_SYMBOL(elevator_exit);
static inline void __elv_rqhash_del(struct request *rq)
{
hash_del(&rq->hash);
+ rq->cmd_flags &= ~REQ_HASHED;
}
static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -261,6 +262,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)
BUG_ON(ELV_ON_HASH(rq));
hash_add(e->hash, &rq->hash, rq_hash_key(rq));
+ rq->cmd_flags |= REQ_HASHED;
}
static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
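Paired with the blk.h hunk earlier, elevator.c keeps the new REQ_HASHED flag in step with hash membership, so ELV_ON_HASH() tests a flag instead of hlist state whose storage blk-mq may reuse. The invariant as a standalone sketch (invented names): set the flag when adding, clear it when removing, test only the flag.

    #include <linux/types.h>
    #include <linux/hashtable.h>

    #define OBJ_HASHED      (1U << 0)

    struct obj {
            unsigned int flags;
            struct hlist_node node;
    };

    static DEFINE_HASHTABLE(table, 6);

    static void obj_hash_add(struct obj *o, unsigned long key)
    {
            hash_add(table, &o->node, key);
            o->flags |= OBJ_HASHED;         /* flag mirrors membership */
    }

    static void obj_hash_del(struct obj *o)
    {
            hash_del(&o->node);
            o->flags &= ~OBJ_HASHED;
    }

    static bool obj_on_hash(struct obj *o)
    {
            return o->flags & OBJ_HASHED;   /* node storage may be reused */
    }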
diff --git a/block/partitions/atari.h b/block/partitions/atari.h
index fe2d32a89f36..f2ec43bfeec1 100644
--- a/block/partitions/atari.h
+++ b/block/partitions/atari.h
@@ -11,6 +11,8 @@
* by Guenther Kelleter (guenther@pool.informatik.rwth-aachen.de)
*/
+#include <linux/compiler.h>
+
struct partition_info
{
u8 flg; /* bit 0: active; bit 7: bootable */
@@ -29,6 +31,6 @@ struct rootsector
u32 bsl_st; /* start of bad sector list */
u32 bsl_cnt; /* length of bad sector list */
u16 checksum; /* checksum for bootable disks */
-} __attribute__((__packed__));
+} __packed;
int atari_partition(struct parsed_partitions *state);
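The __packed conversions in these partition headers are stylistic: <linux/compiler.h> defines __packed as __attribute__((__packed__)), which forbids padding so on-disk layouts map byte-for-byte. A userspace-compilable illustration of the effect:

    #include <assert.h>
    #include <stdint.h>

    #define __packed __attribute__((__packed__))  /* as in <linux/compiler.h> */

    struct padded  { uint8_t flg; uint32_t st; };           /* typically 8 bytes */
    struct on_disk { uint8_t flg; uint32_t st; } __packed;  /* exactly 5 bytes */

    int main(void)
    {
            /* Packing removes the alignment bytes after 'flg'. */
            static_assert(sizeof(struct on_disk) == 5, "no padding allowed");
            return 0;
    }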
diff --git a/block/partitions/efi.h b/block/partitions/efi.h
index 4efcafba7e64..abd0b19288a6 100644
--- a/block/partitions/efi.h
+++ b/block/partitions/efi.h
@@ -32,6 +32,7 @@
#include <linux/major.h>
#include <linux/string.h>
#include <linux/efi.h>
+#include <linux/compiler.h>
#define MSDOS_MBR_SIGNATURE 0xaa55
#define EFI_PMBR_OSTYPE_EFI 0xEF
@@ -87,13 +88,13 @@ typedef struct _gpt_header {
*
* uint8_t reserved2[ BlockSize - 92 ];
*/
-} __attribute__ ((packed)) gpt_header;
+} __packed gpt_header;
typedef struct _gpt_entry_attributes {
u64 required_to_function:1;
u64 reserved:47;
u64 type_guid_specific:16;
-} __attribute__ ((packed)) gpt_entry_attributes;
+} __packed gpt_entry_attributes;
typedef struct _gpt_entry {
efi_guid_t partition_type_guid;
@@ -102,7 +103,7 @@ typedef struct _gpt_entry {
__le64 ending_lba;
gpt_entry_attributes attributes;
efi_char16_t partition_name[72 / sizeof (efi_char16_t)];
-} __attribute__ ((packed)) gpt_entry;
+} __packed gpt_entry;
typedef struct _gpt_mbr_record {
u8 boot_indicator; /* unused by EFI, set to 0x80 for bootable */
@@ -124,7 +125,7 @@ typedef struct _legacy_mbr {
__le16 unknown;
gpt_mbr_record partition_record[4];
__le16 signature;
-} __attribute__ ((packed)) legacy_mbr;
+} __packed legacy_mbr;
/* Functions */
extern int efi_partition(struct parsed_partitions *state);
diff --git a/block/partitions/karma.c b/block/partitions/karma.c
index 0ea19312706b..9721fa589bb1 100644
--- a/block/partitions/karma.c
+++ b/block/partitions/karma.c
@@ -8,6 +8,7 @@
#include "check.h"
#include "karma.h"
+#include <linux/compiler.h>
int karma_partition(struct parsed_partitions *state)
{
@@ -26,7 +27,7 @@ int karma_partition(struct parsed_partitions *state)
} d_partitions[2];
u8 d_blank[208];
__le16 d_magic;
- } __attribute__((packed)) *label;
+ } __packed *label;
struct d_partition *p;
data = read_part_sector(state, 0, &sect);