author     Mike Christie <michael.christie@oracle.com>  2021-02-27 10:59:57 -0600
committer  Martin K. Petersen <martin.petersen@oracle.com>  2021-03-04 17:37:02 -0500
commit     eb44ce8c8c7d3b45f9204c7f34577960c00d5919
tree       25db69f2e729f3a9a11ed38281d4502394f40be5 /drivers/target/target_core_transport.c
parent     08694199477da412baf1852c6d1bf5fedbd40c7e
scsi: target: core: Add workqueue based cmd submission
loop and vhost/scsi do their target cmd submission from driver
workqueues. This allows them to avoid an issue where the backend may block
waiting for resources like tags/requests, memory/locks, etc.; that would
otherwise end up blocking their entire submission path, and in the case of
vhost-scsi both the submission and completion paths.
This patch adds a helper drivers can use to submit their cmds from a LIO
workqueue. This code will then be extended in the next patches to fix the
plugging of backend devices.
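
For illustration, here is a rough sketch (not from this patch) of how a
fabric driver might use the helper. It assumes the target_init_cmd() /
target_submit_prep() interface added earlier in this series; all my_drv_*
names and fields are hypothetical:

/*
 * Hypothetical fabric driver code, sketched under the assumption that
 * the target_init_cmd()/target_submit_prep() split from earlier in
 * this series is in place.
 */
static void my_drv_queue_cmd(struct my_drv_cmd *dcmd)
{
	struct se_cmd *se_cmd = &dcmd->se_cmd;

	/* Fails if the session is being shut down. */
	if (target_init_cmd(se_cmd, dcmd->se_sess, dcmd->sense_buf,
			    dcmd->lun, dcmd->data_len, dcmd->task_attr,
			    dcmd->data_dir, TARGET_SCF_ACK_KREF) < 0)
		return;

	/* No driver-provided scatterlists in this minimal sketch. */
	if (target_submit_prep(se_cmd, dcmd->cdb, NULL, 0, NULL, 0,
			       NULL, 0, GFP_KERNEL))
		return;

	/* Instead of calling target_submit() inline, defer the possibly
	 * blocking backend submission to the LIO workqueue. */
	target_queue_submission(se_cmd);
}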
We are only converting vhost/loop initially, but the workqueue based
submission will work for other drivers and has similar benefits: the
drivers' main target loops will not end up blocking on some backend
resource.
Link: https://lore.kernel.org/r/20210227170006.5077-17-michael.christie@oracle.com
Tested-by: Laurence Oberman <loberman@redhat.com>
Reviewed-by: Bodo Stroesser <bostroesser@gmail.com>
Signed-off-by: Mike Christie <michael.christie@oracle.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Diffstat (limited to 'drivers/target/target_core_transport.c')
-rw-r--r--  drivers/target/target_core_transport.c | 42
1 file changed, 41 insertions(+), 1 deletion(-)
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index bd3d125a3978..eea7c27dc4cd 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -41,6 +41,7 @@
 #include <trace/events/target.h>
 
 static struct workqueue_struct *target_completion_wq;
+static struct workqueue_struct *target_submission_wq;
 static struct kmem_cache *se_sess_cache;
 struct kmem_cache *se_ua_cache;
 struct kmem_cache *t10_pr_reg_cache;
@@ -129,8 +130,15 @@ int init_se_kmem_caches(void)
 	if (!target_completion_wq)
 		goto out_free_lba_map_mem_cache;
 
+	target_submission_wq = alloc_workqueue("target_submission",
+					       WQ_MEM_RECLAIM, 0);
+	if (!target_submission_wq)
+		goto out_free_completion_wq;
+
 	return 0;
 
+out_free_completion_wq:
+	destroy_workqueue(target_completion_wq);
 out_free_lba_map_mem_cache:
 	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 out_free_lba_map_cache:
@@ -153,6 +161,7 @@ out:
 
 void release_se_kmem_caches(void)
 {
+	destroy_workqueue(target_submission_wq);
 	destroy_workqueue(target_completion_wq);
 	kmem_cache_destroy(se_sess_cache);
 	kmem_cache_destroy(se_ua_cache);
@@ -1382,7 +1391,6 @@ void __target_init_cmd(
 {
 	INIT_LIST_HEAD(&cmd->se_delayed_node);
 	INIT_LIST_HEAD(&cmd->se_qf_node);
-	INIT_LIST_HEAD(&cmd->se_cmd_list);
 	INIT_LIST_HEAD(&cmd->state_list);
 	init_completion(&cmd->t_transport_stop_comp);
 	cmd->free_compl = NULL;
@@ -1799,6 +1807,38 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
 }
 EXPORT_SYMBOL(target_submit_cmd);
 
+void target_queued_submit_work(struct work_struct *work)
+{
+	struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work);
+	struct se_cmd *se_cmd, *next_cmd;
+	struct llist_node *cmd_list;
+
+	cmd_list = llist_del_all(&sq->cmd_list);
+	if (!cmd_list)
+		/* Previous call took what we were queued to submit */
+		return;
+
+	cmd_list = llist_reverse_order(cmd_list);
+	llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list)
+		target_submit(se_cmd);
+}
+
+/**
+ * target_queue_submission - queue the cmd to run on the LIO workqueue
+ * @se_cmd: command descriptor to submit
+ */
+void target_queue_submission(struct se_cmd *se_cmd)
+{
+	struct se_device *se_dev = se_cmd->se_dev;
+	int cpu = se_cmd->cpuid;
+	struct se_cmd_queue *sq;
+
+	sq = &se_dev->queues[cpu].sq;
+	llist_add(&se_cmd->se_cmd_list, &sq->cmd_list);
+	queue_work_on(cpu, target_submission_wq, &sq->work);
+}
+EXPORT_SYMBOL_GPL(target_queue_submission);
+
 static void target_complete_tmr_failure(struct work_struct *work)
 {
 	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
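
For readers new to the lock-free queueing pattern above: queue_work()/
queue_work_on() do nothing if the work is already pending, so one running
work instance may drain entries queued by later producers; that is why
target_queued_submit_work() tolerates an empty list. And since llist is a
LIFO stack, llist_reverse_order() restores submission order before the
commands are handed to target_submit(). Below is a hedged, self-contained
sketch of the same scheme outside of LIO; every name in it (item,
drain_work, demo_*) is invented for illustration and is not part of the
patch.

// Minimal demo module of the llist + workqueue pattern (assumption:
// generic names, single global queue instead of LIO's per-cpu queues).
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/llist.h>
#include <linux/workqueue.h>

struct item {
	struct llist_node node;
	int id;
};

static LLIST_HEAD(queue);
static struct work_struct drain;

/* Consumer: atomically take the whole list and process it. */
static void drain_work(struct work_struct *work)
{
	struct llist_node *list = llist_del_all(&queue);
	struct item *it, *next;

	if (!list)
		return;	/* an earlier run already took our entries */

	/* llist is LIFO; reverse to handle items in submission order. */
	list = llist_reverse_order(list);
	llist_for_each_entry_safe(it, next, list, node) {
		pr_info("processing item %d\n", it->id);
		kfree(it);
	}
}

/* Producer: lock-free push, then kick the work. The push and the
 * queue_work() need no lock even with concurrent producers. */
static int __init demo_init(void)
{
	int i;

	INIT_WORK(&drain, drain_work);

	for (i = 0; i < 4; i++) {
		struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);

		if (!it)
			return -ENOMEM;
		it->id = i;
		llist_add(&it->node, &queue);
		queue_work(system_wq, &drain);
	}
	return 0;
}

static void __exit demo_exit(void)
{
	flush_work(&drain);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");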