author     Martin K. Petersen <martin.petersen@oracle.com>   2023-05-31 11:45:16 -0400
committer  Martin K. Petersen <martin.petersen@oracle.com>   2023-05-31 11:45:16 -0400
commit     2ef23e4b537be3b417080bdade7ef48cf9f95266 (patch)
tree       de29a318b6a70b3c0a98801b15f33107d2e05bdd /drivers/ufs/core
parent     0e5e41ee3d73823d65b33463d557b8b6833b457d (diff)
parent     078f4f4b34d6c2dadabb363d3fc6c84b32927dea (diff)
Merge patch series "ufs: Do not requeue while ungating the clock"
Bart Van Assche <bvanassche@acm.org> says:

In the traces we recorded while testing zoned storage we noticed that UFS
commands are requeued while the clock is being ungated. Command requeueing
makes it harder than necessary to preserve the command order. Hence this
patch series that modifies the SCSI core and also the UFS driver such that
clock ungating does not trigger command requeueing.

Link: https://lore.kernel.org/r/20230529202640.11883-1-bvanassche@acm.org
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
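In practical terms, the UFS-side change below turns int ufshcd_hold(struct ufs_hba *hba, bool async) into void ufshcd_hold(struct ufs_hba *hba): the -EAGAIN/requeue path is gone and callers always get the clocks ungated synchronously. A minimal sketch of the resulting calling convention, modeled on the ufs-sysfs.c hunk in this diff (ufs_example_read_reg() and the include path are illustrative, not part of the patch):

#include <ufs/ufshcd.h>	/* struct ufs_hba, ufshcd_hold(), ufshcd_release(); assumed include path */

/* Hypothetical caller: bracket a register read with hold/release. */
static u32 ufs_example_read_reg(struct ufs_hba *hba, unsigned int reg)
{
	u32 val;

	ufshcd_hold(hba);		/* ungate clocks synchronously; may sleep, cannot fail */
	val = ufshcd_readl(hba, reg);	/* safe: clocks are on at this point */
	ufshcd_release(hba);		/* drop the reference so clock gating can resume */

	return val;
}

Before this series the same caller had to check the return value of ufshcd_hold(hba, false), or pass async=true and requeue on -EAGAIN as ufshcd_queuecommand() did. The SCSI-core side of the series pairs with the queuecommand_may_block flag set at the end of this diff, so that sleeping while ungating the clock in ->queuecommand() is permitted instead of requeueing.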
Diffstat (limited to 'drivers/ufs/core')
-rw-r--r--   drivers/ufs/core/ufs-sysfs.c      |  2
-rw-r--r--   drivers/ufs/core/ufshcd-crypto.c  |  2
-rw-r--r--   drivers/ufs/core/ufshcd-priv.h    |  3
-rw-r--r--   drivers/ufs/core/ufshcd.c         | 87
4 files changed, 30 insertions, 64 deletions
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 883f0e44b54e..cdf3d5f2b77b 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -168,7 +168,7 @@ static ssize_t auto_hibern8_show(struct device *dev,
}
pm_runtime_get_sync(hba->dev);
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c
index 198360fe5e8e..f2c4422cab86 100644
--- a/drivers/ufs/core/ufshcd-crypto.c
+++ b/drivers/ufs/core/ufshcd-crypto.c
@@ -24,7 +24,7 @@ static int ufshcd_program_key(struct ufs_hba *hba,
u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg);
int err = 0;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
if (hba->vops && hba->vops->program_key) {
err = hba->vops->program_key(hba, cfg, slot);
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index d53b93c21a0c..8f58c2169398 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -84,9 +84,6 @@ unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
u8 **buf, bool ascii);
-int ufshcd_hold(struct ufs_hba *hba, bool async);
-void ufshcd_release(struct ufs_hba *hba);
-
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index fdf5073c7c6c..941e613548da 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1189,7 +1189,7 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
bool timeout = false, do_last_check = false;
ktime_t start;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
/*
* Wait for all the outstanding tasks/transfer requests.
@@ -1310,7 +1310,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
}
/* let's not get into low power until clock scaling is completed */
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
out:
return ret;
@@ -1640,7 +1640,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
goto out;
ufshcd_rpm_get_sync(hba);
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
hba->clk_scaling.is_enabled = value;
@@ -1723,7 +1723,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_gating.state == CLKS_ON) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
- goto unblock_reqs;
+ return;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1746,25 +1746,21 @@ static void ufshcd_ungate_work(struct work_struct *work)
}
hba->clk_gating.is_suspended = false;
}
-unblock_reqs:
- ufshcd_scsi_unblock_requests(hba);
}
/**
* ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
* Also, exit from hibern8 mode and set the link as active.
* @hba: per adapter instance
- * @async: This indicates whether caller should ungate clocks asynchronously.
*/
-int ufshcd_hold(struct ufs_hba *hba, bool async)
+void ufshcd_hold(struct ufs_hba *hba)
{
- int rc = 0;
bool flush_result;
unsigned long flags;
if (!ufshcd_is_clkgating_allowed(hba) ||
!hba->clk_gating.is_initialized)
- goto out;
+ return;
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.active_reqs++;
@@ -1781,15 +1777,10 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
- if (async) {
- rc = -EAGAIN;
- hba->clk_gating.active_reqs--;
- break;
- }
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_result = flush_work(&hba->clk_gating.ungate_work);
if (hba->clk_gating.is_suspended && !flush_result)
- goto out;
+ return;
spin_lock_irqsave(hba->host->host_lock, flags);
goto start;
}
@@ -1811,21 +1802,14 @@ start:
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- if (queue_work(hba->clk_gating.clk_gating_workq,
- &hba->clk_gating.ungate_work))
- ufshcd_scsi_block_requests(hba);
+ queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.ungate_work);
/*
* fall through to check if we should wait for this
* work to be done or not.
*/
fallthrough;
case REQ_CLKS_ON:
- if (async) {
- rc = -EAGAIN;
- hba->clk_gating.active_reqs--;
- break;
- }
-
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_work(&hba->clk_gating.ungate_work);
/* Make sure state is CLKS_ON before returning */
@@ -1837,8 +1821,6 @@ start:
break;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
-out:
- return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
@@ -2070,7 +2052,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
ufshcd_remove_clk_gating_sysfs(hba);
/* Ungate the clock if necessary. */
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
hba->clk_gating.is_initialized = false;
ufshcd_release(hba);
@@ -2468,7 +2450,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
return 0;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
@@ -2871,12 +2853,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
- /*
- * Allows the UFS error handler to wait for prior ufshcd_queuecommand()
- * calls.
- */
- rcu_read_lock();
-
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
@@ -2922,13 +2898,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
hba->req_abort_count = 0;
- err = ufshcd_hold(hba, true);
- if (err) {
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out;
- }
- WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
- (hba->clk_gating.state != CLKS_ON));
+ ufshcd_hold(hba);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
@@ -2958,8 +2928,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
ufshcd_send_command(hba, tag, hwq);
out:
- rcu_read_unlock();
-
if (ufs_trigger_eh()) {
unsigned long flags;
@@ -3253,7 +3221,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
BUG_ON(!hba);
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -3327,7 +3295,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
return -EINVAL;
}
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
@@ -3423,7 +3391,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
return -EINVAL;
}
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
@@ -4241,7 +4209,7 @@ int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
uic_cmd.command = UIC_CMD_DME_SET;
uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
uic_cmd.argument3 = mode;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
ufshcd_release(hba);
@@ -4348,7 +4316,7 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
if (update &&
!pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
ufshcd_rpm_get_sync(hba);
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
ufshcd_auto_hibern8_enable(hba);
ufshcd_release(hba);
ufshcd_rpm_put_sync(hba);
@@ -4941,7 +4909,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
int err = 0;
int retries;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -6227,22 +6195,22 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
ufshcd_setup_vreg(hba, true);
ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
if (!ufshcd_is_clkgating_allowed(hba))
ufshcd_setup_clocks(hba, true);
ufshcd_release(hba);
pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
ufshcd_vops_resume(hba, pm_op);
} else {
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
if (ufshcd_is_clkscaling_supported(hba) &&
hba->clk_scaling.is_enabled)
ufshcd_suspend_clkscaling(hba);
ufshcd_clk_scaling_allow(hba, false);
}
ufshcd_scsi_block_requests(hba);
- /* Drain ufshcd_queuecommand() */
- synchronize_rcu();
+ /* Wait for ongoing ufshcd_queuecommand() calls to finish. */
+ blk_mq_wait_quiesce_done(&hba->host->tag_set);
cancel_work_sync(&hba->eeh_work);
}
@@ -6887,7 +6855,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
return PTR_ERR(req);
req->end_io_data = &wait;
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
spin_lock_irqsave(host->host_lock, flags);
@@ -7124,7 +7092,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
cmd_type = DEV_CMD_TYPE_NOP;
fallthrough;
case UPIU_TRANSACTION_QUERY_REQ:
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
desc_buff, buff_len,
@@ -7190,7 +7158,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
u16 ehs_len;
/* Protects use of hba->reserved_slot. */
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
down_read(&hba->clk_scaling_lock);
@@ -7425,7 +7393,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* If command is already aborted/completed, return FAILED. */
if (!(test_bit(tag, &hba->outstanding_reqs))) {
@@ -9416,7 +9384,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* If we can't transition into any of the low power modes
* just gate the clocks.
*/
- ufshcd_hold(hba, false);
+ ufshcd_hold(hba);
hba->clk_gating.is_suspended = true;
if (ufshcd_is_clkscaling_supported(hba))
@@ -10204,6 +10172,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
host->max_channel = UFSHCD_MAX_CHANNEL;
host->unique_id = host->host_no;
host->max_cmd_len = UFS_CDB_SIZE;
+ host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING);
hba->max_pwr_info.is_valid = false;